path: root/net/core
Diffstat (limited to 'net/core')
-rw-r--r--  net/core/datagram.c        |  26
-rw-r--r--  net/core/dev.c             | 139
-rw-r--r--  net/core/ethtool.c         |   2
-rw-r--r--  net/core/iovec.c           |   2
-rw-r--r--  net/core/neighbour.c       |  90
-rw-r--r--  net/core/net-sysfs.c       |   4
-rw-r--r--  net/core/netpoll.c         |  71
-rw-r--r--  net/core/netprio_cgroup.c  |  10
-rw-r--r--  net/core/rtnetlink.c       |  11
-rw-r--r--  net/core/skbuff.c          |   4
-rw-r--r--  net/core/sock.c            |  31
-rw-r--r--  net/core/sysctl_net_core.c |   4
12 files changed, 247 insertions(+), 147 deletions(-)
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 68bbf9f65cb..d3cf12f62c8 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -132,6 +132,8 @@ out_noerr:
  *	__skb_recv_datagram - Receive a datagram skbuff
  *	@sk: socket
  *	@flags: MSG_ flags
+ *	@off: an offset in bytes to peek skb from. Returns an offset
+ *	      within an skb where data actually starts
  *	@peeked: returns non-zero if this packet has been seen before
  *	@err: error code returned
  *
@@ -158,7 +160,7 @@ out_noerr:
  *	the standard around please.
  */
 struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
-				    int *peeked, int *err)
+				    int *peeked, int *off, int *err)
 {
 	struct sk_buff *skb;
 	long timeo;
@@ -180,21 +182,25 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
 		 * However, this function was correct in any case. 8)
 		 */
 		unsigned long cpu_flags;
+		struct sk_buff_head *queue = &sk->sk_receive_queue;
 
-		spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
-		skb = skb_peek(&sk->sk_receive_queue);
-		if (skb) {
+		spin_lock_irqsave(&queue->lock, cpu_flags);
+		skb_queue_walk(queue, skb) {
 			*peeked = skb->peeked;
 			if (flags & MSG_PEEK) {
+				if (*off >= skb->len) {
+					*off -= skb->len;
+					continue;
+				}
 				skb->peeked = 1;
 				atomic_inc(&skb->users);
 			} else
-				__skb_unlink(skb, &sk->sk_receive_queue);
-		}
-		spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);
+				__skb_unlink(skb, queue);
 
-		if (skb)
+			spin_unlock_irqrestore(&queue->lock, cpu_flags);
 			return skb;
+		}
+		spin_unlock_irqrestore(&queue->lock, cpu_flags);
 
 		/* User doesn't want to wait */
 		error = -EAGAIN;
@@ -214,10 +220,10 @@ EXPORT_SYMBOL(__skb_recv_datagram);
 struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
 				  int noblock, int *err)
 {
-	int peeked;
+	int peeked, off = 0;
 
 	return __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
				   &peeked, err);
-				   &peeked, err);
+				   &peeked, &off, err);
 }
 EXPORT_SYMBOL(skb_recv_datagram);
 
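Note on the hunks above: the new *off argument lets MSG_PEEK resume at a byte offset into the receive queue; whole skbs that lie entirely before the offset are skipped, and on return *off has been reduced to an offset inside the returned skb. A minimal caller sketch under stated assumptions (the sk_peek_off field is added to struct sock later in this same patch set; the wrapper name and its error handling are hypothetical, not kernel API):

	/* Hypothetical wrapper: start peeking at the socket's stored peek offset. */
	static struct sk_buff *recv_datagram_at_peek_off(struct sock *sk, unsigned flags,
							 int *peeked, int *err, int *skb_off)
	{
		/* Only meaningful for MSG_PEEK; a negative sk_peek_off means "no offset". */
		int off = ((flags & MSG_PEEK) && sk->sk_peek_off >= 0) ? sk->sk_peek_off : 0;
		struct sk_buff *skb;

		skb = __skb_recv_datagram(sk, flags, peeked, &off, err);
		if (skb)
			*skb_off = off;	/* data of interest starts this far into skb */
		return skb;
	}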
diff --git a/net/core/dev.c b/net/core/dev.c
index 6ca32f6b310..0f3eb7d79a2 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -134,7 +134,7 @@
 #include <linux/inetdevice.h>
 #include <linux/cpu_rmap.h>
 #include <linux/net_tstamp.h>
-#include <linux/jump_label.h>
+#include <linux/static_key.h>
 #include <net/flow_keys.h>
 
 #include "net-sysfs.h"
@@ -446,7 +446,7 @@ void __dev_remove_pack(struct packet_type *pt)
 		}
 	}
 
-	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
+	pr_warn("dev_remove_pack: %p not found\n", pt);
 out:
 	spin_unlock(&ptype_lock);
 }
@@ -848,21 +848,21 @@ EXPORT_SYMBOL(dev_get_by_flags_rcu);
  *	to allow sysfs to work. We also disallow any kind of
  *	whitespace.
  */
-int dev_valid_name(const char *name)
+bool dev_valid_name(const char *name)
 {
 	if (*name == '\0')
-		return 0;
+		return false;
 	if (strlen(name) >= IFNAMSIZ)
-		return 0;
+		return false;
 	if (!strcmp(name, ".") || !strcmp(name, ".."))
-		return 0;
+		return false;
 
 	while (*name) {
 		if (*name == '/' || isspace(*name))
-			return 0;
+			return false;
 		name++;
 	}
-	return 1;
+	return true;
 }
 EXPORT_SYMBOL(dev_valid_name);
 
@@ -1039,8 +1039,7 @@ rollback:
 			memcpy(dev->name, oldname, IFNAMSIZ);
 			goto rollback;
 		} else {
-			printk(KERN_ERR
-			       "%s: name change rollback failed: %d.\n",
+			pr_err("%s: name change rollback failed: %d\n",
 			       dev->name, ret);
 		}
 	}
@@ -1139,9 +1138,8 @@ void dev_load(struct net *net, const char *name)
 	no_module = request_module("netdev-%s", name);
 	if (no_module && capable(CAP_SYS_MODULE)) {
 		if (!request_module("%s", name))
-			pr_err("Loading kernel module for a network device "
-"with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
-"instead\n", name);
+			pr_err("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
+			       name);
 	}
 }
 EXPORT_SYMBOL(dev_load);
@@ -1441,11 +1439,11 @@ int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
 }
 EXPORT_SYMBOL(call_netdevice_notifiers);
 
-static struct jump_label_key netstamp_needed __read_mostly;
+static struct static_key netstamp_needed __read_mostly;
 #ifdef HAVE_JUMP_LABEL
-/* We are not allowed to call jump_label_dec() from irq context
+/* We are not allowed to call static_key_slow_dec() from irq context
  * If net_disable_timestamp() is called from irq context, defer the
- * jump_label_dec() calls.
+ * static_key_slow_dec() calls.
  */
 static atomic_t netstamp_needed_deferred;
 #endif
@@ -1457,12 +1455,12 @@ void net_enable_timestamp(void)
 
 	if (deferred) {
 		while (--deferred)
-			jump_label_dec(&netstamp_needed);
+			static_key_slow_dec(&netstamp_needed);
 		return;
 	}
 #endif
 	WARN_ON(in_interrupt());
-	jump_label_inc(&netstamp_needed);
+	static_key_slow_inc(&netstamp_needed);
 }
 EXPORT_SYMBOL(net_enable_timestamp);
 
@@ -1474,19 +1472,19 @@ void net_disable_timestamp(void)
 		return;
 	}
 #endif
-	jump_label_dec(&netstamp_needed);
+	static_key_slow_dec(&netstamp_needed);
 }
 EXPORT_SYMBOL(net_disable_timestamp);
 
 static inline void net_timestamp_set(struct sk_buff *skb)
 {
 	skb->tstamp.tv64 = 0;
-	if (static_branch(&netstamp_needed))
+	if (static_key_false(&netstamp_needed))
 		__net_timestamp(skb);
 }
 
 #define net_timestamp_check(COND, SKB)			\
-	if (static_branch(&netstamp_needed)) {		\
+	if (static_key_false(&netstamp_needed)) {	\
 		if ((COND) && !(SKB)->tstamp.tv64)	\
 			__net_timestamp(SKB);		\
 	}						\
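The hunks above are part of the tree-wide jump_label -> static_key rename; the semantics are unchanged. A minimal standalone sketch of the renamed API as it is used in this file (the feature name and do_extra_work() are hypothetical, for illustration only):

	#include <linux/static_key.h>

	static struct static_key my_feature __read_mostly;	/* starts out false */

	static void hot_path(void)
	{
		/* Compiles to a patched no-op branch until the key is enabled. */
		if (static_key_false(&my_feature))
			do_extra_work();	/* hypothetical slow-path helper */
	}

	static void feature_enable(void)
	{
		static_key_slow_inc(&my_feature);	/* may sleep; not for irq context */
	}

	static void feature_disable(void)
	{
		static_key_slow_dec(&my_feature);
	}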
@@ -1655,10 +1653,9 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
 		if (skb_network_header(skb2) < skb2->data ||
 		    skb2->network_header > skb2->tail) {
 			if (net_ratelimit())
-				printk(KERN_CRIT "protocol %04x is "
-				       "buggy, dev %s\n",
-				       ntohs(skb2->protocol),
-				       dev->name);
+				pr_crit("protocol %04x is buggy, dev %s\n",
+					ntohs(skb2->protocol),
+					dev->name);
 			skb_reset_network_header(skb2);
 		}
 
@@ -1691,9 +1688,7 @@ static void netif_setup_tc(struct net_device *dev, unsigned int txq)
 
 	/* If TC0 is invalidated disable TC mapping */
 	if (tc->offset + tc->count > txq) {
-		pr_warning("Number of in use tx queues changed "
-			   "invalidating tc mappings. Priority "
-			   "traffic classification disabled!\n");
+		pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
 		dev->num_tc = 0;
 		return;
 	}
@@ -1704,11 +1699,8 @@ static void netif_setup_tc(struct net_device *dev, unsigned int txq)
 
 		tc = &dev->tc_to_txq[q];
 		if (tc->offset + tc->count > txq) {
-			pr_warning("Number of in use tx queues "
-				   "changed. Priority %i to tc "
-				   "mapping %i is no longer valid "
-				   "setting map to 0\n",
-				   i, q);
+			pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
+				i, q);
 			netdev_set_prio_tc_map(dev, i, 0);
 		}
 	}
@@ -2014,8 +2006,7 @@ EXPORT_SYMBOL(skb_gso_segment);
 void netdev_rx_csum_fault(struct net_device *dev)
 {
 	if (net_ratelimit()) {
-		printk(KERN_ERR "%s: hw csum failure.\n",
-		       dev ? dev->name : "<unknown>");
+		pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
 		dump_stack();
 	}
 }
@@ -2332,9 +2323,9 @@ static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
 {
 	if (unlikely(queue_index >= dev->real_num_tx_queues)) {
 		if (net_ratelimit()) {
-			pr_warning("%s selects TX queue %d, but "
-				   "real number of TX queues is %d\n",
-				   dev->name, queue_index, dev->real_num_tx_queues);
+			pr_warn("%s selects TX queue %d, but real number of TX queues is %d\n",
+				dev->name, queue_index,
+				dev->real_num_tx_queues);
 		}
 		return 0;
 	}
@@ -2578,16 +2569,16 @@ int dev_queue_xmit(struct sk_buff *skb)
 			}
 			HARD_TX_UNLOCK(dev, txq);
 			if (net_ratelimit())
-				printk(KERN_CRIT "Virtual device %s asks to "
-				       "queue packet!\n", dev->name);
+				pr_crit("Virtual device %s asks to queue packet!\n",
+					dev->name);
 		} else {
 			/* Recursion is detected! It is possible,
 			 * unfortunately
 			 */
 recursion_alert:
 			if (net_ratelimit())
-				printk(KERN_CRIT "Dead loop on virtual device "
-				       "%s, fix it urgently!\n", dev->name);
+				pr_crit("Dead loop on virtual device %s, fix it urgently!\n",
+					dev->name);
 		}
 	}
 
@@ -2660,7 +2651,7 @@ EXPORT_SYMBOL(__skb_get_rxhash);
 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
 EXPORT_SYMBOL(rps_sock_flow_table);
 
-struct jump_label_key rps_needed __read_mostly;
+struct static_key rps_needed __read_mostly;
 
 static struct rps_dev_flow *
 set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
@@ -2945,7 +2936,7 @@ int netif_rx(struct sk_buff *skb)
 
 	trace_netif_rx(skb);
 #ifdef CONFIG_RPS
-	if (static_branch(&rps_needed)) {
+	if (static_key_false(&rps_needed)) {
 		struct rps_dev_flow voidflow, *rflow = &voidflow;
 		int cpu;
 
@@ -3069,8 +3060,8 @@ static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
 
 	if (unlikely(MAX_RED_LOOP < ttl++)) {
 		if (net_ratelimit())
-			pr_warning( "Redir loop detected Dropping packet (%d->%d)\n",
-				     skb->skb_iif, dev->ifindex);
+			pr_warn("Redir loop detected Dropping packet (%d->%d)\n",
+				skb->skb_iif, dev->ifindex);
 		return TC_ACT_SHOT;
 	}
 
@@ -3309,7 +3300,7 @@ int netif_receive_skb(struct sk_buff *skb)
 		return NET_RX_SUCCESS;
 
 #ifdef CONFIG_RPS
-	if (static_branch(&rps_needed)) {
+	if (static_key_false(&rps_needed)) {
 		struct rps_dev_flow voidflow, *rflow = &voidflow;
 		int cpu, ret;
 
@@ -4497,16 +4488,15 @@ static int __dev_set_promiscuity(struct net_device *dev, int inc)
 			dev->flags &= ~IFF_PROMISC;
 		else {
 			dev->promiscuity -= inc;
-			printk(KERN_WARNING "%s: promiscuity touches roof, "
-			       "set promiscuity failed, promiscuity feature "
-			       "of device might be broken.\n", dev->name);
+			pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
+				dev->name);
 			return -EOVERFLOW;
 		}
 	}
 	if (dev->flags != old_flags) {
-		printk(KERN_INFO "device %s %s promiscuous mode\n",
-		       dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
-		       "left");
+		pr_info("device %s %s promiscuous mode\n",
+			dev->name,
+			dev->flags & IFF_PROMISC ? "entered" : "left");
 		if (audit_enabled) {
 			current_uid_gid(&uid, &gid);
 			audit_log(current->audit_context, GFP_ATOMIC,
@@ -4579,9 +4569,8 @@ int dev_set_allmulti(struct net_device *dev, int inc)
 			dev->flags &= ~IFF_ALLMULTI;
 		else {
 			dev->allmulti -= inc;
-			printk(KERN_WARNING "%s: allmulti touches roof, "
-			       "set allmulti failed, allmulti feature of "
-			       "device might be broken.\n", dev->name);
+			pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
+				dev->name);
 			return -EOVERFLOW;
 		}
 	}
@@ -5238,8 +5227,8 @@ static void rollback_registered_many(struct list_head *head)
 		 * devices and proceed with the remaining.
 		 */
 		if (dev->reg_state == NETREG_UNINITIALIZED) {
-			pr_debug("unregister_netdevice: device %s/%p never "
-				 "was registered\n", dev->name, dev);
+			pr_debug("unregister_netdevice: device %s/%p never was registered\n",
+				 dev->name, dev);
 
 			WARN_ON(1);
 			list_del(&dev->unreg_list);
@@ -5471,7 +5460,7 @@ static int netif_alloc_rx_queues(struct net_device *dev)
 
 	rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
 	if (!rx) {
-		pr_err("netdev: Unable to allocate %u rx queues.\n", count);
+		pr_err("netdev: Unable to allocate %u rx queues\n", count);
 		return -ENOMEM;
 	}
 	dev->_rx = rx;
@@ -5505,8 +5494,7 @@ static int netif_alloc_netdev_queues(struct net_device *dev)
 
 	tx = kcalloc(count, sizeof(struct netdev_queue), GFP_KERNEL);
 	if (!tx) {
-		pr_err("netdev: Unable to allocate %u tx queues.\n",
-		       count);
+		pr_err("netdev: Unable to allocate %u tx queues\n", count);
 		return -ENOMEM;
 	}
 	dev->_tx = tx;
@@ -5765,10 +5753,8 @@ static void netdev_wait_allrefs(struct net_device *dev)
 		refcnt = netdev_refcnt_read(dev);
 
 		if (time_after(jiffies, warning_time + 10 * HZ)) {
-			printk(KERN_EMERG "unregister_netdevice: "
-			       "waiting for %s to become free. Usage "
-			       "count = %d\n",
-			       dev->name, refcnt);
+			pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
+				 dev->name, refcnt);
 			warning_time = jiffies;
 		}
 	}
@@ -5819,7 +5805,7 @@ void netdev_run_todo(void)
 		list_del(&dev->todo_list);
 
 		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
-			printk(KERN_ERR "network todo '%s' but state %d\n",
+			pr_err("network todo '%s' but state %d\n",
 			       dev->name, dev->reg_state);
 			dump_stack();
 			continue;
@@ -5848,12 +5834,12 @@ void netdev_run_todo(void)
 /* Convert net_device_stats to rtnl_link_stats64. They have the same
  * fields in the same order, with only the type differing.
  */
-static void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
-				    const struct net_device_stats *netdev_stats)
+void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
+			     const struct net_device_stats *netdev_stats)
 {
 #if BITS_PER_LONG == 64
 	BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
 	memcpy(stats64, netdev_stats, sizeof(*stats64));
 #else
 	size_t i, n = sizeof(*stats64) / sizeof(u64);
 	const unsigned long *src = (const unsigned long *)netdev_stats;
@@ -5865,6 +5851,7 @@ static void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
 		dst[i] = src[i];
 #endif
 }
+EXPORT_SYMBOL(netdev_stats_to_stats64);
 
 /**
  * dev_get_stats - get network device statistics
@@ -5935,15 +5922,13 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
 	BUG_ON(strlen(name) >= sizeof(dev->name));
 
 	if (txqs < 1) {
-		pr_err("alloc_netdev: Unable to allocate device "
-		       "with zero queues.\n");
+		pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
 		return NULL;
 	}
 
 #ifdef CONFIG_RPS
 	if (rxqs < 1) {
-		pr_err("alloc_netdev: Unable to allocate device "
-		       "with zero RX queues.\n");
+		pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
 		return NULL;
 	}
 #endif
@@ -5959,7 +5944,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
 
 	p = kzalloc(alloc_size, GFP_KERNEL);
 	if (!p) {
-		printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
+		pr_err("alloc_netdev: Unable to allocate device\n");
 		return NULL;
 	}
 
@@ -6492,8 +6477,8 @@ static void __net_exit default_device_exit(struct net *net)
 		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
 		err = dev_change_net_namespace(dev, &init_net, fb_name);
 		if (err) {
-			printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
-			       __func__, dev->name, err);
+			pr_emerg("%s: failed to move %s to init_net: %d\n",
+				 __func__, dev->name, err);
 			BUG();
 		}
 	}
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 3f79db1b612..6d6d7d25caa 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -73,6 +73,8 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN]
 	[NETIF_F_RXCSUM_BIT] =           "rx-checksum",
 	[NETIF_F_NOCACHE_COPY_BIT] =     "tx-nocache-copy",
 	[NETIF_F_LOOPBACK_BIT] =         "loopback",
+	[NETIF_F_RXFCS_BIT] =            "rx-fcs",
+	[NETIF_F_RXALL_BIT] =            "rx-all",
 };
 
 static int ethtool_get_features(struct net_device *dev, void __user *useraddr)
diff --git a/net/core/iovec.c b/net/core/iovec.c
index c40f27e7d20..7e7aeb01de4 100644
--- a/net/core/iovec.c
+++ b/net/core/iovec.c
@@ -35,7 +35,7 @@
  *	in any case.
  */
 
-int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode)
+int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *address, int mode)
 {
 	int size, ct, err;
 
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 2a83914b027..0a68045782d 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -2167,6 +2167,35 @@ nla_put_failure:
 	return -EMSGSIZE;
 }
 
+static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
+			    u32 pid, u32 seq, int type, unsigned int flags,
+			    struct neigh_table *tbl)
+{
+	struct nlmsghdr *nlh;
+	struct ndmsg *ndm;
+
+	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
+	if (nlh == NULL)
+		return -EMSGSIZE;
+
+	ndm = nlmsg_data(nlh);
+	ndm->ndm_family = tbl->family;
+	ndm->ndm_pad1 = 0;
+	ndm->ndm_pad2 = 0;
+	ndm->ndm_flags = pn->flags | NTF_PROXY;
+	ndm->ndm_type = NDA_DST;
+	ndm->ndm_ifindex = pn->dev->ifindex;
+	ndm->ndm_state = NUD_NONE;
+
+	NLA_PUT(skb, NDA_DST, tbl->key_len, pn->key);
+
+	return nlmsg_end(skb, nlh);
+
+nla_put_failure:
+	nlmsg_cancel(skb, nlh);
+	return -EMSGSIZE;
+}
+
 static void neigh_update_notify(struct neighbour *neigh)
 {
 	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
@@ -2216,23 +2245,78 @@ out:
 	return rc;
 }
 
+static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
+			     struct netlink_callback *cb)
+{
+	struct pneigh_entry *n;
+	struct net *net = sock_net(skb->sk);
+	int rc, h, s_h = cb->args[3];
+	int idx, s_idx = idx = cb->args[4];
+
+	read_lock_bh(&tbl->lock);
+
+	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
+		if (h < s_h)
+			continue;
+		if (h > s_h)
+			s_idx = 0;
+		for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
+			if (dev_net(n->dev) != net)
+				continue;
+			if (idx < s_idx)
+				goto next;
+			if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
+					     cb->nlh->nlmsg_seq,
+					     RTM_NEWNEIGH,
+					     NLM_F_MULTI, tbl) <= 0) {
+				read_unlock_bh(&tbl->lock);
+				rc = -1;
+				goto out;
+			}
+		next:
+			idx++;
+		}
+	}
+
+	read_unlock_bh(&tbl->lock);
+	rc = skb->len;
+out:
+	cb->args[3] = h;
+	cb->args[4] = idx;
+	return rc;
+
+}
+
 static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
 {
 	struct neigh_table *tbl;
 	int t, family, s_t;
+	int proxy = 0;
+	int err = 0;
 
 	read_lock(&neigh_tbl_lock);
 	family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
+
+	/* check for full ndmsg structure presence, family member is
+	 * the same for both structures
+	 */
+	if (nlmsg_len(cb->nlh) >= sizeof(struct ndmsg) &&
+	    ((struct ndmsg *) nlmsg_data(cb->nlh))->ndm_flags == NTF_PROXY)
+		proxy = 1;
+
 	s_t = cb->args[0];
 
-	for (tbl = neigh_tables, t = 0; tbl; tbl = tbl->next, t++) {
+	for (tbl = neigh_tables, t = 0; tbl && (err >= 0);
+	     tbl = tbl->next, t++) {
 		if (t < s_t || (family && tbl->family != family))
 			continue;
 		if (t > s_t)
 			memset(&cb->args[1], 0, sizeof(cb->args) -
 						sizeof(cb->args[0]));
-		if (neigh_dump_table(tbl, skb, cb) < 0)
-			break;
+		if (proxy)
+			err = pneigh_dump_table(tbl, skb, cb);
+		else
+			err = neigh_dump_table(tbl, skb, cb);
 	}
 	read_unlock(&neigh_tbl_lock);
 
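With the hunks above, an RTM_GETNEIGH dump request that carries a full struct ndmsg with ndm_flags == NTF_PROXY is answered from the proxy (pneigh) table rather than the ordinary neighbour table. A hedged userspace sketch of such a request (netlink socket setup and send/recv plumbing omitted; this is the kind of message `ip neigh show proxy` would be expected to send):

	#include <string.h>
	#include <linux/netlink.h>
	#include <linux/rtnetlink.h>
	#include <linux/neighbour.h>

	struct proxy_neigh_dump_req {
		struct nlmsghdr nlh;
		struct ndmsg	ndm;
	};

	static void build_proxy_neigh_dump(struct proxy_neigh_dump_req *req,
					   unsigned char family)
	{
		memset(req, 0, sizeof(*req));
		req->nlh.nlmsg_len   = NLMSG_LENGTH(sizeof(struct ndmsg));
		req->nlh.nlmsg_type  = RTM_GETNEIGH;
		req->nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
		req->ndm.ndm_family  = family;		/* e.g. AF_INET */
		req->ndm.ndm_flags   = NTF_PROXY;	/* ask for pneigh entries */
	}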
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index a1727cda03d..495586232aa 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -608,10 +608,10 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue,
 	spin_unlock(&rps_map_lock);
 
 	if (map)
-		jump_label_inc(&rps_needed);
+		static_key_slow_inc(&rps_needed);
 	if (old_map) {
 		kfree_rcu(old_map, rcu);
-		jump_label_dec(&rps_needed);
+		static_key_slow_dec(&rps_needed);
 	}
 	free_cpumask_var(mask);
 	return len;
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index ddefc513b44..3d84fb9d887 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -9,6 +9,8 @@
  * Copyright (C) 2002  Red Hat, Inc.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/moduleparam.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
@@ -45,9 +47,11 @@ static atomic_t trapped;
 #define NETPOLL_RX_ENABLED  1
 #define NETPOLL_RX_DROP     2
 
 #define MAX_SKB_SIZE \
-	(MAX_UDP_CHUNK + sizeof(struct udphdr) + \
-	 sizeof(struct iphdr) + sizeof(struct ethhdr))
+	(sizeof(struct ethhdr) + \
+	 sizeof(struct iphdr) + \
+	 sizeof(struct udphdr) + \
+	 MAX_UDP_CHUNK)
 
 static void zap_completion_queue(void);
 static void arp_reply(struct sk_buff *skb);
@@ -55,6 +59,13 @@ static void arp_reply(struct sk_buff *skb);
 static unsigned int carrier_timeout = 4;
 module_param(carrier_timeout, uint, 0644);
 
+#define np_info(np, fmt, ...)				\
+	pr_info("%s: " fmt, np->name, ##__VA_ARGS__)
+#define np_err(np, fmt, ...)				\
+	pr_err("%s: " fmt, np->name, ##__VA_ARGS__)
+#define np_notice(np, fmt, ...)				\
+	pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)
+
 static void queue_process(struct work_struct *work)
 {
 	struct netpoll_info *npinfo =
@@ -627,18 +638,12 @@ out:
 
 void netpoll_print_options(struct netpoll *np)
 {
-	printk(KERN_INFO "%s: local port %d\n",
-	       np->name, np->local_port);
-	printk(KERN_INFO "%s: local IP %pI4\n",
-	       np->name, &np->local_ip);
-	printk(KERN_INFO "%s: interface '%s'\n",
-	       np->name, np->dev_name);
-	printk(KERN_INFO "%s: remote port %d\n",
-	       np->name, np->remote_port);
-	printk(KERN_INFO "%s: remote IP %pI4\n",
-	       np->name, &np->remote_ip);
-	printk(KERN_INFO "%s: remote ethernet address %pM\n",
-	       np->name, np->remote_mac);
+	np_info(np, "local port %d\n", np->local_port);
+	np_info(np, "local IP %pI4\n", &np->local_ip);
+	np_info(np, "interface '%s'\n", np->dev_name);
+	np_info(np, "remote port %d\n", np->remote_port);
+	np_info(np, "remote IP %pI4\n", &np->remote_ip);
+	np_info(np, "remote ethernet address %pM\n", np->remote_mac);
 }
 EXPORT_SYMBOL(netpoll_print_options);
 
@@ -680,8 +685,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
 			goto parse_failed;
 		*delim = 0;
 		if (*cur == ' ' || *cur == '\t')
-			printk(KERN_INFO "%s: warning: whitespace"
-			       "is not allowed\n", np->name);
+			np_info(np, "warning: whitespace is not allowed\n");
 		np->remote_port = simple_strtol(cur, NULL, 10);
 		cur = delim;
 	}
@@ -705,8 +709,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
 	return 0;
 
  parse_failed:
-	printk(KERN_INFO "%s: couldn't parse config at '%s'!\n",
-	       np->name, cur);
+	np_info(np, "couldn't parse config at '%s'!\n", cur);
 	return -1;
 }
 EXPORT_SYMBOL(netpoll_parse_options);
@@ -721,8 +724,8 @@ int __netpoll_setup(struct netpoll *np)
 
 	if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
 	    !ndev->netdev_ops->ndo_poll_controller) {
-		printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
-		       np->name, np->dev_name);
+		np_err(np, "%s doesn't support polling, aborting\n",
+		       np->dev_name);
 		err = -ENOTSUPP;
 		goto out;
 	}
@@ -785,14 +788,12 @@ int netpoll_setup(struct netpoll *np)
 	if (np->dev_name)
 		ndev = dev_get_by_name(&init_net, np->dev_name);
 	if (!ndev) {
-		printk(KERN_ERR "%s: %s doesn't exist, aborting.\n",
-		       np->name, np->dev_name);
+		np_err(np, "%s doesn't exist, aborting\n", np->dev_name);
 		return -ENODEV;
 	}
 
 	if (ndev->master) {
-		printk(KERN_ERR "%s: %s is a slave device, aborting.\n",
-		       np->name, np->dev_name);
+		np_err(np, "%s is a slave device, aborting\n", np->dev_name);
 		err = -EBUSY;
 		goto put;
 	}
@@ -800,16 +801,14 @@ int netpoll_setup(struct netpoll *np)
 	if (!netif_running(ndev)) {
 		unsigned long atmost, atleast;
 
-		printk(KERN_INFO "%s: device %s not up yet, forcing it\n",
-		       np->name, np->dev_name);
+		np_info(np, "device %s not up yet, forcing it\n", np->dev_name);
 
 		rtnl_lock();
 		err = dev_open(ndev);
 		rtnl_unlock();
 
 		if (err) {
-			printk(KERN_ERR "%s: failed to open %s\n",
-			       np->name, ndev->name);
+			np_err(np, "failed to open %s\n", ndev->name);
 			goto put;
 		}
 
@@ -817,9 +816,7 @@ int netpoll_setup(struct netpoll *np)
 		atmost = jiffies + carrier_timeout * HZ;
 		while (!netif_carrier_ok(ndev)) {
 			if (time_after(jiffies, atmost)) {
-				printk(KERN_NOTICE
-				       "%s: timeout waiting for carrier\n",
-				       np->name);
+				np_notice(np, "timeout waiting for carrier\n");
 				break;
 			}
 			msleep(1);
@@ -831,9 +828,7 @@ int netpoll_setup(struct netpoll *np)
 		 */
 
 		if (time_before(jiffies, atleast)) {
-			printk(KERN_NOTICE "%s: carrier detect appears"
-			       " untrustworthy, waiting 4 seconds\n",
-			       np->name);
+			np_notice(np, "carrier detect appears untrustworthy, waiting 4 seconds\n");
 			msleep(4000);
 		}
 	}
@@ -844,15 +839,15 @@ int netpoll_setup(struct netpoll *np)
 
 		if (!in_dev || !in_dev->ifa_list) {
 			rcu_read_unlock();
-			printk(KERN_ERR "%s: no IP address for %s, aborting\n",
-			       np->name, np->dev_name);
+			np_err(np, "no IP address for %s, aborting\n",
+			       np->dev_name);
 			err = -EDESTADDRREQ;
 			goto put;
 		}
 
 		np->local_ip = in_dev->ifa_list->ifa_local;
 		rcu_read_unlock();
-		printk(KERN_INFO "%s: local IP %pI4\n", np->name, &np->local_ip);
+		np_info(np, "local IP %pI4\n", &np->local_ip);
 	}
 
 	np->dev = ndev;
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
index 4dacc44637e..ba6900f7390 100644
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -23,9 +23,8 @@
 #include <net/sock.h>
 #include <net/netprio_cgroup.h>
 
-static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
-					       struct cgroup *cgrp);
-static void cgrp_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp);
+static struct cgroup_subsys_state *cgrp_create(struct cgroup *cgrp);
+static void cgrp_destroy(struct cgroup *cgrp);
 static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp);
 
 struct cgroup_subsys net_prio_subsys = {
@@ -121,8 +120,7 @@ static void update_netdev_tables(void)
 	rtnl_unlock();
 }
 
-static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
-					       struct cgroup *cgrp)
+static struct cgroup_subsys_state *cgrp_create(struct cgroup *cgrp)
 {
 	struct cgroup_netprio_state *cs;
 	int ret;
@@ -146,7 +144,7 @@ static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
 	return &cs->css;
 }
 
-static void cgrp_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
+static void cgrp_destroy(struct cgroup *cgrp)
 {
 	struct cgroup_netprio_state *cs;
 	struct net_device *dev;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index f965dce6f20..1a63c6efd2e 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1133,6 +1133,8 @@ static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
 				    .len = sizeof(struct ifla_vf_vlan) },
 	[IFLA_VF_TX_RATE]	= { .type = NLA_BINARY,
 				    .len = sizeof(struct ifla_vf_tx_rate) },
+	[IFLA_VF_SPOOFCHK]	= { .type = NLA_BINARY,
+				    .len = sizeof(struct ifla_vf_spoofchk) },
 };
 
 static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {
@@ -2019,8 +2021,13 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 
 		__rtnl_unlock();
 		rtnl = net->rtnl;
-		err = netlink_dump_start(rtnl, skb, nlh, dumpit,
-					 NULL, min_dump_alloc);
+		{
+			struct netlink_dump_control c = {
+				.dump = dumpit,
+				.min_dump_alloc = min_dump_alloc,
+			};
+			err = netlink_dump_start(rtnl, skb, nlh, &c);
+		}
 		rtnl_lock();
 		return err;
 	}
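This hunk follows the interface change that makes netlink_dump_start() take a struct netlink_dump_control instead of separate dump, done and min_dump_alloc arguments. A sketch of the new calling convention for any other dump caller (callback and wrapper names are hypothetical; only the fields used above are shown, and the u16 type for min_dump_alloc is assumed from this caller):

	static int my_dumpit(struct sk_buff *skb, struct netlink_callback *cb);	/* hypothetical */

	static int start_my_dump(struct sock *nlsk, struct sk_buff *skb,
				 struct nlmsghdr *nlh, u16 min_dump_alloc)
	{
		struct netlink_dump_control c = {
			.dump = my_dumpit,
			.min_dump_alloc = min_dump_alloc,
		};

		return netlink_dump_start(nlsk, skb, nlh, &c);
	}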
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index da0c97f2fab..6eb656acdfe 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -592,6 +592,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 	new->rxhash		= old->rxhash;
 	new->ooo_okay		= old->ooo_okay;
 	new->l4_rxhash		= old->l4_rxhash;
+	new->no_fcs		= old->no_fcs;
 #ifdef CONFIG_XFRM
 	new->sp			= secpath_get(old->sp);
 #endif
@@ -2906,7 +2907,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 		nskb->prev = p;
 
 		nskb->data_len += p->len;
-		nskb->truesize += p->len;
+		nskb->truesize += p->truesize;
 		nskb->len += p->len;
 
 		*head = nskb;
@@ -2916,6 +2917,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 		p = nskb;
 
 merge:
+	p->truesize += skb->truesize - len;
 	if (offset > headlen) {
 		unsigned int eat = offset - headlen;
 
diff --git a/net/core/sock.c b/net/core/sock.c
index 02f8dfe320b..9be6d0d6c53 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -111,7 +111,7 @@
 #include <linux/init.h>
 #include <linux/highmem.h>
 #include <linux/user_namespace.h>
-#include <linux/jump_label.h>
+#include <linux/static_key.h>
 #include <linux/memcontrol.h>
 
 #include <asm/uaccess.h>
@@ -160,19 +160,19 @@ int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss)
 out:
 	list_for_each_entry_continue_reverse(proto, &proto_list, node)
 		if (proto->destroy_cgroup)
-			proto->destroy_cgroup(cgrp, ss);
+			proto->destroy_cgroup(cgrp);
 	mutex_unlock(&proto_list_mutex);
 	return ret;
 }
 
-void mem_cgroup_sockets_destroy(struct cgroup *cgrp, struct cgroup_subsys *ss)
+void mem_cgroup_sockets_destroy(struct cgroup *cgrp)
 {
 	struct proto *proto;
 
 	mutex_lock(&proto_list_mutex);
 	list_for_each_entry_reverse(proto, &proto_list, node)
 		if (proto->destroy_cgroup)
-			proto->destroy_cgroup(cgrp, ss);
+			proto->destroy_cgroup(cgrp);
 	mutex_unlock(&proto_list_mutex);
 }
 #endif
@@ -184,7 +184,7 @@ void mem_cgroup_sockets_destroy(struct cgroup *cgrp, struct cgroup_subsys *ss)
 static struct lock_class_key af_family_keys[AF_MAX];
 static struct lock_class_key af_family_slock_keys[AF_MAX];
 
-struct jump_label_key memcg_socket_limit_enabled;
+struct static_key memcg_socket_limit_enabled;
 EXPORT_SYMBOL(memcg_socket_limit_enabled);
 
 /*
@@ -793,6 +793,17 @@ set_rcvbuf:
 		sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
 		break;
 
+	case SO_PEEK_OFF:
+		if (sock->ops->set_peek_off)
+			sock->ops->set_peek_off(sk, val);
+		else
+			ret = -EOPNOTSUPP;
+		break;
+
+	case SO_NOFCS:
+		sock_valbool_flag(sk, SOCK_NOFCS, valbool);
+		break;
+
 	default:
 		ret = -ENOPROTOOPT;
 		break;
@@ -1018,6 +1029,15 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
 		v.val = !!sock_flag(sk, SOCK_WIFI_STATUS);
 		break;
 
+	case SO_PEEK_OFF:
+		if (!sock->ops->set_peek_off)
+			return -EOPNOTSUPP;
+
+		v.val = sk->sk_peek_off;
+		break;
+	case SO_NOFCS:
+		v.val = !!sock_flag(sk, SOCK_NOFCS);
+		break;
 	default:
 		return -ENOPROTOOPT;
 	}
@@ -2092,6 +2112,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
 
 	sk->sk_sndmsg_page = NULL;
 	sk->sk_sndmsg_off = 0;
+	sk->sk_peek_off = -1;
 
 	sk->sk_peer_pid = NULL;
 	sk->sk_peer_cred = NULL;
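The SO_PEEK_OFF hunks above only add the core plumbing: setting the option is forwarded to sock->ops->set_peek_off, so it works only for socket families that wire that hook up (done in separate patches); sock_init_data() defaults sk_peek_off to -1, i.e. disabled. A hedged userspace sketch of the intended semantics (the SO_PEEK_OFF value of 42 is taken from asm-generic/socket.h of this era; adjust if your headers already define it):

	#include <string.h>
	#include <sys/socket.h>

	#ifndef SO_PEEK_OFF
	#define SO_PEEK_OFF 42		/* assumed value from asm-generic/socket.h */
	#endif

	static void peek_in_two_steps(int fd)
	{
		char buf[64];
		int off = 0;

		/* Enable peek-offset tracking; a value of -1 disables it again. */
		setsockopt(fd, SOL_SOCKET, SO_PEEK_OFF, &off, sizeof(off));

		/* Each MSG_PEEK read now starts where the previous peek stopped,
		 * instead of re-reading from the head of the receive queue. */
		recv(fd, buf, sizeof(buf), MSG_PEEK);
		recv(fd, buf, sizeof(buf), MSG_PEEK);
	}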
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index d05559d4d9c..0c285087425 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -69,9 +69,9 @@ static int rps_sock_flow_sysctl(ctl_table *table, int write,
 		if (sock_table != orig_sock_table) {
 			rcu_assign_pointer(rps_sock_flow_table, sock_table);
 			if (sock_table)
-				jump_label_inc(&rps_needed);
+				static_key_slow_inc(&rps_needed);
 			if (orig_sock_table) {
-				jump_label_dec(&rps_needed);
+				static_key_slow_dec(&rps_needed);
 				synchronize_rcu();
 				vfree(orig_sock_table);
 			}