author    Linus Torvalds <torvalds@linux-foundation.org>    2013-07-13 20:42:22 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2013-07-13 20:42:22 -0400
commit    be9c6d9169705504296bdb42ffec8f406691d99f (patch)
tree      60e25d5f33f80a1c09476e770b89ca4661f2e944 /net
parent    03ce3ca4b02bfc1e6567a7851ae231ad3cc9418e (diff)
parent    307f2fb95e9b96b3577916e73d92e104f8f26494 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:
 "Just a bunch of small fixes and tidy ups:

   1) Finish the "busy_poll" renames, from Eliezer Tamir.

   2) Fix RCU stalls in IFB driver, from Ding Tianhong.

   3) Linearize buffers properly in tun/macvtap zerocopy code.

   4) Don't crash on rmmod in vxlan, from Pravin B Shelar.

   5) Spinlock used before init in alx driver, from Maarten Lankhorst.

   6) A sparse warning fix in bnx2x broke TSO checksums, fix from Dmitry
      Kravkov.

   7) Dummy and ifb driver load failure paths can oops, fixes from Tan
      Xiaojun and Ding Tianhong.

   8) Correct MTU calculations in IP tunnels, from Alexander Duyck.

   9) Account all TCP retransmits in SNMP stats properly, from Yuchung
      Cheng.

  10) atl1e and via-rhine do not handle DMA mapping failures properly,
      from Neil Horman.

  11) Various equal-cost multipath route fixes in ipv6 from Hannes
      Frederic Sowa"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (36 commits)
  ipv6: only static routes qualify for equal cost multipathing
  via-rhine: fix dma mapping errors
  atl1e: fix dma mapping warnings
  tcp: account all retransmit failures
  usb/net/r815x: fix cast to restricted __le32
  usb/net/r8152: fix integer overflow in expression
  net: access page->private by using page_private
  net: strict_strtoul is obsolete, use kstrtoul instead
  drivers/net/ieee802154: don't use devm_pinctrl_get_select_default() in probe
  drivers/net/ethernet/cadence: don't use devm_pinctrl_get_select_default() in probe
  drivers/net/can/c_can: don't use devm_pinctrl_get_select_default() in probe
  net/usb: add relative mii functions for r815x
  net/tipc: use %*phC to dump small buffers in hex form
  qlcnic: Adding Maintainers.
  gre: Fix MTU sizing check for gretap tunnels
  pkt_sched: sch_qfq: remove forward declaration of qfq_update_agg_ts
  pkt_sched: sch_qfq: improve efficiency of make_eligible
  gso: Update tunnel segmentation to support Tx checksum offload
  inet: fix spacing in assignment
  ifb: fix oops when loading the ifb failed
  ...
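As a quick illustration of the renamed busy-poll knobs in item 1 above (the sysctls become net.core.busy_poll / net.core.busy_read and the socket option becomes SO_BUSY_POLL): a minimal userspace sketch, not part of this patch set, showing how a socket opts in to busy polling. As the net/core/sock.c hunk below shows, raising the value above the current setting requires CAP_NET_ADMIN.

/* Userspace sketch (illustrative, not from this merge). */
#include <stdio.h>
#include <sys/socket.h>

#ifndef SO_BUSY_POLL
#define SO_BUSY_POLL 46		/* value from asm-generic/socket.h */
#endif

static int enable_busy_poll(int fd, unsigned int usec)
{
	/* usec = how long to busy-poll the device queue per receive call */
	if (setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL, &usec, sizeof(usec)) < 0) {
		perror("setsockopt(SO_BUSY_POLL)");
		return -1;
	}
	return 0;
}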
Diffstat (limited to 'net')
-rw-r--r--  net/9p/trans_common.c       |  10
-rw-r--r--  net/core/datagram.c         |   2
-rw-r--r--  net/core/dev.c              |  14
-rw-r--r--  net/core/skbuff.c           |   6
-rw-r--r--  net/core/sock.c             |   8
-rw-r--r--  net/core/sysctl_net_core.c  |  10
-rw-r--r--  net/dns_resolver/dns_key.c  |   2
-rw-r--r--  net/ipv4/gre_offload.c      |   3
-rw-r--r--  net/ipv4/inet_hashtables.c  |   2
-rw-r--r--  net/ipv4/ip_tunnel.c        |   2
-rw-r--r--  net/ipv4/tcp.c              |   2
-rw-r--r--  net/ipv4/tcp_ipv4.c         |   4
-rw-r--r--  net/ipv4/tcp_output.c       |   7
-rw-r--r--  net/ipv4/udp.c              |   8
-rw-r--r--  net/ipv6/ip6_fib.c          |  15
-rw-r--r--  net/ipv6/route.c            |  72
-rw-r--r--  net/ipv6/tcp_ipv6.c         |   4
-rw-r--r--  net/ipv6/udp.c              |   4
-rw-r--r--  net/sched/sch_qfq.c         | 127
-rw-r--r--  net/socket.c                |   6
-rw-r--r--  net/tipc/ib_media.c         |   8
21 files changed, 170 insertions, 146 deletions
diff --git a/net/9p/trans_common.c b/net/9p/trans_common.c
index de8df957867d..2ee3879161b1 100644
--- a/net/9p/trans_common.c
+++ b/net/9p/trans_common.c
@@ -24,11 +24,11 @@
  */
 void p9_release_pages(struct page **pages, int nr_pages)
 {
-	int i = 0;
-	while (pages[i] && nr_pages--) {
-		put_page(pages[i]);
-		i++;
-	}
+	int i;
+
+	for (i = 0; i < nr_pages; i++)
+		if (pages[i])
+			put_page(pages[i]);
 }
 EXPORT_SYMBOL(p9_release_pages);
 
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 6e9ab31e457e..8ab48cd89559 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -56,7 +56,7 @@
 #include <net/sock.h>
 #include <net/tcp_states.h>
 #include <trace/events/skb.h>
-#include <net/ll_poll.h>
+#include <net/busy_poll.h>
 
 /*
  *	Is a socket 'connection oriented' ?
diff --git a/net/core/dev.c b/net/core/dev.c
index 560dafd83adf..a3d8d44cb7f4 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2481,10 +2481,10 @@ static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
 }
 
 static netdev_features_t harmonize_features(struct sk_buff *skb,
-	__be16 protocol, netdev_features_t features)
+	netdev_features_t features)
 {
 	if (skb->ip_summed != CHECKSUM_NONE &&
-	    !can_checksum_protocol(features, protocol)) {
+	    !can_checksum_protocol(features, skb_network_protocol(skb))) {
 		features &= ~NETIF_F_ALL_CSUM;
 	} else if (illegal_highdma(skb->dev, skb)) {
 		features &= ~NETIF_F_SG;
@@ -2505,20 +2505,18 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
 		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
 		protocol = veh->h_vlan_encapsulated_proto;
 	} else if (!vlan_tx_tag_present(skb)) {
-		return harmonize_features(skb, protocol, features);
+		return harmonize_features(skb, features);
 	}
 
 	features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
					       NETIF_F_HW_VLAN_STAG_TX);
 
-	if (protocol != htons(ETH_P_8021Q) && protocol != htons(ETH_P_8021AD)) {
-		return harmonize_features(skb, protocol, features);
-	} else {
-		features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
-				NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
-				NETIF_F_HW_VLAN_STAG_TX;
-		return harmonize_features(skb, protocol, features);
-	}
+	if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD))
+		features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
+				NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
+				NETIF_F_HW_VLAN_STAG_TX;
+
+	return harmonize_features(skb, features);
 }
 EXPORT_SYMBOL(netif_skb_features);
 
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 724bb7cb173f..20e02d2605ec 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -824,7 +824,7 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
 		page = alloc_page(gfp_mask);
 		if (!page) {
 			while (head) {
-				struct page *next = (struct page *)head->private;
+				struct page *next = (struct page *)page_private(head);
 				put_page(head);
 				head = next;
 			}
@@ -834,7 +834,7 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
 		memcpy(page_address(page),
 		       vaddr + f->page_offset, skb_frag_size(f));
 		kunmap_atomic(vaddr);
-		page->private = (unsigned long)head;
+		set_page_private(page, (unsigned long)head);
 		head = page;
 	}
 
@@ -848,7 +848,7 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
 	for (i = num_frags - 1; i >= 0; i--) {
 		__skb_fill_page_desc(skb, i, head, 0,
 				     skb_shinfo(skb)->frags[i].size);
-		head = (struct page *)head->private;
+		head = (struct page *)page_private(head);
 	}
 
 	skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
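For context on the accessors adopted in the three skbuff.c hunks above, a minimal sketch assuming the usual page_private()/set_page_private() definitions from linux/mm_types.h; push_page() and pop_page() are illustrative helpers, not kernel API. It mirrors how skb_copy_ubufs() threads a singly linked list of pages through page->private.

#include <linux/mm_types.h>	/* struct page, page_private(), set_page_private() */

/* Push a page onto a list chained through page->private. */
static void push_page(struct page **head, struct page *page)
{
	set_page_private(page, (unsigned long)*head);
	*head = page;
}

/* Pop the most recently pushed page, or NULL when the list is empty. */
static struct page *pop_page(struct page **head)
{
	struct page *page = *head;

	if (page)
		*head = (struct page *)page_private(page);
	return page;
}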
diff --git a/net/core/sock.c b/net/core/sock.c
index ab06b719f5b1..548d716c5f62 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -139,7 +139,7 @@
 #include <net/tcp.h>
 #endif
 
-#include <net/ll_poll.h>
+#include <net/busy_poll.h>
 
 static DEFINE_MUTEX(proto_list_mutex);
 static LIST_HEAD(proto_list);
@@ -901,7 +901,7 @@ set_rcvbuf:
 		break;
 
 #ifdef CONFIG_NET_LL_RX_POLL
-	case SO_LL:
+	case SO_BUSY_POLL:
 		/* allow unprivileged users to decrease the value */
 		if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN))
 			ret = -EPERM;
@@ -1171,7 +1171,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
 		break;
 
 #ifdef CONFIG_NET_LL_RX_POLL
-	case SO_LL:
+	case SO_BUSY_POLL:
 		v.val = sk->sk_ll_usec;
 		break;
 #endif
@@ -2294,7 +2294,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
 
 #ifdef CONFIG_NET_LL_RX_POLL
 	sk->sk_napi_id		=	0;
-	sk->sk_ll_usec		=	sysctl_net_ll_read;
+	sk->sk_ll_usec		=	sysctl_net_busy_read;
 #endif
 
 	/*
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index afc677eadd93..660968616637 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -19,7 +19,7 @@
 #include <net/ip.h>
 #include <net/sock.h>
 #include <net/net_ratelimit.h>
-#include <net/ll_poll.h>
+#include <net/busy_poll.h>
 
 static int one = 1;
 
@@ -300,15 +300,15 @@ static struct ctl_table net_core_table[] = {
 #endif /* CONFIG_NET_FLOW_LIMIT */
 #ifdef CONFIG_NET_LL_RX_POLL
 	{
-		.procname	= "low_latency_poll",
-		.data		= &sysctl_net_ll_poll,
+		.procname	= "busy_poll",
+		.data		= &sysctl_net_busy_poll,
 		.maxlen		= sizeof(unsigned int),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec
 	},
 	{
-		.procname	= "low_latency_read",
-		.data		= &sysctl_net_ll_read,
+		.procname	= "busy_read",
+		.data		= &sysctl_net_busy_read,
 		.maxlen		= sizeof(unsigned int),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
index 0a69d0757795..f347a2ca7d7e 100644
--- a/net/dns_resolver/dns_key.c
+++ b/net/dns_resolver/dns_key.c
@@ -118,7 +118,7 @@ dns_resolver_instantiate(struct key *key, struct key_preparsed_payload *prep)
 			if (opt_vlen <= 0)
 				goto bad_option_value;
 
-			ret = strict_strtoul(eq, 10, &derrno);
+			ret = kstrtoul(eq, 10, &derrno);
 			if (ret < 0)
 				goto bad_option_value;
 
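For reference, a minimal sketch of the kstrtoul() calling convention assumed by the change above: it returns 0 on success and a negative errno such as -EINVAL or -ERANGE on failure, unlike the obsolete strict_strtoul() wrapper it replaces. parse_option_value() is an illustrative name, not kernel API.

#include <linux/kernel.h>	/* kstrtoul() */

static int parse_option_value(const char *s, unsigned long *out)
{
	int ret = kstrtoul(s, 10, out);	/* base 10, as in the call above */

	if (ret < 0)			/* -EINVAL or -ERANGE */
		return ret;
	return 0;
}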
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index 775d5b532ece..55e6bfb3a289 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -100,6 +100,9 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
 	}
 	__skb_push(skb, tnl_hlen - ghl);
 
+	skb_reset_inner_headers(skb);
+	skb->encapsulation = 1;
+
 	skb_reset_mac_header(skb);
 	skb_set_network_header(skb, mac_len);
 	skb->mac_len = mac_len;
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 6af375afeeef..7bd8983dbfcf 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -467,7 +467,7 @@ void inet_unhash(struct sock *sk)
 	lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
 
 	spin_lock_bh(lock);
-	done =__sk_nulls_del_node_init_rcu(sk);
+	done = __sk_nulls_del_node_init_rcu(sk);
 	if (done)
 		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
 	spin_unlock_bh(lock);
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 945734b2f209..ca1cb2d5f6e2 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -476,7 +476,7 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
 			    struct rtable *rt, __be16 df)
 {
 	struct ip_tunnel *tunnel = netdev_priv(dev);
-	int pkt_size = skb->len - tunnel->hlen;
+	int pkt_size = skb->len - tunnel->hlen - dev->hard_header_len;
 	int mtu;
 
 	if (df)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 15cbfa94bd8e..5423223e93c2 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -279,7 +279,7 @@
 
 #include <asm/uaccess.h>
 #include <asm/ioctls.h>
-#include <net/ll_poll.h>
+#include <net/busy_poll.h>
 
 int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
 
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 35675e46aff8..b299da5ff499 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -75,7 +75,7 @@
 #include <net/netdma.h>
 #include <net/secure_seq.h>
 #include <net/tcp_memcontrol.h>
-#include <net/ll_poll.h>
+#include <net/busy_poll.h>
 
 #include <linux/inet.h>
 #include <linux/ipv6.h>
@@ -1994,7 +1994,7 @@ process:
 	if (sk_filter(sk, skb))
 		goto discard_and_relse;
 
-	sk_mark_ll(sk, skb);
+	sk_mark_napi_id(sk, skb);
 	skb->dev = NULL;
 
 	bh_lock_sock_nested(sk);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 3d609490f118..92fde8d1aa82 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2407,6 +2407,8 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 		 * see tcp_input.c tcp_sacktag_write_queue().
 		 */
 		TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt;
+	} else {
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
 	}
 	return err;
 }
@@ -2528,10 +2530,9 @@ begin_fwd:
 		if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))
 			continue;
 
-		if (tcp_retransmit_skb(sk, skb)) {
-			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
+		if (tcp_retransmit_skb(sk, skb))
 			return;
-		}
+
 		NET_INC_STATS_BH(sock_net(sk), mib_idx);
 
 		if (tcp_in_cwnd_reduction(sk))
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 6b270e53c207..766e6bab9113 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -109,7 +109,7 @@
 #include <trace/events/udp.h>
 #include <linux/static_key.h>
 #include <trace/events/skb.h>
-#include <net/ll_poll.h>
+#include <net/busy_poll.h>
 #include "udp_impl.h"
 
 struct udp_table udp_table __read_mostly;
@@ -1713,7 +1713,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 	if (sk != NULL) {
 		int ret;
 
-		sk_mark_ll(sk, skb);
+		sk_mark_napi_id(sk, skb);
 		ret = udp_queue_rcv_skb(sk, skb);
 		sock_put(sk);
 
@@ -2323,6 +2323,9 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
 		struct udphdr *uh;
 		int udp_offset = outer_hlen - tnl_hlen;
 
+		skb_reset_inner_headers(skb);
+		skb->encapsulation = 1;
+
 		skb->mac_len = mac_len;
 
 		skb_push(skb, outer_hlen);
@@ -2345,7 +2348,6 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
 			uh->check = CSUM_MANGLED_0;
 
 		}
-		skb->ip_summed = CHECKSUM_NONE;
 		skb->protocol = protocol;
 	} while ((skb = skb->next));
 out:
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 192dd1a0e188..5fc9c7a68d8d 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -632,6 +632,12 @@ insert_above:
 	return ln;
 }
 
+static inline bool rt6_qualify_for_ecmp(struct rt6_info *rt)
+{
+	return (rt->rt6i_flags & (RTF_GATEWAY|RTF_ADDRCONF|RTF_DYNAMIC)) ==
+	       RTF_GATEWAY;
+}
+
 /*
  *	Insert routing information in a node.
  */
@@ -646,6 +652,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
 	int add = (!info->nlh ||
 		   (info->nlh->nlmsg_flags & NLM_F_CREATE));
 	int found = 0;
+	bool rt_can_ecmp = rt6_qualify_for_ecmp(rt);
 
 	ins = &fn->leaf;
 
@@ -691,9 +698,8 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
 			 * To avoid long list, we only had siblings if the
 			 * route have a gateway.
 			 */
-			if (rt->rt6i_flags & RTF_GATEWAY &&
-			    !(rt->rt6i_flags & RTF_EXPIRES) &&
-			    !(iter->rt6i_flags & RTF_EXPIRES))
+			if (rt_can_ecmp &&
+			    rt6_qualify_for_ecmp(iter))
 				rt->rt6i_nsiblings++;
 		}
 
@@ -715,7 +721,8 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
 		/* Find the first route that have the same metric */
 		sibling = fn->leaf;
 		while (sibling) {
-			if (sibling->rt6i_metric == rt->rt6i_metric) {
+			if (sibling->rt6i_metric == rt->rt6i_metric &&
+			    rt6_qualify_for_ecmp(sibling)) {
 				list_add_tail(&rt->rt6i_siblings,
 					      &sibling->rt6i_siblings);
 				break;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index bd5fd7054031..a8c891aa2464 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -65,6 +65,12 @@
 #include <linux/sysctl.h>
 #endif
 
+enum rt6_nud_state {
+	RT6_NUD_FAIL_HARD = -2,
+	RT6_NUD_FAIL_SOFT = -1,
+	RT6_NUD_SUCCEED = 1
+};
+
 static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
 				    const struct in6_addr *dest);
 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
@@ -531,28 +537,29 @@ static inline int rt6_check_dev(struct rt6_info *rt, int oif)
 	return 0;
 }
 
-static inline bool rt6_check_neigh(struct rt6_info *rt)
+static inline enum rt6_nud_state rt6_check_neigh(struct rt6_info *rt)
 {
 	struct neighbour *neigh;
-	bool ret = false;
+	enum rt6_nud_state ret = RT6_NUD_FAIL_HARD;
 
 	if (rt->rt6i_flags & RTF_NONEXTHOP ||
 	    !(rt->rt6i_flags & RTF_GATEWAY))
-		return true;
+		return RT6_NUD_SUCCEED;
 
 	rcu_read_lock_bh();
 	neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
 	if (neigh) {
 		read_lock(&neigh->lock);
 		if (neigh->nud_state & NUD_VALID)
-			ret = true;
+			ret = RT6_NUD_SUCCEED;
 #ifdef CONFIG_IPV6_ROUTER_PREF
 		else if (!(neigh->nud_state & NUD_FAILED))
-			ret = true;
+			ret = RT6_NUD_SUCCEED;
 #endif
 		read_unlock(&neigh->lock);
-	} else if (IS_ENABLED(CONFIG_IPV6_ROUTER_PREF)) {
-		ret = true;
+	} else {
+		ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ?
+		      RT6_NUD_SUCCEED : RT6_NUD_FAIL_SOFT;
 	}
 	rcu_read_unlock_bh();
 
@@ -566,43 +573,52 @@ static int rt6_score_route(struct rt6_info *rt, int oif,
 
 	m = rt6_check_dev(rt, oif);
 	if (!m && (strict & RT6_LOOKUP_F_IFACE))
-		return -1;
+		return RT6_NUD_FAIL_HARD;
 #ifdef CONFIG_IPV6_ROUTER_PREF
 	m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->rt6i_flags)) << 2;
 #endif
-	if (!rt6_check_neigh(rt) && (strict & RT6_LOOKUP_F_REACHABLE))
-		return -1;
+	if (strict & RT6_LOOKUP_F_REACHABLE) {
+		int n = rt6_check_neigh(rt);
+		if (n < 0)
+			return n;
+	}
 	return m;
 }
 
 static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict,
-				   int *mpri, struct rt6_info *match)
+				   int *mpri, struct rt6_info *match,
+				   bool *do_rr)
 {
 	int m;
+	bool match_do_rr = false;
 
 	if (rt6_check_expired(rt))
 		goto out;
 
 	m = rt6_score_route(rt, oif, strict);
-	if (m < 0)
+	if (m == RT6_NUD_FAIL_SOFT && !IS_ENABLED(CONFIG_IPV6_ROUTER_PREF)) {
+		match_do_rr = true;
+		m = 0; /* lowest valid score */
+	} else if (m < 0) {
 		goto out;
+	}
+
+	if (strict & RT6_LOOKUP_F_REACHABLE)
+		rt6_probe(rt);
 
 	if (m > *mpri) {
-		if (strict & RT6_LOOKUP_F_REACHABLE)
-			rt6_probe(match);
+		*do_rr = match_do_rr;
 		*mpri = m;
 		match = rt;
-	} else if (strict & RT6_LOOKUP_F_REACHABLE) {
-		rt6_probe(rt);
 	}
-
 out:
 	return match;
 }
 
 static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
 				     struct rt6_info *rr_head,
-				     u32 metric, int oif, int strict)
+				     u32 metric, int oif, int strict,
+				     bool *do_rr)
 {
 	struct rt6_info *rt, *match;
 	int mpri = -1;
@@ -610,10 +626,10 @@ static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
 	match = NULL;
 	for (rt = rr_head; rt && rt->rt6i_metric == metric;
 	     rt = rt->dst.rt6_next)
-		match = find_match(rt, oif, strict, &mpri, match);
+		match = find_match(rt, oif, strict, &mpri, match, do_rr);
 	for (rt = fn->leaf; rt && rt != rr_head && rt->rt6i_metric == metric;
 	     rt = rt->dst.rt6_next)
-		match = find_match(rt, oif, strict, &mpri, match);
+		match = find_match(rt, oif, strict, &mpri, match, do_rr);
 
 	return match;
 }
@@ -622,15 +638,16 @@ static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict)
 {
 	struct rt6_info *match, *rt0;
 	struct net *net;
+	bool do_rr = false;
 
 	rt0 = fn->rr_ptr;
 	if (!rt0)
 		fn->rr_ptr = rt0 = fn->leaf;
 
-	match = find_rr_leaf(fn, rt0, rt0->rt6i_metric, oif, strict);
+	match = find_rr_leaf(fn, rt0, rt0->rt6i_metric, oif, strict,
+			     &do_rr);
 
-	if (!match &&
-	    (strict & RT6_LOOKUP_F_REACHABLE)) {
+	if (do_rr) {
 		struct rt6_info *next = rt0->dst.rt6_next;
 
 		/* no entries matched; do round-robin */
@@ -1080,10 +1097,13 @@ static void ip6_link_failure(struct sk_buff *skb)
 
 	rt = (struct rt6_info *) skb_dst(skb);
 	if (rt) {
-		if (rt->rt6i_flags & RTF_CACHE)
-			rt6_update_expires(rt, 0);
-		else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT))
+		if (rt->rt6i_flags & RTF_CACHE) {
+			dst_hold(&rt->dst);
+			if (ip6_del_rt(rt))
+				dst_free(&rt->dst);
+		} else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) {
 			rt->rt6i_node->fn_sernum = -1;
+		}
 	}
 }
 
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 5cffa5c3e6b8..6e1649d58533 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -63,7 +63,7 @@
 #include <net/inet_common.h>
 #include <net/secure_seq.h>
 #include <net/tcp_memcontrol.h>
-#include <net/ll_poll.h>
+#include <net/busy_poll.h>
 
 #include <asm/uaccess.h>
 
@@ -1499,7 +1499,7 @@ process:
 	if (sk_filter(sk, skb))
 		goto discard_and_relse;
 
-	sk_mark_ll(sk, skb);
+	sk_mark_napi_id(sk, skb);
 	skb->dev = NULL;
 
 	bh_lock_sock_nested(sk);
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index b6f31437a1f8..f4058150262b 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -46,7 +46,7 @@
 #include <net/ip6_checksum.h>
 #include <net/xfrm.h>
 #include <net/inet6_hashtables.h>
-#include <net/ll_poll.h>
+#include <net/busy_poll.h>
 
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
@@ -844,7 +844,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 	if (sk != NULL) {
 		int ret;
 
-		sk_mark_ll(sk, skb);
+		sk_mark_napi_id(sk, skb);
 		ret = udpv6_queue_rcv_skb(sk, skb);
 		sock_put(sk);
 
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 7c195d972bf0..a7ab323849b6 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -821,7 +821,14 @@ static void qfq_make_eligible(struct qfq_sched *q)
 	unsigned long old_vslot = q->oldV >> q->min_slot_shift;
 
 	if (vslot != old_vslot) {
-		unsigned long mask = (1ULL << fls(vslot ^ old_vslot)) - 1;
+		unsigned long mask;
+		int last_flip_pos = fls(vslot ^ old_vslot);
+
+		if (last_flip_pos > 31) /* higher than the number of groups */
+			mask = ~0UL; /* make all groups eligible */
+		else
+			mask = (1UL << last_flip_pos) - 1;
+
 		qfq_move_groups(q, mask, IR, ER);
 		qfq_move_groups(q, mask, IB, EB);
 	}
@@ -1003,9 +1010,61 @@ static inline void charge_actual_service(struct qfq_aggregate *agg)
 	agg->F = agg->S + (u64)service_received * agg->inv_w;
 }
 
-static inline void qfq_update_agg_ts(struct qfq_sched *q,
-				     struct qfq_aggregate *agg,
-				     enum update_reason reason);
+/* Assign a reasonable start time for a new aggregate in group i.
+ * Admissible values for \hat(F) are multiples of \sigma_i
+ * no greater than V+\sigma_i . Larger values mean that
+ * we had a wraparound so we consider the timestamp to be stale.
+ *
+ * If F is not stale and F >= V then we set S = F.
+ * Otherwise we should assign S = V, but this may violate
+ * the ordering in EB (see [2]). So, if we have groups in ER,
+ * set S to the F_j of the first group j which would be blocking us.
+ * We are guaranteed not to move S backward because
+ * otherwise our group i would still be blocked.
+ */
+static void qfq_update_start(struct qfq_sched *q, struct qfq_aggregate *agg)
+{
+	unsigned long mask;
+	u64 limit, roundedF;
+	int slot_shift = agg->grp->slot_shift;
+
+	roundedF = qfq_round_down(agg->F, slot_shift);
+	limit = qfq_round_down(q->V, slot_shift) + (1ULL << slot_shift);
+
+	if (!qfq_gt(agg->F, q->V) || qfq_gt(roundedF, limit)) {
+		/* timestamp was stale */
+		mask = mask_from(q->bitmaps[ER], agg->grp->index);
+		if (mask) {
+			struct qfq_group *next = qfq_ffs(q, mask);
+			if (qfq_gt(roundedF, next->F)) {
+				if (qfq_gt(limit, next->F))
+					agg->S = next->F;
+				else /* preserve timestamp correctness */
+					agg->S = limit;
+				return;
+			}
+		}
+		agg->S = q->V;
+	} else /* timestamp is not stale */
+		agg->S = agg->F;
+}
+
+/* Update the timestamps of agg before scheduling/rescheduling it for
+ * service. In particular, assign to agg->F its maximum possible
+ * value, i.e., the virtual finish time with which the aggregate
+ * should be labeled if it used all its budget once in service.
+ */
+static inline void
+qfq_update_agg_ts(struct qfq_sched *q,
+		  struct qfq_aggregate *agg, enum update_reason reason)
+{
+	if (reason != requeue)
+		qfq_update_start(q, agg);
+	else /* just charge agg for the service received */
+		agg->S = agg->F;
+
+	agg->F = agg->S + (u64)agg->budgetmax * agg->inv_w;
+}
 
 static void qfq_schedule_agg(struct qfq_sched *q, struct qfq_aggregate *agg);
 
@@ -1128,66 +1187,6 @@ static struct qfq_aggregate *qfq_choose_next_agg(struct qfq_sched *q)
 	return agg;
 }
 
-/*
- * Assign a reasonable start time for a new aggregate in group i.
- * Admissible values for \hat(F) are multiples of \sigma_i
- * no greater than V+\sigma_i . Larger values mean that
- * we had a wraparound so we consider the timestamp to be stale.
- *
- * If F is not stale and F >= V then we set S = F.
- * Otherwise we should assign S = V, but this may violate
- * the ordering in EB (see [2]). So, if we have groups in ER,
- * set S to the F_j of the first group j which would be blocking us.
- * We are guaranteed not to move S backward because
- * otherwise our group i would still be blocked.
- */
-static void qfq_update_start(struct qfq_sched *q, struct qfq_aggregate *agg)
-{
-	unsigned long mask;
-	u64 limit, roundedF;
-	int slot_shift = agg->grp->slot_shift;
-
-	roundedF = qfq_round_down(agg->F, slot_shift);
-	limit = qfq_round_down(q->V, slot_shift) + (1ULL << slot_shift);
-
-	if (!qfq_gt(agg->F, q->V) || qfq_gt(roundedF, limit)) {
-		/* timestamp was stale */
-		mask = mask_from(q->bitmaps[ER], agg->grp->index);
-		if (mask) {
-			struct qfq_group *next = qfq_ffs(q, mask);
-			if (qfq_gt(roundedF, next->F)) {
-				if (qfq_gt(limit, next->F))
-					agg->S = next->F;
-				else /* preserve timestamp correctness */
-					agg->S = limit;
-				return;
-			}
-		}
-		agg->S = q->V;
-	} else /* timestamp is not stale */
-		agg->S = agg->F;
-}
-
-/*
- * Update the timestamps of agg before scheduling/rescheduling it for
- * service. In particular, assign to agg->F its maximum possible
- * value, i.e., the virtual finish time with which the aggregate
- * should be labeled if it used all its budget once in service.
- */
-static inline void
-qfq_update_agg_ts(struct qfq_sched *q,
-		  struct qfq_aggregate *agg, enum update_reason reason)
-{
-	if (reason != requeue)
-		qfq_update_start(q, agg);
-	else /* just charge agg for the service received */
-		agg->S = agg->F;
-
-	agg->F = agg->S + (u64)agg->budgetmax * agg->inv_w;
-}
-
-static void qfq_schedule_agg(struct qfq_sched *, struct qfq_aggregate *);
-
 static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
 	struct qfq_sched *q = qdisc_priv(sch);
diff --git a/net/socket.c b/net/socket.c
index 45afa648364a..829b460acb87 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -104,11 +104,11 @@
 #include <linux/route.h>
 #include <linux/sockios.h>
 #include <linux/atalk.h>
-#include <net/ll_poll.h>
+#include <net/busy_poll.h>
 
 #ifdef CONFIG_NET_LL_RX_POLL
-unsigned int sysctl_net_ll_read __read_mostly;
-unsigned int sysctl_net_ll_poll __read_mostly;
+unsigned int sysctl_net_busy_read __read_mostly;
+unsigned int sysctl_net_busy_poll __read_mostly;
 #endif
 
 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
diff --git a/net/tipc/ib_media.c b/net/tipc/ib_media.c
index ad2e1ec4117e..9934a32bfa87 100644
--- a/net/tipc/ib_media.c
+++ b/net/tipc/ib_media.c
@@ -292,13 +292,7 @@ static int ib_addr2str(struct tipc_media_addr *a, char *str_buf, int str_size)
 	if (str_size < 60)	/* 60 = 19 * strlen("xx:") + strlen("xx\0") */
 		return 1;
 
-	sprintf(str_buf, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:"
-		"%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
-		a->value[0], a->value[1], a->value[2], a->value[3],
-		a->value[4], a->value[5], a->value[6], a->value[7],
-		a->value[8], a->value[9], a->value[10], a->value[11],
-		a->value[12], a->value[13], a->value[14], a->value[15],
-		a->value[16], a->value[17], a->value[18], a->value[19]);
+	sprintf(str_buf, "%20phC", a->value);
 
 	return 0;
 }
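For reference, a minimal sketch of the %phC printk extension adopted above: it dumps a byte buffer in hex with ':' separators, the field width selects how many bytes are printed, and with "%*phC" the width can be passed as an argument. dump_addr_example() is an illustrative helper, not kernel API.

#include <linux/kernel.h>	/* pr_info() and the %ph printk extensions */
#include <linux/types.h>	/* u8 */

static void dump_addr_example(void)
{
	u8 addr[6] = { 0x00, 0x1b, 0x21, 0xab, 0xcd, 0xef };

	pr_info("addr: %6phC\n", addr);		/* prints 00:1b:21:ab:cd:ef */
	pr_info("addr: %*phC\n", 6, addr);	/* same, width passed as an argument */
}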