Diffstat (limited to 'net')
-rw-r--r--  net/batman-adv/gateway_client.c     |  6
-rw-r--r--  net/batman-adv/translation-table.c  |  1
-rw-r--r--  net/ceph/crypto.c                   |  1
-rw-r--r--  net/ceph/crypto.h                   |  3
-rw-r--r--  net/core/dev.c                      |  7
-rw-r--r--  net/core/dst.c                      | 10
-rw-r--r--  net/ipv4/fib_trie.c                 |  2
-rw-r--r--  net/ipv4/ip_output.c                |  8
-rw-r--r--  net/ipv4/tcp_input.c                |  4
-rw-r--r--  net/ipv4/tcp_ipv4.c                 | 14
-rw-r--r--  net/ipv4/tcp_metrics.c              | 12
-rw-r--r--  net/ipv4/tcp_minisocks.c            |  2
-rw-r--r--  net/ipv4/tcp_output.c               |  2
-rw-r--r--  net/ipv4/udp.c                      |  2
-rw-r--r--  net/ipv6/tcp_ipv6.c                 | 28
-rw-r--r--  net/llc/llc_station.c               |  6
-rw-r--r--  net/packet/af_packet.c              |  3
-rw-r--r--  net/sched/act_gact.c                | 14
-rw-r--r--  net/sched/act_ipt.c                 |  7
-rw-r--r--  net/sched/act_pedit.c               |  5
-rw-r--r--  net/sched/act_simple.c              |  5
-rw-r--r--  net/sched/sch_qfq.c                 | 95
22 files changed, 179 insertions, 58 deletions
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index b421cc49d2cd..fc866f2e4528 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -200,11 +200,11 @@ void batadv_gw_election(struct batadv_priv *bat_priv)
 	if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_CLIENT)
 		goto out;
 
-	if (!batadv_atomic_dec_not_zero(&bat_priv->gw_reselect))
-		goto out;
-
 	curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
 
+	if (!batadv_atomic_dec_not_zero(&bat_priv->gw_reselect) && curr_gw)
+		goto out;
+
 	next_gw = batadv_gw_get_best_gw_node(bat_priv);
 
 	if (curr_gw == next_gw)
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index a438f4b582fc..99dd8f75b3ff 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -197,6 +197,7 @@ static void batadv_tt_local_event(struct batadv_priv *bat_priv,
 del:
 		list_del(&entry->list);
 		kfree(entry);
+		kfree(tt_change_node);
 		event_removed = true;
 		goto unlock;
 	}
diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c
index b780cb7947dd..9da7fdd3cd8a 100644
--- a/net/ceph/crypto.c
+++ b/net/ceph/crypto.c
@@ -466,6 +466,7 @@ void ceph_key_destroy(struct key *key) {
 	struct ceph_crypto_key *ckey = key->payload.data;
 
 	ceph_crypto_key_destroy(ckey);
+	kfree(ckey);
 }
 
 struct key_type key_type_ceph = {
diff --git a/net/ceph/crypto.h b/net/ceph/crypto.h
index 1919d1550d75..3572dc518bc9 100644
--- a/net/ceph/crypto.h
+++ b/net/ceph/crypto.h
@@ -16,7 +16,8 @@ struct ceph_crypto_key {
 
 static inline void ceph_crypto_key_destroy(struct ceph_crypto_key *key)
 {
-	kfree(key->key);
+	if (key)
+		kfree(key->key);
 }
 
 extern int ceph_crypto_key_clone(struct ceph_crypto_key *dst,
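
The two ceph hunks above pair a NULL guard in the destroy helper with freeing the key container itself in ceph_key_destroy(). A minimal userspace sketch of the same ownership idiom follows; the demo_key names are illustrative, not ceph code, and note that kfree() and free() both accept NULL, so the guard only protects the key->key dereference.

#include <stdlib.h>

/* Illustrative analogue of the ceph pattern: the destroy helper tolerates
 * a NULL object (free(NULL) is a no-op, but k->key must not be
 * dereferenced through a NULL k), and the owner also frees the container. */
struct demo_key {
	void *key;	/* secret bytes owned by this struct */
	size_t len;
};

static void demo_key_destroy(struct demo_key *k)
{
	if (k)			/* guard the k->key dereference */
		free(k->key);
}

static void demo_key_release(struct demo_key *k)
{
	demo_key_destroy(k);	/* free the payload ... */
	free(k);		/* ... and the container, as ceph_key_destroy() now does */
}

int main(void)
{
	struct demo_key *k = calloc(1, sizeof(*k));

	if (k)
		k->key = malloc(16);
	demo_key_release(k);	/* safe even if either allocation failed */
	return 0;
}
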
diff --git a/net/core/dev.c b/net/core/dev.c
index f91abf800161..a39354ee1432 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1055,6 +1055,8 @@ rollback:
  */
 int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
 {
+	char *new_ifalias;
+
 	ASSERT_RTNL();
 
 	if (len >= IFALIASZ)
@@ -1068,9 +1070,10 @@ int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
 		return 0;
 	}
 
-	dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
-	if (!dev->ifalias)
+	new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
+	if (!new_ifalias)
 		return -ENOMEM;
+	dev->ifalias = new_ifalias;
 
 	strlcpy(dev->ifalias, alias, len+1);
 	return len;
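
The dev_set_alias() fix above is the standard krealloc() pattern: keep the result in a temporary so that, if the allocation fails, the old buffer is neither lost nor leaked. A userspace sketch of the same idea with realloc() (an illustrative helper, not kernel code):

#include <stdlib.h>
#include <string.h>

/* Resize *bufp to hold len bytes plus a terminator and copy alias in.
 * On allocation failure the original buffer in *bufp is left intact. */
static int set_alias(char **bufp, const char *alias, size_t len)
{
	char *tmp = realloc(*bufp, len + 1);	/* old block survives if this fails */

	if (!tmp)
		return -1;
	*bufp = tmp;
	memcpy(*bufp, alias, len);
	(*bufp)[len] = '\0';
	return 0;
}

int main(void)
{
	char *alias = NULL;

	if (set_alias(&alias, "uplink0", 7) == 0)
		free(alias);
	return 0;
}

Writing the result straight back into the only remaining pointer, as the old code did with dev->ifalias, would overwrite it with NULL on failure and leak the original allocation.
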
diff --git a/net/core/dst.c b/net/core/dst.c
index 069d51d29414..56d63612e1e4 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -149,7 +149,15 @@ int dst_discard(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(dst_discard);
 
-const u32 dst_default_metrics[RTAX_MAX];
+const u32 dst_default_metrics[RTAX_MAX + 1] = {
+	/* This initializer is needed to force linker to place this variable
+	 * into const section. Otherwise it might end into bss section.
+	 * We really want to avoid false sharing on this variable, and catch
+	 * any writes on it.
+	 */
+	[RTAX_MAX] = 0xdeadbeef,
+};
+
 
 void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
 		int initial_ref, int initial_obsolete, unsigned short flags)
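
The dst_default_metrics change relies on a linker-placement detail: a const array with no initializer is zero-initialized and is typically emitted to .bss, where it sits next to writable data (false sharing) and where a stray write goes unnoticed; giving one element a non-zero value forces the object into a read-only data section. A small standalone C file to illustrate the difference; placement can be inspected with nm or objdump, and the exact behaviour depends on the toolchain.

/* demo.c -- compare section placement of const arrays.
 * Build and inspect:  cc -c demo.c && nm demo.o
 * 'b'/'B' marks .bss symbols, 'r'/'R' marks read-only data. */

/* const, but zero-initialized: the compiler may still put it in .bss. */
const unsigned int zero_init_metrics[16];

/* One non-zero designated initializer forces a read-only data section,
 * so accidental writes fault instead of silently landing in .bss. */
const unsigned int pinned_metrics[16] = {
	[15] = 0xdeadbeef,
};
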
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index f0cdb30921c0..57bd978483e1 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -367,7 +367,7 @@ static void __leaf_free_rcu(struct rcu_head *head)
 
 static inline void free_leaf(struct leaf *l)
 {
-	call_rcu_bh(&l->rcu, __leaf_free_rcu);
+	call_rcu(&l->rcu, __leaf_free_rcu);
 }
 
 static inline void free_leaf_info(struct leaf_info *leaf)
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index ba39a52d18c1..147ccc3e93db 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -197,7 +197,7 @@ static inline int ip_finish_output2(struct sk_buff *skb)
 	neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
 	if (unlikely(!neigh))
 		neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
-	if (neigh) {
+	if (!IS_ERR(neigh)) {
 		int res = dst_neigh_output(dst, neigh, skb);
 
 		rcu_read_unlock_bh();
@@ -1366,9 +1366,8 @@ out:
 	return skb;
 }
 
-int ip_send_skb(struct sk_buff *skb)
+int ip_send_skb(struct net *net, struct sk_buff *skb)
 {
-	struct net *net = sock_net(skb->sk);
 	int err;
 
 	err = ip_local_out(skb);
@@ -1391,7 +1390,7 @@ int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4)
 		return 0;
 
 	/* Netfilter gets whole the not fragmented skb. */
-	return ip_send_skb(skb);
+	return ip_send_skb(sock_net(sk), skb);
 }
 
 /*
@@ -1536,6 +1535,7 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
 			  arg->csumoffset) = csum_fold(csum_add(nskb->csum,
 								arg->csum));
 		nskb->ip_summed = CHECKSUM_NONE;
+		skb_orphan(nskb);
 		skb_set_queue_mapping(nskb, skb_get_queue_mapping(skb));
 		ip_push_pending_frames(sk, &fl4);
 	}
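
The ip_finish_output2() hunk matters because __neigh_create() reports failure with an error pointer rather than NULL, so the old "if (neigh)" test treated ERR_PTR values as valid neighbours; the fix checks !IS_ERR(neigh). For readers unfamiliar with the convention, here is a simplified userspace rendition of the ERR_PTR/IS_ERR encoding (the real kernel macros live in include/linux/err.h):

#include <stdio.h>
#include <errno.h>

/* Simplified take on the kernel's ERR_PTR/IS_ERR scheme: small negative
 * errno values are packed into the top of the address space, a range no
 * valid pointer occupies. */
#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)      { return (void *)error; }
static inline long  PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int   IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *create_thing(int fail)
{
	return fail ? ERR_PTR(-ENOMEM) : (void *)"ok";
}

int main(void)
{
	void *p = create_thing(1);

	if (!IS_ERR(p))		/* a plain 'if (p)' would wrongly take this branch */
		puts("valid pointer");
	else
		printf("error %ld\n", PTR_ERR(p));
	return 0;
}
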
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 2fd2bc9e3c64..85308b90df80 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5392,6 +5392,8 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
+	if (unlikely(sk->sk_rx_dst == NULL))
+		inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb);
 	/*
 	 *	Header prediction.
 	 *	The code loosely follows the one in the famous
@@ -5605,7 +5607,7 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
 	tcp_set_state(sk, TCP_ESTABLISHED);
 
 	if (skb != NULL) {
-		inet_sk_rx_dst_set(sk, skb);
+		icsk->icsk_af_ops->sk_rx_dst_set(sk, skb);
 		security_inet_conn_established(sk, skb);
 	}
 
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 42b2a6a73092..767823764016 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1627,9 +1627,6 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
 			sk->sk_rx_dst = NULL;
 		}
 	}
-	if (unlikely(sk->sk_rx_dst == NULL))
-		inet_sk_rx_dst_set(sk, skb);
-
 	if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
 		rsk = sk;
 		goto reset;
@@ -1872,10 +1869,21 @@ static struct timewait_sock_ops tcp_timewait_sock_ops = {
 	.twsk_destructor= tcp_twsk_destructor,
 };
 
+void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
+{
+	struct dst_entry *dst = skb_dst(skb);
+
+	dst_hold(dst);
+	sk->sk_rx_dst = dst;
+	inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
+}
+EXPORT_SYMBOL(inet_sk_rx_dst_set);
+
 const struct inet_connection_sock_af_ops ipv4_specific = {
 	.queue_xmit	   = ip_queue_xmit,
 	.send_check	   = tcp_v4_send_check,
 	.rebuild_header	   = inet_sk_rebuild_header,
+	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
 	.conn_request	   = tcp_v4_conn_request,
 	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
 	.net_header_len	   = sizeof(struct iphdr),
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index 2288a6399e1e..0abe67bb4d3a 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -731,6 +731,18 @@ static int __net_init tcp_net_metrics_init(struct net *net)
 
 static void __net_exit tcp_net_metrics_exit(struct net *net)
 {
+	unsigned int i;
+
+	for (i = 0; i < (1U << net->ipv4.tcp_metrics_hash_log) ; i++) {
+		struct tcp_metrics_block *tm, *next;
+
+		tm = rcu_dereference_protected(net->ipv4.tcp_metrics_hash[i].chain, 1);
+		while (tm) {
+			next = rcu_dereference_protected(tm->tcpm_next, 1);
+			kfree(tm);
+			tm = next;
+		}
+	}
 	kfree(net->ipv4.tcp_metrics_hash);
 }
 
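
The tcp_metrics teardown added above is the usual way to free a singly linked chain: capture the next pointer before kfree() so freed memory is never dereferenced (the rcu_dereference_protected(..., 1) form is acceptable there because nothing else can reach the table while the namespace exits). A plain C sketch of the same walk over a bucket array, with generic names rather than the kernel structures:

#include <stdlib.h>

struct node {
	struct node *next;
	int payload;
};

/* Free every node in each bucket; read 'next' before freeing the node. */
static void hash_free(struct node **buckets, unsigned int nbuckets)
{
	unsigned int i;

	for (i = 0; i < nbuckets; i++) {
		struct node *n = buckets[i], *next;

		while (n) {
			next = n->next;	/* must be read before free(n) */
			free(n);
			n = next;
		}
	}
	free(buckets);
}

int main(void)
{
	struct node **tbl = calloc(4, sizeof(*tbl));

	if (tbl)
		hash_free(tbl, 4);
	return 0;
}
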
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 232a90c3ec86..d9c9dcef2de3 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -387,7 +387,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
 		struct tcp_sock *oldtp = tcp_sk(sk);
 		struct tcp_cookie_values *oldcvp = oldtp->cookie_values;
 
-		inet_sk_rx_dst_set(newsk, skb);
+		newicsk->icsk_af_ops->sk_rx_dst_set(newsk, skb);
 
 		/* TCP Cookie Transactions require space for the cookie pair,
 		 * as it differs for each connection. There is no need to
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index a7b3ec9b6c3e..20dfd892c86f 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -940,7 +940,7 @@ void __init tcp_tasklet_init(void)
  * We cant xmit new skbs from this context, as we might already
  * hold qdisc lock.
  */
-void tcp_wfree(struct sk_buff *skb)
+static void tcp_wfree(struct sk_buff *skb)
 {
 	struct sock *sk = skb->sk;
 	struct tcp_sock *tp = tcp_sk(sk);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index b4c3582a991f..6f6d1aca3c3d 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -758,7 +758,7 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4)
 		uh->check = CSUM_MANGLED_0;
 
 send:
-	err = ip_send_skb(skb);
+	err = ip_send_skb(sock_net(sk), skb);
 	if (err) {
 		if (err == -ENOBUFS && !inet->recverr) {
 			UDP_INC_STATS_USER(sock_net(sk),
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index c66b90f71c9b..bb9ce2b2f377 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1447,7 +1447,17 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
 		opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC));
 
 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
+		struct dst_entry *dst = sk->sk_rx_dst;
+
 		sock_rps_save_rxhash(sk, skb);
+		if (dst) {
+			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
+			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
+				dst_release(dst);
+				sk->sk_rx_dst = NULL;
+			}
+		}
+
 		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
 			goto reset;
 		if (opt_skb)
@@ -1705,9 +1715,9 @@ static void tcp_v6_early_demux(struct sk_buff *skb)
 			struct dst_entry *dst = sk->sk_rx_dst;
 			struct inet_sock *icsk = inet_sk(sk);
 			if (dst)
-				dst = dst_check(dst, 0);
+				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
 			if (dst &&
-			    icsk->rx_dst_ifindex == inet6_iif(skb))
+			    icsk->rx_dst_ifindex == skb->skb_iif)
 				skb_dst_set_noref(skb, dst);
 		}
 	}
@@ -1719,10 +1729,23 @@ static struct timewait_sock_ops tcp6_timewait_sock_ops = {
 	.twsk_destructor= tcp_twsk_destructor,
 };
 
+static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
+{
+	struct dst_entry *dst = skb_dst(skb);
+	const struct rt6_info *rt = (const struct rt6_info *)dst;
+
+	dst_hold(dst);
+	sk->sk_rx_dst = dst;
+	inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
+	if (rt->rt6i_node)
+		inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
+}
+
 static const struct inet_connection_sock_af_ops ipv6_specific = {
 	.queue_xmit	   = inet6_csk_xmit,
 	.send_check	   = tcp_v6_send_check,
 	.rebuild_header	   = inet6_sk_rebuild_header,
+	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
 	.conn_request	   = tcp_v6_conn_request,
 	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
 	.net_header_len	   = sizeof(struct ipv6hdr),
@@ -1754,6 +1777,7 @@ static const struct inet_connection_sock_af_ops ipv6_mapped = {
 	.queue_xmit	   = ip_queue_xmit,
 	.send_check	   = tcp_v4_send_check,
 	.rebuild_header	   = inet_sk_rebuild_header,
+	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
 	.conn_request	   = tcp_v6_conn_request,
 	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
 	.net_header_len	   = sizeof(struct iphdr),
diff --git a/net/llc/llc_station.c b/net/llc/llc_station.c
index 39a8d8924b9c..6828e39ec2ec 100644
--- a/net/llc/llc_station.c
+++ b/net/llc/llc_station.c
@@ -268,7 +268,7 @@ static int llc_station_ac_send_null_dsap_xid_c(struct sk_buff *skb)
 out:
 	return rc;
 free:
-	kfree_skb(skb);
+	kfree_skb(nskb);
 	goto out;
 }
 
@@ -293,7 +293,7 @@ static int llc_station_ac_send_xid_r(struct sk_buff *skb)
 out:
 	return rc;
 free:
-	kfree_skb(skb);
+	kfree_skb(nskb);
 	goto out;
 }
 
@@ -322,7 +322,7 @@ static int llc_station_ac_send_test_r(struct sk_buff *skb)
 out:
 	return rc;
 free:
-	kfree_skb(skb);
+	kfree_skb(nskb);
 	goto out;
 }
 
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index ceaca7c134a0..8ac890a1a4c0 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1079,7 +1079,7 @@ static void *packet_current_rx_frame(struct packet_sock *po,
 	default:
 		WARN(1, "TPACKET version not supported\n");
 		BUG();
-		return 0;
+		return NULL;
 	}
 }
 
@@ -1936,7 +1936,6 @@ static void tpacket_destruct_skb(struct sk_buff *skb)
 
 	if (likely(po->tx_ring.pg_vec)) {
 		ph = skb_shinfo(skb)->destructor_arg;
-		BUG_ON(__packet_get_status(po, ph) != TP_STATUS_SENDING);
 		BUG_ON(atomic_read(&po->tx_ring.pending) == 0);
 		atomic_dec(&po->tx_ring.pending);
 		__packet_set_status(po, ph, TP_STATUS_AVAILABLE);
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index f10fb8256442..05d60859d8e3 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -67,6 +67,9 @@ static int tcf_gact_init(struct nlattr *nla, struct nlattr *est,
 	struct tcf_common *pc;
 	int ret = 0;
 	int err;
+#ifdef CONFIG_GACT_PROB
+	struct tc_gact_p *p_parm = NULL;
+#endif
 
 	if (nla == NULL)
 		return -EINVAL;
@@ -82,6 +85,12 @@ static int tcf_gact_init(struct nlattr *nla, struct nlattr *est,
 #ifndef CONFIG_GACT_PROB
 	if (tb[TCA_GACT_PROB] != NULL)
 		return -EOPNOTSUPP;
+#else
+	if (tb[TCA_GACT_PROB]) {
+		p_parm = nla_data(tb[TCA_GACT_PROB]);
+		if (p_parm->ptype >= MAX_RAND)
+			return -EINVAL;
+	}
 #endif
 
 	pc = tcf_hash_check(parm->index, a, bind, &gact_hash_info);
@@ -103,8 +112,7 @@ static int tcf_gact_init(struct nlattr *nla, struct nlattr *est,
 	spin_lock_bh(&gact->tcf_lock);
 	gact->tcf_action = parm->action;
 #ifdef CONFIG_GACT_PROB
-	if (tb[TCA_GACT_PROB] != NULL) {
-		struct tc_gact_p *p_parm = nla_data(tb[TCA_GACT_PROB]);
+	if (p_parm) {
 		gact->tcfg_paction = p_parm->paction;
 		gact->tcfg_pval    = p_parm->pval;
 		gact->tcfg_ptype   = p_parm->ptype;
@@ -133,7 +141,7 @@ static int tcf_gact(struct sk_buff *skb, const struct tc_action *a,
 
 	spin_lock(&gact->tcf_lock);
 #ifdef CONFIG_GACT_PROB
-	if (gact->tcfg_ptype && gact_rand[gact->tcfg_ptype] != NULL)
+	if (gact->tcfg_ptype)
 		action = gact_rand[gact->tcfg_ptype](gact);
 	else
 		action = gact->tcf_action;
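
The act_gact change moves validation of the netlink-supplied ptype to configuration time, rejecting anything >= MAX_RAND, which is what lets the packet path drop the gact_rand[...] != NULL check. The general pattern is to validate an untrusted index once, before it is ever used to pick a handler from a table; a standalone C sketch (handler names are made up):

#include <stdio.h>
#include <stddef.h>

typedef int (*handler_fn)(int arg);

static int h_netrand(int arg) { return arg ^ 0x5a; }
static int h_determ(int arg)  { return arg + 1; }

/* Handler table: the index comes from an untrusted source. */
static handler_fn handlers[] = { h_netrand, h_determ };
#define MAX_HANDLER (sizeof(handlers) / sizeof(handlers[0]))

/* Validate once at configuration time ... */
static int config_set_type(unsigned int type, unsigned int *saved)
{
	if (type >= MAX_HANDLER)
		return -1;	/* reject bad input up front */
	*saved = type;
	return 0;
}

/* ... so the hot path can dispatch without further checks. */
static int dispatch(unsigned int saved_type, int arg)
{
	return handlers[saved_type](arg);
}

int main(void)
{
	unsigned int type;

	if (config_set_type(1, &type) == 0)
		printf("%d\n", dispatch(type, 41));
	return 0;
}
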
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 60e281ad0f07..58fb3c7aab9e 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -185,7 +185,12 @@ err3:
 err2:
 	kfree(tname);
 err1:
-	kfree(pc);
+	if (ret == ACT_P_CREATED) {
+		if (est)
+			gen_kill_estimator(&pc->tcfc_bstats,
+					   &pc->tcfc_rate_est);
+		kfree_rcu(pc, tcfc_rcu);
+	}
 	return err;
 }
 
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 26aa2f6ce257..45c53ab067a6 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -74,7 +74,10 @@ static int tcf_pedit_init(struct nlattr *nla, struct nlattr *est,
 		p = to_pedit(pc);
 		keys = kmalloc(ksize, GFP_KERNEL);
 		if (keys == NULL) {
-			kfree(pc);
+			if (est)
+				gen_kill_estimator(&pc->tcfc_bstats,
+						   &pc->tcfc_rate_est);
+			kfree_rcu(pc, tcfc_rcu);
 			return -ENOMEM;
 		}
 		ret = ACT_P_CREATED;
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 3922f2a2821b..3714f60f0b3c 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -131,7 +131,10 @@ static int tcf_simp_init(struct nlattr *nla, struct nlattr *est,
 		d = to_defact(pc);
 		ret = alloc_defdata(d, defdata);
 		if (ret < 0) {
-			kfree(pc);
+			if (est)
+				gen_kill_estimator(&pc->tcfc_bstats,
+						   &pc->tcfc_rate_est);
+			kfree_rcu(pc, tcfc_rcu);
 			return ret;
 		}
 		d->tcf_action = parm->action;
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 9af01f3df18c..e4723d31fdd5 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -203,6 +203,34 @@ out:
 	return index;
 }
 
+/* Length of the next packet (0 if the queue is empty). */
+static unsigned int qdisc_peek_len(struct Qdisc *sch)
+{
+	struct sk_buff *skb;
+
+	skb = sch->ops->peek(sch);
+	return skb ? qdisc_pkt_len(skb) : 0;
+}
+
+static void qfq_deactivate_class(struct qfq_sched *, struct qfq_class *);
+static void qfq_activate_class(struct qfq_sched *q, struct qfq_class *cl,
+			       unsigned int len);
+
+static void qfq_update_class_params(struct qfq_sched *q, struct qfq_class *cl,
+				    u32 lmax, u32 inv_w, int delta_w)
+{
+	int i;
+
+	/* update qfq-specific data */
+	cl->lmax = lmax;
+	cl->inv_w = inv_w;
+	i = qfq_calc_index(cl->inv_w, cl->lmax);
+
+	cl->grp = &q->groups[i];
+
+	q->wsum += delta_w;
+}
+
 static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 			    struct nlattr **tca, unsigned long *arg)
 {
@@ -250,6 +278,8 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 		lmax = 1UL << QFQ_MTU_SHIFT;
 
 	if (cl != NULL) {
+		bool need_reactivation = false;
+
 		if (tca[TCA_RATE]) {
 			err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
 					qdisc_root_sleeping_lock(sch),
@@ -258,12 +288,29 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 				return err;
 		}
 
-		if (inv_w != cl->inv_w) {
-			sch_tree_lock(sch);
-			q->wsum += delta_w;
-			cl->inv_w = inv_w;
-			sch_tree_unlock(sch);
+		if (lmax == cl->lmax && inv_w == cl->inv_w)
+			return 0; /* nothing to update */
+
+		i = qfq_calc_index(inv_w, lmax);
+		sch_tree_lock(sch);
+		if (&q->groups[i] != cl->grp && cl->qdisc->q.qlen > 0) {
+			/*
+			 * shift cl->F back, to not charge the
+			 * class for the not-yet-served head
+			 * packet
+			 */
+			cl->F = cl->S;
+			/* remove class from its slot in the old group */
+			qfq_deactivate_class(q, cl);
+			need_reactivation = true;
 		}
+
+		qfq_update_class_params(q, cl, lmax, inv_w, delta_w);
+
+		if (need_reactivation) /* activate in new group */
+			qfq_activate_class(q, cl, qdisc_peek_len(cl->qdisc));
+		sch_tree_unlock(sch);
+
 		return 0;
 	}
 
@@ -273,11 +320,8 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 
 	cl->refcnt = 1;
 	cl->common.classid = classid;
-	cl->lmax = lmax;
-	cl->inv_w = inv_w;
-	i = qfq_calc_index(cl->inv_w, cl->lmax);
 
-	cl->grp = &q->groups[i];
+	qfq_update_class_params(q, cl, lmax, inv_w, delta_w);
 
 	cl->qdisc = qdisc_create_dflt(sch->dev_queue,
 				      &pfifo_qdisc_ops, classid);
@@ -294,7 +338,6 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 			return err;
 		}
 	}
-	q->wsum += weight;
 
 	sch_tree_lock(sch);
 	qdisc_class_hash_insert(&q->clhash, &cl->common);
@@ -711,15 +754,6 @@ static void qfq_update_eligible(struct qfq_sched *q, u64 old_V)
 	}
 }
 
-/* What is length of next packet in queue (0 if queue is empty) */
-static unsigned int qdisc_peek_len(struct Qdisc *sch)
-{
-	struct sk_buff *skb;
-
-	skb = sch->ops->peek(sch);
-	return skb ? qdisc_pkt_len(skb) : 0;
-}
-
 /*
  * Updates the class, returns true if also the group needs to be updated.
  */
@@ -843,11 +877,8 @@ static void qfq_update_start(struct qfq_sched *q, struct qfq_class *cl)
 static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
 	struct qfq_sched *q = qdisc_priv(sch);
-	struct qfq_group *grp;
 	struct qfq_class *cl;
 	int err;
-	u64 roundedS;
-	int s;
 
 	cl = qfq_classify(skb, sch, &err);
 	if (cl == NULL) {
@@ -876,11 +907,25 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		return err;
 
 	/* If reach this point, queue q was idle */
-	grp = cl->grp;
+	qfq_activate_class(q, cl, qdisc_pkt_len(skb));
+
+	return err;
+}
+
+/*
+ * Handle class switch from idle to backlogged.
+ */
+static void qfq_activate_class(struct qfq_sched *q, struct qfq_class *cl,
+			       unsigned int pkt_len)
+{
+	struct qfq_group *grp = cl->grp;
+	u64 roundedS;
+	int s;
+
 	qfq_update_start(q, cl);
 
 	/* compute new finish time and rounded start. */
-	cl->F = cl->S + (u64)qdisc_pkt_len(skb) * cl->inv_w;
+	cl->F = cl->S + (u64)pkt_len * cl->inv_w;
 	roundedS = qfq_round_down(cl->S, grp->slot_shift);
 
 	/*
@@ -917,8 +962,6 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 skip_update:
 	qfq_slot_insert(grp, cl, roundedS);
-
-	return err;
 }
 
 