Diffstat (limited to 'net')
35 files changed, 123 insertions, 109 deletions
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index bb86d2932394..6da5daeebab7 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1392,7 +1392,7 @@ static int ax25_getname(struct socket *sock, struct sockaddr *uaddr,
 	ax25_cb *ax25;
 	int err = 0;
 
-	memset(fsa, 0, sizeof(fsa));
+	memset(fsa, 0, sizeof(*fsa));
 	lock_sock(sk);
 	ax25 = ax25_sk(sk);
 
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 1bf0cf503796..8184c031d028 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -740,12 +740,12 @@ static int setsockopt(struct socket *sock,
 		if (cf_sk->sk.sk_protocol != CAIFPROTO_UTIL)
 			return -ENOPROTOOPT;
 		lock_sock(&(cf_sk->sk));
-		cf_sk->conn_req.param.size = ol;
 		if (ol > sizeof(cf_sk->conn_req.param.data) ||
 			copy_from_user(&cf_sk->conn_req.param.data, ov, ol)) {
 			release_sock(&cf_sk->sk);
 			return -EINVAL;
 		}
+		cf_sk->conn_req.param.size = ol;
 		release_sock(&cf_sk->sk);
 		return 0;
 
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index 84a422c98941..fa9dab372b68 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -76,6 +76,8 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
 	struct chnl_net *priv  = container_of(layr, struct chnl_net, chnl);
 	int pktlen;
 	int err = 0;
+	const u8 *ip_version;
+	u8 buf;
 
 	priv = container_of(layr, struct chnl_net, chnl);
 
@@ -90,7 +92,21 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
 	 * send the packet to the net stack.
 	 */
 	skb->dev = priv->netdev;
-	skb->protocol = htons(ETH_P_IP);
+
+	/* check the version of IP */
+	ip_version = skb_header_pointer(skb, 0, 1, &buf);
+	if (!ip_version)
+		return -EINVAL;
+	switch (*ip_version >> 4) {
+	case 4:
+		skb->protocol = htons(ETH_P_IP);
+		break;
+	case 6:
+		skb->protocol = htons(ETH_P_IPV6);
+		break;
+	default:
+		return -EINVAL;
+	}
 
 	/* If we change the header in loop mode, the checksum is corrupted. */
 	if (priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP)
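The CAIF receive path above relies on the fact that the first nibble of an IP packet carries the version number (4 for IPv4, 6 for IPv6). A self-contained user-space sketch of the same dispatch, with made-up header bytes, looks like this:

#include <stdint.h>
#include <stdio.h>

/* Classify a raw IP packet by the high nibble of its first byte. */
static const char *ip_version_name(const uint8_t *pkt, size_t len)
{
	if (len < 1)
		return "truncated";
	switch (pkt[0] >> 4) {	/* version nibble */
	case 4:
		return "IPv4";
	case 6:
		return "IPv6";
	default:
		return "unknown";
	}
}

int main(void)
{
	uint8_t v4_hdr[] = { 0x45, 0x00 };	/* version 4, IHL 5 */
	uint8_t v6_hdr[] = { 0x60, 0x00 };	/* version 6 */

	printf("%s\n", ip_version_name(v4_hdr, sizeof(v4_hdr)));
	printf("%s\n", ip_version_name(v6_hdr, sizeof(v6_hdr)));
	return 0;
}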
diff --git a/net/core/dev.c b/net/core/dev.c
index 3fe443be4b15..a3ef808b5e36 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2297,7 +2297,10 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 	 */
 	if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
 		skb_dst_force(skb);
-	__qdisc_update_bstats(q, skb->len);
+
+	qdisc_skb_cb(skb)->pkt_len = skb->len;
+	qdisc_bstats_update(q, skb);
+
 	if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
 		if (unlikely(contended)) {
 			spin_unlock(&q->busylock);
@@ -5617,18 +5620,20 @@ struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
 }
 
 /**
- * alloc_netdev_mq - allocate network device
+ * alloc_netdev_mqs - allocate network device
  * @sizeof_priv: size of private data to allocate space for
  * @name: device name format string
  * @setup: callback to initialize device
- * @queue_count: the number of subqueues to allocate
+ * @txqs: the number of TX subqueues to allocate
+ * @rxqs: the number of RX subqueues to allocate
  *
  * Allocates a struct net_device with private data area for driver use
  * and performs basic initialization. Also allocates subquue structs
- * for each queue on the device at the end of the netdevice.
+ * for each queue on the device.
  */
-struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
-		void (*setup)(struct net_device *), unsigned int queue_count)
+struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
+		void (*setup)(struct net_device *),
+		unsigned int txqs, unsigned int rxqs)
 {
 	struct net_device *dev;
 	size_t alloc_size;
@@ -5636,12 +5641,20 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
 
 	BUG_ON(strlen(name) >= sizeof(dev->name));
 
-	if (queue_count < 1) {
+	if (txqs < 1) {
 		pr_err("alloc_netdev: Unable to allocate device "
 		       "with zero queues.\n");
 		return NULL;
 	}
 
+#ifdef CONFIG_RPS
+	if (rxqs < 1) {
+		pr_err("alloc_netdev: Unable to allocate device "
+		       "with zero RX queues.\n");
+		return NULL;
+	}
+#endif
+
 	alloc_size = sizeof(struct net_device);
 	if (sizeof_priv) {
 		/* ensure 32-byte alignment of private area */
@@ -5672,14 +5685,14 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
 
 	dev_net_set(dev, &init_net);
 
-	dev->num_tx_queues = queue_count;
-	dev->real_num_tx_queues = queue_count;
+	dev->num_tx_queues = txqs;
+	dev->real_num_tx_queues = txqs;
 	if (netif_alloc_netdev_queues(dev))
 		goto free_pcpu;
 
 #ifdef CONFIG_RPS
-	dev->num_rx_queues = queue_count;
-	dev->real_num_rx_queues = queue_count;
+	dev->num_rx_queues = rxqs;
+	dev->real_num_rx_queues = rxqs;
 	if (netif_alloc_rx_queues(dev))
 		goto free_pcpu;
 #endif
@@ -5707,7 +5720,7 @@ free_p:
 	kfree(p);
 	return NULL;
 }
-EXPORT_SYMBOL(alloc_netdev_mq);
+EXPORT_SYMBOL(alloc_netdev_mqs);
 
 /**
  * free_netdev - free network device
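The practical effect of alloc_netdev_mqs() is that TX and RX queue counts can now differ at allocation time. Nothing in this net/-only diff shows a caller of the new signature, so the following is only an illustrative sketch: the private struct, setup callback, interface name and queue counts are invented, while alloc_netdev_mqs(), register_netdev() and free_netdev() are the real APIs.

/* Hypothetical driver sketch, not part of the patch. */
struct my_priv {
	int placeholder;
};

static void my_setup(struct net_device *dev)
{
	ether_setup(dev);	/* any per-type initialisation would do */
}

static struct net_device *my_create(void)
{
	struct net_device *dev;

	/* 8 TX subqueues, 4 RX subqueues - no longer forced to be equal */
	dev = alloc_netdev_mqs(sizeof(struct my_priv), "myif%d", my_setup, 8, 4);
	if (!dev)
		return NULL;

	if (register_netdev(dev)) {
		free_netdev(dev);
		return NULL;
	}
	return dev;
}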
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index f00ef2f1d814..44d2b42fda56 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -347,10 +347,11 @@ void ether_setup(struct net_device *dev)
 EXPORT_SYMBOL(ether_setup);
 
 /**
- * alloc_etherdev_mq - Allocates and sets up an Ethernet device
+ * alloc_etherdev_mqs - Allocates and sets up an Ethernet device
  * @sizeof_priv: Size of additional driver-private structure to be allocated
  *	for this Ethernet device
- * @queue_count: The number of queues this device has.
+ * @txqs: The number of TX queues this device has.
+ * @rxqs: The number of RX queues this device has.
  *
  * Fill in the fields of the device structure with Ethernet-generic
  * values. Basically does everything except registering the device.
@@ -360,11 +361,12 @@ EXPORT_SYMBOL(ether_setup);
  * this private data area.
  */
 
-struct net_device *alloc_etherdev_mq(int sizeof_priv, unsigned int queue_count)
+struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
+				      unsigned int rxqs)
 {
-	return alloc_netdev_mq(sizeof_priv, "eth%d", ether_setup, queue_count);
+	return alloc_netdev_mqs(sizeof_priv, "eth%d", ether_setup, txqs, rxqs);
 }
-EXPORT_SYMBOL(alloc_etherdev_mq);
+EXPORT_SYMBOL(alloc_etherdev_mqs);
 
 static size_t _format_mac_addr(char *buf, int buflen,
 			       const unsigned char *addr, int len)
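Ethernet drivers get the same flexibility through the wrapper above. A one-line sketch with invented numbers (my_priv and the ring counts are hypothetical):

/* Hypothetical NIC with 4 TX rings but a single RX ring. */
struct net_device *dev = alloc_etherdev_mqs(sizeof(struct my_priv), 4, 1);

if (!dev)
	return -ENOMEM;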
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 880a5ec6dce0..86961bec70ab 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -314,14 +314,15 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
 
 	skb->ip_summed = CHECKSUM_NONE;
 
-	ah = (struct ip_auth_hdr *)skb->data;
-	iph = ip_hdr(skb);
-	ihl = ip_hdrlen(skb);
 
 	if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
 		goto out;
 	nfrags = err;
 
+	ah = (struct ip_auth_hdr *)skb->data;
+	iph = ip_hdr(skb);
+	ihl = ip_hdrlen(skb);
+
 	work_iph = ah_alloc_tmp(ahash, nfrags, ihl + ahp->icv_trunc_len);
 	if (!work_iph)
 		goto out;
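The AH reordering here (and in net/ipv6/ah6.c below) exists because skb_cow_data() may copy or reallocate the skb's data area, so header pointers taken before the call can end up dangling; the fix is to derive them only afterwards. The same hazard, stripped of kernel types, in a purely illustrative sketch:

#include <stdlib.h>

struct buf { unsigned char *data; size_t len; };

/* Stand-in for skb_cow_data(): may move the underlying storage. */
static int make_writable(struct buf *b)
{
	unsigned char *n = realloc(b->data, b->len);

	if (!n)
		return -1;
	b->data = n;
	return 0;
}

static void parse(struct buf *b)
{
	unsigned char *hdr = b->data;	/* taken too early... */

	if (make_writable(b) < 0)
		return;
	hdr = b->data;			/* ...so it must be re-derived here */
	(void)hdr;
}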
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index a2fc7b961dbc..04c8b69fd426 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -1143,6 +1143,23 @@ static int arp_req_get(struct arpreq *r, struct net_device *dev)
 	return err;
 }
 
+int arp_invalidate(struct net_device *dev, __be32 ip)
+{
+	struct neighbour *neigh = neigh_lookup(&arp_tbl, &ip, dev);
+	int err = -ENXIO;
+
+	if (neigh) {
+		if (neigh->nud_state & ~NUD_NOARP)
+			err = neigh_update(neigh, NULL, NUD_FAILED,
+					   NEIGH_UPDATE_F_OVERRIDE|
+					   NEIGH_UPDATE_F_ADMIN);
+		neigh_release(neigh);
+	}
+
+	return err;
+}
+EXPORT_SYMBOL(arp_invalidate);
+
 static int arp_req_delete_public(struct net *net, struct arpreq *r,
 				 struct net_device *dev)
 {
@@ -1163,7 +1180,6 @@ static int arp_req_delete(struct net *net, struct arpreq *r,
 {
 	int err;
 	__be32 ip;
-	struct neighbour *neigh;
 
 	if (r->arp_flags & ATF_PUBL)
 		return arp_req_delete_public(net, r, dev);
@@ -1181,16 +1197,7 @@ static int arp_req_delete(struct net *net, struct arpreq *r,
 		if (!dev)
 			return -EINVAL;
 	}
-	err = -ENXIO;
-	neigh = neigh_lookup(&arp_tbl, &ip, dev);
-	if (neigh) {
-		if (neigh->nud_state & ~NUD_NOARP)
-			err = neigh_update(neigh, NULL, NUD_FAILED,
-					   NEIGH_UPDATE_F_OVERRIDE|
-					   NEIGH_UPDATE_F_ADMIN);
-		neigh_release(neigh);
-	}
-	return err;
+	return arp_invalidate(dev, ip);
 }
 
 /*
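With arp_invalidate() exported, code outside the ARP ioctl path can push a neighbour entry back to NUD_FAILED. A hypothetical caller (the address and the failover scenario are invented; only the call itself comes from the patch):

/* Hypothetical: forget the cached entry for a gateway after a failover. */
__be32 gw = htonl(0xc0a80001);		/* 192.168.0.1, example value */
int err = arp_invalidate(dev, gw);

if (err && err != -ENXIO)		/* -ENXIO: no entry existed */
	pr_warn("arp_invalidate failed: %d\n", err);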
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 25e318153f14..97e5fb765265 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -73,7 +73,7 @@ int inet_csk_bind_conflict(const struct sock *sk,
 		     !sk2->sk_bound_dev_if ||
 		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
 			if (!reuse || !sk2->sk_reuse ||
-			    sk2->sk_state == TCP_LISTEN) {
+			    ((1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))) {
 				const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
 				if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
 				    sk2_rcv_saddr == sk_rcv_saddr(sk))
@@ -122,7 +122,8 @@ again:
 				    (tb->num_owners < smallest_size || smallest_size == -1)) {
 					smallest_size = tb->num_owners;
 					smallest_rover = rover;
-					if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) {
+					if (atomic_read(&hashinfo->bsockets) > (high - low) + 1 &&
+					    !inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) {
 						spin_unlock(&head->lock);
 						snum = smallest_rover;
 						goto have_snum;
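The bind-conflict hunks here and in the IPv6 counterpart below switch from a single equality test to the TCPF_* masks, which encode one bit per TCP state (TCPF_LISTEN == 1 << TCP_LISTEN, and so on), so `(1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)` asks "is the other socket in LISTEN or CLOSE?" in one test. A self-contained sketch of the idiom, with locally defined constants whose values mirror the kernel's TCP_*/TCPF_* pairing:

/* Sketch only; the real definitions live in the kernel's tcp_states.h. */
enum { TCP_ESTABLISHED = 1, TCP_CLOSE = 7, TCP_LISTEN = 10 };
enum {
	TCPF_ESTABLISHED = 1 << TCP_ESTABLISHED,
	TCPF_CLOSE	 = 1 << TCP_CLOSE,
	TCPF_LISTEN	 = 1 << TCP_LISTEN,
};

static int state_is_listen_or_close(int state)
{
	/* One mask test replaces two equality comparisons. */
	return (1 << state) & (TCPF_LISTEN | TCPF_CLOSE);
}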
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index ee82d4ef26ce..1aba54ae53c4 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -538,14 +538,16 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
 	if (!pskb_may_pull(skb, ah_hlen))
 		goto out;
 
-	ip6h = ipv6_hdr(skb);
-
-	skb_push(skb, hdr_len);
 
 	if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
 		goto out;
 	nfrags = err;
 
+	ah = (struct ip_auth_hdr *)skb->data;
+	ip6h = ipv6_hdr(skb);
+
+	skb_push(skb, hdr_len);
+
 	work_iph = ah_alloc_tmp(ahash, nfrags, hdr_len + ahp->icv_trunc_len);
 	if (!work_iph)
 		goto out;
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index e46305d1815a..d144e629d2b4 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -44,7 +44,7 @@ int inet6_csk_bind_conflict(const struct sock *sk,
 		     !sk2->sk_bound_dev_if ||
 		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if) &&
 		    (!sk->sk_reuse || !sk2->sk_reuse ||
-		     sk2->sk_state == TCP_LISTEN) &&
+		     ((1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))) &&
 		     ipv6_rcv_saddr_equal(sk, sk2))
 			break;
 	}
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 94b5bf132b2e..5f8d242be3f3 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -401,6 +401,9 @@ int ip6_forward(struct sk_buff *skb)
 		goto drop;
 	}
 
+	if (skb->pkt_type != PACKET_HOST)
+		goto drop;
+
 	skb_forward_csum(skb);
 
 	/*
diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
index fd95beb72f5d..1072b2c19d31 100644
--- a/net/phonet/af_phonet.c
+++ b/net/phonet/af_phonet.c
@@ -37,7 +37,7 @@
 /* Transport protocol registration */
 static struct phonet_protocol *proto_tab[PHONET_NPROTO] __read_mostly;
 
-static struct phonet_protocol *phonet_proto_get(int protocol)
+static struct phonet_protocol *phonet_proto_get(unsigned int protocol)
 {
 	struct phonet_protocol *pp;
 
@@ -458,7 +458,7 @@ static struct packet_type phonet_packet_type __read_mostly = {
 
 static DEFINE_MUTEX(proto_tab_lock);
 
-int __init_or_module phonet_proto_register(int protocol,
+int __init_or_module phonet_proto_register(unsigned int protocol,
 		struct phonet_protocol *pp)
 {
 	int err = 0;
@@ -481,7 +481,7 @@ int __init_or_module phonet_proto_register(int protocol,
 }
 EXPORT_SYMBOL(phonet_proto_register);
 
-void phonet_proto_unregister(int protocol, struct phonet_protocol *pp)
+void phonet_proto_unregister(unsigned int protocol, struct phonet_protocol *pp)
 {
 	mutex_lock(&proto_tab_lock);
 	BUG_ON(proto_tab[protocol] != pp);
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 67dc7ce9b63a..83ddfc07e45d 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -508,8 +508,7 @@ static int tcf_csum(struct sk_buff *skb,
 
 	spin_lock(&p->tcf_lock);
 	p->tcf_tm.lastuse = jiffies;
-	p->tcf_bstats.bytes += qdisc_pkt_len(skb);
-	p->tcf_bstats.packets++;
+	bstats_update(&p->tcf_bstats, skb);
 	action = p->tcf_action;
 	update_flags = p->update_flags;
 	spin_unlock(&p->tcf_lock);
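All of the action and qdisc conversions in this series (including the earlier net/core/dev.c hunk and the HTB hunk further down that drops its open-coded gso_segs handling) funnel through one pair of helpers. Their definitions are not part of this net/-limited diff; based on the open-coded statements they replace, they are presumably shaped roughly like the following sketch, which is not the patch itself:

/* Sketch of the helpers, inferred from the code they replace; the real
 * definitions are expected to live in include/net/sch_generic.h. */
static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
				 const struct sk_buff *skb)
{
	bstats->bytes += qdisc_pkt_len(skb);
	bstats->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
}

static inline void qdisc_bstats_update(struct Qdisc *sch,
				       const struct sk_buff *skb)
{
	bstats_update(&sch->bstats, skb);
}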
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 8daef9632255..c2a7c20e81c1 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -209,8 +209,7 @@ static int tcf_ipt(struct sk_buff *skb, struct tc_action *a,
 	spin_lock(&ipt->tcf_lock);
 
 	ipt->tcf_tm.lastuse = jiffies;
-	ipt->tcf_bstats.bytes += qdisc_pkt_len(skb);
-	ipt->tcf_bstats.packets++;
+	bstats_update(&ipt->tcf_bstats, skb);
 
 	/* yes, we have to worry about both in and out dev
 	 worry later - danger - this API seems to have changed
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 0c311be92827..d765067e99db 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -165,8 +165,7 @@ static int tcf_mirred(struct sk_buff *skb, struct tc_action *a,
 
 	spin_lock(&m->tcf_lock);
 	m->tcf_tm.lastuse = jiffies;
-	m->tcf_bstats.bytes += qdisc_pkt_len(skb);
-	m->tcf_bstats.packets++;
+	bstats_update(&m->tcf_bstats, skb);
 
 	dev = m->tcfm_dev;
 	if (!dev) {
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 186eb837e600..178a4bd7b7cb 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -125,8 +125,7 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
 	egress = p->flags & TCA_NAT_FLAG_EGRESS;
 	action = p->tcf_action;
 
-	p->tcf_bstats.bytes += qdisc_pkt_len(skb);
-	p->tcf_bstats.packets++;
+	bstats_update(&p->tcf_bstats, skb);
 
 	spin_unlock(&p->tcf_lock);
 
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index a0593c9640db..445bef716f77 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -187,8 +187,7 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,
 bad:
 	p->tcf_qstats.overlimits++;
 done:
-	p->tcf_bstats.bytes += qdisc_pkt_len(skb);
-	p->tcf_bstats.packets++;
+	bstats_update(&p->tcf_bstats, skb);
 	spin_unlock(&p->tcf_lock);
 	return p->tcf_action;
 }
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 7ebf7439b478..e2f08b1e2e58 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -298,8 +298,7 @@ static int tcf_act_police(struct sk_buff *skb, struct tc_action *a,
 
 	spin_lock(&police->tcf_lock);
 
-	police->tcf_bstats.bytes += qdisc_pkt_len(skb);
-	police->tcf_bstats.packets++;
+	bstats_update(&police->tcf_bstats, skb);
 
 	if (police->tcfp_ewma_rate &&
 	    police->tcf_rate_est.bps >= police->tcfp_ewma_rate) {
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 97e84f3ee775..7287cff7af3e 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -42,8 +42,7 @@ static int tcf_simp(struct sk_buff *skb, struct tc_action *a, struct tcf_result
 
 	spin_lock(&d->tcf_lock);
 	d->tcf_tm.lastuse = jiffies;
-	d->tcf_bstats.bytes += qdisc_pkt_len(skb);
-	d->tcf_bstats.packets++;
+	bstats_update(&d->tcf_bstats, skb);
 
 	/* print policy string followed by _ then packet count
 	 * Example if this was the 3rd packet and the string was "hello"
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 66cbf4eb8855..836f5fee9e58 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -46,8 +46,7 @@ static int tcf_skbedit(struct sk_buff *skb, struct tc_action *a,
 
 	spin_lock(&d->tcf_lock);
 	d->tcf_tm.lastuse = jiffies;
-	d->tcf_bstats.bytes += qdisc_pkt_len(skb);
-	d->tcf_bstats.packets++;
+	bstats_update(&d->tcf_bstats, skb);
 
 	if (d->flags & SKBEDIT_F_PRIORITY)
 		skb->priority = d->priority;
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 282540778aa8..943d733409d0 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -422,10 +422,8 @@ drop: __maybe_unused
 		}
 		return ret;
 	}
-	sch->bstats.bytes += qdisc_pkt_len(skb);
-	sch->bstats.packets++;
-	flow->bstats.bytes += qdisc_pkt_len(skb);
-	flow->bstats.packets++;
+	qdisc_bstats_update(sch, skb);
+	bstats_update(&flow->bstats, skb);
 	/*
 	 * Okay, this may seem weird. We pretend we've dropped the packet if
 	 * it goes via ATM. The reason for this is that the outer qdisc
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index eb7631590865..c80d1c210c5d 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -390,8 +390,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	ret = qdisc_enqueue(skb, cl->q);
 	if (ret == NET_XMIT_SUCCESS) {
 		sch->q.qlen++;
-		sch->bstats.packets++;
-		sch->bstats.bytes += qdisc_pkt_len(skb);
+		qdisc_bstats_update(sch, skb);
 		cbq_mark_toplevel(q, cl);
 		if (!cl->next_alive)
 			cbq_activate_class(cl);
@@ -650,8 +649,7 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
 		ret = qdisc_enqueue(skb, cl->q);
 		if (ret == NET_XMIT_SUCCESS) {
 			sch->q.qlen++;
-			sch->bstats.packets++;
-			sch->bstats.bytes += qdisc_pkt_len(skb);
+			qdisc_bstats_update(sch, skb);
 			if (!cl->next_alive)
 				cbq_activate_class(cl);
 			return 0;
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index aa8b5313f8cf..de55e642eafc 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -351,7 +351,6 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
 	struct drr_sched *q = qdisc_priv(sch);
 	struct drr_class *cl;
-	unsigned int len;
 	int err;
 
 	cl = drr_classify(skb, sch, &err);
@@ -362,7 +361,6 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		return err;
 	}
 
-	len = qdisc_pkt_len(skb);
 	err = qdisc_enqueue(skb, cl->qdisc);
 	if (unlikely(err != NET_XMIT_SUCCESS)) {
 		if (net_xmit_drop_count(err)) {
@@ -377,10 +375,8 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		cl->deficit = cl->quantum;
 	}
 
-	cl->bstats.packets++;
-	cl->bstats.bytes += len;
-	sch->bstats.packets++;
-	sch->bstats.bytes += len;
+	bstats_update(&cl->bstats, skb);
+	qdisc_bstats_update(sch, skb);
 
 	sch->q.qlen++;
 	return err;
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 1d295d62bb5c..60f4bdd4408e 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -260,8 +260,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		return err;
 	}
 
-	sch->bstats.bytes += qdisc_pkt_len(skb);
-	sch->bstats.packets++;
+	qdisc_bstats_update(sch, skb);
 	sch->q.qlen++;
 
 	return NET_XMIT_SUCCESS;
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 069c62b7bb36..2e45791d4f6c 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1599,10 +1599,8 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	if (cl->qdisc->q.qlen == 1)
 		set_active(cl, qdisc_pkt_len(skb));
 
-	cl->bstats.packets++;
-	cl->bstats.bytes += qdisc_pkt_len(skb);
-	sch->bstats.packets++;
-	sch->bstats.bytes += qdisc_pkt_len(skb);
+	bstats_update(&cl->bstats, skb);
+	qdisc_bstats_update(sch, skb);
 	sch->q.qlen++;
 
 	return NET_XMIT_SUCCESS;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 01b519d6c52d..984c1b0c6836 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -569,15 +569,12 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		}
 		return ret;
 	} else {
-		cl->bstats.packets +=
-			skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
-		cl->bstats.bytes += qdisc_pkt_len(skb);
+		bstats_update(&cl->bstats, skb);
 		htb_activate(q, cl);
 	}
 
 	sch->q.qlen++;
-	sch->bstats.packets += skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
-	sch->bstats.bytes += qdisc_pkt_len(skb);
+	qdisc_bstats_update(sch, skb);
 	return NET_XMIT_SUCCESS;
 }
 
@@ -648,12 +645,10 @@ static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
 			htb_add_to_wait_tree(q, cl, diff);
 		}
 
-		/* update byte stats except for leaves which are already updated */
-		if (cl->level) {
-			cl->bstats.bytes += bytes;
-			cl->bstats.packets += skb_is_gso(skb)?
-					skb_shinfo(skb)->gso_segs:1;
-		}
+		/* update basic stats except for leaves which are already updated */
+		if (cl->level)
+			bstats_update(&cl->bstats, skb);
+
 		cl = cl->parent;
 	}
 }
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
index f10e34a68445..bce1665239b8 100644
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -63,8 +63,7 @@ static int ingress_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	result = tc_classify(skb, p->filter_list, &res);
 
-	sch->bstats.packets++;
-	sch->bstats.bytes += qdisc_pkt_len(skb);
+	qdisc_bstats_update(sch, skb);
 	switch (result) {
 	case TC_ACT_SHOT:
 		result = TC_ACT_SHOT;
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index 32690deab5d0..21f13da24763 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -83,8 +83,7 @@ multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	ret = qdisc_enqueue(skb, qdisc);
 	if (ret == NET_XMIT_SUCCESS) {
-		sch->bstats.bytes += qdisc_pkt_len(skb);
-		sch->bstats.packets++;
+		qdisc_bstats_update(sch, skb);
 		sch->q.qlen++;
 		return NET_XMIT_SUCCESS;
 	}
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index e5593c083a78..1c4bce863479 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -240,8 +240,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	if (likely(ret == NET_XMIT_SUCCESS)) {
 		sch->q.qlen++;
-		sch->bstats.bytes += qdisc_pkt_len(skb);
-		sch->bstats.packets++;
+		qdisc_bstats_update(sch, skb);
 	} else if (net_xmit_drop_count(ret)) {
 		sch->qstats.drops++;
 	}
@@ -477,8 +476,7 @@ static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
 	__skb_queue_after(list, skb, nskb);
 
 	sch->qstats.backlog += qdisc_pkt_len(nskb);
-	sch->bstats.bytes += qdisc_pkt_len(nskb);
-	sch->bstats.packets++;
+	qdisc_bstats_update(sch, nskb);
 
 	return NET_XMIT_SUCCESS;
 }
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index b1c95bce33ce..966158d49dd1 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -84,8 +84,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	ret = qdisc_enqueue(skb, qdisc);
 	if (ret == NET_XMIT_SUCCESS) {
-		sch->bstats.bytes += qdisc_pkt_len(skb);
-		sch->bstats.packets++;
+		qdisc_bstats_update(sch, skb);
 		sch->q.qlen++;
 		return NET_XMIT_SUCCESS;
 	}
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index a67ba3c5a0cc..a6009c5a2c97 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -94,8 +94,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 
 	ret = qdisc_enqueue(skb, child);
 	if (likely(ret == NET_XMIT_SUCCESS)) {
-		sch->bstats.bytes += qdisc_pkt_len(skb);
-		sch->bstats.packets++;
+		qdisc_bstats_update(sch, skb);
 		sch->q.qlen++;
 	} else if (net_xmit_drop_count(ret)) {
 		q->stats.pdrop++;
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index d54ac94066c2..239ec53a634d 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -403,8 +403,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		slot->allot = q->scaled_quantum;
 	}
 	if (++sch->q.qlen <= q->limit) {
-		sch->bstats.bytes += qdisc_pkt_len(skb);
-		sch->bstats.packets++;
+		qdisc_bstats_update(sch, skb);
 		return NET_XMIT_SUCCESS;
 	}
 
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 641a30d64635..77565e721811 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -134,8 +134,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 	}
 
 	sch->q.qlen++;
-	sch->bstats.bytes += qdisc_pkt_len(skb);
-	sch->bstats.packets++;
+	qdisc_bstats_update(sch, skb);
 	return NET_XMIT_SUCCESS;
 }
 
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 106479a7c94a..af9360d1f6eb 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -83,8 +83,7 @@ teql_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 
 	if (q->q.qlen < dev->tx_queue_len) {
 		__skb_queue_tail(&q->q, skb);
-		sch->bstats.bytes += qdisc_pkt_len(skb);
-		sch->bstats.packets++;
+		qdisc_bstats_update(sch, skb);
 		return NET_XMIT_SUCCESS;
 	}
 
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 6a8da81ff66f..d5e1e0b08890 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -26,6 +26,7 @@
 #include <net/sock.h>
 #include <net/xfrm.h>
 #include <net/netlink.h>
+#include <net/ah.h>
 #include <asm/uaccess.h>
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 #include <linux/in6.h>
@@ -302,7 +303,8 @@ static int attach_auth_trunc(struct xfrm_algo_auth **algpp, u8 *props,
 	algo = xfrm_aalg_get_byname(ualg->alg_name, 1);
 	if (!algo)
 		return -ENOSYS;
-	if (ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits)
+	if ((ualg->alg_trunc_len / 8) > MAX_AH_AUTH_LEN ||
+	    ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits)
 		return -EINVAL;
 	*props = algo->desc.sadb_alg_id;
 
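The added check in attach_auth_trunc() compares a bit count against a byte limit: alg_trunc_len arrives from userspace in bits, while MAX_AH_AUTH_LEN (pulled in by the new net/ah.h include) bounds the truncated ICV in bytes, hence the division by eight. A reduced sketch of the validation; the constant's value here is only a stand-in, not taken from the patch:

/* Illustrative only; 64 is a placeholder for whatever net/ah.h defines. */
#define EXAMPLE_MAX_AH_AUTH_LEN	64	/* bytes */

static int trunc_len_ok(unsigned int trunc_len_bits, unsigned int icv_fullbits)
{
	if ((trunc_len_bits / 8) > EXAMPLE_MAX_AH_AUTH_LEN)
		return 0;	/* truncated ICV would overflow the AH buffer */
	if (trunc_len_bits > icv_fullbits)
		return 0;	/* cannot keep more bits than the hash produces */
	return 1;
}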