Diffstat (limited to 'net')
-rw-r--r--	net/8021q/vlan_core.c		111
-rw-r--r--	net/8021q/vlan_dev.c		 14
-rw-r--r--	net/appletalk/aarp.c		  5
-rw-r--r--	net/bluetooth/bnep/bnep.h	  1
-rw-r--r--	net/bluetooth/bnep/core.c	 12
-rw-r--r--	net/bluetooth/bnep/netdev.c	 33
-rw-r--r--	net/bridge/netfilter/ebtables.c	  3
-rw-r--r--	net/can/af_can.c		 15
-rw-r--r--	net/can/bcm.c			 12
-rw-r--r--	net/can/raw.c			 15
-rw-r--r--	net/core/dev.c			238
-rw-r--r--	net/dccp/ccids/Kconfig		  2
-rw-r--r--	net/dccp/ccids/lib/tfrc.c	  2
-rw-r--r--	net/dsa/slave.c			 51
-rw-r--r--	net/ipv4/tcp.c			  7
-rw-r--r--	net/ipv4/tcp_input.c		  2
-rw-r--r--	net/ipv4/tcp_ipv4.c		  2
-rw-r--r--	net/ipv6/af_inet6.c		107
-rw-r--r--	net/ipv6/route.c		  2
-rw-r--r--	net/ipv6/sysctl_net_ipv6.c	  2
-rw-r--r--	net/ipv6/tcp_ipv6.c		 47
-rw-r--r--	net/phonet/pep-gprs.c		 12
-rw-r--r--	net/sched/sch_sfq.c		  2
-rw-r--r--	net/sched/sch_teql.c		 20
-rw-r--r--	net/sctp/auth.c			  2
-rw-r--r--	net/tipc/bcast.h		  2
-rw-r--r--	net/wimax/Kconfig		 14
-rw-r--r--	net/wimax/id-table.c		  8
-rw-r--r--	net/wimax/op-rfkill.c		  2
-rw-r--r--	net/wireless/wext.c		  4
-rw-r--r--	net/xfrm/xfrm_ipcomp.c		  1
31 files changed, 457 insertions, 293 deletions
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index dd86a1dc4cd0..6c1323940263 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -3,46 +3,35 @@
 #include <linux/if_vlan.h>
 #include "vlan.h"
 
-struct vlan_hwaccel_cb {
-	struct net_device *dev;
-};
-
-static inline struct vlan_hwaccel_cb *vlan_hwaccel_cb(struct sk_buff *skb)
-{
-	return (struct vlan_hwaccel_cb *)skb->cb;
-}
-
 /* VLAN rx hw acceleration helper. This acts like netif_{rx,receive_skb}(). */
 int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
 		      u16 vlan_tci, int polling)
 {
-	struct vlan_hwaccel_cb *cb = vlan_hwaccel_cb(skb);
-
-	if (skb_bond_should_drop(skb)) {
-		dev_kfree_skb_any(skb);
-		return NET_RX_DROP;
-	}
+	if (skb_bond_should_drop(skb))
+		goto drop;
 
 	skb->vlan_tci = vlan_tci;
-	cb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);
+	skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);
+
+	if (!skb->dev)
+		goto drop;
 
 	return (polling ? netif_receive_skb(skb) : netif_rx(skb));
+
+drop:
+	dev_kfree_skb_any(skb);
+	return NET_RX_DROP;
 }
 EXPORT_SYMBOL(__vlan_hwaccel_rx);
 
 int vlan_hwaccel_do_receive(struct sk_buff *skb)
 {
-	struct vlan_hwaccel_cb *cb = vlan_hwaccel_cb(skb);
-	struct net_device *dev = cb->dev;
+	struct net_device *dev = skb->dev;
 	struct net_device_stats *stats;
 
+	skb->dev = vlan_dev_info(dev)->real_dev;
 	netif_nit_deliver(skb);
 
-	if (dev == NULL) {
-		kfree_skb(skb);
-		return -1;
-	}
-
 	skb->dev = dev;
 	skb->priority = vlan_get_ingress_priority(dev, skb->vlan_tci);
 	skb->vlan_tci = 0;
@@ -80,3 +69,79 @@ u16 vlan_dev_vlan_id(const struct net_device *dev)
 	return vlan_dev_info(dev)->vlan_id;
 }
 EXPORT_SYMBOL_GPL(vlan_dev_vlan_id);
+
+static int vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
+			   unsigned int vlan_tci, struct sk_buff *skb)
+{
+	struct sk_buff *p;
+
+	if (skb_bond_should_drop(skb))
+		goto drop;
+
+	skb->vlan_tci = vlan_tci;
+	skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);
+
+	if (!skb->dev)
+		goto drop;
+
+	for (p = napi->gro_list; p; p = p->next) {
+		NAPI_GRO_CB(p)->same_flow = p->dev == skb->dev;
+		NAPI_GRO_CB(p)->flush = 0;
+	}
+
+	return dev_gro_receive(napi, skb);
+
+drop:
+	return 2;
+}
+
+int vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
+		     unsigned int vlan_tci, struct sk_buff *skb)
+{
+	int err = NET_RX_SUCCESS;
+
+	switch (vlan_gro_common(napi, grp, vlan_tci, skb)) {
+	case -1:
+		return netif_receive_skb(skb);
+
+	case 2:
+		err = NET_RX_DROP;
+		/* fall through */
+
+	case 1:
+		kfree_skb(skb);
+		break;
+	}
+
+	return err;
+}
+EXPORT_SYMBOL(vlan_gro_receive);
+
+int vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
+		   unsigned int vlan_tci, struct napi_gro_fraginfo *info)
+{
+	struct sk_buff *skb = napi_fraginfo_skb(napi, info);
+	int err = NET_RX_DROP;
+
+	if (!skb)
+		goto out;
+
+	err = NET_RX_SUCCESS;
+
+	switch (vlan_gro_common(napi, grp, vlan_tci, skb)) {
+	case -1:
+		return netif_receive_skb(skb);
+
+	case 2:
+		err = NET_RX_DROP;
+		/* fall through */
+
+	case 1:
+		napi_reuse_skb(napi, skb);
+		break;
+	}
+
+out:
+	return err;
+}
+EXPORT_SYMBOL(vlan_gro_frags);
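
The two new entry points above are the driver-facing side of VLAN GRO: a NIC driver with hardware VLAN tag stripping calls vlan_gro_receive() from its NAPI poll loop instead of vlan_hwaccel_receive_skb(). A minimal sketch under that assumption follows; my_priv, my_poll and my_hw_rx_skb() are hypothetical names, only vlan_gro_receive(), napi_gro_receive() and napi_complete() are real APIs here.

/* Hypothetical driver fragment -- not part of this patch. */
struct my_priv {
	struct napi_struct napi;
	struct vlan_group *vlgrp;	/* set when a VLAN group is registered */
};

/* Hypothetical helper: pops the next received frame, returns the
 * hardware-stripped tag through *vlan_tci (0 if untagged). */
static struct sk_buff *my_hw_rx_skb(struct my_priv *priv, u16 *vlan_tci);

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	struct sk_buff *skb;
	u16 vlan_tci;
	int work = 0;

	while (work < budget && (skb = my_hw_rx_skb(priv, &vlan_tci))) {
		if (priv->vlgrp && vlan_tci)
			/* tag stripped by hardware: feed the VLAN GRO path */
			vlan_gro_receive(napi, priv->vlgrp, vlan_tci, skb);
		else
			napi_gro_receive(napi, skb);
		work++;
	}

	if (work < budget)
		napi_complete(napi);
	return work;
}
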
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 89a3bbdfca3f..4a19acd3a32b 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -546,6 +546,18 @@ static int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 	return err;
 }
 
+static int vlan_dev_neigh_setup(struct net_device *dev, struct neigh_parms *pa)
+{
+	struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+	const struct net_device_ops *ops = real_dev->netdev_ops;
+	int err = 0;
+
+	if (netif_device_present(real_dev) && ops->ndo_neigh_setup)
+		err = ops->ndo_neigh_setup(dev, pa);
+
+	return err;
+}
+
 static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
 {
 	struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
@@ -713,6 +725,7 @@ static const struct net_device_ops vlan_netdev_ops = {
 	.ndo_set_multicast_list = vlan_dev_set_rx_mode,
 	.ndo_change_rx_flags	= vlan_dev_change_rx_flags,
 	.ndo_do_ioctl		= vlan_dev_ioctl,
+	.ndo_neigh_setup	= vlan_dev_neigh_setup,
 };
 
 static const struct net_device_ops vlan_netdev_accel_ops = {
@@ -728,6 +741,7 @@ static const struct net_device_ops vlan_netdev_accel_ops = {
 	.ndo_set_multicast_list = vlan_dev_set_rx_mode,
 	.ndo_change_rx_flags	= vlan_dev_change_rx_flags,
 	.ndo_do_ioctl		= vlan_dev_ioctl,
+	.ndo_neigh_setup	= vlan_dev_neigh_setup,
 };
 
 void vlan_setup(struct net_device *dev)
diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c
index b03ff58e9308..89f99d3beb60 100644
--- a/net/appletalk/aarp.c
+++ b/net/appletalk/aarp.c
@@ -443,13 +443,14 @@ static void aarp_send_probe_phase1(struct atalk_iface *iface)
 {
 	struct ifreq atreq;
 	struct sockaddr_at *sa = (struct sockaddr_at *)&atreq.ifr_addr;
+	const struct net_device_ops *ops = iface->dev->netdev_ops;
 
 	sa->sat_addr.s_node = iface->address.s_node;
 	sa->sat_addr.s_net = ntohs(iface->address.s_net);
 
 	/* We pass the Net:Node to the drivers/cards by a Device ioctl. */
-	if (!(iface->dev->do_ioctl(iface->dev, &atreq, SIOCSIFADDR))) {
-		(void)iface->dev->do_ioctl(iface->dev, &atreq, SIOCGIFADDR);
+	if (!(ops->ndo_do_ioctl(iface->dev, &atreq, SIOCSIFADDR))) {
+		ops->ndo_do_ioctl(iface->dev, &atreq, SIOCGIFADDR);
 		if (iface->address.s_net != htons(sa->sat_addr.s_net) ||
 		    iface->address.s_node != sa->sat_addr.s_node)
 			iface->status |= ATIF_PROBE_FAIL;
diff --git a/net/bluetooth/bnep/bnep.h b/net/bluetooth/bnep/bnep.h
index d20f8a40f36e..0d9e506f5d5a 100644
--- a/net/bluetooth/bnep/bnep.h
+++ b/net/bluetooth/bnep/bnep.h
@@ -165,7 +165,6 @@ struct bnep_session {
 
 	struct socket    *sock;
 	struct net_device *dev;
-	struct net_device_stats stats;
 };
 
 void bnep_net_setup(struct net_device *dev);
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 70fea8bdb4e5..52a6ce0d772b 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -306,7 +306,7 @@ static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
 	struct sk_buff *nskb;
 	u8 type;
 
-	s->stats.rx_bytes += skb->len;
+	dev->stats.rx_bytes += skb->len;
 
 	type = *(u8 *) skb->data; skb_pull(skb, 1);
 
@@ -343,7 +343,7 @@ static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
 	 * may not be modified and because of the alignment requirements. */
 	nskb = alloc_skb(2 + ETH_HLEN + skb->len, GFP_KERNEL);
 	if (!nskb) {
-		s->stats.rx_dropped++;
+		dev->stats.rx_dropped++;
 		kfree_skb(skb);
 		return -ENOMEM;
 	}
@@ -378,14 +378,14 @@ static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
 	skb_copy_from_linear_data(skb, __skb_put(nskb, skb->len), skb->len);
 	kfree_skb(skb);
 
-	s->stats.rx_packets++;
+	dev->stats.rx_packets++;
 	nskb->ip_summed = CHECKSUM_NONE;
 	nskb->protocol = eth_type_trans(nskb, dev);
 	netif_rx_ni(nskb);
 	return 0;
 
 badframe:
-	s->stats.rx_errors++;
+	dev->stats.rx_errors++;
 	kfree_skb(skb);
 	return 0;
 }
@@ -448,8 +448,8 @@ send:
 	kfree_skb(skb);
 
 	if (len > 0) {
-		s->stats.tx_bytes += len;
-		s->stats.tx_packets++;
+		s->dev->stats.tx_bytes += len;
+		s->dev->stats.tx_packets++;
 		return 0;
 	}
 
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index f897da6e0444..d7a0e9722def 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -55,12 +55,6 @@ static int bnep_net_close(struct net_device *dev)
 	return 0;
 }
 
-static struct net_device_stats *bnep_net_get_stats(struct net_device *dev)
-{
-	struct bnep_session *s = netdev_priv(dev);
-	return &s->stats;
-}
-
 static void bnep_net_set_mc_list(struct net_device *dev)
 {
 #ifdef CONFIG_BT_BNEP_MC_FILTER
@@ -128,11 +122,6 @@ static void bnep_net_timeout(struct net_device *dev)
 	netif_wake_queue(dev);
 }
 
-static int bnep_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
-	return -EINVAL;
-}
-
 #ifdef CONFIG_BT_BNEP_MC_FILTER
 static inline int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s)
 {
@@ -217,6 +206,18 @@ static int bnep_net_xmit(struct sk_buff *skb, struct net_device *dev)
 	return 0;
 }
 
+static const struct net_device_ops bnep_netdev_ops = {
+	.ndo_open            = bnep_net_open,
+	.ndo_stop            = bnep_net_close,
+	.ndo_start_xmit	     = bnep_net_xmit,
+	.ndo_validate_addr   = eth_validate_addr,
+	.ndo_set_multicast_list = bnep_net_set_mc_list,
+	.ndo_set_mac_address = bnep_net_set_mac_addr,
+	.ndo_tx_timeout      = bnep_net_timeout,
+	.ndo_change_mtu	     = eth_change_mtu,
+
+};
+
 void bnep_net_setup(struct net_device *dev)
 {
 
@@ -224,15 +225,7 @@ void bnep_net_setup(struct net_device *dev)
 	dev->addr_len = ETH_ALEN;
 
 	ether_setup(dev);
-
-	dev->open            = bnep_net_open;
-	dev->stop            = bnep_net_close;
-	dev->hard_start_xmit = bnep_net_xmit;
-	dev->get_stats       = bnep_net_get_stats;
-	dev->do_ioctl        = bnep_net_ioctl;
-	dev->set_mac_address = bnep_net_set_mac_addr;
-	dev->set_multicast_list = bnep_net_set_mc_list;
+	dev->netdev_ops = &bnep_netdev_ops;
 
 	dev->watchdog_timeo  = HZ * 2;
-	dev->tx_timeout      = bnep_net_timeout;
 }
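
The bnep change above follows the same pattern as the dsa, phonet and teql diffs later in this series: the per-device function pointers (dev->open, dev->stop, dev->hard_start_xmit, ...) move into one shared, const net_device_ops table. A minimal sketch of the conversion for a hypothetical driver (all mydrv_* names are placeholders):

/* Before: callbacks assigned one by one on each net_device:
 *	dev->open            = mydrv_open;
 *	dev->stop            = mydrv_close;
 *	dev->hard_start_xmit = mydrv_xmit;
 *
 * After: a single const ops table shared by every instance.
 */
static const struct net_device_ops mydrv_netdev_ops = {
	.ndo_open	= mydrv_open,
	.ndo_stop	= mydrv_close,
	.ndo_start_xmit	= mydrv_xmit,
};

static void mydrv_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->netdev_ops = &mydrv_netdev_ops;	/* replaces the assignments above */
}

Besides shrinking struct net_device itself, the const table can live in read-only memory and is shared by all devices the driver creates.
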
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index fa108c46e851..8a8743d7d6e7 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -85,12 +85,13 @@ static inline int ebt_do_match (struct ebt_entry_match *m,
 static inline int ebt_dev_check(char *entry, const struct net_device *device)
 {
 	int i = 0;
-	const char *devname = device->name;
+	const char *devname;
 
 	if (*entry == '\0')
 		return 0;
 	if (!device)
 		return 1;
+	devname = device->name;
 	/* 1 is the wildcard token */
 	while (entry[i] != '\0' && entry[i] != 1 && entry[i] == devname[i])
 		i++;
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 3dadb338addd..fa417ca6cbe6 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -414,6 +414,12 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
  * The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can
  * filter for error frames (CAN_ERR_FLAG bit set in mask).
  *
+ * The provided pointer to the sk_buff is guaranteed to be valid as long as
+ * the callback function is running. The callback function must *not* free
+ * the given sk_buff while processing its task. When the given sk_buff is
+ * needed after the end of the callback function it must be cloned inside
+ * the callback function with skb_clone().
+ *
  * Return:
  *  0 on success
  * -ENOMEM on missing cache mem to create subscription entry
@@ -569,13 +575,8 @@ EXPORT_SYMBOL(can_rx_unregister);
 
 static inline void deliver(struct sk_buff *skb, struct receiver *r)
 {
-	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
-
-	if (clone) {
-		clone->sk = skb->sk;
-		r->func(clone, r->data);
-		r->matches++;
-	}
+	r->func(skb, r->data);
+	r->matches++;
 }
 
 static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
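 
The deliver() change shifts the cloning burden from af_can to the individual receivers: callbacks now get the shared skb directly and, per the new comment above can_rx_register(), must clone it themselves if they need it beyond the callback (as net/can/raw.c does below, while net/can/bcm.c no longer frees it). A sketch of a conforming callback; my_can_rcv and its private data are purely illustrative:

/* Callback registered via can_rx_register(); the skb is shared and
 * only valid while the callback runs. */
static void my_can_rcv(struct sk_buff *skb, void *data)
{
	struct sk_buff *clone;

	/* Keep a private copy; never free or queue the original skb. */
	clone = skb_clone(skb, GFP_ATOMIC);
	if (!clone)
		return;

	/* ... enqueue or process clone, freeing it when done ... */
	kfree_skb(clone);
}
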
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 6248ae2502c7..1649c8ab2c2f 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -633,7 +633,7 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data)
 	hrtimer_cancel(&op->timer);
 
 	if (op->can_id != rxframe->can_id)
-		goto rx_freeskb;
+		return;
 
 	/* save rx timestamp */
 	op->rx_stamp = skb->tstamp;
@@ -645,19 +645,19 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data)
 	if (op->flags & RX_RTR_FRAME) {
 		/* send reply for RTR-request (placed in op->frames[0]) */
 		bcm_can_tx(op);
-		goto rx_freeskb;
+		return;
 	}
 
 	if (op->flags & RX_FILTER_ID) {
 		/* the easiest case */
 		bcm_rx_update_and_send(op, &op->last_frames[0], rxframe);
-		goto rx_freeskb_starttimer;
+		goto rx_starttimer;
 	}
 
 	if (op->nframes == 1) {
 		/* simple compare with index 0 */
 		bcm_rx_cmp_to_index(op, 0, rxframe);
-		goto rx_freeskb_starttimer;
+		goto rx_starttimer;
 	}
 
 	if (op->nframes > 1) {
@@ -678,10 +678,8 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data)
 		}
 	}
 
-rx_freeskb_starttimer:
+rx_starttimer:
 	bcm_rx_starttimer(op);
-rx_freeskb:
-	kfree_skb(skb);
 }
 
 /*
diff --git a/net/can/raw.c b/net/can/raw.c
index 27aab63df467..0703cba4bf9f 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -99,13 +99,14 @@ static void raw_rcv(struct sk_buff *skb, void *data)
 	struct raw_sock *ro = raw_sk(sk);
 	struct sockaddr_can *addr;
 
-	if (!ro->recv_own_msgs) {
-		/* check the received tx sock reference */
-		if (skb->sk == sk) {
-			kfree_skb(skb);
-			return;
-		}
-	}
+	/* check the received tx sock reference */
+	if (!ro->recv_own_msgs && skb->sk == sk)
+		return;
+
+	/* clone the given skb to be able to enqueue it into the rcv queue */
+	skb = skb_clone(skb, GFP_ATOMIC);
+	if (!skb)
+		return;
 
 	/*
 	 * Put the datagram to the queue so that raw_recvmsg() can
diff --git a/net/core/dev.c b/net/core/dev.c
index 382df6c09eec..b715a55cccc4 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -170,25 +170,6 @@ static DEFINE_SPINLOCK(ptype_lock);
 static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
 static struct list_head ptype_all __read_mostly;	/* Taps */
 
-#ifdef CONFIG_NET_DMA
-struct net_dma {
-	struct dma_client client;
-	spinlock_t lock;
-	cpumask_t channel_mask;
-	struct dma_chan **channels;
-};
-
-static enum dma_state_client
-netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
-	enum dma_state state);
-
-static struct net_dma net_dma = {
-	.client = {
-		.event_callback = netdev_dma_event,
-	},
-};
-#endif
-
 /*
  * The @dev_base_head list is protected by @dev_base_lock and the rtnl
  * semaphore.
@@ -1107,6 +1088,11 @@ int dev_open(struct net_device *dev)
 	dev->flags |= IFF_UP;
 
 	/*
+	 *	Enable NET_DMA
+	 */
+	dmaengine_get();
+
+	/*
 	 *	Initialize multicasting status
 	 */
 	dev_set_rx_mode(dev);
@@ -1183,6 +1169,11 @@ int dev_close(struct net_device *dev)
 	 */
 	call_netdevice_notifiers(NETDEV_DOWN, dev);
 
+	/*
+	 *	Shutdown NET_DMA
+	 */
+	dmaengine_put();
+
 	return 0;
 }
 
@@ -2387,7 +2378,7 @@ void napi_gro_flush(struct napi_struct *napi)
 }
 EXPORT_SYMBOL(napi_gro_flush);
 
-static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
+int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
 	struct sk_buff **pp = NULL;
 	struct packet_type *ptype;
@@ -2417,11 +2408,14 @@ static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 
 	for (p = napi->gro_list; p; p = p->next) {
 		count++;
-		NAPI_GRO_CB(p)->same_flow =
-			p->mac_len == mac_len &&
-			!memcmp(skb_mac_header(p), skb_mac_header(skb),
-				mac_len);
-		NAPI_GRO_CB(p)->flush = 0;
+
+		if (!NAPI_GRO_CB(p)->same_flow)
+			continue;
+
+		if (p->mac_len != mac_len ||
+		    memcmp(skb_mac_header(p), skb_mac_header(skb),
+			   mac_len))
+			NAPI_GRO_CB(p)->same_flow = 0;
 	}
 
 	pp = ptype->gro_receive(&napi->gro_list, skb);
@@ -2463,6 +2457,19 @@ ok:
 normal:
 	return -1;
 }
+EXPORT_SYMBOL(dev_gro_receive);
+
+static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
+{
+	struct sk_buff *p;
+
+	for (p = napi->gro_list; p; p = p->next) {
+		NAPI_GRO_CB(p)->same_flow = 1;
+		NAPI_GRO_CB(p)->flush = 0;
+	}
+
+	return dev_gro_receive(napi, skb);
+}
 
 int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
@@ -2479,11 +2486,26 @@ int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(napi_gro_receive);
 
-int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info)
+void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
+{
+	skb_shinfo(skb)->nr_frags = 0;
+
+	skb->len -= skb->data_len;
+	skb->truesize -= skb->data_len;
+	skb->data_len = 0;
+
+	__skb_pull(skb, skb_headlen(skb));
+	skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
+
+	napi->skb = skb;
+}
+EXPORT_SYMBOL(napi_reuse_skb);
+
+struct sk_buff *napi_fraginfo_skb(struct napi_struct *napi,
+				  struct napi_gro_fraginfo *info)
 {
 	struct net_device *dev = napi->dev;
 	struct sk_buff *skb = napi->skb;
-	int err = NET_RX_DROP;
 
 	napi->skb = NULL;
 
@@ -2503,16 +2525,31 @@ int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info)
 	skb->len += info->len;
 	skb->truesize += info->len;
 
-	if (!pskb_may_pull(skb, ETH_HLEN))
-		goto reuse;
-
-	err = NET_RX_SUCCESS;
+	if (!pskb_may_pull(skb, ETH_HLEN)) {
+		napi_reuse_skb(napi, skb);
+		goto out;
+	}
 
 	skb->protocol = eth_type_trans(skb, dev);
 
 	skb->ip_summed = info->ip_summed;
 	skb->csum = info->csum;
 
+out:
+	return skb;
+}
+EXPORT_SYMBOL(napi_fraginfo_skb);
+
+int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info)
+{
+	struct sk_buff *skb = napi_fraginfo_skb(napi, info);
+	int err = NET_RX_DROP;
+
+	if (!skb)
+		goto out;
+
+	err = NET_RX_SUCCESS;
+
 	switch (__napi_gro_receive(napi, skb)) {
 	case -1:
 		return netif_receive_skb(skb);
@@ -2521,17 +2558,7 @@ int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info)
 		goto out;
 	}
 
-reuse:
-	skb_shinfo(skb)->nr_frags = 0;
-
-	skb->len -= skb->data_len;
-	skb->truesize -= skb->data_len;
-	skb->data_len = 0;
-
-	__skb_pull(skb, skb_headlen(skb));
-	skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
-
-	napi->skb = skb;
+	napi_reuse_skb(napi, skb);
 
 out:
 	return err;
@@ -2718,14 +2745,7 @@ out:
 	 * There may not be any more sk_buffs coming right now, so push
 	 * any pending DMA copies to hardware
 	 */
-	if (!cpus_empty(net_dma.channel_mask)) {
-		int chan_idx;
-		for_each_cpu_mask_nr(chan_idx, net_dma.channel_mask) {
-			struct dma_chan *chan = net_dma.channels[chan_idx];
-			if (chan)
-				dma_async_memcpy_issue_pending(chan);
-		}
-	}
+	dma_issue_pending_all();
 #endif
 
 	return;
@@ -4916,122 +4936,6 @@ static int dev_cpu_callback(struct notifier_block *nfb,
 	return NOTIFY_OK;
 }
 
-#ifdef CONFIG_NET_DMA
-/**
- * net_dma_rebalance - try to maintain one DMA channel per CPU
- * @net_dma: DMA client and associated data (lock, channels, channel_mask)
- *
- * This is called when the number of channels allocated to the net_dma client
- * changes.  The net_dma client tries to have one DMA channel per CPU.
- */
-
-static void net_dma_rebalance(struct net_dma *net_dma)
-{
-	unsigned int cpu, i, n, chan_idx;
-	struct dma_chan *chan;
-
-	if (cpus_empty(net_dma->channel_mask)) {
-		for_each_online_cpu(cpu)
-			rcu_assign_pointer(per_cpu(softnet_data, cpu).net_dma, NULL);
-		return;
-	}
-
-	i = 0;
-	cpu = first_cpu(cpu_online_map);
-
-	for_each_cpu_mask_nr(chan_idx, net_dma->channel_mask) {
-		chan = net_dma->channels[chan_idx];
-
-		n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask))
-		   + (i < (num_online_cpus() %
-			cpus_weight(net_dma->channel_mask)) ? 1 : 0));
-
-		while(n) {
-			per_cpu(softnet_data, cpu).net_dma = chan;
-			cpu = next_cpu(cpu, cpu_online_map);
-			n--;
-		}
-		i++;
-	}
-}
-
-/**
- * netdev_dma_event - event callback for the net_dma_client
- * @client: should always be net_dma_client
- * @chan: DMA channel for the event
- * @state: DMA state to be handled
- */
-static enum dma_state_client
-netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
-	enum dma_state state)
-{
-	int i, found = 0, pos = -1;
-	struct net_dma *net_dma =
-		container_of(client, struct net_dma, client);
-	enum dma_state_client ack = DMA_DUP; /* default: take no action */
-
-	spin_lock(&net_dma->lock);
-	switch (state) {
-	case DMA_RESOURCE_AVAILABLE:
-		for (i = 0; i < nr_cpu_ids; i++)
-			if (net_dma->channels[i] == chan) {
-				found = 1;
-				break;
-			} else if (net_dma->channels[i] == NULL && pos < 0)
-				pos = i;
-
-		if (!found && pos >= 0) {
-			ack = DMA_ACK;
-			net_dma->channels[pos] = chan;
-			cpu_set(pos, net_dma->channel_mask);
-			net_dma_rebalance(net_dma);
-		}
-		break;
-	case DMA_RESOURCE_REMOVED:
-		for (i = 0; i < nr_cpu_ids; i++)
-			if (net_dma->channels[i] == chan) {
-				found = 1;
-				pos = i;
-				break;
-			}
-
-		if (found) {
-			ack = DMA_ACK;
-			cpu_clear(pos, net_dma->channel_mask);
-			net_dma->channels[i] = NULL;
-			net_dma_rebalance(net_dma);
-		}
-		break;
-	default:
-		break;
-	}
-	spin_unlock(&net_dma->lock);
-
-	return ack;
-}
-
-/**
- * netdev_dma_register - register the networking subsystem as a DMA client
- */
-static int __init netdev_dma_register(void)
-{
-	net_dma.channels = kzalloc(nr_cpu_ids * sizeof(struct net_dma),
-								GFP_KERNEL);
-	if (unlikely(!net_dma.channels)) {
-		printk(KERN_NOTICE
-				"netdev_dma: no memory for net_dma.channels\n");
-		return -ENOMEM;
-	}
-	spin_lock_init(&net_dma.lock);
-	dma_cap_set(DMA_MEMCPY, net_dma.client.cap_mask);
-	dma_async_client_register(&net_dma.client);
-	dma_async_client_chan_request(&net_dma.client);
-	return 0;
-}
-
-#else
-static int __init netdev_dma_register(void) { return -ENODEV; }
-#endif /* CONFIG_NET_DMA */
-
 
 /**
  * netdev_increment_features - increment feature set by one
@@ -5251,8 +5155,6 @@ static int __init net_dev_init(void)
 	if (register_pernet_device(&default_device_ops))
 		goto out;
 
-	netdev_dma_register();
-
 	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
 	open_softirq(NET_RX_SOFTIRQ, net_rx_action);
 
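
The NET_DMA portions of this diff drop the old dma_client registration and per-CPU channel rebalancing in favor of the simpler opportunistic dmaengine API: take a subsystem-wide reference while any device is up, look a channel up only at the moment of use, and flush everything with one call. A condensed sketch of that consumer pattern, mirroring dev_open()/dev_close()/net_rx_action() above and the tcp_recvmsg() changes below (the my_subsys_* wrappers are illustrative, the dmaengine calls are real):

#include <linux/dmaengine.h>

static void my_subsys_start(void)
{
	dmaengine_get();	/* as in dev_open(): pin channels for opportunistic use */
}

static void my_subsys_copy_work(void)
{
	struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);

	if (chan) {
		/* ... issue async memcpy descriptors on chan ... */
	}

	dma_issue_pending_all();	/* replaces the per-channel issue loop */
}

static void my_subsys_stop(void)
{
	dmaengine_put();	/* as in dev_close() */
}
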
diff --git a/net/dccp/ccids/Kconfig b/net/dccp/ccids/Kconfig
index b28bf962edc3..4b5db44970aa 100644
--- a/net/dccp/ccids/Kconfig
+++ b/net/dccp/ccids/Kconfig
@@ -29,7 +29,7 @@ config IP_DCCP_CCID3
 	  http://www.ietf.org/rfc/rfc4342.txt
 
 	  The TFRC congestion control algorithms were initially described in
-	  RFC 5448.
+	  RFC 5348.
 
 	  This text was extracted from RFC 4340 (sec. 10.2),
 	  http://www.ietf.org/rfc/rfc4340.txt
diff --git a/net/dccp/ccids/lib/tfrc.c b/net/dccp/ccids/lib/tfrc.c
index 60c412ccfeef..4902029854d8 100644
--- a/net/dccp/ccids/lib/tfrc.c
+++ b/net/dccp/ccids/lib/tfrc.c
@@ -36,7 +36,7 @@ out:
 	return rc;
 }
 
-void __exit tfrc_lib_exit(void)
+void tfrc_lib_exit(void)
 {
 	tfrc_rx_packet_history_exit();
 	tfrc_tx_packet_history_exit();
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index a3a410d20da0..a68fd79e9eca 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -286,6 +286,42 @@ static const struct ethtool_ops dsa_slave_ethtool_ops = {
 	.get_sset_count		= dsa_slave_get_sset_count,
 };
 
+#ifdef CONFIG_NET_DSA_TAG_DSA
+static const struct net_device_ops dsa_netdev_ops = {
+	.ndo_open		= dsa_slave_open,
+	.ndo_stop		= dsa_slave_close,
+	.ndo_start_xmit		= dsa_xmit,
+	.ndo_change_rx_flags	= dsa_slave_change_rx_flags,
+	.ndo_set_rx_mode	= dsa_slave_set_rx_mode,
+	.ndo_set_multicast_list = dsa_slave_set_rx_mode,
+	.ndo_set_mac_address	= dsa_slave_set_mac_address,
+	.ndo_do_ioctl		= dsa_slave_ioctl,
+};
+#endif
+#ifdef CONFIG_NET_DSA_TAG_EDSA
+static const struct net_device_ops edsa_netdev_ops = {
+	.ndo_open		= dsa_slave_open,
+	.ndo_stop		= dsa_slave_close,
+	.ndo_start_xmit		= edsa_xmit,
+	.ndo_change_rx_flags	= dsa_slave_change_rx_flags,
+	.ndo_set_rx_mode	= dsa_slave_set_rx_mode,
+	.ndo_set_multicast_list = dsa_slave_set_rx_mode,
+	.ndo_set_mac_address	= dsa_slave_set_mac_address,
+	.ndo_do_ioctl		= dsa_slave_ioctl,
+};
+#endif
+#ifdef CONFIG_NET_DSA_TAG_TRAILER
+static const struct net_device_ops trailer_netdev_ops = {
+	.ndo_open		= dsa_slave_open,
+	.ndo_stop		= dsa_slave_close,
+	.ndo_start_xmit		= trailer_xmit,
+	.ndo_change_rx_flags	= dsa_slave_change_rx_flags,
+	.ndo_set_rx_mode	= dsa_slave_set_rx_mode,
+	.ndo_set_multicast_list = dsa_slave_set_rx_mode,
+	.ndo_set_mac_address	= dsa_slave_set_mac_address,
+	.ndo_do_ioctl		= dsa_slave_ioctl,
+};
+#endif
 
 /* slave device setup *******************************************************/
 struct net_device *
@@ -306,32 +342,27 @@ dsa_slave_create(struct dsa_switch *ds, struct device *parent,
 	SET_ETHTOOL_OPS(slave_dev, &dsa_slave_ethtool_ops);
 	memcpy(slave_dev->dev_addr, master->dev_addr, ETH_ALEN);
 	slave_dev->tx_queue_len = 0;
+
 	switch (ds->tag_protocol) {
 #ifdef CONFIG_NET_DSA_TAG_DSA
 	case htons(ETH_P_DSA):
-		slave_dev->hard_start_xmit = dsa_xmit;
+		slave_dev->netdev_ops = &dsa_netdev_ops;
 		break;
 #endif
 #ifdef CONFIG_NET_DSA_TAG_EDSA
 	case htons(ETH_P_EDSA):
-		slave_dev->hard_start_xmit = edsa_xmit;
+		slave_dev->netdev_ops = &edsa_netdev_ops;
 		break;
 #endif
 #ifdef CONFIG_NET_DSA_TAG_TRAILER
 	case htons(ETH_P_TRAILER):
-		slave_dev->hard_start_xmit = trailer_xmit;
+		slave_dev->netdev_ops = &trailer_netdev_ops;
 		break;
 #endif
 	default:
 		BUG();
 	}
-	slave_dev->open = dsa_slave_open;
-	slave_dev->stop = dsa_slave_close;
-	slave_dev->change_rx_flags = dsa_slave_change_rx_flags;
-	slave_dev->set_rx_mode = dsa_slave_set_rx_mode;
-	slave_dev->set_multicast_list = dsa_slave_set_rx_mode;
-	slave_dev->set_mac_address = dsa_slave_set_mac_address;
-	slave_dev->do_ioctl = dsa_slave_ioctl;
+
 	SET_NETDEV_DEV(slave_dev, parent);
 	slave_dev->vlan_features = master->vlan_features;
 
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 35bcddf8a932..ce572f9dff02 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1313,7 +1313,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 	if ((available < target) &&
 	    (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
 	    !sysctl_tcp_low_latency &&
-	    __get_cpu_var(softnet_data).net_dma) {
+	    dma_find_channel(DMA_MEMCPY)) {
 		preempt_enable_no_resched();
 		tp->ucopy.pinned_list =
 			dma_pin_iovec_pages(msg->msg_iov, len);
@@ -1523,7 +1523,7 @@ do_prequeue:
 		if (!(flags & MSG_TRUNC)) {
 #ifdef CONFIG_NET_DMA
 			if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-				tp->ucopy.dma_chan = get_softnet_dma();
+				tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
 
 			if (tp->ucopy.dma_chan) {
 				tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
@@ -1628,7 +1628,6 @@ skip_copy:
 
 		/* Safe to free early-copied skbs now */
 		__skb_queue_purge(&sk->sk_async_wait_queue);
-		dma_chan_put(tp->ucopy.dma_chan);
 		tp->ucopy.dma_chan = NULL;
 	}
 	if (tp->ucopy.pinned_list) {
@@ -2542,6 +2541,7 @@ out:
 
 	return pp;
 }
+EXPORT_SYMBOL(tcp_gro_receive);
 
 int tcp_gro_complete(struct sk_buff *skb)
 {
@@ -2558,6 +2558,7 @@ int tcp_gro_complete(struct sk_buff *skb)
 
 	return 0;
 }
+EXPORT_SYMBOL(tcp_gro_complete);
 
 #ifdef CONFIG_TCP_MD5SIG
 static unsigned long tcp_md5sig_users;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 99b7ecbe8893..a6961d75c7ea 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5005,7 +5005,7 @@ static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb,
 		return 0;
 
 	if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-		tp->ucopy.dma_chan = get_softnet_dma();
+		tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
 
 	if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) {
 
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 9d839fa9331e..19d7b429a262 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1594,7 +1594,7 @@ process:
 #ifdef CONFIG_NET_DMA
 		struct tcp_sock *tp = tcp_sk(sk);
 		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-			tp->ucopy.dma_chan = get_softnet_dma();
+			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
 		if (tp->ucopy.dma_chan)
 			ret = tcp_v4_do_rcv(sk, skb);
 		else
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 437b750b98fd..94f74f5b0cbf 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -672,8 +672,7 @@ int ipv6_opt_accepted(struct sock *sk, struct sk_buff *skb)
 
 EXPORT_SYMBOL_GPL(ipv6_opt_accepted);
 
-static struct inet6_protocol *ipv6_gso_pull_exthdrs(struct sk_buff *skb,
-						    int proto)
+static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto)
 {
 	struct inet6_protocol *ops = NULL;
 
@@ -704,7 +703,7 @@ static struct inet6_protocol *ipv6_gso_pull_exthdrs(struct sk_buff *skb,
 		__skb_pull(skb, len);
 	}
 
-	return ops;
+	return proto;
 }
 
 static int ipv6_gso_send_check(struct sk_buff *skb)
@@ -721,7 +720,9 @@ static int ipv6_gso_send_check(struct sk_buff *skb)
 	err = -EPROTONOSUPPORT;
 
 	rcu_read_lock();
-	ops = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);
+	ops = rcu_dereference(inet6_protos[
+		ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr)]);
+
 	if (likely(ops && ops->gso_send_check)) {
 		skb_reset_transport_header(skb);
 		err = ops->gso_send_check(skb);
@@ -757,7 +758,9 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features)
 	segs = ERR_PTR(-EPROTONOSUPPORT);
 
 	rcu_read_lock();
-	ops = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);
+	ops = rcu_dereference(inet6_protos[
+		ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr)]);
+
 	if (likely(ops && ops->gso_segment)) {
 		skb_reset_transport_header(skb);
 		segs = ops->gso_segment(skb, features);
@@ -777,11 +780,105 @@ out:
 	return segs;
 }
 
+struct ipv6_gro_cb {
+	struct napi_gro_cb napi;
+	int proto;
+};
+
+#define IPV6_GRO_CB(skb) ((struct ipv6_gro_cb *)(skb)->cb)
+
+static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
+					 struct sk_buff *skb)
+{
+	struct inet6_protocol *ops;
+	struct sk_buff **pp = NULL;
+	struct sk_buff *p;
+	struct ipv6hdr *iph;
+	unsigned int nlen;
+	int flush = 1;
+	int proto;
+
+	if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
+		goto out;
+
+	iph = ipv6_hdr(skb);
+	__skb_pull(skb, sizeof(*iph));
+
+	flush += ntohs(iph->payload_len) != skb->len;
+
+	rcu_read_lock();
+	proto = ipv6_gso_pull_exthdrs(skb, iph->nexthdr);
+	IPV6_GRO_CB(skb)->proto = proto;
+	ops = rcu_dereference(inet6_protos[proto]);
+	if (!ops || !ops->gro_receive)
+		goto out_unlock;
+
+	flush--;
+	skb_reset_transport_header(skb);
+	nlen = skb_network_header_len(skb);
+
+	for (p = *head; p; p = p->next) {
+		struct ipv6hdr *iph2;
+
+		if (!NAPI_GRO_CB(p)->same_flow)
+			continue;
+
+		iph2 = ipv6_hdr(p);
+
+		/* All fields must match except length. */
+		if (nlen != skb_network_header_len(p) ||
+		    memcmp(iph, iph2, offsetof(struct ipv6hdr, payload_len)) ||
+		    memcmp(&iph->nexthdr, &iph2->nexthdr,
+			   nlen - offsetof(struct ipv6hdr, nexthdr))) {
+			NAPI_GRO_CB(p)->same_flow = 0;
+			continue;
+		}
+
+		NAPI_GRO_CB(p)->flush |= flush;
+	}
+
+	NAPI_GRO_CB(skb)->flush |= flush;
+
+	pp = ops->gro_receive(head, skb);
+
+out_unlock:
+	rcu_read_unlock();
+
+out:
+	NAPI_GRO_CB(skb)->flush |= flush;
+
+	return pp;
+}
+
+static int ipv6_gro_complete(struct sk_buff *skb)
+{
+	struct inet6_protocol *ops;
+	struct ipv6hdr *iph = ipv6_hdr(skb);
+	int err = -ENOSYS;
+
+	iph->payload_len = htons(skb->len - skb_network_offset(skb) -
+				 sizeof(*iph));
+
+	rcu_read_lock();
+	ops = rcu_dereference(inet6_protos[IPV6_GRO_CB(skb)->proto]);
+	if (WARN_ON(!ops || !ops->gro_complete))
+		goto out_unlock;
+
+	err = ops->gro_complete(skb);
+
+out_unlock:
+	rcu_read_unlock();
+
+	return err;
+}
+
 static struct packet_type ipv6_packet_type = {
 	.type = __constant_htons(ETH_P_IPV6),
 	.func = ipv6_rcv,
 	.gso_send_check = ipv6_gso_send_check,
 	.gso_segment = ipv6_gso_segment,
+	.gro_receive = ipv6_gro_receive,
+	.gro_complete = ipv6_gro_complete,
 };
 
 static int __init ipv6_packet_init(void)
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 76f06b94ab9f..c4a59824ac2c 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2752,7 +2752,7 @@ int __init ip6_route_init(void)
 		kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
 				  SLAB_HWCACHE_ALIGN, NULL);
 	if (!ip6_dst_ops_template.kmem_cachep)
-		goto out;;
+		goto out;
 
 	ret = register_pernet_subsys(&ip6_route_net_ops);
 	if (ret)
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
index 9048fe7e7ea7..a031034720b4 100644
--- a/net/ipv6/sysctl_net_ipv6.c
+++ b/net/ipv6/sysctl_net_ipv6.c
@@ -128,7 +128,7 @@ static struct ctl_table_header *ip6_header;
 
 int ipv6_sysctl_register(void)
 {
-	int err = -ENOMEM;;
+	int err = -ENOMEM;
 
 	ip6_header = register_net_sysctl_rotable(net_ipv6_ctl_path, ipv6_table);
 	if (ip6_header == NULL)
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index e8b8337a8310..e5b85d45bee8 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -101,7 +101,7 @@ static void tcp_v6_hash(struct sock *sk)
 	}
 }
 
-static __inline__ __sum16 tcp_v6_check(struct tcphdr *th, int len,
+static __inline__ __sum16 tcp_v6_check(int len,
 				       struct in6_addr *saddr,
 				       struct in6_addr *daddr,
 				       __wsum base)
@@ -501,7 +501,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req)
 	if (skb) {
 		struct tcphdr *th = tcp_hdr(skb);
 
-		th->check = tcp_v6_check(th, skb->len,
+		th->check = tcp_v6_check(skb->len,
 					 &treq->loc_addr, &treq->rmt_addr,
 					 csum_partial(th, skb->len, skb->csum));
 
@@ -942,6 +942,41 @@ static int tcp_v6_gso_send_check(struct sk_buff *skb)
 	return 0;
 }
 
+struct sk_buff **tcp6_gro_receive(struct sk_buff **head, struct sk_buff *skb)
+{
+	struct ipv6hdr *iph = ipv6_hdr(skb);
+
+	switch (skb->ip_summed) {
+	case CHECKSUM_COMPLETE:
+		if (!tcp_v6_check(skb->len, &iph->saddr, &iph->daddr,
+				  skb->csum)) {
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+			break;
+		}
+
+		/* fall through */
+	case CHECKSUM_NONE:
+		NAPI_GRO_CB(skb)->flush = 1;
+		return NULL;
+	}
+
+	return tcp_gro_receive(head, skb);
+}
+EXPORT_SYMBOL(tcp6_gro_receive);
+
+int tcp6_gro_complete(struct sk_buff *skb)
+{
+	struct ipv6hdr *iph = ipv6_hdr(skb);
+	struct tcphdr *th = tcp_hdr(skb);
+
+	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
+				  &iph->saddr, &iph->daddr, 0);
+	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+
+	return tcp_gro_complete(skb);
+}
+EXPORT_SYMBOL(tcp6_gro_complete);
+
 static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
 				 u32 ts, struct tcp_md5sig_key *key, int rst)
 {
@@ -1429,14 +1464,14 @@ out:
 static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
 {
 	if (skb->ip_summed == CHECKSUM_COMPLETE) {
-		if (!tcp_v6_check(tcp_hdr(skb), skb->len, &ipv6_hdr(skb)->saddr,
+		if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
 				  &ipv6_hdr(skb)->daddr, skb->csum)) {
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
 			return 0;
 		}
 	}
 
-	skb->csum = ~csum_unfold(tcp_v6_check(tcp_hdr(skb), skb->len,
+	skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
 					      &ipv6_hdr(skb)->saddr,
 					      &ipv6_hdr(skb)->daddr, 0));
 
@@ -1640,7 +1675,7 @@ process:
 #ifdef CONFIG_NET_DMA
 		struct tcp_sock *tp = tcp_sk(sk);
 		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-			tp->ucopy.dma_chan = get_softnet_dma();
+			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
 		if (tp->ucopy.dma_chan)
 			ret = tcp_v6_do_rcv(sk, skb);
 		else
@@ -2062,6 +2097,8 @@ static struct inet6_protocol tcpv6_protocol = {
 	.err_handler	=	tcp_v6_err,
 	.gso_send_check	=	tcp_v6_gso_send_check,
 	.gso_segment	=	tcp_tso_segment,
+	.gro_receive	=	tcp6_gro_receive,
+	.gro_complete	=	tcp6_gro_complete,
 	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
 };
 
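
For these hooks to be reached from ipv6_gro_receive() in the af_inet6.c diff above, tcpv6_protocol has to sit in the inet6_protos[] array; that registration happens in tcp_ipv6.c's existing init path and is unchanged by this patch. A sketch of the shape it takes (the function name here is illustrative, inet6_add_protocol() is the real registration call):

static int __init my_tcpv6_register(void)
{
	/* Places tcpv6_protocol -- now carrying .gro_receive and
	 * .gro_complete -- into inet6_protos[IPPROTO_TCP], where
	 * ipv6_gro_receive() looks it up by nexthdr. */
	return inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
}
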
diff --git a/net/phonet/pep-gprs.c b/net/phonet/pep-gprs.c
index b0ceac2d6cd1..6a91a32a80c1 100644
--- a/net/phonet/pep-gprs.c
+++ b/net/phonet/pep-gprs.c
@@ -227,6 +227,13 @@ static int gprs_set_mtu(struct net_device *dev, int new_mtu)
 	return 0;
 }
 
+static const struct net_device_ops gprs_netdev_ops = {
+	.ndo_open	= gprs_open,
+	.ndo_stop	= gprs_close,
+	.ndo_start_xmit	= gprs_xmit,
+	.ndo_change_mtu	= gprs_set_mtu,
+};
+
 static void gprs_setup(struct net_device *dev)
 {
 	dev->features = NETIF_F_FRAGLIST;
@@ -237,11 +244,8 @@ static void gprs_setup(struct net_device *dev)
 	dev->addr_len		= 0;
 	dev->tx_queue_len	= 10;
 
+	dev->netdev_ops		= &gprs_netdev_ops;
 	dev->destructor		= free_netdev;
-	dev->open		= gprs_open;
-	dev->stop		= gprs_close;
-	dev->hard_start_xmit	= gprs_xmit; /* mandatory */
-	dev->change_mtu		= gprs_set_mtu;
 }
 
 /*
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index f3965df00559..33133d27b539 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -435,7 +435,7 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
 	int i;
 
 	q->perturb_timer.function = sfq_perturbation;
-	q->perturb_timer.data = (unsigned long)sch;;
+	q->perturb_timer.data = (unsigned long)sch;
 	init_timer_deferrable(&q->perturb_timer);
 
 	for (i = 0; i < SFQ_HASH_DIVISOR; i++)
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index cfc8e7caba62..ec697cebb63b 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -289,9 +289,9 @@ restart:
 
 	do {
 		struct net_device *slave = qdisc_dev(q);
-		struct netdev_queue *slave_txq;
+		struct netdev_queue *slave_txq = netdev_get_tx_queue(slave, 0);
+		const struct net_device_ops *slave_ops = slave->netdev_ops;
 
-		slave_txq = netdev_get_tx_queue(slave, 0);
 		if (slave_txq->qdisc_sleeping != q)
 			continue;
 		if (__netif_subqueue_stopped(slave, subq) ||
@@ -305,7 +305,7 @@ restart:
 		if (__netif_tx_trylock(slave_txq)) {
 			if (!netif_tx_queue_stopped(slave_txq) &&
 			    !netif_tx_queue_frozen(slave_txq) &&
-			    slave->hard_start_xmit(skb, slave) == 0) {
+			    slave_ops->ndo_start_xmit(skb, slave) == 0) {
 				__netif_tx_unlock(slave_txq);
 				master->slaves = NEXT_SLAVE(q);
 				netif_wake_queue(dev);
@@ -420,6 +420,14 @@ static int teql_master_mtu(struct net_device *dev, int new_mtu)
 	return 0;
 }
 
+static const struct net_device_ops teql_netdev_ops = {
+	.ndo_open	= teql_master_open,
+	.ndo_stop	= teql_master_close,
+	.ndo_start_xmit	= teql_master_xmit,
+	.ndo_get_stats	= teql_master_stats,
+	.ndo_change_mtu	= teql_master_mtu,
+};
+
 static __init void teql_master_setup(struct net_device *dev)
 {
 	struct teql_master *master = netdev_priv(dev);
@@ -436,11 +444,7 @@ static __init void teql_master_setup(struct net_device *dev)
 	ops->destroy	= teql_destroy;
 	ops->owner	= THIS_MODULE;
 
-	dev->open		= teql_master_open;
-	dev->hard_start_xmit	= teql_master_xmit;
-	dev->stop		= teql_master_close;
-	dev->get_stats		= teql_master_stats;
-	dev->change_mtu		= teql_master_mtu;
+	dev->netdev_ops		= &teql_netdev_ops;
 	dev->type		= ARPHRD_VOID;
 	dev->mtu		= 1500;
 	dev->tx_queue_len	= 100;
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index 20c576f530fa..56935bbc1496 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -489,7 +489,7 @@ int sctp_auth_init_hmacs(struct sctp_endpoint *ep, gfp_t gfp)
 	return 0;
 
 out_err:
-	/* Clean up any successfull allocations */
+	/* Clean up any successful allocations */
 	sctp_auth_destroy_hmacs(ep->auth_hmacs);
 	return -ENOMEM;
 }
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
index 5aa024b99c55..2f2d731bc1c2 100644
--- a/net/tipc/bcast.h
+++ b/net/tipc/bcast.h
@@ -124,7 +124,7 @@ static inline int tipc_nmap_equal(struct tipc_node_map *nm_a, struct tipc_node_m
 static inline void tipc_nmap_diff(struct tipc_node_map *nm_a, struct tipc_node_map *nm_b,
 				  struct tipc_node_map *nm_diff)
 {
-	int stop = sizeof(nm_a->map) / sizeof(u32);
+	int stop = ARRAY_SIZE(nm_a->map);
 	int w;
 	int b;
 	u32 map;
diff --git a/net/wimax/Kconfig b/net/wimax/Kconfig
index 0bdbb6928205..18495cdcd10d 100644
--- a/net/wimax/Kconfig
+++ b/net/wimax/Kconfig
@@ -1,9 +1,23 @@
 #
 # WiMAX LAN device configuration
 #
+# Note the ugly 'depends on' on WIMAX: that disallows RFKILL to be a
+# module if WIMAX is to be linked in. The WiMAX code is done in such a
+# way that it doesn't require an explicit dependency on RFKILL in
+# case an embedded system wants to rip it out.
+#
+# As well, enablement of the RFKILL code means we need the INPUT layer
+# support to inject events coming from hw rfkill switches. That
+# dependency could be killed if input.h provided appropriate means to
+# work when input is disabled.
+
+comment "WiMAX Wireless Broadband support requires CONFIG_INPUT enabled"
+	depends on INPUT = n && RFKILL != n
 
 menuconfig WIMAX
 	tristate "WiMAX Wireless Broadband support"
+	depends on (y && RFKILL != m) || m
+	depends on (INPUT && RFKILL != n) || RFKILL = n
 	help
 
 	  Select to configure support for devices that provide
diff --git a/net/wimax/id-table.c b/net/wimax/id-table.c
index d3b88558682c..5e685f7eda90 100644
--- a/net/wimax/id-table.c
+++ b/net/wimax/id-table.c
@@ -123,15 +123,17 @@ void wimax_id_table_rm(struct wimax_dev *wimax_dev)
 /*
  * Release the gennetlink family id / mapping table
  *
- * On debug, verify that the table is empty upon removal.
+ * On debug, verify that the table is empty upon removal. We want the
+ * code always compiled, to ensure it doesn't bit rot. It will be
+ * compiled out if CONFIG_BUG is disabled.
  */
 void wimax_id_table_release(void)
 {
+	struct wimax_dev *wimax_dev;
+
 #ifndef CONFIG_BUG
 	return;
 #endif
-	struct wimax_dev *wimax_dev;
-
 	spin_lock(&wimax_id_table_lock);
 	list_for_each_entry(wimax_dev, &wimax_id_table, id_table_node) {
 		printk(KERN_ERR "BUG: %s wimax_dev %p ifindex %d not cleared\n",
diff --git a/net/wimax/op-rfkill.c b/net/wimax/op-rfkill.c
index 8745bac173f1..2b75aee04217 100644
--- a/net/wimax/op-rfkill.c
+++ b/net/wimax/op-rfkill.c
@@ -71,7 +71,7 @@
 #define D_SUBMODULE op_rfkill
 #include "debug-levels.h"
 
-#ifdef CONFIG_RFKILL
+#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
 
 
 /**
diff --git a/net/wireless/wext.c b/net/wireless/wext.c
index e49a2d1ef1e4..cb6a5bb85d80 100644
--- a/net/wireless/wext.c
+++ b/net/wireless/wext.c
@@ -1055,8 +1055,8 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr,
 		return private(dev, iwr, cmd, info, handler);
 	}
 	/* Old driver API : call driver ioctl handler */
-	if (dev->do_ioctl)
-		return dev->do_ioctl(dev, ifr, cmd);
+	if (dev->netdev_ops->ndo_do_ioctl)
+		return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
 	return -EOPNOTSUPP;
 }
 
diff --git a/net/xfrm/xfrm_ipcomp.c b/net/xfrm/xfrm_ipcomp.c
index c609a4b98e15..42cd18391f46 100644
--- a/net/xfrm/xfrm_ipcomp.c
+++ b/net/xfrm/xfrm_ipcomp.c
@@ -63,7 +63,6 @@ static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb)
 	if (len > skb_tailroom(skb))
 		len = skb_tailroom(skb);
 
-	skb->truesize += len;
 	__skb_put(skb, len);
 
 	len += plen;