| | | |
|---|---|---|
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2009-01-08 17:25:41 -0500 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-01-08 17:25:41 -0500 |
| commit | 5fbbf5f648a9c4ef99276854f05b2255d1b004d3 (patch) | |
| tree | 59c9ae762c3df2800e894001b3de58c5f1972486 /net | |
| parent | ce279e6ec91c49f2c5f59f7492e19d39edbf8bbd (diff) | |
| parent | 56cf391a9462a4897ea660a6af3662dda5ae8c84 (diff) | |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (84 commits)
wimax: fix kernel-doc for debufs_dentry member of struct wimax_dev
net: convert pegasus driver to net_device_ops
bnx2x: Prevent eeprom set when driver is down
net: switch kaweth driver to netdevops
pcnet32: round off carrier watch timer
i2400m/usb: wrap USB power saving in #ifdef CONFIG_PM
wimax: testing for rfkill support should also test for CONFIG_RFKILL_MODULE
wimax: fix kconfig interactions with rfkill and input layers
wimax: fix '#ifndef CONFIG_BUG' layout to avoid warning
r6040: bump release number to 0.20
r6040: warn about MAC address being unset
r6040: check PHY status when bringing interface up
r6040: make printks consistent with DRV_NAME
gianfar: Fixup use of BUS_ID_SIZE
mlx4_en: Returning real Max in get_ringparam
mlx4_en: Consider inline packets on completion
netdev: bfin_mac: enable bfin_mac net dev driver for BF51x
qeth: convert to net_device_ops
vlan: add neigh_setup
dm9601: warn on invalid mac address
...
Diffstat (limited to 'net')
-rw-r--r-- | net/8021q/vlan_core.c | 111
-rw-r--r-- | net/8021q/vlan_dev.c | 14
-rw-r--r-- | net/appletalk/aarp.c | 5
-rw-r--r-- | net/bluetooth/bnep/bnep.h | 1
-rw-r--r-- | net/bluetooth/bnep/core.c | 12
-rw-r--r-- | net/bluetooth/bnep/netdev.c | 33
-rw-r--r-- | net/can/af_can.c | 15
-rw-r--r-- | net/can/bcm.c | 12
-rw-r--r-- | net/can/raw.c | 15
-rw-r--r-- | net/core/dev.c | 82
-rw-r--r-- | net/dsa/slave.c | 51
-rw-r--r-- | net/ipv4/tcp.c | 2
-rw-r--r-- | net/ipv6/af_inet6.c | 107
-rw-r--r-- | net/ipv6/tcp_ipv6.c | 45
-rw-r--r-- | net/phonet/pep-gprs.c | 12
-rw-r--r-- | net/sched/sch_teql.c | 20
-rw-r--r-- | net/wimax/Kconfig | 14
-rw-r--r-- | net/wimax/id-table.c | 8
-rw-r--r-- | net/wimax/op-rfkill.c | 2
-rw-r--r-- | net/wireless/wext.c | 4
20 files changed, 432 insertions, 133 deletions
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index dd86a1dc4cd0..6c1323940263 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -3,46 +3,35 @@ | |||
3 | #include <linux/if_vlan.h> | 3 | #include <linux/if_vlan.h> |
4 | #include "vlan.h" | 4 | #include "vlan.h" |
5 | 5 | ||
6 | struct vlan_hwaccel_cb { | ||
7 | struct net_device *dev; | ||
8 | }; | ||
9 | |||
10 | static inline struct vlan_hwaccel_cb *vlan_hwaccel_cb(struct sk_buff *skb) | ||
11 | { | ||
12 | return (struct vlan_hwaccel_cb *)skb->cb; | ||
13 | } | ||
14 | |||
15 | /* VLAN rx hw acceleration helper. This acts like netif_{rx,receive_skb}(). */ | 6 | /* VLAN rx hw acceleration helper. This acts like netif_{rx,receive_skb}(). */ |
16 | int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp, | 7 | int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp, |
17 | u16 vlan_tci, int polling) | 8 | u16 vlan_tci, int polling) |
18 | { | 9 | { |
19 | struct vlan_hwaccel_cb *cb = vlan_hwaccel_cb(skb); | 10 | if (skb_bond_should_drop(skb)) |
20 | 11 | goto drop; | |
21 | if (skb_bond_should_drop(skb)) { | ||
22 | dev_kfree_skb_any(skb); | ||
23 | return NET_RX_DROP; | ||
24 | } | ||
25 | 12 | ||
26 | skb->vlan_tci = vlan_tci; | 13 | skb->vlan_tci = vlan_tci; |
27 | cb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK); | 14 | skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK); |
15 | |||
16 | if (!skb->dev) | ||
17 | goto drop; | ||
28 | 18 | ||
29 | return (polling ? netif_receive_skb(skb) : netif_rx(skb)); | 19 | return (polling ? netif_receive_skb(skb) : netif_rx(skb)); |
20 | |||
21 | drop: | ||
22 | dev_kfree_skb_any(skb); | ||
23 | return NET_RX_DROP; | ||
30 | } | 24 | } |
31 | EXPORT_SYMBOL(__vlan_hwaccel_rx); | 25 | EXPORT_SYMBOL(__vlan_hwaccel_rx); |
32 | 26 | ||
33 | int vlan_hwaccel_do_receive(struct sk_buff *skb) | 27 | int vlan_hwaccel_do_receive(struct sk_buff *skb) |
34 | { | 28 | { |
35 | struct vlan_hwaccel_cb *cb = vlan_hwaccel_cb(skb); | 29 | struct net_device *dev = skb->dev; |
36 | struct net_device *dev = cb->dev; | ||
37 | struct net_device_stats *stats; | 30 | struct net_device_stats *stats; |
38 | 31 | ||
32 | skb->dev = vlan_dev_info(dev)->real_dev; | ||
39 | netif_nit_deliver(skb); | 33 | netif_nit_deliver(skb); |
40 | 34 | ||
41 | if (dev == NULL) { | ||
42 | kfree_skb(skb); | ||
43 | return -1; | ||
44 | } | ||
45 | |||
46 | skb->dev = dev; | 35 | skb->dev = dev; |
47 | skb->priority = vlan_get_ingress_priority(dev, skb->vlan_tci); | 36 | skb->priority = vlan_get_ingress_priority(dev, skb->vlan_tci); |
48 | skb->vlan_tci = 0; | 37 | skb->vlan_tci = 0; |
@@ -80,3 +69,79 @@ u16 vlan_dev_vlan_id(const struct net_device *dev) | |||
80 | return vlan_dev_info(dev)->vlan_id; | 69 | return vlan_dev_info(dev)->vlan_id; |
81 | } | 70 | } |
82 | EXPORT_SYMBOL_GPL(vlan_dev_vlan_id); | 71 | EXPORT_SYMBOL_GPL(vlan_dev_vlan_id); |
72 | |||
73 | static int vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp, | ||
74 | unsigned int vlan_tci, struct sk_buff *skb) | ||
75 | { | ||
76 | struct sk_buff *p; | ||
77 | |||
78 | if (skb_bond_should_drop(skb)) | ||
79 | goto drop; | ||
80 | |||
81 | skb->vlan_tci = vlan_tci; | ||
82 | skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK); | ||
83 | |||
84 | if (!skb->dev) | ||
85 | goto drop; | ||
86 | |||
87 | for (p = napi->gro_list; p; p = p->next) { | ||
88 | NAPI_GRO_CB(p)->same_flow = p->dev == skb->dev; | ||
89 | NAPI_GRO_CB(p)->flush = 0; | ||
90 | } | ||
91 | |||
92 | return dev_gro_receive(napi, skb); | ||
93 | |||
94 | drop: | ||
95 | return 2; | ||
96 | } | ||
97 | |||
98 | int vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp, | ||
99 | unsigned int vlan_tci, struct sk_buff *skb) | ||
100 | { | ||
101 | int err = NET_RX_SUCCESS; | ||
102 | |||
103 | switch (vlan_gro_common(napi, grp, vlan_tci, skb)) { | ||
104 | case -1: | ||
105 | return netif_receive_skb(skb); | ||
106 | |||
107 | case 2: | ||
108 | err = NET_RX_DROP; | ||
109 | /* fall through */ | ||
110 | |||
111 | case 1: | ||
112 | kfree_skb(skb); | ||
113 | break; | ||
114 | } | ||
115 | |||
116 | return err; | ||
117 | } | ||
118 | EXPORT_SYMBOL(vlan_gro_receive); | ||
119 | |||
120 | int vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp, | ||
121 | unsigned int vlan_tci, struct napi_gro_fraginfo *info) | ||
122 | { | ||
123 | struct sk_buff *skb = napi_fraginfo_skb(napi, info); | ||
124 | int err = NET_RX_DROP; | ||
125 | |||
126 | if (!skb) | ||
127 | goto out; | ||
128 | |||
129 | err = NET_RX_SUCCESS; | ||
130 | |||
131 | switch (vlan_gro_common(napi, grp, vlan_tci, skb)) { | ||
132 | case -1: | ||
133 | return netif_receive_skb(skb); | ||
134 | |||
135 | case 2: | ||
136 | err = NET_RX_DROP; | ||
137 | /* fall through */ | ||
138 | |||
139 | case 1: | ||
140 | napi_reuse_skb(napi, skb); | ||
141 | break; | ||
142 | } | ||
143 | |||
144 | out: | ||
145 | return err; | ||
146 | } | ||
147 | EXPORT_SYMBOL(vlan_gro_frags); | ||
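
The new vlan_gro_receive()/vlan_gro_frags() entry points above let a driver with hardware VLAN tag stripping feed tagged frames straight into GRO instead of the plain vlan_hwaccel_* receive path. A minimal sketch of how a NAPI driver might call the new helper; the driver structure, field names and my_* identifiers are assumptions for illustration, not part of this merge:

```c
#include <linux/netdevice.h>
#include <linux/if_vlan.h>

/* Illustrative driver context; only the vlan_gro_receive()/
 * napi_gro_receive() calls come from this merge. */
struct my_rx_priv {
	struct napi_struct napi;
	struct vlan_group *vlgrp;	/* saved from .ndo_vlan_rx_register */
};

static void my_receive_one(struct my_rx_priv *priv, struct sk_buff *skb,
			   bool tagged, u16 vlan_tci)
{
	if (tagged && priv->vlgrp)
		/* Hardware stripped the tag: let the VLAN layer resolve
		 * the vlan_dev and run GRO against it. */
		vlan_gro_receive(&priv->napi, priv->vlgrp, vlan_tci, skb);
	else
		napi_gro_receive(&priv->napi, skb);
}
```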
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 89a3bbdfca3f..4a19acd3a32b 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -546,6 +546,18 @@ static int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
546 | return err; | 546 | return err; |
547 | } | 547 | } |
548 | 548 | ||
549 | static int vlan_dev_neigh_setup(struct net_device *dev, struct neigh_parms *pa) | ||
550 | { | ||
551 | struct net_device *real_dev = vlan_dev_info(dev)->real_dev; | ||
552 | const struct net_device_ops *ops = real_dev->netdev_ops; | ||
553 | int err = 0; | ||
554 | |||
555 | if (netif_device_present(real_dev) && ops->ndo_neigh_setup) | ||
556 | err = ops->ndo_neigh_setup(dev, pa); | ||
557 | |||
558 | return err; | ||
559 | } | ||
560 | |||
549 | static void vlan_dev_change_rx_flags(struct net_device *dev, int change) | 561 | static void vlan_dev_change_rx_flags(struct net_device *dev, int change) |
550 | { | 562 | { |
551 | struct net_device *real_dev = vlan_dev_info(dev)->real_dev; | 563 | struct net_device *real_dev = vlan_dev_info(dev)->real_dev; |
@@ -713,6 +725,7 @@ static const struct net_device_ops vlan_netdev_ops = { | |||
713 | .ndo_set_multicast_list = vlan_dev_set_rx_mode, | 725 | .ndo_set_multicast_list = vlan_dev_set_rx_mode, |
714 | .ndo_change_rx_flags = vlan_dev_change_rx_flags, | 726 | .ndo_change_rx_flags = vlan_dev_change_rx_flags, |
715 | .ndo_do_ioctl = vlan_dev_ioctl, | 727 | .ndo_do_ioctl = vlan_dev_ioctl, |
728 | .ndo_neigh_setup = vlan_dev_neigh_setup, | ||
716 | }; | 729 | }; |
717 | 730 | ||
718 | static const struct net_device_ops vlan_netdev_accel_ops = { | 731 | static const struct net_device_ops vlan_netdev_accel_ops = { |
@@ -728,6 +741,7 @@ static const struct net_device_ops vlan_netdev_accel_ops = { | |||
728 | .ndo_set_multicast_list = vlan_dev_set_rx_mode, | 741 | .ndo_set_multicast_list = vlan_dev_set_rx_mode, |
729 | .ndo_change_rx_flags = vlan_dev_change_rx_flags, | 742 | .ndo_change_rx_flags = vlan_dev_change_rx_flags, |
730 | .ndo_do_ioctl = vlan_dev_ioctl, | 743 | .ndo_do_ioctl = vlan_dev_ioctl, |
744 | .ndo_neigh_setup = vlan_dev_neigh_setup, | ||
731 | }; | 745 | }; |
732 | 746 | ||
733 | void vlan_setup(struct net_device *dev) | 747 | void vlan_setup(struct net_device *dev) |
diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c
index b03ff58e9308..89f99d3beb60 100644
--- a/net/appletalk/aarp.c
+++ b/net/appletalk/aarp.c
@@ -443,13 +443,14 @@ static void aarp_send_probe_phase1(struct atalk_iface *iface) | |||
443 | { | 443 | { |
444 | struct ifreq atreq; | 444 | struct ifreq atreq; |
445 | struct sockaddr_at *sa = (struct sockaddr_at *)&atreq.ifr_addr; | 445 | struct sockaddr_at *sa = (struct sockaddr_at *)&atreq.ifr_addr; |
446 | const struct net_device_ops *ops = iface->dev->netdev_ops; | ||
446 | 447 | ||
447 | sa->sat_addr.s_node = iface->address.s_node; | 448 | sa->sat_addr.s_node = iface->address.s_node; |
448 | sa->sat_addr.s_net = ntohs(iface->address.s_net); | 449 | sa->sat_addr.s_net = ntohs(iface->address.s_net); |
449 | 450 | ||
450 | /* We pass the Net:Node to the drivers/cards by a Device ioctl. */ | 451 | /* We pass the Net:Node to the drivers/cards by a Device ioctl. */ |
451 | if (!(iface->dev->do_ioctl(iface->dev, &atreq, SIOCSIFADDR))) { | 452 | if (!(ops->ndo_do_ioctl(iface->dev, &atreq, SIOCSIFADDR))) { |
452 | (void)iface->dev->do_ioctl(iface->dev, &atreq, SIOCGIFADDR); | 453 | ops->ndo_do_ioctl(iface->dev, &atreq, SIOCGIFADDR); |
453 | if (iface->address.s_net != htons(sa->sat_addr.s_net) || | 454 | if (iface->address.s_net != htons(sa->sat_addr.s_net) || |
454 | iface->address.s_node != sa->sat_addr.s_node) | 455 | iface->address.s_node != sa->sat_addr.s_node) |
455 | iface->status |= ATIF_PROBE_FAIL; | 456 | iface->status |= ATIF_PROBE_FAIL; |
diff --git a/net/bluetooth/bnep/bnep.h b/net/bluetooth/bnep/bnep.h
index d20f8a40f36e..0d9e506f5d5a 100644
--- a/net/bluetooth/bnep/bnep.h
+++ b/net/bluetooth/bnep/bnep.h
@@ -165,7 +165,6 @@ struct bnep_session { | |||
165 | 165 | ||
166 | struct socket *sock; | 166 | struct socket *sock; |
167 | struct net_device *dev; | 167 | struct net_device *dev; |
168 | struct net_device_stats stats; | ||
169 | }; | 168 | }; |
170 | 169 | ||
171 | void bnep_net_setup(struct net_device *dev); | 170 | void bnep_net_setup(struct net_device *dev); |
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 70fea8bdb4e5..52a6ce0d772b 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -306,7 +306,7 @@ static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb) | |||
306 | struct sk_buff *nskb; | 306 | struct sk_buff *nskb; |
307 | u8 type; | 307 | u8 type; |
308 | 308 | ||
309 | s->stats.rx_bytes += skb->len; | 309 | dev->stats.rx_bytes += skb->len; |
310 | 310 | ||
311 | type = *(u8 *) skb->data; skb_pull(skb, 1); | 311 | type = *(u8 *) skb->data; skb_pull(skb, 1); |
312 | 312 | ||
@@ -343,7 +343,7 @@ static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb) | |||
343 | * may not be modified and because of the alignment requirements. */ | 343 | * may not be modified and because of the alignment requirements. */ |
344 | nskb = alloc_skb(2 + ETH_HLEN + skb->len, GFP_KERNEL); | 344 | nskb = alloc_skb(2 + ETH_HLEN + skb->len, GFP_KERNEL); |
345 | if (!nskb) { | 345 | if (!nskb) { |
346 | s->stats.rx_dropped++; | 346 | dev->stats.rx_dropped++; |
347 | kfree_skb(skb); | 347 | kfree_skb(skb); |
348 | return -ENOMEM; | 348 | return -ENOMEM; |
349 | } | 349 | } |
@@ -378,14 +378,14 @@ static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb) | |||
378 | skb_copy_from_linear_data(skb, __skb_put(nskb, skb->len), skb->len); | 378 | skb_copy_from_linear_data(skb, __skb_put(nskb, skb->len), skb->len); |
379 | kfree_skb(skb); | 379 | kfree_skb(skb); |
380 | 380 | ||
381 | s->stats.rx_packets++; | 381 | dev->stats.rx_packets++; |
382 | nskb->ip_summed = CHECKSUM_NONE; | 382 | nskb->ip_summed = CHECKSUM_NONE; |
383 | nskb->protocol = eth_type_trans(nskb, dev); | 383 | nskb->protocol = eth_type_trans(nskb, dev); |
384 | netif_rx_ni(nskb); | 384 | netif_rx_ni(nskb); |
385 | return 0; | 385 | return 0; |
386 | 386 | ||
387 | badframe: | 387 | badframe: |
388 | s->stats.rx_errors++; | 388 | dev->stats.rx_errors++; |
389 | kfree_skb(skb); | 389 | kfree_skb(skb); |
390 | return 0; | 390 | return 0; |
391 | } | 391 | } |
@@ -448,8 +448,8 @@ send: | |||
448 | kfree_skb(skb); | 448 | kfree_skb(skb); |
449 | 449 | ||
450 | if (len > 0) { | 450 | if (len > 0) { |
451 | s->stats.tx_bytes += len; | 451 | s->dev->stats.tx_bytes += len; |
452 | s->stats.tx_packets++; | 452 | s->dev->stats.tx_packets++; |
453 | return 0; | 453 | return 0; |
454 | } | 454 | } |
455 | 455 | ||
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index f897da6e0444..d7a0e9722def 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -55,12 +55,6 @@ static int bnep_net_close(struct net_device *dev) | |||
55 | return 0; | 55 | return 0; |
56 | } | 56 | } |
57 | 57 | ||
58 | static struct net_device_stats *bnep_net_get_stats(struct net_device *dev) | ||
59 | { | ||
60 | struct bnep_session *s = netdev_priv(dev); | ||
61 | return &s->stats; | ||
62 | } | ||
63 | |||
64 | static void bnep_net_set_mc_list(struct net_device *dev) | 58 | static void bnep_net_set_mc_list(struct net_device *dev) |
65 | { | 59 | { |
66 | #ifdef CONFIG_BT_BNEP_MC_FILTER | 60 | #ifdef CONFIG_BT_BNEP_MC_FILTER |
@@ -128,11 +122,6 @@ static void bnep_net_timeout(struct net_device *dev) | |||
128 | netif_wake_queue(dev); | 122 | netif_wake_queue(dev); |
129 | } | 123 | } |
130 | 124 | ||
131 | static int bnep_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | ||
132 | { | ||
133 | return -EINVAL; | ||
134 | } | ||
135 | |||
136 | #ifdef CONFIG_BT_BNEP_MC_FILTER | 125 | #ifdef CONFIG_BT_BNEP_MC_FILTER |
137 | static inline int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s) | 126 | static inline int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s) |
138 | { | 127 | { |
@@ -217,6 +206,18 @@ static int bnep_net_xmit(struct sk_buff *skb, struct net_device *dev) | |||
217 | return 0; | 206 | return 0; |
218 | } | 207 | } |
219 | 208 | ||
209 | static const struct net_device_ops bnep_netdev_ops = { | ||
210 | .ndo_open = bnep_net_open, | ||
211 | .ndo_stop = bnep_net_close, | ||
212 | .ndo_start_xmit = bnep_net_xmit, | ||
213 | .ndo_validate_addr = eth_validate_addr, | ||
214 | .ndo_set_multicast_list = bnep_net_set_mc_list, | ||
215 | .ndo_set_mac_address = bnep_net_set_mac_addr, | ||
216 | .ndo_tx_timeout = bnep_net_timeout, | ||
217 | .ndo_change_mtu = eth_change_mtu, | ||
218 | |||
219 | }; | ||
220 | |||
220 | void bnep_net_setup(struct net_device *dev) | 221 | void bnep_net_setup(struct net_device *dev) |
221 | { | 222 | { |
222 | 223 | ||
@@ -224,15 +225,7 @@ void bnep_net_setup(struct net_device *dev) | |||
224 | dev->addr_len = ETH_ALEN; | 225 | dev->addr_len = ETH_ALEN; |
225 | 226 | ||
226 | ether_setup(dev); | 227 | ether_setup(dev); |
227 | 228 | dev->netdev_ops = &bnep_netdev_ops; | |
228 | dev->open = bnep_net_open; | ||
229 | dev->stop = bnep_net_close; | ||
230 | dev->hard_start_xmit = bnep_net_xmit; | ||
231 | dev->get_stats = bnep_net_get_stats; | ||
232 | dev->do_ioctl = bnep_net_ioctl; | ||
233 | dev->set_mac_address = bnep_net_set_mac_addr; | ||
234 | dev->set_multicast_list = bnep_net_set_mc_list; | ||
235 | 229 | ||
236 | dev->watchdog_timeo = HZ * 2; | 230 | dev->watchdog_timeo = HZ * 2; |
237 | dev->tx_timeout = bnep_net_timeout; | ||
238 | } | 231 | } |
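
The BNEP change above is the same net_device_ops conversion that several commits in this merge apply elsewhere (pegasus, kaweth, qeth, dsa, phonet, teql): the per-instance function pointers on struct net_device are replaced by one shared, const ops table assigned in the setup routine. A condensed before/after sketch with hypothetical my_* handlers:

```c
#include <linux/netdevice.h>

static int my_open(struct net_device *dev);
static int my_stop(struct net_device *dev);
static int my_xmit(struct sk_buff *skb, struct net_device *dev);

/* Old style: every hook assigned on each net_device instance. */
static void my_setup_old(struct net_device *dev)
{
	dev->open            = my_open;
	dev->stop            = my_stop;
	dev->hard_start_xmit = my_xmit;
}

/* New style: one const table shared by all instances, wired up once. */
static const struct net_device_ops my_netdev_ops = {
	.ndo_open       = my_open,
	.ndo_stop       = my_stop,
	.ndo_start_xmit = my_xmit,
};

static void my_setup_new(struct net_device *dev)
{
	dev->netdev_ops = &my_netdev_ops;
}
```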
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 3dadb338addd..fa417ca6cbe6 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -414,6 +414,12 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask, | |||
414 | * The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can | 414 | * The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can |
415 | * filter for error frames (CAN_ERR_FLAG bit set in mask). | 415 | * filter for error frames (CAN_ERR_FLAG bit set in mask). |
416 | * | 416 | * |
417 | * The provided pointer to the sk_buff is guaranteed to be valid as long as | ||
418 | * the callback function is running. The callback function must *not* free | ||
419 | * the given sk_buff while processing it's task. When the given sk_buff is | ||
420 | * needed after the end of the callback function it must be cloned inside | ||
421 | * the callback function with skb_clone(). | ||
422 | * | ||
417 | * Return: | 423 | * Return: |
418 | * 0 on success | 424 | * 0 on success |
419 | * -ENOMEM on missing cache mem to create subscription entry | 425 | * -ENOMEM on missing cache mem to create subscription entry |
@@ -569,13 +575,8 @@ EXPORT_SYMBOL(can_rx_unregister); | |||
569 | 575 | ||
570 | static inline void deliver(struct sk_buff *skb, struct receiver *r) | 576 | static inline void deliver(struct sk_buff *skb, struct receiver *r) |
571 | { | 577 | { |
572 | struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC); | 578 | r->func(skb, r->data); |
573 | 579 | r->matches++; | |
574 | if (clone) { | ||
575 | clone->sk = skb->sk; | ||
576 | r->func(clone, r->data); | ||
577 | r->matches++; | ||
578 | } | ||
579 | } | 580 | } |
580 | 581 | ||
581 | static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb) | 582 | static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb) |
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 6248ae2502c7..1649c8ab2c2f 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -633,7 +633,7 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data) | |||
633 | hrtimer_cancel(&op->timer); | 633 | hrtimer_cancel(&op->timer); |
634 | 634 | ||
635 | if (op->can_id != rxframe->can_id) | 635 | if (op->can_id != rxframe->can_id) |
636 | goto rx_freeskb; | 636 | return; |
637 | 637 | ||
638 | /* save rx timestamp */ | 638 | /* save rx timestamp */ |
639 | op->rx_stamp = skb->tstamp; | 639 | op->rx_stamp = skb->tstamp; |
@@ -645,19 +645,19 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data) | |||
645 | if (op->flags & RX_RTR_FRAME) { | 645 | if (op->flags & RX_RTR_FRAME) { |
646 | /* send reply for RTR-request (placed in op->frames[0]) */ | 646 | /* send reply for RTR-request (placed in op->frames[0]) */ |
647 | bcm_can_tx(op); | 647 | bcm_can_tx(op); |
648 | goto rx_freeskb; | 648 | return; |
649 | } | 649 | } |
650 | 650 | ||
651 | if (op->flags & RX_FILTER_ID) { | 651 | if (op->flags & RX_FILTER_ID) { |
652 | /* the easiest case */ | 652 | /* the easiest case */ |
653 | bcm_rx_update_and_send(op, &op->last_frames[0], rxframe); | 653 | bcm_rx_update_and_send(op, &op->last_frames[0], rxframe); |
654 | goto rx_freeskb_starttimer; | 654 | goto rx_starttimer; |
655 | } | 655 | } |
656 | 656 | ||
657 | if (op->nframes == 1) { | 657 | if (op->nframes == 1) { |
658 | /* simple compare with index 0 */ | 658 | /* simple compare with index 0 */ |
659 | bcm_rx_cmp_to_index(op, 0, rxframe); | 659 | bcm_rx_cmp_to_index(op, 0, rxframe); |
660 | goto rx_freeskb_starttimer; | 660 | goto rx_starttimer; |
661 | } | 661 | } |
662 | 662 | ||
663 | if (op->nframes > 1) { | 663 | if (op->nframes > 1) { |
@@ -678,10 +678,8 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data) | |||
678 | } | 678 | } |
679 | } | 679 | } |
680 | 680 | ||
681 | rx_freeskb_starttimer: | 681 | rx_starttimer: |
682 | bcm_rx_starttimer(op); | 682 | bcm_rx_starttimer(op); |
683 | rx_freeskb: | ||
684 | kfree_skb(skb); | ||
685 | } | 683 | } |
686 | 684 | ||
687 | /* | 685 | /* |
diff --git a/net/can/raw.c b/net/can/raw.c
index 27aab63df467..0703cba4bf9f 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -99,13 +99,14 @@ static void raw_rcv(struct sk_buff *skb, void *data) | |||
99 | struct raw_sock *ro = raw_sk(sk); | 99 | struct raw_sock *ro = raw_sk(sk); |
100 | struct sockaddr_can *addr; | 100 | struct sockaddr_can *addr; |
101 | 101 | ||
102 | if (!ro->recv_own_msgs) { | 102 | /* check the received tx sock reference */ |
103 | /* check the received tx sock reference */ | 103 | if (!ro->recv_own_msgs && skb->sk == sk) |
104 | if (skb->sk == sk) { | 104 | return; |
105 | kfree_skb(skb); | 105 | |
106 | return; | 106 | /* clone the given skb to be able to enqueue it into the rcv queue */ |
107 | } | 107 | skb = skb_clone(skb, GFP_ATOMIC); |
108 | } | 108 | if (!skb) |
109 | return; | ||
109 | 110 | ||
110 | /* | 111 | /* |
111 | * Put the datagram to the queue so that raw_recvmsg() can | 112 | * Put the datagram to the queue so that raw_recvmsg() can |
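
Taken together, the af_can.c, bcm.c and raw.c hunks above change the ownership rules for CAN receive callbacks: deliver() no longer hands each receiver a private clone, so a callback registered with can_rx_register() must not free the skb it is given and has to clone it if the data is needed after the callback returns (as raw_rcv() now does). A sketch of a callback that follows the new contract; my_enqueue() and the surrounding driver context are hypothetical:

```c
#include <linux/can.h>
#include <linux/can/core.h>
#include <linux/skbuff.h>

static void my_enqueue(struct sk_buff *skb, void *data);

/* Receive callback obeying the contract documented in af_can.c above:
 * never free the passed skb, clone it if it must outlive the callback. */
static void my_can_rcv(struct sk_buff *skb, void *data)
{
	const struct can_frame *cf = (const struct can_frame *)skb->data;
	struct sk_buff *copy;

	if (cf->can_dlc == 0)
		return;			/* just return -- no kfree_skb() */

	/* Keep the frame beyond the callback: take a private clone. */
	copy = skb_clone(skb, GFP_ATOMIC);
	if (copy)
		my_enqueue(copy, data);	/* hypothetical consumer */
}
```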
diff --git a/net/core/dev.c b/net/core/dev.c
index 382df6c09eec..bab8bcedd62e 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2387,7 +2387,7 @@ void napi_gro_flush(struct napi_struct *napi) | |||
2387 | } | 2387 | } |
2388 | EXPORT_SYMBOL(napi_gro_flush); | 2388 | EXPORT_SYMBOL(napi_gro_flush); |
2389 | 2389 | ||
2390 | static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) | 2390 | int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) |
2391 | { | 2391 | { |
2392 | struct sk_buff **pp = NULL; | 2392 | struct sk_buff **pp = NULL; |
2393 | struct packet_type *ptype; | 2393 | struct packet_type *ptype; |
@@ -2417,11 +2417,14 @@ static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) | |||
2417 | 2417 | ||
2418 | for (p = napi->gro_list; p; p = p->next) { | 2418 | for (p = napi->gro_list; p; p = p->next) { |
2419 | count++; | 2419 | count++; |
2420 | NAPI_GRO_CB(p)->same_flow = | 2420 | |
2421 | p->mac_len == mac_len && | 2421 | if (!NAPI_GRO_CB(p)->same_flow) |
2422 | !memcmp(skb_mac_header(p), skb_mac_header(skb), | 2422 | continue; |
2423 | mac_len); | 2423 | |
2424 | NAPI_GRO_CB(p)->flush = 0; | 2424 | if (p->mac_len != mac_len || |
2425 | memcmp(skb_mac_header(p), skb_mac_header(skb), | ||
2426 | mac_len)) | ||
2427 | NAPI_GRO_CB(p)->same_flow = 0; | ||
2425 | } | 2428 | } |
2426 | 2429 | ||
2427 | pp = ptype->gro_receive(&napi->gro_list, skb); | 2430 | pp = ptype->gro_receive(&napi->gro_list, skb); |
@@ -2463,6 +2466,19 @@ ok: | |||
2463 | normal: | 2466 | normal: |
2464 | return -1; | 2467 | return -1; |
2465 | } | 2468 | } |
2469 | EXPORT_SYMBOL(dev_gro_receive); | ||
2470 | |||
2471 | static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) | ||
2472 | { | ||
2473 | struct sk_buff *p; | ||
2474 | |||
2475 | for (p = napi->gro_list; p; p = p->next) { | ||
2476 | NAPI_GRO_CB(p)->same_flow = 1; | ||
2477 | NAPI_GRO_CB(p)->flush = 0; | ||
2478 | } | ||
2479 | |||
2480 | return dev_gro_receive(napi, skb); | ||
2481 | } | ||
2466 | 2482 | ||
2467 | int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) | 2483 | int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) |
2468 | { | 2484 | { |
@@ -2479,11 +2495,26 @@ int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) | |||
2479 | } | 2495 | } |
2480 | EXPORT_SYMBOL(napi_gro_receive); | 2496 | EXPORT_SYMBOL(napi_gro_receive); |
2481 | 2497 | ||
2482 | int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info) | 2498 | void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb) |
2499 | { | ||
2500 | skb_shinfo(skb)->nr_frags = 0; | ||
2501 | |||
2502 | skb->len -= skb->data_len; | ||
2503 | skb->truesize -= skb->data_len; | ||
2504 | skb->data_len = 0; | ||
2505 | |||
2506 | __skb_pull(skb, skb_headlen(skb)); | ||
2507 | skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb)); | ||
2508 | |||
2509 | napi->skb = skb; | ||
2510 | } | ||
2511 | EXPORT_SYMBOL(napi_reuse_skb); | ||
2512 | |||
2513 | struct sk_buff *napi_fraginfo_skb(struct napi_struct *napi, | ||
2514 | struct napi_gro_fraginfo *info) | ||
2483 | { | 2515 | { |
2484 | struct net_device *dev = napi->dev; | 2516 | struct net_device *dev = napi->dev; |
2485 | struct sk_buff *skb = napi->skb; | 2517 | struct sk_buff *skb = napi->skb; |
2486 | int err = NET_RX_DROP; | ||
2487 | 2518 | ||
2488 | napi->skb = NULL; | 2519 | napi->skb = NULL; |
2489 | 2520 | ||
@@ -2503,16 +2534,31 @@ int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info) | |||
2503 | skb->len += info->len; | 2534 | skb->len += info->len; |
2504 | skb->truesize += info->len; | 2535 | skb->truesize += info->len; |
2505 | 2536 | ||
2506 | if (!pskb_may_pull(skb, ETH_HLEN)) | 2537 | if (!pskb_may_pull(skb, ETH_HLEN)) { |
2507 | goto reuse; | 2538 | napi_reuse_skb(napi, skb); |
2508 | 2539 | goto out; | |
2509 | err = NET_RX_SUCCESS; | 2540 | } |
2510 | 2541 | ||
2511 | skb->protocol = eth_type_trans(skb, dev); | 2542 | skb->protocol = eth_type_trans(skb, dev); |
2512 | 2543 | ||
2513 | skb->ip_summed = info->ip_summed; | 2544 | skb->ip_summed = info->ip_summed; |
2514 | skb->csum = info->csum; | 2545 | skb->csum = info->csum; |
2515 | 2546 | ||
2547 | out: | ||
2548 | return skb; | ||
2549 | } | ||
2550 | EXPORT_SYMBOL(napi_fraginfo_skb); | ||
2551 | |||
2552 | int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info) | ||
2553 | { | ||
2554 | struct sk_buff *skb = napi_fraginfo_skb(napi, info); | ||
2555 | int err = NET_RX_DROP; | ||
2556 | |||
2557 | if (!skb) | ||
2558 | goto out; | ||
2559 | |||
2560 | err = NET_RX_SUCCESS; | ||
2561 | |||
2516 | switch (__napi_gro_receive(napi, skb)) { | 2562 | switch (__napi_gro_receive(napi, skb)) { |
2517 | case -1: | 2563 | case -1: |
2518 | return netif_receive_skb(skb); | 2564 | return netif_receive_skb(skb); |
@@ -2521,17 +2567,7 @@ int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info) | |||
2521 | goto out; | 2567 | goto out; |
2522 | } | 2568 | } |
2523 | 2569 | ||
2524 | reuse: | 2570 | napi_reuse_skb(napi, skb); |
2525 | skb_shinfo(skb)->nr_frags = 0; | ||
2526 | |||
2527 | skb->len -= skb->data_len; | ||
2528 | skb->truesize -= skb->data_len; | ||
2529 | skb->data_len = 0; | ||
2530 | |||
2531 | __skb_pull(skb, skb_headlen(skb)); | ||
2532 | skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb)); | ||
2533 | |||
2534 | napi->skb = skb; | ||
2535 | 2571 | ||
2536 | out: | 2572 | out: |
2537 | return err; | 2573 | return err; |
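
The net/core/dev.c refactor exports the pieces the VLAN helpers earlier in this diff are built on: dev_gro_receive() becomes callable from outside, and napi_fraginfo_skb()/napi_reuse_skb() expose the skb build/recycle steps that napi_gro_frags() previously did inline. The sketch below restates the calling convention by mirroring __napi_gro_receive() plus napi_gro_receive() from this tree; my_gro_submit() itself is only an illustration:

```c
#include <linux/netdevice.h>

/* Illustrative wrapper around the newly exported dev_gro_receive().
 * Callers prime same_flow/flush on the held packets first, then act
 * on the return code:
 *   -1 : not aggregated -> deliver normally via netif_receive_skb()
 *    1 : merged into a held packet -> this skb may be freed (or reused)
 *  else: skb is now held on napi->gro_list -> nothing more to do
 */
static int my_gro_submit(struct napi_struct *napi, struct sk_buff *skb)
{
	struct sk_buff *p;

	for (p = napi->gro_list; p; p = p->next) {
		NAPI_GRO_CB(p)->same_flow = 1;
		NAPI_GRO_CB(p)->flush = 0;
	}

	switch (dev_gro_receive(napi, skb)) {
	case -1:
		return netif_receive_skb(skb);
	case 1:
		kfree_skb(skb);
		break;
	}

	return NET_RX_SUCCESS;
}
```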
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index a3a410d20da0..a68fd79e9eca 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -286,6 +286,42 @@ static const struct ethtool_ops dsa_slave_ethtool_ops = { | |||
286 | .get_sset_count = dsa_slave_get_sset_count, | 286 | .get_sset_count = dsa_slave_get_sset_count, |
287 | }; | 287 | }; |
288 | 288 | ||
289 | #ifdef CONFIG_NET_DSA_TAG_DSA | ||
290 | static const struct net_device_ops dsa_netdev_ops = { | ||
291 | .ndo_open = dsa_slave_open, | ||
292 | .ndo_stop = dsa_slave_close, | ||
293 | .ndo_start_xmit = dsa_xmit, | ||
294 | .ndo_change_rx_flags = dsa_slave_change_rx_flags, | ||
295 | .ndo_set_rx_mode = dsa_slave_set_rx_mode, | ||
296 | .ndo_set_multicast_list = dsa_slave_set_rx_mode, | ||
297 | .ndo_set_mac_address = dsa_slave_set_mac_address, | ||
298 | .ndo_do_ioctl = dsa_slave_ioctl, | ||
299 | }; | ||
300 | #endif | ||
301 | #ifdef CONFIG_NET_DSA_TAG_EDSA | ||
302 | static const struct net_device_ops edsa_netdev_ops = { | ||
303 | .ndo_open = dsa_slave_open, | ||
304 | .ndo_stop = dsa_slave_close, | ||
305 | .ndo_start_xmit = edsa_xmit, | ||
306 | .ndo_change_rx_flags = dsa_slave_change_rx_flags, | ||
307 | .ndo_set_rx_mode = dsa_slave_set_rx_mode, | ||
308 | .ndo_set_multicast_list = dsa_slave_set_rx_mode, | ||
309 | .ndo_set_mac_address = dsa_slave_set_mac_address, | ||
310 | .ndo_do_ioctl = dsa_slave_ioctl, | ||
311 | }; | ||
312 | #endif | ||
313 | #ifdef CONFIG_NET_DSA_TAG_TRAILER | ||
314 | static const struct net_device_ops trailer_netdev_ops = { | ||
315 | .ndo_open = dsa_slave_open, | ||
316 | .ndo_stop = dsa_slave_close, | ||
317 | .ndo_start_xmit = trailer_xmit, | ||
318 | .ndo_change_rx_flags = dsa_slave_change_rx_flags, | ||
319 | .ndo_set_rx_mode = dsa_slave_set_rx_mode, | ||
320 | .ndo_set_multicast_list = dsa_slave_set_rx_mode, | ||
321 | .ndo_set_mac_address = dsa_slave_set_mac_address, | ||
322 | .ndo_do_ioctl = dsa_slave_ioctl, | ||
323 | }; | ||
324 | #endif | ||
289 | 325 | ||
290 | /* slave device setup *******************************************************/ | 326 | /* slave device setup *******************************************************/ |
291 | struct net_device * | 327 | struct net_device * |
@@ -306,32 +342,27 @@ dsa_slave_create(struct dsa_switch *ds, struct device *parent, | |||
306 | SET_ETHTOOL_OPS(slave_dev, &dsa_slave_ethtool_ops); | 342 | SET_ETHTOOL_OPS(slave_dev, &dsa_slave_ethtool_ops); |
307 | memcpy(slave_dev->dev_addr, master->dev_addr, ETH_ALEN); | 343 | memcpy(slave_dev->dev_addr, master->dev_addr, ETH_ALEN); |
308 | slave_dev->tx_queue_len = 0; | 344 | slave_dev->tx_queue_len = 0; |
345 | |||
309 | switch (ds->tag_protocol) { | 346 | switch (ds->tag_protocol) { |
310 | #ifdef CONFIG_NET_DSA_TAG_DSA | 347 | #ifdef CONFIG_NET_DSA_TAG_DSA |
311 | case htons(ETH_P_DSA): | 348 | case htons(ETH_P_DSA): |
312 | slave_dev->hard_start_xmit = dsa_xmit; | 349 | slave_dev->netdev_ops = &dsa_netdev_ops; |
313 | break; | 350 | break; |
314 | #endif | 351 | #endif |
315 | #ifdef CONFIG_NET_DSA_TAG_EDSA | 352 | #ifdef CONFIG_NET_DSA_TAG_EDSA |
316 | case htons(ETH_P_EDSA): | 353 | case htons(ETH_P_EDSA): |
317 | slave_dev->hard_start_xmit = edsa_xmit; | 354 | slave_dev->netdev_ops = &edsa_netdev_ops; |
318 | break; | 355 | break; |
319 | #endif | 356 | #endif |
320 | #ifdef CONFIG_NET_DSA_TAG_TRAILER | 357 | #ifdef CONFIG_NET_DSA_TAG_TRAILER |
321 | case htons(ETH_P_TRAILER): | 358 | case htons(ETH_P_TRAILER): |
322 | slave_dev->hard_start_xmit = trailer_xmit; | 359 | slave_dev->netdev_ops = &trailer_netdev_ops; |
323 | break; | 360 | break; |
324 | #endif | 361 | #endif |
325 | default: | 362 | default: |
326 | BUG(); | 363 | BUG(); |
327 | } | 364 | } |
328 | slave_dev->open = dsa_slave_open; | 365 | |
329 | slave_dev->stop = dsa_slave_close; | ||
330 | slave_dev->change_rx_flags = dsa_slave_change_rx_flags; | ||
331 | slave_dev->set_rx_mode = dsa_slave_set_rx_mode; | ||
332 | slave_dev->set_multicast_list = dsa_slave_set_rx_mode; | ||
333 | slave_dev->set_mac_address = dsa_slave_set_mac_address; | ||
334 | slave_dev->do_ioctl = dsa_slave_ioctl; | ||
335 | SET_NETDEV_DEV(slave_dev, parent); | 366 | SET_NETDEV_DEV(slave_dev, parent); |
336 | slave_dev->vlan_features = master->vlan_features; | 367 | slave_dev->vlan_features = master->vlan_features; |
337 | 368 | ||
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 35bcddf8a932..bd6ff907d9e4 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2542,6 +2542,7 @@ out: | |||
2542 | 2542 | ||
2543 | return pp; | 2543 | return pp; |
2544 | } | 2544 | } |
2545 | EXPORT_SYMBOL(tcp_gro_receive); | ||
2545 | 2546 | ||
2546 | int tcp_gro_complete(struct sk_buff *skb) | 2547 | int tcp_gro_complete(struct sk_buff *skb) |
2547 | { | 2548 | { |
@@ -2558,6 +2559,7 @@ int tcp_gro_complete(struct sk_buff *skb) | |||
2558 | 2559 | ||
2559 | return 0; | 2560 | return 0; |
2560 | } | 2561 | } |
2562 | EXPORT_SYMBOL(tcp_gro_complete); | ||
2561 | 2563 | ||
2562 | #ifdef CONFIG_TCP_MD5SIG | 2564 | #ifdef CONFIG_TCP_MD5SIG |
2563 | static unsigned long tcp_md5sig_users; | 2565 | static unsigned long tcp_md5sig_users; |
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 437b750b98fd..94f74f5b0cbf 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -672,8 +672,7 @@ int ipv6_opt_accepted(struct sock *sk, struct sk_buff *skb) | |||
672 | 672 | ||
673 | EXPORT_SYMBOL_GPL(ipv6_opt_accepted); | 673 | EXPORT_SYMBOL_GPL(ipv6_opt_accepted); |
674 | 674 | ||
675 | static struct inet6_protocol *ipv6_gso_pull_exthdrs(struct sk_buff *skb, | 675 | static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto) |
676 | int proto) | ||
677 | { | 676 | { |
678 | struct inet6_protocol *ops = NULL; | 677 | struct inet6_protocol *ops = NULL; |
679 | 678 | ||
@@ -704,7 +703,7 @@ static struct inet6_protocol *ipv6_gso_pull_exthdrs(struct sk_buff *skb, | |||
704 | __skb_pull(skb, len); | 703 | __skb_pull(skb, len); |
705 | } | 704 | } |
706 | 705 | ||
707 | return ops; | 706 | return proto; |
708 | } | 707 | } |
709 | 708 | ||
710 | static int ipv6_gso_send_check(struct sk_buff *skb) | 709 | static int ipv6_gso_send_check(struct sk_buff *skb) |
@@ -721,7 +720,9 @@ static int ipv6_gso_send_check(struct sk_buff *skb) | |||
721 | err = -EPROTONOSUPPORT; | 720 | err = -EPROTONOSUPPORT; |
722 | 721 | ||
723 | rcu_read_lock(); | 722 | rcu_read_lock(); |
724 | ops = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr); | 723 | ops = rcu_dereference(inet6_protos[ |
724 | ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr)]); | ||
725 | |||
725 | if (likely(ops && ops->gso_send_check)) { | 726 | if (likely(ops && ops->gso_send_check)) { |
726 | skb_reset_transport_header(skb); | 727 | skb_reset_transport_header(skb); |
727 | err = ops->gso_send_check(skb); | 728 | err = ops->gso_send_check(skb); |
@@ -757,7 +758,9 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features) | |||
757 | segs = ERR_PTR(-EPROTONOSUPPORT); | 758 | segs = ERR_PTR(-EPROTONOSUPPORT); |
758 | 759 | ||
759 | rcu_read_lock(); | 760 | rcu_read_lock(); |
760 | ops = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr); | 761 | ops = rcu_dereference(inet6_protos[ |
762 | ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr)]); | ||
763 | |||
761 | if (likely(ops && ops->gso_segment)) { | 764 | if (likely(ops && ops->gso_segment)) { |
762 | skb_reset_transport_header(skb); | 765 | skb_reset_transport_header(skb); |
763 | segs = ops->gso_segment(skb, features); | 766 | segs = ops->gso_segment(skb, features); |
@@ -777,11 +780,105 @@ out: | |||
777 | return segs; | 780 | return segs; |
778 | } | 781 | } |
779 | 782 | ||
783 | struct ipv6_gro_cb { | ||
784 | struct napi_gro_cb napi; | ||
785 | int proto; | ||
786 | }; | ||
787 | |||
788 | #define IPV6_GRO_CB(skb) ((struct ipv6_gro_cb *)(skb)->cb) | ||
789 | |||
790 | static struct sk_buff **ipv6_gro_receive(struct sk_buff **head, | ||
791 | struct sk_buff *skb) | ||
792 | { | ||
793 | struct inet6_protocol *ops; | ||
794 | struct sk_buff **pp = NULL; | ||
795 | struct sk_buff *p; | ||
796 | struct ipv6hdr *iph; | ||
797 | unsigned int nlen; | ||
798 | int flush = 1; | ||
799 | int proto; | ||
800 | |||
801 | if (unlikely(!pskb_may_pull(skb, sizeof(*iph)))) | ||
802 | goto out; | ||
803 | |||
804 | iph = ipv6_hdr(skb); | ||
805 | __skb_pull(skb, sizeof(*iph)); | ||
806 | |||
807 | flush += ntohs(iph->payload_len) != skb->len; | ||
808 | |||
809 | rcu_read_lock(); | ||
810 | proto = ipv6_gso_pull_exthdrs(skb, iph->nexthdr); | ||
811 | IPV6_GRO_CB(skb)->proto = proto; | ||
812 | ops = rcu_dereference(inet6_protos[proto]); | ||
813 | if (!ops || !ops->gro_receive) | ||
814 | goto out_unlock; | ||
815 | |||
816 | flush--; | ||
817 | skb_reset_transport_header(skb); | ||
818 | nlen = skb_network_header_len(skb); | ||
819 | |||
820 | for (p = *head; p; p = p->next) { | ||
821 | struct ipv6hdr *iph2; | ||
822 | |||
823 | if (!NAPI_GRO_CB(p)->same_flow) | ||
824 | continue; | ||
825 | |||
826 | iph2 = ipv6_hdr(p); | ||
827 | |||
828 | /* All fields must match except length. */ | ||
829 | if (nlen != skb_network_header_len(p) || | ||
830 | memcmp(iph, iph2, offsetof(struct ipv6hdr, payload_len)) || | ||
831 | memcmp(&iph->nexthdr, &iph2->nexthdr, | ||
832 | nlen - offsetof(struct ipv6hdr, nexthdr))) { | ||
833 | NAPI_GRO_CB(p)->same_flow = 0; | ||
834 | continue; | ||
835 | } | ||
836 | |||
837 | NAPI_GRO_CB(p)->flush |= flush; | ||
838 | } | ||
839 | |||
840 | NAPI_GRO_CB(skb)->flush |= flush; | ||
841 | |||
842 | pp = ops->gro_receive(head, skb); | ||
843 | |||
844 | out_unlock: | ||
845 | rcu_read_unlock(); | ||
846 | |||
847 | out: | ||
848 | NAPI_GRO_CB(skb)->flush |= flush; | ||
849 | |||
850 | return pp; | ||
851 | } | ||
852 | |||
853 | static int ipv6_gro_complete(struct sk_buff *skb) | ||
854 | { | ||
855 | struct inet6_protocol *ops; | ||
856 | struct ipv6hdr *iph = ipv6_hdr(skb); | ||
857 | int err = -ENOSYS; | ||
858 | |||
859 | iph->payload_len = htons(skb->len - skb_network_offset(skb) - | ||
860 | sizeof(*iph)); | ||
861 | |||
862 | rcu_read_lock(); | ||
863 | ops = rcu_dereference(inet6_protos[IPV6_GRO_CB(skb)->proto]); | ||
864 | if (WARN_ON(!ops || !ops->gro_complete)) | ||
865 | goto out_unlock; | ||
866 | |||
867 | err = ops->gro_complete(skb); | ||
868 | |||
869 | out_unlock: | ||
870 | rcu_read_unlock(); | ||
871 | |||
872 | return err; | ||
873 | } | ||
874 | |||
780 | static struct packet_type ipv6_packet_type = { | 875 | static struct packet_type ipv6_packet_type = { |
781 | .type = __constant_htons(ETH_P_IPV6), | 876 | .type = __constant_htons(ETH_P_IPV6), |
782 | .func = ipv6_rcv, | 877 | .func = ipv6_rcv, |
783 | .gso_send_check = ipv6_gso_send_check, | 878 | .gso_send_check = ipv6_gso_send_check, |
784 | .gso_segment = ipv6_gso_segment, | 879 | .gso_segment = ipv6_gso_segment, |
880 | .gro_receive = ipv6_gro_receive, | ||
881 | .gro_complete = ipv6_gro_complete, | ||
785 | }; | 882 | }; |
786 | 883 | ||
787 | static int __init ipv6_packet_init(void) | 884 | static int __init ipv6_packet_init(void) |
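
ipv6_gro_receive() above needs to remember which transport protocol it parsed past the extension headers so ipv6_gro_complete() can look up the right inet6_protocol later; it does that by embedding struct napi_gro_cb at the start of a private control block overlaid on skb->cb. The same pattern, sketched for a hypothetical protocol ("myproto" is an assumption; only the embedding trick comes from this patch):

```c
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Private GRO state: napi_gro_cb must stay first so the generic
 * NAPI_GRO_CB() accessors in net/core/dev.c keep working. */
struct myproto_gro_cb {
	struct napi_gro_cb napi;
	int proto;			/* parsed inner protocol number */
};

#define MYPROTO_GRO_CB(skb) ((struct myproto_gro_cb *)(skb)->cb)

static inline void myproto_gro_set_proto(struct sk_buff *skb, int proto)
{
	MYPROTO_GRO_CB(skb)->proto = proto;
}

static inline int myproto_gro_get_proto(struct sk_buff *skb)
{
	return MYPROTO_GRO_CB(skb)->proto;
}
```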
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index e8b8337a8310..1297306d729c 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -101,7 +101,7 @@ static void tcp_v6_hash(struct sock *sk) | |||
101 | } | 101 | } |
102 | } | 102 | } |
103 | 103 | ||
104 | static __inline__ __sum16 tcp_v6_check(struct tcphdr *th, int len, | 104 | static __inline__ __sum16 tcp_v6_check(int len, |
105 | struct in6_addr *saddr, | 105 | struct in6_addr *saddr, |
106 | struct in6_addr *daddr, | 106 | struct in6_addr *daddr, |
107 | __wsum base) | 107 | __wsum base) |
@@ -501,7 +501,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req) | |||
501 | if (skb) { | 501 | if (skb) { |
502 | struct tcphdr *th = tcp_hdr(skb); | 502 | struct tcphdr *th = tcp_hdr(skb); |
503 | 503 | ||
504 | th->check = tcp_v6_check(th, skb->len, | 504 | th->check = tcp_v6_check(skb->len, |
505 | &treq->loc_addr, &treq->rmt_addr, | 505 | &treq->loc_addr, &treq->rmt_addr, |
506 | csum_partial(th, skb->len, skb->csum)); | 506 | csum_partial(th, skb->len, skb->csum)); |
507 | 507 | ||
@@ -942,6 +942,41 @@ static int tcp_v6_gso_send_check(struct sk_buff *skb) | |||
942 | return 0; | 942 | return 0; |
943 | } | 943 | } |
944 | 944 | ||
945 | struct sk_buff **tcp6_gro_receive(struct sk_buff **head, struct sk_buff *skb) | ||
946 | { | ||
947 | struct ipv6hdr *iph = ipv6_hdr(skb); | ||
948 | |||
949 | switch (skb->ip_summed) { | ||
950 | case CHECKSUM_COMPLETE: | ||
951 | if (!tcp_v6_check(skb->len, &iph->saddr, &iph->daddr, | ||
952 | skb->csum)) { | ||
953 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
954 | break; | ||
955 | } | ||
956 | |||
957 | /* fall through */ | ||
958 | case CHECKSUM_NONE: | ||
959 | NAPI_GRO_CB(skb)->flush = 1; | ||
960 | return NULL; | ||
961 | } | ||
962 | |||
963 | return tcp_gro_receive(head, skb); | ||
964 | } | ||
965 | EXPORT_SYMBOL(tcp6_gro_receive); | ||
966 | |||
967 | int tcp6_gro_complete(struct sk_buff *skb) | ||
968 | { | ||
969 | struct ipv6hdr *iph = ipv6_hdr(skb); | ||
970 | struct tcphdr *th = tcp_hdr(skb); | ||
971 | |||
972 | th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb), | ||
973 | &iph->saddr, &iph->daddr, 0); | ||
974 | skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; | ||
975 | |||
976 | return tcp_gro_complete(skb); | ||
977 | } | ||
978 | EXPORT_SYMBOL(tcp6_gro_complete); | ||
979 | |||
945 | static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win, | 980 | static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win, |
946 | u32 ts, struct tcp_md5sig_key *key, int rst) | 981 | u32 ts, struct tcp_md5sig_key *key, int rst) |
947 | { | 982 | { |
@@ -1429,14 +1464,14 @@ out: | |||
1429 | static __sum16 tcp_v6_checksum_init(struct sk_buff *skb) | 1464 | static __sum16 tcp_v6_checksum_init(struct sk_buff *skb) |
1430 | { | 1465 | { |
1431 | if (skb->ip_summed == CHECKSUM_COMPLETE) { | 1466 | if (skb->ip_summed == CHECKSUM_COMPLETE) { |
1432 | if (!tcp_v6_check(tcp_hdr(skb), skb->len, &ipv6_hdr(skb)->saddr, | 1467 | if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr, |
1433 | &ipv6_hdr(skb)->daddr, skb->csum)) { | 1468 | &ipv6_hdr(skb)->daddr, skb->csum)) { |
1434 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 1469 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
1435 | return 0; | 1470 | return 0; |
1436 | } | 1471 | } |
1437 | } | 1472 | } |
1438 | 1473 | ||
1439 | skb->csum = ~csum_unfold(tcp_v6_check(tcp_hdr(skb), skb->len, | 1474 | skb->csum = ~csum_unfold(tcp_v6_check(skb->len, |
1440 | &ipv6_hdr(skb)->saddr, | 1475 | &ipv6_hdr(skb)->saddr, |
1441 | &ipv6_hdr(skb)->daddr, 0)); | 1476 | &ipv6_hdr(skb)->daddr, 0)); |
1442 | 1477 | ||
@@ -2062,6 +2097,8 @@ static struct inet6_protocol tcpv6_protocol = { | |||
2062 | .err_handler = tcp_v6_err, | 2097 | .err_handler = tcp_v6_err, |
2063 | .gso_send_check = tcp_v6_gso_send_check, | 2098 | .gso_send_check = tcp_v6_gso_send_check, |
2064 | .gso_segment = tcp_tso_segment, | 2099 | .gso_segment = tcp_tso_segment, |
2100 | .gro_receive = tcp6_gro_receive, | ||
2101 | .gro_complete = tcp6_gro_complete, | ||
2065 | .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, | 2102 | .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, |
2066 | }; | 2103 | }; |
2067 | 2104 | ||
diff --git a/net/phonet/pep-gprs.c b/net/phonet/pep-gprs.c
index b0ceac2d6cd1..6a91a32a80c1 100644
--- a/net/phonet/pep-gprs.c
+++ b/net/phonet/pep-gprs.c
@@ -227,6 +227,13 @@ static int gprs_set_mtu(struct net_device *dev, int new_mtu) | |||
227 | return 0; | 227 | return 0; |
228 | } | 228 | } |
229 | 229 | ||
230 | static const struct net_device_ops gprs_netdev_ops = { | ||
231 | .ndo_open = gprs_open, | ||
232 | .ndo_stop = gprs_close, | ||
233 | .ndo_start_xmit = gprs_xmit, | ||
234 | .ndo_change_mtu = gprs_set_mtu, | ||
235 | }; | ||
236 | |||
230 | static void gprs_setup(struct net_device *dev) | 237 | static void gprs_setup(struct net_device *dev) |
231 | { | 238 | { |
232 | dev->features = NETIF_F_FRAGLIST; | 239 | dev->features = NETIF_F_FRAGLIST; |
@@ -237,11 +244,8 @@ static void gprs_setup(struct net_device *dev) | |||
237 | dev->addr_len = 0; | 244 | dev->addr_len = 0; |
238 | dev->tx_queue_len = 10; | 245 | dev->tx_queue_len = 10; |
239 | 246 | ||
247 | dev->netdev_ops = &gprs_netdev_ops; | ||
240 | dev->destructor = free_netdev; | 248 | dev->destructor = free_netdev; |
241 | dev->open = gprs_open; | ||
242 | dev->stop = gprs_close; | ||
243 | dev->hard_start_xmit = gprs_xmit; /* mandatory */ | ||
244 | dev->change_mtu = gprs_set_mtu; | ||
245 | } | 249 | } |
246 | 250 | ||
247 | /* | 251 | /* |
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index cfc8e7caba62..ec697cebb63b 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -289,9 +289,9 @@ restart: | |||
289 | 289 | ||
290 | do { | 290 | do { |
291 | struct net_device *slave = qdisc_dev(q); | 291 | struct net_device *slave = qdisc_dev(q); |
292 | struct netdev_queue *slave_txq; | 292 | struct netdev_queue *slave_txq = netdev_get_tx_queue(slave, 0); |
293 | const struct net_device_ops *slave_ops = slave->netdev_ops; | ||
293 | 294 | ||
294 | slave_txq = netdev_get_tx_queue(slave, 0); | ||
295 | if (slave_txq->qdisc_sleeping != q) | 295 | if (slave_txq->qdisc_sleeping != q) |
296 | continue; | 296 | continue; |
297 | if (__netif_subqueue_stopped(slave, subq) || | 297 | if (__netif_subqueue_stopped(slave, subq) || |
@@ -305,7 +305,7 @@ restart: | |||
305 | if (__netif_tx_trylock(slave_txq)) { | 305 | if (__netif_tx_trylock(slave_txq)) { |
306 | if (!netif_tx_queue_stopped(slave_txq) && | 306 | if (!netif_tx_queue_stopped(slave_txq) && |
307 | !netif_tx_queue_frozen(slave_txq) && | 307 | !netif_tx_queue_frozen(slave_txq) && |
308 | slave->hard_start_xmit(skb, slave) == 0) { | 308 | slave_ops->ndo_start_xmit(skb, slave) == 0) { |
309 | __netif_tx_unlock(slave_txq); | 309 | __netif_tx_unlock(slave_txq); |
310 | master->slaves = NEXT_SLAVE(q); | 310 | master->slaves = NEXT_SLAVE(q); |
311 | netif_wake_queue(dev); | 311 | netif_wake_queue(dev); |
@@ -420,6 +420,14 @@ static int teql_master_mtu(struct net_device *dev, int new_mtu) | |||
420 | return 0; | 420 | return 0; |
421 | } | 421 | } |
422 | 422 | ||
423 | static const struct net_device_ops teql_netdev_ops = { | ||
424 | .ndo_open = teql_master_open, | ||
425 | .ndo_stop = teql_master_close, | ||
426 | .ndo_start_xmit = teql_master_xmit, | ||
427 | .ndo_get_stats = teql_master_stats, | ||
428 | .ndo_change_mtu = teql_master_mtu, | ||
429 | }; | ||
430 | |||
423 | static __init void teql_master_setup(struct net_device *dev) | 431 | static __init void teql_master_setup(struct net_device *dev) |
424 | { | 432 | { |
425 | struct teql_master *master = netdev_priv(dev); | 433 | struct teql_master *master = netdev_priv(dev); |
@@ -436,11 +444,7 @@ static __init void teql_master_setup(struct net_device *dev) | |||
436 | ops->destroy = teql_destroy; | 444 | ops->destroy = teql_destroy; |
437 | ops->owner = THIS_MODULE; | 445 | ops->owner = THIS_MODULE; |
438 | 446 | ||
439 | dev->open = teql_master_open; | 447 | dev->netdev_ops = &teql_netdev_ops; |
440 | dev->hard_start_xmit = teql_master_xmit; | ||
441 | dev->stop = teql_master_close; | ||
442 | dev->get_stats = teql_master_stats; | ||
443 | dev->change_mtu = teql_master_mtu; | ||
444 | dev->type = ARPHRD_VOID; | 448 | dev->type = ARPHRD_VOID; |
445 | dev->mtu = 1500; | 449 | dev->mtu = 1500; |
446 | dev->tx_queue_len = 100; | 450 | dev->tx_queue_len = 100; |
diff --git a/net/wimax/Kconfig b/net/wimax/Kconfig
index 0bdbb6928205..18495cdcd10d 100644
--- a/net/wimax/Kconfig
+++ b/net/wimax/Kconfig
@@ -1,9 +1,23 @@ | |||
1 | # | 1 | # |
2 | # WiMAX LAN device configuration | 2 | # WiMAX LAN device configuration |
3 | # | 3 | # |
4 | # Note the ugly 'depends on' on WIMAX: that disallows RFKILL to be a | ||
5 | # module if WIMAX is to be linked in. The WiMAX code is done in such a | ||
6 | # way that it doesn't require and explicit dependency on RFKILL in | ||
7 | # case an embedded system wants to rip it out. | ||
8 | # | ||
9 | # As well, enablement of the RFKILL code means we need the INPUT layer | ||
10 | # support to inject events coming from hw rfkill switches. That | ||
11 | # dependency could be killed if input.h provided appropiate means to | ||
12 | # work when input is disabled. | ||
13 | |||
14 | comment "WiMAX Wireless Broadband support requires CONFIG_INPUT enabled" | ||
15 | depends on INPUT = n && RFKILL != n | ||
4 | 16 | ||
5 | menuconfig WIMAX | 17 | menuconfig WIMAX |
6 | tristate "WiMAX Wireless Broadband support" | 18 | tristate "WiMAX Wireless Broadband support" |
19 | depends on (y && RFKILL != m) || m | ||
20 | depends on (INPUT && RFKILL != n) || RFKILL = n | ||
7 | help | 21 | help |
8 | 22 | ||
9 | Select to configure support for devices that provide | 23 | Select to configure support for devices that provide |
diff --git a/net/wimax/id-table.c b/net/wimax/id-table.c
index d3b88558682c..5e685f7eda90 100644
--- a/net/wimax/id-table.c
+++ b/net/wimax/id-table.c
@@ -123,15 +123,17 @@ void wimax_id_table_rm(struct wimax_dev *wimax_dev) | |||
123 | /* | 123 | /* |
124 | * Release the gennetlink family id / mapping table | 124 | * Release the gennetlink family id / mapping table |
125 | * | 125 | * |
126 | * On debug, verify that the table is empty upon removal. | 126 | * On debug, verify that the table is empty upon removal. We want the |
127 | * code always compiled, to ensure it doesn't bit rot. It will be | ||
128 | * compiled out if CONFIG_BUG is disabled. | ||
127 | */ | 129 | */ |
128 | void wimax_id_table_release(void) | 130 | void wimax_id_table_release(void) |
129 | { | 131 | { |
132 | struct wimax_dev *wimax_dev; | ||
133 | |||
130 | #ifndef CONFIG_BUG | 134 | #ifndef CONFIG_BUG |
131 | return; | 135 | return; |
132 | #endif | 136 | #endif |
133 | struct wimax_dev *wimax_dev; | ||
134 | |||
135 | spin_lock(&wimax_id_table_lock); | 137 | spin_lock(&wimax_id_table_lock); |
136 | list_for_each_entry(wimax_dev, &wimax_id_table, id_table_node) { | 138 | list_for_each_entry(wimax_dev, &wimax_id_table, id_table_node) { |
137 | printk(KERN_ERR "BUG: %s wimax_dev %p ifindex %d not cleared\n", | 139 | printk(KERN_ERR "BUG: %s wimax_dev %p ifindex %d not cleared\n", |
diff --git a/net/wimax/op-rfkill.c b/net/wimax/op-rfkill.c
index 8745bac173f1..2b75aee04217 100644
--- a/net/wimax/op-rfkill.c
+++ b/net/wimax/op-rfkill.c
@@ -71,7 +71,7 @@ | |||
71 | #define D_SUBMODULE op_rfkill | 71 | #define D_SUBMODULE op_rfkill |
72 | #include "debug-levels.h" | 72 | #include "debug-levels.h" |
73 | 73 | ||
74 | #ifdef CONFIG_RFKILL | 74 | #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE) |
75 | 75 | ||
76 | 76 | ||
77 | /** | 77 | /** |
diff --git a/net/wireless/wext.c b/net/wireless/wext.c
index e49a2d1ef1e4..cb6a5bb85d80 100644
--- a/net/wireless/wext.c
+++ b/net/wireless/wext.c
@@ -1055,8 +1055,8 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr, | |||
1055 | return private(dev, iwr, cmd, info, handler); | 1055 | return private(dev, iwr, cmd, info, handler); |
1056 | } | 1056 | } |
1057 | /* Old driver API : call driver ioctl handler */ | 1057 | /* Old driver API : call driver ioctl handler */ |
1058 | if (dev->do_ioctl) | 1058 | if (dev->netdev_ops->ndo_do_ioctl) |
1059 | return dev->do_ioctl(dev, ifr, cmd); | 1059 | return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd); |
1060 | return -EOPNOTSUPP; | 1060 | return -EOPNOTSUPP; |
1061 | } | 1061 | } |
1062 | 1062 | ||
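
The wext.c hunk is the caller-side half of the net_device_ops migration seen throughout this merge: code that used to jump through dev->do_ioctl now looks the handler up via dev->netdev_ops. A small helper sketching that dispatch; my_dev_private_ioctl() is illustrative, only the ndo_do_ioctl path is from the patch:

```c
#include <linux/netdevice.h>

/* Dispatch a device-private ioctl the post-net_device_ops way, as the
 * updated aarp.c and wext.c call sites do. */
static int my_dev_private_ioctl(struct net_device *dev, struct ifreq *ifr,
				int cmd)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops || !ops->ndo_do_ioctl)
		return -EOPNOTSUPP;

	return ops->ndo_do_ioctl(dev, ifr, cmd);
}
```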