Diffstat (limited to 'net')
46 files changed, 2659 insertions, 349 deletions
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index dd86a1dc4cd0..6c1323940263 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -3,46 +3,35 @@ | |||
3 | #include <linux/if_vlan.h> | 3 | #include <linux/if_vlan.h> |
4 | #include "vlan.h" | 4 | #include "vlan.h" |
5 | 5 | ||
6 | struct vlan_hwaccel_cb { | ||
7 | struct net_device *dev; | ||
8 | }; | ||
9 | |||
10 | static inline struct vlan_hwaccel_cb *vlan_hwaccel_cb(struct sk_buff *skb) | ||
11 | { | ||
12 | return (struct vlan_hwaccel_cb *)skb->cb; | ||
13 | } | ||
14 | |||
15 | /* VLAN rx hw acceleration helper. This acts like netif_{rx,receive_skb}(). */ | 6 | /* VLAN rx hw acceleration helper. This acts like netif_{rx,receive_skb}(). */ |
16 | int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp, | 7 | int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp, |
17 | u16 vlan_tci, int polling) | 8 | u16 vlan_tci, int polling) |
18 | { | 9 | { |
19 | struct vlan_hwaccel_cb *cb = vlan_hwaccel_cb(skb); | 10 | if (skb_bond_should_drop(skb)) |
20 | 11 | goto drop; | |
21 | if (skb_bond_should_drop(skb)) { | ||
22 | dev_kfree_skb_any(skb); | ||
23 | return NET_RX_DROP; | ||
24 | } | ||
25 | 12 | ||
26 | skb->vlan_tci = vlan_tci; | 13 | skb->vlan_tci = vlan_tci; |
27 | cb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK); | 14 | skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK); |
15 | |||
16 | if (!skb->dev) | ||
17 | goto drop; | ||
28 | 18 | ||
29 | return (polling ? netif_receive_skb(skb) : netif_rx(skb)); | 19 | return (polling ? netif_receive_skb(skb) : netif_rx(skb)); |
20 | |||
21 | drop: | ||
22 | dev_kfree_skb_any(skb); | ||
23 | return NET_RX_DROP; | ||
30 | } | 24 | } |
31 | EXPORT_SYMBOL(__vlan_hwaccel_rx); | 25 | EXPORT_SYMBOL(__vlan_hwaccel_rx); |
32 | 26 | ||
33 | int vlan_hwaccel_do_receive(struct sk_buff *skb) | 27 | int vlan_hwaccel_do_receive(struct sk_buff *skb) |
34 | { | 28 | { |
35 | struct vlan_hwaccel_cb *cb = vlan_hwaccel_cb(skb); | 29 | struct net_device *dev = skb->dev; |
36 | struct net_device *dev = cb->dev; | ||
37 | struct net_device_stats *stats; | 30 | struct net_device_stats *stats; |
38 | 31 | ||
32 | skb->dev = vlan_dev_info(dev)->real_dev; | ||
39 | netif_nit_deliver(skb); | 33 | netif_nit_deliver(skb); |
40 | 34 | ||
41 | if (dev == NULL) { | ||
42 | kfree_skb(skb); | ||
43 | return -1; | ||
44 | } | ||
45 | |||
46 | skb->dev = dev; | 35 | skb->dev = dev; |
47 | skb->priority = vlan_get_ingress_priority(dev, skb->vlan_tci); | 36 | skb->priority = vlan_get_ingress_priority(dev, skb->vlan_tci); |
48 | skb->vlan_tci = 0; | 37 | skb->vlan_tci = 0; |
@@ -80,3 +69,79 @@ u16 vlan_dev_vlan_id(const struct net_device *dev) | |||
80 | return vlan_dev_info(dev)->vlan_id; | 69 | return vlan_dev_info(dev)->vlan_id; |
81 | } | 70 | } |
82 | EXPORT_SYMBOL_GPL(vlan_dev_vlan_id); | 71 | EXPORT_SYMBOL_GPL(vlan_dev_vlan_id); |
72 | |||
73 | static int vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp, | ||
74 | unsigned int vlan_tci, struct sk_buff *skb) | ||
75 | { | ||
76 | struct sk_buff *p; | ||
77 | |||
78 | if (skb_bond_should_drop(skb)) | ||
79 | goto drop; | ||
80 | |||
81 | skb->vlan_tci = vlan_tci; | ||
82 | skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK); | ||
83 | |||
84 | if (!skb->dev) | ||
85 | goto drop; | ||
86 | |||
87 | for (p = napi->gro_list; p; p = p->next) { | ||
88 | NAPI_GRO_CB(p)->same_flow = p->dev == skb->dev; | ||
89 | NAPI_GRO_CB(p)->flush = 0; | ||
90 | } | ||
91 | |||
92 | return dev_gro_receive(napi, skb); | ||
93 | |||
94 | drop: | ||
95 | return 2; | ||
96 | } | ||
97 | |||
98 | int vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp, | ||
99 | unsigned int vlan_tci, struct sk_buff *skb) | ||
100 | { | ||
101 | int err = NET_RX_SUCCESS; | ||
102 | |||
103 | switch (vlan_gro_common(napi, grp, vlan_tci, skb)) { | ||
104 | case -1: | ||
105 | return netif_receive_skb(skb); | ||
106 | |||
107 | case 2: | ||
108 | err = NET_RX_DROP; | ||
109 | /* fall through */ | ||
110 | |||
111 | case 1: | ||
112 | kfree_skb(skb); | ||
113 | break; | ||
114 | } | ||
115 | |||
116 | return err; | ||
117 | } | ||
118 | EXPORT_SYMBOL(vlan_gro_receive); | ||
119 | |||
120 | int vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp, | ||
121 | unsigned int vlan_tci, struct napi_gro_fraginfo *info) | ||
122 | { | ||
123 | struct sk_buff *skb = napi_fraginfo_skb(napi, info); | ||
124 | int err = NET_RX_DROP; | ||
125 | |||
126 | if (!skb) | ||
127 | goto out; | ||
128 | |||
129 | err = NET_RX_SUCCESS; | ||
130 | |||
131 | switch (vlan_gro_common(napi, grp, vlan_tci, skb)) { | ||
132 | case -1: | ||
133 | return netif_receive_skb(skb); | ||
134 | |||
135 | case 2: | ||
136 | err = NET_RX_DROP; | ||
137 | /* fall through */ | ||
138 | |||
139 | case 1: | ||
140 | napi_reuse_skb(napi, skb); | ||
141 | break; | ||
142 | } | ||
143 | |||
144 | out: | ||
145 | return err; | ||
146 | } | ||
147 | EXPORT_SYMBOL(vlan_gro_frags); | ||
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 89a3bbdfca3f..4a19acd3a32b 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -546,6 +546,18 @@ static int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
546 | return err; | 546 | return err; |
547 | } | 547 | } |
548 | 548 | ||
549 | static int vlan_dev_neigh_setup(struct net_device *dev, struct neigh_parms *pa) | ||
550 | { | ||
551 | struct net_device *real_dev = vlan_dev_info(dev)->real_dev; | ||
552 | const struct net_device_ops *ops = real_dev->netdev_ops; | ||
553 | int err = 0; | ||
554 | |||
555 | if (netif_device_present(real_dev) && ops->ndo_neigh_setup) | ||
556 | err = ops->ndo_neigh_setup(dev, pa); | ||
557 | |||
558 | return err; | ||
559 | } | ||
560 | |||
549 | static void vlan_dev_change_rx_flags(struct net_device *dev, int change) | 561 | static void vlan_dev_change_rx_flags(struct net_device *dev, int change) |
550 | { | 562 | { |
551 | struct net_device *real_dev = vlan_dev_info(dev)->real_dev; | 563 | struct net_device *real_dev = vlan_dev_info(dev)->real_dev; |
@@ -713,6 +725,7 @@ static const struct net_device_ops vlan_netdev_ops = { | |||
713 | .ndo_set_multicast_list = vlan_dev_set_rx_mode, | 725 | .ndo_set_multicast_list = vlan_dev_set_rx_mode, |
714 | .ndo_change_rx_flags = vlan_dev_change_rx_flags, | 726 | .ndo_change_rx_flags = vlan_dev_change_rx_flags, |
715 | .ndo_do_ioctl = vlan_dev_ioctl, | 727 | .ndo_do_ioctl = vlan_dev_ioctl, |
728 | .ndo_neigh_setup = vlan_dev_neigh_setup, | ||
716 | }; | 729 | }; |
717 | 730 | ||
718 | static const struct net_device_ops vlan_netdev_accel_ops = { | 731 | static const struct net_device_ops vlan_netdev_accel_ops = { |
@@ -728,6 +741,7 @@ static const struct net_device_ops vlan_netdev_accel_ops = { | |||
728 | .ndo_set_multicast_list = vlan_dev_set_rx_mode, | 741 | .ndo_set_multicast_list = vlan_dev_set_rx_mode, |
729 | .ndo_change_rx_flags = vlan_dev_change_rx_flags, | 742 | .ndo_change_rx_flags = vlan_dev_change_rx_flags, |
730 | .ndo_do_ioctl = vlan_dev_ioctl, | 743 | .ndo_do_ioctl = vlan_dev_ioctl, |
744 | .ndo_neigh_setup = vlan_dev_neigh_setup, | ||
731 | }; | 745 | }; |
732 | 746 | ||
733 | void vlan_setup(struct net_device *dev) | 747 | void vlan_setup(struct net_device *dev) |
diff --git a/net/Kconfig b/net/Kconfig
index 6ec2cce7c167..bf2776018f71 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -254,6 +254,8 @@ source "net/mac80211/Kconfig" | |||
254 | 254 | ||
255 | endif # WIRELESS | 255 | endif # WIRELESS |
256 | 256 | ||
257 | source "net/wimax/Kconfig" | ||
258 | |||
257 | source "net/rfkill/Kconfig" | 259 | source "net/rfkill/Kconfig" |
258 | source "net/9p/Kconfig" | 260 | source "net/9p/Kconfig" |
259 | 261 | ||
diff --git a/net/Makefile b/net/Makefile
index ba4460432b7c..0fcce89d7169 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -63,3 +63,4 @@ endif | |||
63 | ifeq ($(CONFIG_NET),y) | 63 | ifeq ($(CONFIG_NET),y) |
64 | obj-$(CONFIG_SYSCTL) += sysctl_net.o | 64 | obj-$(CONFIG_SYSCTL) += sysctl_net.o |
65 | endif | 65 | endif |
66 | obj-$(CONFIG_WIMAX) += wimax/ | ||
diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c
index b03ff58e9308..89f99d3beb60 100644
--- a/net/appletalk/aarp.c
+++ b/net/appletalk/aarp.c
@@ -443,13 +443,14 @@ static void aarp_send_probe_phase1(struct atalk_iface *iface) | |||
443 | { | 443 | { |
444 | struct ifreq atreq; | 444 | struct ifreq atreq; |
445 | struct sockaddr_at *sa = (struct sockaddr_at *)&atreq.ifr_addr; | 445 | struct sockaddr_at *sa = (struct sockaddr_at *)&atreq.ifr_addr; |
446 | const struct net_device_ops *ops = iface->dev->netdev_ops; | ||
446 | 447 | ||
447 | sa->sat_addr.s_node = iface->address.s_node; | 448 | sa->sat_addr.s_node = iface->address.s_node; |
448 | sa->sat_addr.s_net = ntohs(iface->address.s_net); | 449 | sa->sat_addr.s_net = ntohs(iface->address.s_net); |
449 | 450 | ||
450 | /* We pass the Net:Node to the drivers/cards by a Device ioctl. */ | 451 | /* We pass the Net:Node to the drivers/cards by a Device ioctl. */ |
451 | if (!(iface->dev->do_ioctl(iface->dev, &atreq, SIOCSIFADDR))) { | 452 | if (!(ops->ndo_do_ioctl(iface->dev, &atreq, SIOCSIFADDR))) { |
452 | (void)iface->dev->do_ioctl(iface->dev, &atreq, SIOCGIFADDR); | 453 | ops->ndo_do_ioctl(iface->dev, &atreq, SIOCGIFADDR); |
453 | if (iface->address.s_net != htons(sa->sat_addr.s_net) || | 454 | if (iface->address.s_net != htons(sa->sat_addr.s_net) || |
454 | iface->address.s_node != sa->sat_addr.s_node) | 455 | iface->address.s_node != sa->sat_addr.s_node) |
455 | iface->status |= ATIF_PROBE_FAIL; | 456 | iface->status |= ATIF_PROBE_FAIL; |
diff --git a/net/bluetooth/bnep/bnep.h b/net/bluetooth/bnep/bnep.h
index d20f8a40f36e..0d9e506f5d5a 100644
--- a/net/bluetooth/bnep/bnep.h
+++ b/net/bluetooth/bnep/bnep.h
@@ -165,7 +165,6 @@ struct bnep_session { | |||
165 | 165 | ||
166 | struct socket *sock; | 166 | struct socket *sock; |
167 | struct net_device *dev; | 167 | struct net_device *dev; |
168 | struct net_device_stats stats; | ||
169 | }; | 168 | }; |
170 | 169 | ||
171 | void bnep_net_setup(struct net_device *dev); | 170 | void bnep_net_setup(struct net_device *dev); |
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 70fea8bdb4e5..52a6ce0d772b 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -306,7 +306,7 @@ static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb) | |||
306 | struct sk_buff *nskb; | 306 | struct sk_buff *nskb; |
307 | u8 type; | 307 | u8 type; |
308 | 308 | ||
309 | s->stats.rx_bytes += skb->len; | 309 | dev->stats.rx_bytes += skb->len; |
310 | 310 | ||
311 | type = *(u8 *) skb->data; skb_pull(skb, 1); | 311 | type = *(u8 *) skb->data; skb_pull(skb, 1); |
312 | 312 | ||
@@ -343,7 +343,7 @@ static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb) | |||
343 | * may not be modified and because of the alignment requirements. */ | 343 | * may not be modified and because of the alignment requirements. */ |
344 | nskb = alloc_skb(2 + ETH_HLEN + skb->len, GFP_KERNEL); | 344 | nskb = alloc_skb(2 + ETH_HLEN + skb->len, GFP_KERNEL); |
345 | if (!nskb) { | 345 | if (!nskb) { |
346 | s->stats.rx_dropped++; | 346 | dev->stats.rx_dropped++; |
347 | kfree_skb(skb); | 347 | kfree_skb(skb); |
348 | return -ENOMEM; | 348 | return -ENOMEM; |
349 | } | 349 | } |
@@ -378,14 +378,14 @@ static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb) | |||
378 | skb_copy_from_linear_data(skb, __skb_put(nskb, skb->len), skb->len); | 378 | skb_copy_from_linear_data(skb, __skb_put(nskb, skb->len), skb->len); |
379 | kfree_skb(skb); | 379 | kfree_skb(skb); |
380 | 380 | ||
381 | s->stats.rx_packets++; | 381 | dev->stats.rx_packets++; |
382 | nskb->ip_summed = CHECKSUM_NONE; | 382 | nskb->ip_summed = CHECKSUM_NONE; |
383 | nskb->protocol = eth_type_trans(nskb, dev); | 383 | nskb->protocol = eth_type_trans(nskb, dev); |
384 | netif_rx_ni(nskb); | 384 | netif_rx_ni(nskb); |
385 | return 0; | 385 | return 0; |
386 | 386 | ||
387 | badframe: | 387 | badframe: |
388 | s->stats.rx_errors++; | 388 | dev->stats.rx_errors++; |
389 | kfree_skb(skb); | 389 | kfree_skb(skb); |
390 | return 0; | 390 | return 0; |
391 | } | 391 | } |
@@ -448,8 +448,8 @@ send: | |||
448 | kfree_skb(skb); | 448 | kfree_skb(skb); |
449 | 449 | ||
450 | if (len > 0) { | 450 | if (len > 0) { |
451 | s->stats.tx_bytes += len; | 451 | s->dev->stats.tx_bytes += len; |
452 | s->stats.tx_packets++; | 452 | s->dev->stats.tx_packets++; |
453 | return 0; | 453 | return 0; |
454 | } | 454 | } |
455 | 455 | ||
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index f897da6e0444..d7a0e9722def 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -55,12 +55,6 @@ static int bnep_net_close(struct net_device *dev) | |||
55 | return 0; | 55 | return 0; |
56 | } | 56 | } |
57 | 57 | ||
58 | static struct net_device_stats *bnep_net_get_stats(struct net_device *dev) | ||
59 | { | ||
60 | struct bnep_session *s = netdev_priv(dev); | ||
61 | return &s->stats; | ||
62 | } | ||
63 | |||
64 | static void bnep_net_set_mc_list(struct net_device *dev) | 58 | static void bnep_net_set_mc_list(struct net_device *dev) |
65 | { | 59 | { |
66 | #ifdef CONFIG_BT_BNEP_MC_FILTER | 60 | #ifdef CONFIG_BT_BNEP_MC_FILTER |
@@ -128,11 +122,6 @@ static void bnep_net_timeout(struct net_device *dev) | |||
128 | netif_wake_queue(dev); | 122 | netif_wake_queue(dev); |
129 | } | 123 | } |
130 | 124 | ||
131 | static int bnep_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | ||
132 | { | ||
133 | return -EINVAL; | ||
134 | } | ||
135 | |||
136 | #ifdef CONFIG_BT_BNEP_MC_FILTER | 125 | #ifdef CONFIG_BT_BNEP_MC_FILTER |
137 | static inline int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s) | 126 | static inline int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s) |
138 | { | 127 | { |
@@ -217,6 +206,18 @@ static int bnep_net_xmit(struct sk_buff *skb, struct net_device *dev) | |||
217 | return 0; | 206 | return 0; |
218 | } | 207 | } |
219 | 208 | ||
209 | static const struct net_device_ops bnep_netdev_ops = { | ||
210 | .ndo_open = bnep_net_open, | ||
211 | .ndo_stop = bnep_net_close, | ||
212 | .ndo_start_xmit = bnep_net_xmit, | ||
213 | .ndo_validate_addr = eth_validate_addr, | ||
214 | .ndo_set_multicast_list = bnep_net_set_mc_list, | ||
215 | .ndo_set_mac_address = bnep_net_set_mac_addr, | ||
216 | .ndo_tx_timeout = bnep_net_timeout, | ||
217 | .ndo_change_mtu = eth_change_mtu, | ||
218 | |||
219 | }; | ||
220 | |||
220 | void bnep_net_setup(struct net_device *dev) | 221 | void bnep_net_setup(struct net_device *dev) |
221 | { | 222 | { |
222 | 223 | ||
@@ -224,15 +225,7 @@ void bnep_net_setup(struct net_device *dev) | |||
224 | dev->addr_len = ETH_ALEN; | 225 | dev->addr_len = ETH_ALEN; |
225 | 226 | ||
226 | ether_setup(dev); | 227 | ether_setup(dev); |
227 | 228 | dev->netdev_ops = &bnep_netdev_ops; | |
228 | dev->open = bnep_net_open; | ||
229 | dev->stop = bnep_net_close; | ||
230 | dev->hard_start_xmit = bnep_net_xmit; | ||
231 | dev->get_stats = bnep_net_get_stats; | ||
232 | dev->do_ioctl = bnep_net_ioctl; | ||
233 | dev->set_mac_address = bnep_net_set_mac_addr; | ||
234 | dev->set_multicast_list = bnep_net_set_mc_list; | ||
235 | 229 | ||
236 | dev->watchdog_timeo = HZ * 2; | 230 | dev->watchdog_timeo = HZ * 2; |
237 | dev->tx_timeout = bnep_net_timeout; | ||
238 | } | 231 | } |
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 3dadb338addd..fa417ca6cbe6 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -414,6 +414,12 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask, | |||
414 | * The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can | 414 | * The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can |
415 | * filter for error frames (CAN_ERR_FLAG bit set in mask). | 415 | * filter for error frames (CAN_ERR_FLAG bit set in mask). |
416 | * | 416 | * |
417 | * The provided pointer to the sk_buff is guaranteed to be valid as long as | ||
418 | * the callback function is running. The callback function must *not* free | ||
419 | * the given sk_buff while processing its task. When the given sk_buff is | ||
420 | * needed after the end of the callback function it must be cloned inside | ||
421 | * the callback function with skb_clone(). | ||
422 | * | ||
417 | * Return: | 423 | * Return: |
418 | * 0 on success | 424 | * 0 on success |
419 | * -ENOMEM on missing cache mem to create subscription entry | 425 | * -ENOMEM on missing cache mem to create subscription entry |
@@ -569,13 +575,8 @@ EXPORT_SYMBOL(can_rx_unregister); | |||
569 | 575 | ||
570 | static inline void deliver(struct sk_buff *skb, struct receiver *r) | 576 | static inline void deliver(struct sk_buff *skb, struct receiver *r) |
571 | { | 577 | { |
572 | struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC); | 578 | r->func(skb, r->data); |
573 | 579 | r->matches++; | |
574 | if (clone) { | ||
575 | clone->sk = skb->sk; | ||
576 | r->func(clone, r->data); | ||
577 | r->matches++; | ||
578 | } | ||
579 | } | 580 | } |
580 | 581 | ||
581 | static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb) | 582 | static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb) |
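The af_can.c hunk above tightens the can_rx_register() callback contract: deliver() no longer clones the skb for each receiver, so a callback must not free the skb it is handed and has to clone it itself if the frame is needed after the callback returns (raw_rcv() below does exactly that). A minimal, hypothetical callback honouring this contract might look like the sketch below; it is not part of the patch, and the sk_buff_head passed through the 'data' argument is an assumption made for illustration.

	#include <linux/can.h>
	#include <linux/skbuff.h>

	/* Hypothetical receiver registered with can_rx_register(). The skb is
	 * owned by the CAN core and only valid for the duration of this call,
	 * so it is never freed here. */
	static void sample_can_rcv(struct sk_buff *skb, void *data)
	{
		struct sk_buff_head *backlog = data;	/* assumed private queue */
		struct can_frame *cf = (struct can_frame *)skb->data;
		struct sk_buff *clone;

		/* In-place, read-only inspection is fine. */
		if (cf->can_dlc == 0)
			return;				/* no kfree_skb(skb) */

		/* Clone the frame so it can outlive the callback. */
		clone = skb_clone(skb, GFP_ATOMIC);
		if (clone)
			skb_queue_tail(backlog, clone);
	}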
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 6248ae2502c7..1649c8ab2c2f 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -633,7 +633,7 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data) | |||
633 | hrtimer_cancel(&op->timer); | 633 | hrtimer_cancel(&op->timer); |
634 | 634 | ||
635 | if (op->can_id != rxframe->can_id) | 635 | if (op->can_id != rxframe->can_id) |
636 | goto rx_freeskb; | 636 | return; |
637 | 637 | ||
638 | /* save rx timestamp */ | 638 | /* save rx timestamp */ |
639 | op->rx_stamp = skb->tstamp; | 639 | op->rx_stamp = skb->tstamp; |
@@ -645,19 +645,19 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data) | |||
645 | if (op->flags & RX_RTR_FRAME) { | 645 | if (op->flags & RX_RTR_FRAME) { |
646 | /* send reply for RTR-request (placed in op->frames[0]) */ | 646 | /* send reply for RTR-request (placed in op->frames[0]) */ |
647 | bcm_can_tx(op); | 647 | bcm_can_tx(op); |
648 | goto rx_freeskb; | 648 | return; |
649 | } | 649 | } |
650 | 650 | ||
651 | if (op->flags & RX_FILTER_ID) { | 651 | if (op->flags & RX_FILTER_ID) { |
652 | /* the easiest case */ | 652 | /* the easiest case */ |
653 | bcm_rx_update_and_send(op, &op->last_frames[0], rxframe); | 653 | bcm_rx_update_and_send(op, &op->last_frames[0], rxframe); |
654 | goto rx_freeskb_starttimer; | 654 | goto rx_starttimer; |
655 | } | 655 | } |
656 | 656 | ||
657 | if (op->nframes == 1) { | 657 | if (op->nframes == 1) { |
658 | /* simple compare with index 0 */ | 658 | /* simple compare with index 0 */ |
659 | bcm_rx_cmp_to_index(op, 0, rxframe); | 659 | bcm_rx_cmp_to_index(op, 0, rxframe); |
660 | goto rx_freeskb_starttimer; | 660 | goto rx_starttimer; |
661 | } | 661 | } |
662 | 662 | ||
663 | if (op->nframes > 1) { | 663 | if (op->nframes > 1) { |
@@ -678,10 +678,8 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data) | |||
678 | } | 678 | } |
679 | } | 679 | } |
680 | 680 | ||
681 | rx_freeskb_starttimer: | 681 | rx_starttimer: |
682 | bcm_rx_starttimer(op); | 682 | bcm_rx_starttimer(op); |
683 | rx_freeskb: | ||
684 | kfree_skb(skb); | ||
685 | } | 683 | } |
686 | 684 | ||
687 | /* | 685 | /* |
diff --git a/net/can/raw.c b/net/can/raw.c
index 27aab63df467..0703cba4bf9f 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -99,13 +99,14 @@ static void raw_rcv(struct sk_buff *skb, void *data) | |||
99 | struct raw_sock *ro = raw_sk(sk); | 99 | struct raw_sock *ro = raw_sk(sk); |
100 | struct sockaddr_can *addr; | 100 | struct sockaddr_can *addr; |
101 | 101 | ||
102 | if (!ro->recv_own_msgs) { | 102 | /* check the received tx sock reference */ |
103 | /* check the received tx sock reference */ | 103 | if (!ro->recv_own_msgs && skb->sk == sk) |
104 | if (skb->sk == sk) { | 104 | return; |
105 | kfree_skb(skb); | 105 | |
106 | return; | 106 | /* clone the given skb to be able to enqueue it into the rcv queue */ |
107 | } | 107 | skb = skb_clone(skb, GFP_ATOMIC); |
108 | } | 108 | if (!skb) |
109 | return; | ||
109 | 110 | ||
110 | /* | 111 | /* |
111 | * Put the datagram to the queue so that raw_recvmsg() can | 112 | * Put the datagram to the queue so that raw_recvmsg() can |
diff --git a/net/core/dev.c b/net/core/dev.c
index 382df6c09eec..5f736f1ceeae 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -170,25 +170,6 @@ static DEFINE_SPINLOCK(ptype_lock); | |||
170 | static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly; | 170 | static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly; |
171 | static struct list_head ptype_all __read_mostly; /* Taps */ | 171 | static struct list_head ptype_all __read_mostly; /* Taps */ |
172 | 172 | ||
173 | #ifdef CONFIG_NET_DMA | ||
174 | struct net_dma { | ||
175 | struct dma_client client; | ||
176 | spinlock_t lock; | ||
177 | cpumask_t channel_mask; | ||
178 | struct dma_chan **channels; | ||
179 | }; | ||
180 | |||
181 | static enum dma_state_client | ||
182 | netdev_dma_event(struct dma_client *client, struct dma_chan *chan, | ||
183 | enum dma_state state); | ||
184 | |||
185 | static struct net_dma net_dma = { | ||
186 | .client = { | ||
187 | .event_callback = netdev_dma_event, | ||
188 | }, | ||
189 | }; | ||
190 | #endif | ||
191 | |||
192 | /* | 173 | /* |
193 | * The @dev_base_head list is protected by @dev_base_lock and the rtnl | 174 | * The @dev_base_head list is protected by @dev_base_lock and the rtnl |
194 | * semaphore. | 175 | * semaphore. |
@@ -2387,7 +2368,7 @@ void napi_gro_flush(struct napi_struct *napi) | |||
2387 | } | 2368 | } |
2388 | EXPORT_SYMBOL(napi_gro_flush); | 2369 | EXPORT_SYMBOL(napi_gro_flush); |
2389 | 2370 | ||
2390 | static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) | 2371 | int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) |
2391 | { | 2372 | { |
2392 | struct sk_buff **pp = NULL; | 2373 | struct sk_buff **pp = NULL; |
2393 | struct packet_type *ptype; | 2374 | struct packet_type *ptype; |
@@ -2417,11 +2398,14 @@ static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) | |||
2417 | 2398 | ||
2418 | for (p = napi->gro_list; p; p = p->next) { | 2399 | for (p = napi->gro_list; p; p = p->next) { |
2419 | count++; | 2400 | count++; |
2420 | NAPI_GRO_CB(p)->same_flow = | 2401 | |
2421 | p->mac_len == mac_len && | 2402 | if (!NAPI_GRO_CB(p)->same_flow) |
2422 | !memcmp(skb_mac_header(p), skb_mac_header(skb), | 2403 | continue; |
2423 | mac_len); | 2404 | |
2424 | NAPI_GRO_CB(p)->flush = 0; | 2405 | if (p->mac_len != mac_len || |
2406 | memcmp(skb_mac_header(p), skb_mac_header(skb), | ||
2407 | mac_len)) | ||
2408 | NAPI_GRO_CB(p)->same_flow = 0; | ||
2425 | } | 2409 | } |
2426 | 2410 | ||
2427 | pp = ptype->gro_receive(&napi->gro_list, skb); | 2411 | pp = ptype->gro_receive(&napi->gro_list, skb); |
@@ -2463,6 +2447,19 @@ ok: | |||
2463 | normal: | 2447 | normal: |
2464 | return -1; | 2448 | return -1; |
2465 | } | 2449 | } |
2450 | EXPORT_SYMBOL(dev_gro_receive); | ||
2451 | |||
2452 | static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) | ||
2453 | { | ||
2454 | struct sk_buff *p; | ||
2455 | |||
2456 | for (p = napi->gro_list; p; p = p->next) { | ||
2457 | NAPI_GRO_CB(p)->same_flow = 1; | ||
2458 | NAPI_GRO_CB(p)->flush = 0; | ||
2459 | } | ||
2460 | |||
2461 | return dev_gro_receive(napi, skb); | ||
2462 | } | ||
2466 | 2463 | ||
2467 | int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) | 2464 | int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) |
2468 | { | 2465 | { |
@@ -2479,11 +2476,26 @@ int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) | |||
2479 | } | 2476 | } |
2480 | EXPORT_SYMBOL(napi_gro_receive); | 2477 | EXPORT_SYMBOL(napi_gro_receive); |
2481 | 2478 | ||
2482 | int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info) | 2479 | void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb) |
2480 | { | ||
2481 | skb_shinfo(skb)->nr_frags = 0; | ||
2482 | |||
2483 | skb->len -= skb->data_len; | ||
2484 | skb->truesize -= skb->data_len; | ||
2485 | skb->data_len = 0; | ||
2486 | |||
2487 | __skb_pull(skb, skb_headlen(skb)); | ||
2488 | skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb)); | ||
2489 | |||
2490 | napi->skb = skb; | ||
2491 | } | ||
2492 | EXPORT_SYMBOL(napi_reuse_skb); | ||
2493 | |||
2494 | struct sk_buff *napi_fraginfo_skb(struct napi_struct *napi, | ||
2495 | struct napi_gro_fraginfo *info) | ||
2483 | { | 2496 | { |
2484 | struct net_device *dev = napi->dev; | 2497 | struct net_device *dev = napi->dev; |
2485 | struct sk_buff *skb = napi->skb; | 2498 | struct sk_buff *skb = napi->skb; |
2486 | int err = NET_RX_DROP; | ||
2487 | 2499 | ||
2488 | napi->skb = NULL; | 2500 | napi->skb = NULL; |
2489 | 2501 | ||
@@ -2503,16 +2515,31 @@ int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info) | |||
2503 | skb->len += info->len; | 2515 | skb->len += info->len; |
2504 | skb->truesize += info->len; | 2516 | skb->truesize += info->len; |
2505 | 2517 | ||
2506 | if (!pskb_may_pull(skb, ETH_HLEN)) | 2518 | if (!pskb_may_pull(skb, ETH_HLEN)) { |
2507 | goto reuse; | 2519 | napi_reuse_skb(napi, skb); |
2508 | 2520 | goto out; | |
2509 | err = NET_RX_SUCCESS; | 2521 | } |
2510 | 2522 | ||
2511 | skb->protocol = eth_type_trans(skb, dev); | 2523 | skb->protocol = eth_type_trans(skb, dev); |
2512 | 2524 | ||
2513 | skb->ip_summed = info->ip_summed; | 2525 | skb->ip_summed = info->ip_summed; |
2514 | skb->csum = info->csum; | 2526 | skb->csum = info->csum; |
2515 | 2527 | ||
2528 | out: | ||
2529 | return skb; | ||
2530 | } | ||
2531 | EXPORT_SYMBOL(napi_fraginfo_skb); | ||
2532 | |||
2533 | int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info) | ||
2534 | { | ||
2535 | struct sk_buff *skb = napi_fraginfo_skb(napi, info); | ||
2536 | int err = NET_RX_DROP; | ||
2537 | |||
2538 | if (!skb) | ||
2539 | goto out; | ||
2540 | |||
2541 | err = NET_RX_SUCCESS; | ||
2542 | |||
2516 | switch (__napi_gro_receive(napi, skb)) { | 2543 | switch (__napi_gro_receive(napi, skb)) { |
2517 | case -1: | 2544 | case -1: |
2518 | return netif_receive_skb(skb); | 2545 | return netif_receive_skb(skb); |
@@ -2521,17 +2548,7 @@ int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info) | |||
2521 | goto out; | 2548 | goto out; |
2522 | } | 2549 | } |
2523 | 2550 | ||
2524 | reuse: | 2551 | napi_reuse_skb(napi, skb); |
2525 | skb_shinfo(skb)->nr_frags = 0; | ||
2526 | |||
2527 | skb->len -= skb->data_len; | ||
2528 | skb->truesize -= skb->data_len; | ||
2529 | skb->data_len = 0; | ||
2530 | |||
2531 | __skb_pull(skb, skb_headlen(skb)); | ||
2532 | skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb)); | ||
2533 | |||
2534 | napi->skb = skb; | ||
2535 | 2552 | ||
2536 | out: | 2553 | out: |
2537 | return err; | 2554 | return err; |
@@ -2718,14 +2735,7 @@ out: | |||
2718 | * There may not be any more sk_buffs coming right now, so push | 2735 | * There may not be any more sk_buffs coming right now, so push |
2719 | * any pending DMA copies to hardware | 2736 | * any pending DMA copies to hardware |
2720 | */ | 2737 | */ |
2721 | if (!cpus_empty(net_dma.channel_mask)) { | 2738 | dma_issue_pending_all(); |
2722 | int chan_idx; | ||
2723 | for_each_cpu_mask_nr(chan_idx, net_dma.channel_mask) { | ||
2724 | struct dma_chan *chan = net_dma.channels[chan_idx]; | ||
2725 | if (chan) | ||
2726 | dma_async_memcpy_issue_pending(chan); | ||
2727 | } | ||
2728 | } | ||
2729 | #endif | 2739 | #endif |
2730 | 2740 | ||
2731 | return; | 2741 | return; |
@@ -4916,122 +4926,6 @@ static int dev_cpu_callback(struct notifier_block *nfb, | |||
4916 | return NOTIFY_OK; | 4926 | return NOTIFY_OK; |
4917 | } | 4927 | } |
4918 | 4928 | ||
4919 | #ifdef CONFIG_NET_DMA | ||
4920 | /** | ||
4921 | * net_dma_rebalance - try to maintain one DMA channel per CPU | ||
4922 | * @net_dma: DMA client and associated data (lock, channels, channel_mask) | ||
4923 | * | ||
4924 | * This is called when the number of channels allocated to the net_dma client | ||
4925 | * changes. The net_dma client tries to have one DMA channel per CPU. | ||
4926 | */ | ||
4927 | |||
4928 | static void net_dma_rebalance(struct net_dma *net_dma) | ||
4929 | { | ||
4930 | unsigned int cpu, i, n, chan_idx; | ||
4931 | struct dma_chan *chan; | ||
4932 | |||
4933 | if (cpus_empty(net_dma->channel_mask)) { | ||
4934 | for_each_online_cpu(cpu) | ||
4935 | rcu_assign_pointer(per_cpu(softnet_data, cpu).net_dma, NULL); | ||
4936 | return; | ||
4937 | } | ||
4938 | |||
4939 | i = 0; | ||
4940 | cpu = first_cpu(cpu_online_map); | ||
4941 | |||
4942 | for_each_cpu_mask_nr(chan_idx, net_dma->channel_mask) { | ||
4943 | chan = net_dma->channels[chan_idx]; | ||
4944 | |||
4945 | n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask)) | ||
4946 | + (i < (num_online_cpus() % | ||
4947 | cpus_weight(net_dma->channel_mask)) ? 1 : 0)); | ||
4948 | |||
4949 | while(n) { | ||
4950 | per_cpu(softnet_data, cpu).net_dma = chan; | ||
4951 | cpu = next_cpu(cpu, cpu_online_map); | ||
4952 | n--; | ||
4953 | } | ||
4954 | i++; | ||
4955 | } | ||
4956 | } | ||
4957 | |||
4958 | /** | ||
4959 | * netdev_dma_event - event callback for the net_dma_client | ||
4960 | * @client: should always be net_dma_client | ||
4961 | * @chan: DMA channel for the event | ||
4962 | * @state: DMA state to be handled | ||
4963 | */ | ||
4964 | static enum dma_state_client | ||
4965 | netdev_dma_event(struct dma_client *client, struct dma_chan *chan, | ||
4966 | enum dma_state state) | ||
4967 | { | ||
4968 | int i, found = 0, pos = -1; | ||
4969 | struct net_dma *net_dma = | ||
4970 | container_of(client, struct net_dma, client); | ||
4971 | enum dma_state_client ack = DMA_DUP; /* default: take no action */ | ||
4972 | |||
4973 | spin_lock(&net_dma->lock); | ||
4974 | switch (state) { | ||
4975 | case DMA_RESOURCE_AVAILABLE: | ||
4976 | for (i = 0; i < nr_cpu_ids; i++) | ||
4977 | if (net_dma->channels[i] == chan) { | ||
4978 | found = 1; | ||
4979 | break; | ||
4980 | } else if (net_dma->channels[i] == NULL && pos < 0) | ||
4981 | pos = i; | ||
4982 | |||
4983 | if (!found && pos >= 0) { | ||
4984 | ack = DMA_ACK; | ||
4985 | net_dma->channels[pos] = chan; | ||
4986 | cpu_set(pos, net_dma->channel_mask); | ||
4987 | net_dma_rebalance(net_dma); | ||
4988 | } | ||
4989 | break; | ||
4990 | case DMA_RESOURCE_REMOVED: | ||
4991 | for (i = 0; i < nr_cpu_ids; i++) | ||
4992 | if (net_dma->channels[i] == chan) { | ||
4993 | found = 1; | ||
4994 | pos = i; | ||
4995 | break; | ||
4996 | } | ||
4997 | |||
4998 | if (found) { | ||
4999 | ack = DMA_ACK; | ||
5000 | cpu_clear(pos, net_dma->channel_mask); | ||
5001 | net_dma->channels[i] = NULL; | ||
5002 | net_dma_rebalance(net_dma); | ||
5003 | } | ||
5004 | break; | ||
5005 | default: | ||
5006 | break; | ||
5007 | } | ||
5008 | spin_unlock(&net_dma->lock); | ||
5009 | |||
5010 | return ack; | ||
5011 | } | ||
5012 | |||
5013 | /** | ||
5014 | * netdev_dma_register - register the networking subsystem as a DMA client | ||
5015 | */ | ||
5016 | static int __init netdev_dma_register(void) | ||
5017 | { | ||
5018 | net_dma.channels = kzalloc(nr_cpu_ids * sizeof(struct net_dma), | ||
5019 | GFP_KERNEL); | ||
5020 | if (unlikely(!net_dma.channels)) { | ||
5021 | printk(KERN_NOTICE | ||
5022 | "netdev_dma: no memory for net_dma.channels\n"); | ||
5023 | return -ENOMEM; | ||
5024 | } | ||
5025 | spin_lock_init(&net_dma.lock); | ||
5026 | dma_cap_set(DMA_MEMCPY, net_dma.client.cap_mask); | ||
5027 | dma_async_client_register(&net_dma.client); | ||
5028 | dma_async_client_chan_request(&net_dma.client); | ||
5029 | return 0; | ||
5030 | } | ||
5031 | |||
5032 | #else | ||
5033 | static int __init netdev_dma_register(void) { return -ENODEV; } | ||
5034 | #endif /* CONFIG_NET_DMA */ | ||
5035 | 4929 | ||
5036 | /** | 4930 | /** |
5037 | * netdev_increment_features - increment feature set by one | 4931 | * netdev_increment_features - increment feature set by one |
@@ -5251,14 +5145,15 @@ static int __init net_dev_init(void) | |||
5251 | if (register_pernet_device(&default_device_ops)) | 5145 | if (register_pernet_device(&default_device_ops)) |
5252 | goto out; | 5146 | goto out; |
5253 | 5147 | ||
5254 | netdev_dma_register(); | ||
5255 | |||
5256 | open_softirq(NET_TX_SOFTIRQ, net_tx_action); | 5148 | open_softirq(NET_TX_SOFTIRQ, net_tx_action); |
5257 | open_softirq(NET_RX_SOFTIRQ, net_rx_action); | 5149 | open_softirq(NET_RX_SOFTIRQ, net_rx_action); |
5258 | 5150 | ||
5259 | hotcpu_notifier(dev_cpu_callback, 0); | 5151 | hotcpu_notifier(dev_cpu_callback, 0); |
5260 | dst_init(); | 5152 | dst_init(); |
5261 | dev_mcast_init(); | 5153 | dev_mcast_init(); |
5154 | #ifdef CONFIG_NET_DMA | ||
5155 | dmaengine_get(); | ||
5156 | #endif | ||
5262 | rc = 0; | 5157 | rc = 0; |
5263 | out: | 5158 | out: |
5264 | return rc; | 5159 | return rc; |
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index a3a410d20da0..a68fd79e9eca 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -286,6 +286,42 @@ static const struct ethtool_ops dsa_slave_ethtool_ops = { | |||
286 | .get_sset_count = dsa_slave_get_sset_count, | 286 | .get_sset_count = dsa_slave_get_sset_count, |
287 | }; | 287 | }; |
288 | 288 | ||
289 | #ifdef CONFIG_NET_DSA_TAG_DSA | ||
290 | static const struct net_device_ops dsa_netdev_ops = { | ||
291 | .ndo_open = dsa_slave_open, | ||
292 | .ndo_stop = dsa_slave_close, | ||
293 | .ndo_start_xmit = dsa_xmit, | ||
294 | .ndo_change_rx_flags = dsa_slave_change_rx_flags, | ||
295 | .ndo_set_rx_mode = dsa_slave_set_rx_mode, | ||
296 | .ndo_set_multicast_list = dsa_slave_set_rx_mode, | ||
297 | .ndo_set_mac_address = dsa_slave_set_mac_address, | ||
298 | .ndo_do_ioctl = dsa_slave_ioctl, | ||
299 | }; | ||
300 | #endif | ||
301 | #ifdef CONFIG_NET_DSA_TAG_EDSA | ||
302 | static const struct net_device_ops edsa_netdev_ops = { | ||
303 | .ndo_open = dsa_slave_open, | ||
304 | .ndo_stop = dsa_slave_close, | ||
305 | .ndo_start_xmit = edsa_xmit, | ||
306 | .ndo_change_rx_flags = dsa_slave_change_rx_flags, | ||
307 | .ndo_set_rx_mode = dsa_slave_set_rx_mode, | ||
308 | .ndo_set_multicast_list = dsa_slave_set_rx_mode, | ||
309 | .ndo_set_mac_address = dsa_slave_set_mac_address, | ||
310 | .ndo_do_ioctl = dsa_slave_ioctl, | ||
311 | }; | ||
312 | #endif | ||
313 | #ifdef CONFIG_NET_DSA_TAG_TRAILER | ||
314 | static const struct net_device_ops trailer_netdev_ops = { | ||
315 | .ndo_open = dsa_slave_open, | ||
316 | .ndo_stop = dsa_slave_close, | ||
317 | .ndo_start_xmit = trailer_xmit, | ||
318 | .ndo_change_rx_flags = dsa_slave_change_rx_flags, | ||
319 | .ndo_set_rx_mode = dsa_slave_set_rx_mode, | ||
320 | .ndo_set_multicast_list = dsa_slave_set_rx_mode, | ||
321 | .ndo_set_mac_address = dsa_slave_set_mac_address, | ||
322 | .ndo_do_ioctl = dsa_slave_ioctl, | ||
323 | }; | ||
324 | #endif | ||
289 | 325 | ||
290 | /* slave device setup *******************************************************/ | 326 | /* slave device setup *******************************************************/ |
291 | struct net_device * | 327 | struct net_device * |
@@ -306,32 +342,27 @@ dsa_slave_create(struct dsa_switch *ds, struct device *parent, | |||
306 | SET_ETHTOOL_OPS(slave_dev, &dsa_slave_ethtool_ops); | 342 | SET_ETHTOOL_OPS(slave_dev, &dsa_slave_ethtool_ops); |
307 | memcpy(slave_dev->dev_addr, master->dev_addr, ETH_ALEN); | 343 | memcpy(slave_dev->dev_addr, master->dev_addr, ETH_ALEN); |
308 | slave_dev->tx_queue_len = 0; | 344 | slave_dev->tx_queue_len = 0; |
345 | |||
309 | switch (ds->tag_protocol) { | 346 | switch (ds->tag_protocol) { |
310 | #ifdef CONFIG_NET_DSA_TAG_DSA | 347 | #ifdef CONFIG_NET_DSA_TAG_DSA |
311 | case htons(ETH_P_DSA): | 348 | case htons(ETH_P_DSA): |
312 | slave_dev->hard_start_xmit = dsa_xmit; | 349 | slave_dev->netdev_ops = &dsa_netdev_ops; |
313 | break; | 350 | break; |
314 | #endif | 351 | #endif |
315 | #ifdef CONFIG_NET_DSA_TAG_EDSA | 352 | #ifdef CONFIG_NET_DSA_TAG_EDSA |
316 | case htons(ETH_P_EDSA): | 353 | case htons(ETH_P_EDSA): |
317 | slave_dev->hard_start_xmit = edsa_xmit; | 354 | slave_dev->netdev_ops = &edsa_netdev_ops; |
318 | break; | 355 | break; |
319 | #endif | 356 | #endif |
320 | #ifdef CONFIG_NET_DSA_TAG_TRAILER | 357 | #ifdef CONFIG_NET_DSA_TAG_TRAILER |
321 | case htons(ETH_P_TRAILER): | 358 | case htons(ETH_P_TRAILER): |
322 | slave_dev->hard_start_xmit = trailer_xmit; | 359 | slave_dev->netdev_ops = &trailer_netdev_ops; |
323 | break; | 360 | break; |
324 | #endif | 361 | #endif |
325 | default: | 362 | default: |
326 | BUG(); | 363 | BUG(); |
327 | } | 364 | } |
328 | slave_dev->open = dsa_slave_open; | 365 | |
329 | slave_dev->stop = dsa_slave_close; | ||
330 | slave_dev->change_rx_flags = dsa_slave_change_rx_flags; | ||
331 | slave_dev->set_rx_mode = dsa_slave_set_rx_mode; | ||
332 | slave_dev->set_multicast_list = dsa_slave_set_rx_mode; | ||
333 | slave_dev->set_mac_address = dsa_slave_set_mac_address; | ||
334 | slave_dev->do_ioctl = dsa_slave_ioctl; | ||
335 | SET_NETDEV_DEV(slave_dev, parent); | 366 | SET_NETDEV_DEV(slave_dev, parent); |
336 | slave_dev->vlan_features = master->vlan_features; | 367 | slave_dev->vlan_features = master->vlan_features; |
337 | 368 | ||
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 35bcddf8a932..ce572f9dff02 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1313,7 +1313,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
1313 | if ((available < target) && | 1313 | if ((available < target) && |
1314 | (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) && | 1314 | (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) && |
1315 | !sysctl_tcp_low_latency && | 1315 | !sysctl_tcp_low_latency && |
1316 | __get_cpu_var(softnet_data).net_dma) { | 1316 | dma_find_channel(DMA_MEMCPY)) { |
1317 | preempt_enable_no_resched(); | 1317 | preempt_enable_no_resched(); |
1318 | tp->ucopy.pinned_list = | 1318 | tp->ucopy.pinned_list = |
1319 | dma_pin_iovec_pages(msg->msg_iov, len); | 1319 | dma_pin_iovec_pages(msg->msg_iov, len); |
@@ -1523,7 +1523,7 @@ do_prequeue: | |||
1523 | if (!(flags & MSG_TRUNC)) { | 1523 | if (!(flags & MSG_TRUNC)) { |
1524 | #ifdef CONFIG_NET_DMA | 1524 | #ifdef CONFIG_NET_DMA |
1525 | if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) | 1525 | if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) |
1526 | tp->ucopy.dma_chan = get_softnet_dma(); | 1526 | tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY); |
1527 | 1527 | ||
1528 | if (tp->ucopy.dma_chan) { | 1528 | if (tp->ucopy.dma_chan) { |
1529 | tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec( | 1529 | tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec( |
@@ -1628,7 +1628,6 @@ skip_copy: | |||
1628 | 1628 | ||
1629 | /* Safe to free early-copied skbs now */ | 1629 | /* Safe to free early-copied skbs now */ |
1630 | __skb_queue_purge(&sk->sk_async_wait_queue); | 1630 | __skb_queue_purge(&sk->sk_async_wait_queue); |
1631 | dma_chan_put(tp->ucopy.dma_chan); | ||
1632 | tp->ucopy.dma_chan = NULL; | 1631 | tp->ucopy.dma_chan = NULL; |
1633 | } | 1632 | } |
1634 | if (tp->ucopy.pinned_list) { | 1633 | if (tp->ucopy.pinned_list) { |
@@ -2542,6 +2541,7 @@ out: | |||
2542 | 2541 | ||
2543 | return pp; | 2542 | return pp; |
2544 | } | 2543 | } |
2544 | EXPORT_SYMBOL(tcp_gro_receive); | ||
2545 | 2545 | ||
2546 | int tcp_gro_complete(struct sk_buff *skb) | 2546 | int tcp_gro_complete(struct sk_buff *skb) |
2547 | { | 2547 | { |
@@ -2558,6 +2558,7 @@ int tcp_gro_complete(struct sk_buff *skb) | |||
2558 | 2558 | ||
2559 | return 0; | 2559 | return 0; |
2560 | } | 2560 | } |
2561 | EXPORT_SYMBOL(tcp_gro_complete); | ||
2561 | 2562 | ||
2562 | #ifdef CONFIG_TCP_MD5SIG | 2563 | #ifdef CONFIG_TCP_MD5SIG |
2563 | static unsigned long tcp_md5sig_users; | 2564 | static unsigned long tcp_md5sig_users; |
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 99b7ecbe8893..a6961d75c7ea 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5005,7 +5005,7 @@ static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb, | |||
5005 | return 0; | 5005 | return 0; |
5006 | 5006 | ||
5007 | if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) | 5007 | if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) |
5008 | tp->ucopy.dma_chan = get_softnet_dma(); | 5008 | tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY); |
5009 | 5009 | ||
5010 | if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) { | 5010 | if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) { |
5011 | 5011 | ||
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 9d839fa9331e..19d7b429a262 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1594,7 +1594,7 @@ process: | |||
1594 | #ifdef CONFIG_NET_DMA | 1594 | #ifdef CONFIG_NET_DMA |
1595 | struct tcp_sock *tp = tcp_sk(sk); | 1595 | struct tcp_sock *tp = tcp_sk(sk); |
1596 | if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) | 1596 | if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) |
1597 | tp->ucopy.dma_chan = get_softnet_dma(); | 1597 | tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY); |
1598 | if (tp->ucopy.dma_chan) | 1598 | if (tp->ucopy.dma_chan) |
1599 | ret = tcp_v4_do_rcv(sk, skb); | 1599 | ret = tcp_v4_do_rcv(sk, skb); |
1600 | else | 1600 | else |
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 437b750b98fd..94f74f5b0cbf 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -672,8 +672,7 @@ int ipv6_opt_accepted(struct sock *sk, struct sk_buff *skb) | |||
672 | 672 | ||
673 | EXPORT_SYMBOL_GPL(ipv6_opt_accepted); | 673 | EXPORT_SYMBOL_GPL(ipv6_opt_accepted); |
674 | 674 | ||
675 | static struct inet6_protocol *ipv6_gso_pull_exthdrs(struct sk_buff *skb, | 675 | static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto) |
676 | int proto) | ||
677 | { | 676 | { |
678 | struct inet6_protocol *ops = NULL; | 677 | struct inet6_protocol *ops = NULL; |
679 | 678 | ||
@@ -704,7 +703,7 @@ static struct inet6_protocol *ipv6_gso_pull_exthdrs(struct sk_buff *skb, | |||
704 | __skb_pull(skb, len); | 703 | __skb_pull(skb, len); |
705 | } | 704 | } |
706 | 705 | ||
707 | return ops; | 706 | return proto; |
708 | } | 707 | } |
709 | 708 | ||
710 | static int ipv6_gso_send_check(struct sk_buff *skb) | 709 | static int ipv6_gso_send_check(struct sk_buff *skb) |
@@ -721,7 +720,9 @@ static int ipv6_gso_send_check(struct sk_buff *skb) | |||
721 | err = -EPROTONOSUPPORT; | 720 | err = -EPROTONOSUPPORT; |
722 | 721 | ||
723 | rcu_read_lock(); | 722 | rcu_read_lock(); |
724 | ops = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr); | 723 | ops = rcu_dereference(inet6_protos[ |
724 | ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr)]); | ||
725 | |||
725 | if (likely(ops && ops->gso_send_check)) { | 726 | if (likely(ops && ops->gso_send_check)) { |
726 | skb_reset_transport_header(skb); | 727 | skb_reset_transport_header(skb); |
727 | err = ops->gso_send_check(skb); | 728 | err = ops->gso_send_check(skb); |
@@ -757,7 +758,9 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features) | |||
757 | segs = ERR_PTR(-EPROTONOSUPPORT); | 758 | segs = ERR_PTR(-EPROTONOSUPPORT); |
758 | 759 | ||
759 | rcu_read_lock(); | 760 | rcu_read_lock(); |
760 | ops = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr); | 761 | ops = rcu_dereference(inet6_protos[ |
762 | ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr)]); | ||
763 | |||
761 | if (likely(ops && ops->gso_segment)) { | 764 | if (likely(ops && ops->gso_segment)) { |
762 | skb_reset_transport_header(skb); | 765 | skb_reset_transport_header(skb); |
763 | segs = ops->gso_segment(skb, features); | 766 | segs = ops->gso_segment(skb, features); |
@@ -777,11 +780,105 @@ out: | |||
777 | return segs; | 780 | return segs; |
778 | } | 781 | } |
779 | 782 | ||
783 | struct ipv6_gro_cb { | ||
784 | struct napi_gro_cb napi; | ||
785 | int proto; | ||
786 | }; | ||
787 | |||
788 | #define IPV6_GRO_CB(skb) ((struct ipv6_gro_cb *)(skb)->cb) | ||
789 | |||
790 | static struct sk_buff **ipv6_gro_receive(struct sk_buff **head, | ||
791 | struct sk_buff *skb) | ||
792 | { | ||
793 | struct inet6_protocol *ops; | ||
794 | struct sk_buff **pp = NULL; | ||
795 | struct sk_buff *p; | ||
796 | struct ipv6hdr *iph; | ||
797 | unsigned int nlen; | ||
798 | int flush = 1; | ||
799 | int proto; | ||
800 | |||
801 | if (unlikely(!pskb_may_pull(skb, sizeof(*iph)))) | ||
802 | goto out; | ||
803 | |||
804 | iph = ipv6_hdr(skb); | ||
805 | __skb_pull(skb, sizeof(*iph)); | ||
806 | |||
807 | flush += ntohs(iph->payload_len) != skb->len; | ||
808 | |||
809 | rcu_read_lock(); | ||
810 | proto = ipv6_gso_pull_exthdrs(skb, iph->nexthdr); | ||
811 | IPV6_GRO_CB(skb)->proto = proto; | ||
812 | ops = rcu_dereference(inet6_protos[proto]); | ||
813 | if (!ops || !ops->gro_receive) | ||
814 | goto out_unlock; | ||
815 | |||
816 | flush--; | ||
817 | skb_reset_transport_header(skb); | ||
818 | nlen = skb_network_header_len(skb); | ||
819 | |||
820 | for (p = *head; p; p = p->next) { | ||
821 | struct ipv6hdr *iph2; | ||
822 | |||
823 | if (!NAPI_GRO_CB(p)->same_flow) | ||
824 | continue; | ||
825 | |||
826 | iph2 = ipv6_hdr(p); | ||
827 | |||
828 | /* All fields must match except length. */ | ||
829 | if (nlen != skb_network_header_len(p) || | ||
830 | memcmp(iph, iph2, offsetof(struct ipv6hdr, payload_len)) || | ||
831 | memcmp(&iph->nexthdr, &iph2->nexthdr, | ||
832 | nlen - offsetof(struct ipv6hdr, nexthdr))) { | ||
833 | NAPI_GRO_CB(p)->same_flow = 0; | ||
834 | continue; | ||
835 | } | ||
836 | |||
837 | NAPI_GRO_CB(p)->flush |= flush; | ||
838 | } | ||
839 | |||
840 | NAPI_GRO_CB(skb)->flush |= flush; | ||
841 | |||
842 | pp = ops->gro_receive(head, skb); | ||
843 | |||
844 | out_unlock: | ||
845 | rcu_read_unlock(); | ||
846 | |||
847 | out: | ||
848 | NAPI_GRO_CB(skb)->flush |= flush; | ||
849 | |||
850 | return pp; | ||
851 | } | ||
852 | |||
853 | static int ipv6_gro_complete(struct sk_buff *skb) | ||
854 | { | ||
855 | struct inet6_protocol *ops; | ||
856 | struct ipv6hdr *iph = ipv6_hdr(skb); | ||
857 | int err = -ENOSYS; | ||
858 | |||
859 | iph->payload_len = htons(skb->len - skb_network_offset(skb) - | ||
860 | sizeof(*iph)); | ||
861 | |||
862 | rcu_read_lock(); | ||
863 | ops = rcu_dereference(inet6_protos[IPV6_GRO_CB(skb)->proto]); | ||
864 | if (WARN_ON(!ops || !ops->gro_complete)) | ||
865 | goto out_unlock; | ||
866 | |||
867 | err = ops->gro_complete(skb); | ||
868 | |||
869 | out_unlock: | ||
870 | rcu_read_unlock(); | ||
871 | |||
872 | return err; | ||
873 | } | ||
874 | |||
780 | static struct packet_type ipv6_packet_type = { | 875 | static struct packet_type ipv6_packet_type = { |
781 | .type = __constant_htons(ETH_P_IPV6), | 876 | .type = __constant_htons(ETH_P_IPV6), |
782 | .func = ipv6_rcv, | 877 | .func = ipv6_rcv, |
783 | .gso_send_check = ipv6_gso_send_check, | 878 | .gso_send_check = ipv6_gso_send_check, |
784 | .gso_segment = ipv6_gso_segment, | 879 | .gso_segment = ipv6_gso_segment, |
880 | .gro_receive = ipv6_gro_receive, | ||
881 | .gro_complete = ipv6_gro_complete, | ||
785 | }; | 882 | }; |
786 | 883 | ||
787 | static int __init ipv6_packet_init(void) | 884 | static int __init ipv6_packet_init(void) |
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 76f06b94ab9f..c4a59824ac2c 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2752,7 +2752,7 @@ int __init ip6_route_init(void) | |||
2752 | kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0, | 2752 | kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0, |
2753 | SLAB_HWCACHE_ALIGN, NULL); | 2753 | SLAB_HWCACHE_ALIGN, NULL); |
2754 | if (!ip6_dst_ops_template.kmem_cachep) | 2754 | if (!ip6_dst_ops_template.kmem_cachep) |
2755 | goto out;; | 2755 | goto out; |
2756 | 2756 | ||
2757 | ret = register_pernet_subsys(&ip6_route_net_ops); | 2757 | ret = register_pernet_subsys(&ip6_route_net_ops); |
2758 | if (ret) | 2758 | if (ret) |
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
index 9048fe7e7ea7..a031034720b4 100644
--- a/net/ipv6/sysctl_net_ipv6.c
+++ b/net/ipv6/sysctl_net_ipv6.c
@@ -128,7 +128,7 @@ static struct ctl_table_header *ip6_header; | |||
128 | 128 | ||
129 | int ipv6_sysctl_register(void) | 129 | int ipv6_sysctl_register(void) |
130 | { | 130 | { |
131 | int err = -ENOMEM;; | 131 | int err = -ENOMEM; |
132 | 132 | ||
133 | ip6_header = register_net_sysctl_rotable(net_ipv6_ctl_path, ipv6_table); | 133 | ip6_header = register_net_sysctl_rotable(net_ipv6_ctl_path, ipv6_table); |
134 | if (ip6_header == NULL) | 134 | if (ip6_header == NULL) |
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index e8b8337a8310..e5b85d45bee8 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -101,7 +101,7 @@ static void tcp_v6_hash(struct sock *sk) | |||
101 | } | 101 | } |
102 | } | 102 | } |
103 | 103 | ||
104 | static __inline__ __sum16 tcp_v6_check(struct tcphdr *th, int len, | 104 | static __inline__ __sum16 tcp_v6_check(int len, |
105 | struct in6_addr *saddr, | 105 | struct in6_addr *saddr, |
106 | struct in6_addr *daddr, | 106 | struct in6_addr *daddr, |
107 | __wsum base) | 107 | __wsum base) |
@@ -501,7 +501,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req) | |||
501 | if (skb) { | 501 | if (skb) { |
502 | struct tcphdr *th = tcp_hdr(skb); | 502 | struct tcphdr *th = tcp_hdr(skb); |
503 | 503 | ||
504 | th->check = tcp_v6_check(th, skb->len, | 504 | th->check = tcp_v6_check(skb->len, |
505 | &treq->loc_addr, &treq->rmt_addr, | 505 | &treq->loc_addr, &treq->rmt_addr, |
506 | csum_partial(th, skb->len, skb->csum)); | 506 | csum_partial(th, skb->len, skb->csum)); |
507 | 507 | ||
@@ -942,6 +942,41 @@ static int tcp_v6_gso_send_check(struct sk_buff *skb) | |||
942 | return 0; | 942 | return 0; |
943 | } | 943 | } |
944 | 944 | ||
945 | struct sk_buff **tcp6_gro_receive(struct sk_buff **head, struct sk_buff *skb) | ||
946 | { | ||
947 | struct ipv6hdr *iph = ipv6_hdr(skb); | ||
948 | |||
949 | switch (skb->ip_summed) { | ||
950 | case CHECKSUM_COMPLETE: | ||
951 | if (!tcp_v6_check(skb->len, &iph->saddr, &iph->daddr, | ||
952 | skb->csum)) { | ||
953 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
954 | break; | ||
955 | } | ||
956 | |||
957 | /* fall through */ | ||
958 | case CHECKSUM_NONE: | ||
959 | NAPI_GRO_CB(skb)->flush = 1; | ||
960 | return NULL; | ||
961 | } | ||
962 | |||
963 | return tcp_gro_receive(head, skb); | ||
964 | } | ||
965 | EXPORT_SYMBOL(tcp6_gro_receive); | ||
966 | |||
967 | int tcp6_gro_complete(struct sk_buff *skb) | ||
968 | { | ||
969 | struct ipv6hdr *iph = ipv6_hdr(skb); | ||
970 | struct tcphdr *th = tcp_hdr(skb); | ||
971 | |||
972 | th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb), | ||
973 | &iph->saddr, &iph->daddr, 0); | ||
974 | skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; | ||
975 | |||
976 | return tcp_gro_complete(skb); | ||
977 | } | ||
978 | EXPORT_SYMBOL(tcp6_gro_complete); | ||
979 | |||
945 | static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win, | 980 | static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win, |
946 | u32 ts, struct tcp_md5sig_key *key, int rst) | 981 | u32 ts, struct tcp_md5sig_key *key, int rst) |
947 | { | 982 | { |
@@ -1429,14 +1464,14 @@ out: | |||
1429 | static __sum16 tcp_v6_checksum_init(struct sk_buff *skb) | 1464 | static __sum16 tcp_v6_checksum_init(struct sk_buff *skb) |
1430 | { | 1465 | { |
1431 | if (skb->ip_summed == CHECKSUM_COMPLETE) { | 1466 | if (skb->ip_summed == CHECKSUM_COMPLETE) { |
1432 | if (!tcp_v6_check(tcp_hdr(skb), skb->len, &ipv6_hdr(skb)->saddr, | 1467 | if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr, |
1433 | &ipv6_hdr(skb)->daddr, skb->csum)) { | 1468 | &ipv6_hdr(skb)->daddr, skb->csum)) { |
1434 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 1469 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
1435 | return 0; | 1470 | return 0; |
1436 | } | 1471 | } |
1437 | } | 1472 | } |
1438 | 1473 | ||
1439 | skb->csum = ~csum_unfold(tcp_v6_check(tcp_hdr(skb), skb->len, | 1474 | skb->csum = ~csum_unfold(tcp_v6_check(skb->len, |
1440 | &ipv6_hdr(skb)->saddr, | 1475 | &ipv6_hdr(skb)->saddr, |
1441 | &ipv6_hdr(skb)->daddr, 0)); | 1476 | &ipv6_hdr(skb)->daddr, 0)); |
1442 | 1477 | ||
@@ -1640,7 +1675,7 @@ process: | |||
1640 | #ifdef CONFIG_NET_DMA | 1675 | #ifdef CONFIG_NET_DMA |
1641 | struct tcp_sock *tp = tcp_sk(sk); | 1676 | struct tcp_sock *tp = tcp_sk(sk); |
1642 | if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) | 1677 | if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) |
1643 | tp->ucopy.dma_chan = get_softnet_dma(); | 1678 | tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY); |
1644 | if (tp->ucopy.dma_chan) | 1679 | if (tp->ucopy.dma_chan) |
1645 | ret = tcp_v6_do_rcv(sk, skb); | 1680 | ret = tcp_v6_do_rcv(sk, skb); |
1646 | else | 1681 | else |
@@ -2062,6 +2097,8 @@ static struct inet6_protocol tcpv6_protocol = { | |||
2062 | .err_handler = tcp_v6_err, | 2097 | .err_handler = tcp_v6_err, |
2063 | .gso_send_check = tcp_v6_gso_send_check, | 2098 | .gso_send_check = tcp_v6_gso_send_check, |
2064 | .gso_segment = tcp_tso_segment, | 2099 | .gso_segment = tcp_tso_segment, |
2100 | .gro_receive = tcp6_gro_receive, | ||
2101 | .gro_complete = tcp6_gro_complete, | ||
2065 | .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, | 2102 | .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, |
2066 | }; | 2103 | }; |
2067 | 2104 | ||
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 3e1191cecaf0..1d3dd30099df 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -225,6 +225,7 @@ void genl_unregister_mc_group(struct genl_family *family, | |||
225 | __genl_unregister_mc_group(family, grp); | 225 | __genl_unregister_mc_group(family, grp); |
226 | genl_unlock(); | 226 | genl_unlock(); |
227 | } | 227 | } |
228 | EXPORT_SYMBOL(genl_unregister_mc_group); | ||
228 | 229 | ||
229 | static void genl_unregister_mc_groups(struct genl_family *family) | 230 | static void genl_unregister_mc_groups(struct genl_family *family) |
230 | { | 231 | { |
diff --git a/net/phonet/pep-gprs.c b/net/phonet/pep-gprs.c
index b0ceac2d6cd1..6a91a32a80c1 100644
--- a/net/phonet/pep-gprs.c
+++ b/net/phonet/pep-gprs.c
@@ -227,6 +227,13 @@ static int gprs_set_mtu(struct net_device *dev, int new_mtu) | |||
227 | return 0; | 227 | return 0; |
228 | } | 228 | } |
229 | 229 | ||
230 | static const struct net_device_ops gprs_netdev_ops = { | ||
231 | .ndo_open = gprs_open, | ||
232 | .ndo_stop = gprs_close, | ||
233 | .ndo_start_xmit = gprs_xmit, | ||
234 | .ndo_change_mtu = gprs_set_mtu, | ||
235 | }; | ||
236 | |||
230 | static void gprs_setup(struct net_device *dev) | 237 | static void gprs_setup(struct net_device *dev) |
231 | { | 238 | { |
232 | dev->features = NETIF_F_FRAGLIST; | 239 | dev->features = NETIF_F_FRAGLIST; |
@@ -237,11 +244,8 @@ static void gprs_setup(struct net_device *dev) | |||
237 | dev->addr_len = 0; | 244 | dev->addr_len = 0; |
238 | dev->tx_queue_len = 10; | 245 | dev->tx_queue_len = 10; |
239 | 246 | ||
247 | dev->netdev_ops = &gprs_netdev_ops; | ||
240 | dev->destructor = free_netdev; | 248 | dev->destructor = free_netdev; |
241 | dev->open = gprs_open; | ||
242 | dev->stop = gprs_close; | ||
243 | dev->hard_start_xmit = gprs_xmit; /* mandatory */ | ||
244 | dev->change_mtu = gprs_set_mtu; | ||
245 | } | 249 | } |
246 | 250 | ||
247 | /* | 251 | /* |
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index f3965df00559..33133d27b539 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -435,7 +435,7 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt) | |||
435 | int i; | 435 | int i; |
436 | 436 | ||
437 | q->perturb_timer.function = sfq_perturbation; | 437 | q->perturb_timer.function = sfq_perturbation; |
438 | q->perturb_timer.data = (unsigned long)sch;; | 438 | q->perturb_timer.data = (unsigned long)sch; |
439 | init_timer_deferrable(&q->perturb_timer); | 439 | init_timer_deferrable(&q->perturb_timer); |
440 | 440 | ||
441 | for (i = 0; i < SFQ_HASH_DIVISOR; i++) | 441 | for (i = 0; i < SFQ_HASH_DIVISOR; i++) |
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index cfc8e7caba62..ec697cebb63b 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -289,9 +289,9 @@ restart: | |||
289 | 289 | ||
290 | do { | 290 | do { |
291 | struct net_device *slave = qdisc_dev(q); | 291 | struct net_device *slave = qdisc_dev(q); |
292 | struct netdev_queue *slave_txq; | 292 | struct netdev_queue *slave_txq = netdev_get_tx_queue(slave, 0); |
293 | const struct net_device_ops *slave_ops = slave->netdev_ops; | ||
293 | 294 | ||
294 | slave_txq = netdev_get_tx_queue(slave, 0); | ||
295 | if (slave_txq->qdisc_sleeping != q) | 295 | if (slave_txq->qdisc_sleeping != q) |
296 | continue; | 296 | continue; |
297 | if (__netif_subqueue_stopped(slave, subq) || | 297 | if (__netif_subqueue_stopped(slave, subq) || |
@@ -305,7 +305,7 @@ restart: | |||
305 | if (__netif_tx_trylock(slave_txq)) { | 305 | if (__netif_tx_trylock(slave_txq)) { |
306 | if (!netif_tx_queue_stopped(slave_txq) && | 306 | if (!netif_tx_queue_stopped(slave_txq) && |
307 | !netif_tx_queue_frozen(slave_txq) && | 307 | !netif_tx_queue_frozen(slave_txq) && |
308 | slave->hard_start_xmit(skb, slave) == 0) { | 308 | slave_ops->ndo_start_xmit(skb, slave) == 0) { |
309 | __netif_tx_unlock(slave_txq); | 309 | __netif_tx_unlock(slave_txq); |
310 | master->slaves = NEXT_SLAVE(q); | 310 | master->slaves = NEXT_SLAVE(q); |
311 | netif_wake_queue(dev); | 311 | netif_wake_queue(dev); |
@@ -420,6 +420,14 @@ static int teql_master_mtu(struct net_device *dev, int new_mtu) | |||
420 | return 0; | 420 | return 0; |
421 | } | 421 | } |
422 | 422 | ||
423 | static const struct net_device_ops teql_netdev_ops = { | ||
424 | .ndo_open = teql_master_open, | ||
425 | .ndo_stop = teql_master_close, | ||
426 | .ndo_start_xmit = teql_master_xmit, | ||
427 | .ndo_get_stats = teql_master_stats, | ||
428 | .ndo_change_mtu = teql_master_mtu, | ||
429 | }; | ||
430 | |||
423 | static __init void teql_master_setup(struct net_device *dev) | 431 | static __init void teql_master_setup(struct net_device *dev) |
424 | { | 432 | { |
425 | struct teql_master *master = netdev_priv(dev); | 433 | struct teql_master *master = netdev_priv(dev); |
@@ -436,11 +444,7 @@ static __init void teql_master_setup(struct net_device *dev) | |||
436 | ops->destroy = teql_destroy; | 444 | ops->destroy = teql_destroy; |
437 | ops->owner = THIS_MODULE; | 445 | ops->owner = THIS_MODULE; |
438 | 446 | ||
439 | dev->open = teql_master_open; | 447 | dev->netdev_ops = &teql_netdev_ops; |
440 | dev->hard_start_xmit = teql_master_xmit; | ||
441 | dev->stop = teql_master_close; | ||
442 | dev->get_stats = teql_master_stats; | ||
443 | dev->change_mtu = teql_master_mtu; | ||
444 | dev->type = ARPHRD_VOID; | 448 | dev->type = ARPHRD_VOID; |
445 | dev->mtu = 1500; | 449 | dev->mtu = 1500; |
446 | dev->tx_queue_len = 100; | 450 | dev->tx_queue_len = 100; |
diff --git a/net/sctp/auth.c b/net/sctp/auth.c index 52db5f60daa0..56935bbc1496 100644 --- a/net/sctp/auth.c +++ b/net/sctp/auth.c | |||
@@ -141,8 +141,8 @@ void sctp_auth_destroy_keys(struct list_head *keys) | |||
141 | /* Compare two byte vectors as numbers. Return values | 141 | /* Compare two byte vectors as numbers. Return values |
142 | * are: | 142 | * are: |
143 | * 0 - vectors are equal | 143 | * 0 - vectors are equal |
144 | * < 0 - vector 1 is smaller then vector2 | 144 | * < 0 - vector 1 is smaller than vector2 |
145 | * > 0 - vector 1 is greater then vector2 | 145 | * > 0 - vector 1 is greater than vector2 |
146 | * | 146 | * |
147 | * Algorithm is: | 147 | * Algorithm is: |
148 | * This is performed by selecting the numerically smaller key vector... | 148 | * This is performed by selecting the numerically smaller key vector... |
@@ -489,7 +489,7 @@ int sctp_auth_init_hmacs(struct sctp_endpoint *ep, gfp_t gfp) | |||
489 | return 0; | 489 | return 0; |
490 | 490 | ||
491 | out_err: | 491 | out_err: |
492 | /* Clean up any successfull allocations */ | 492 | /* Clean up any successful allocations */ |
493 | sctp_auth_destroy_hmacs(ep->auth_hmacs); | 493 | sctp_auth_destroy_hmacs(ep->auth_hmacs); |
494 | return -ENOMEM; | 494 | return -ENOMEM; |
495 | } | 495 | } |
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 1c4e5d6c29c0..3a0cd075914f 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c | |||
@@ -4268,9 +4268,9 @@ nomem: | |||
4268 | 4268 | ||
4269 | /* | 4269 | /* |
4270 | * Handle a protocol violation when the chunk length is invalid. | 4270 | * Handle a protocol violation when the chunk length is invalid. |
4271 | * "Invalid" length is identified as smaller then the minimal length a | 4271 | * "Invalid" length is identified as smaller than the minimal length a |
4272 | * given chunk can be. For example, a SACK chunk has invalid length | 4272 | * given chunk can be. For example, a SACK chunk has invalid length |
4273 | * if it's length is set to be smaller then the size of sctp_sack_chunk_t. | 4273 | * if its length is set to be smaller than the size of sctp_sack_chunk_t. |
4274 | * | 4274 | * |
4275 | * We inform the other end by sending an ABORT with a Protocol Violation | 4275 | * We inform the other end by sending an ABORT with a Protocol Violation |
4276 | * error code. | 4276 | * error code. |
@@ -4300,7 +4300,7 @@ static sctp_disposition_t sctp_sf_violation_chunklen( | |||
4300 | 4300 | ||
4301 | /* | 4301 | /* |
4302 | * Handle a protocol violation when the parameter length is invalid. | 4302 | * Handle a protocol violation when the parameter length is invalid. |
4303 | * "Invalid" length is identified as smaller then the minimal length a | 4303 | * "Invalid" length is identified as smaller than the minimal length a |
4304 | * given parameter can be. | 4304 | * given parameter can be. |
4305 | */ | 4305 | */ |
4306 | static sctp_disposition_t sctp_sf_violation_paramlen( | 4306 | static sctp_disposition_t sctp_sf_violation_paramlen( |
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index b14a8f33e42d..ff0a8f88de04 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
@@ -2717,7 +2717,7 @@ static int sctp_setsockopt_associnfo(struct sock *sk, char __user *optval, int o | |||
2717 | paths++; | 2717 | paths++; |
2718 | } | 2718 | } |
2719 | 2719 | ||
2720 | /* Only validate asocmaxrxt if we have more then | 2720 | /* Only validate asocmaxrxt if we have more than |
2721 | * one path/transport. We do this because path | 2721 | * one path/transport. We do this because path |
2722 | * retransmissions are only counted when we have more | 2722 | * retransmissions are only counted when we have more |
2723 | * than one path. | 2723 | * than one path. |
diff --git a/net/sctp/tsnmap.c b/net/sctp/tsnmap.c index 35c73e82553a..9bd64565021a 100644 --- a/net/sctp/tsnmap.c +++ b/net/sctp/tsnmap.c | |||
@@ -227,7 +227,7 @@ void sctp_tsnmap_skip(struct sctp_tsnmap *map, __u32 tsn) | |||
227 | */ | 227 | */ |
228 | bitmap_zero(map->tsn_map, map->len); | 228 | bitmap_zero(map->tsn_map, map->len); |
229 | } else { | 229 | } else { |
230 | /* If the gap is smaller then the map size, | 230 | /* If the gap is smaller than the map size, |
231 | * shift the map by 'gap' bits and update further. | 231 | * shift the map by 'gap' bits and update further. |
232 | */ | 232 | */ |
233 | bitmap_shift_right(map->tsn_map, map->tsn_map, gap, map->len); | 233 | bitmap_shift_right(map->tsn_map, map->tsn_map, gap, map->len); |
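The corrected comment describes the case where the cumulative TSN advances by less than the map size: the whole bitmap is shifted right by 'gap' bits so bit 0 lines up with the new base again. A small self-contained illustration of bitmap_shift_right() semantics (a toy example, not SCTP code):

static void tsn_shift_example(void)
{
	DECLARE_BITMAP(map, 64);

	bitmap_zero(map, 64);
	set_bit(5, map);
	set_bit(9, map);
	/* a "gap" of 4: old bit N now shows up as bit N - 4 */
	bitmap_shift_right(map, map, 4, 64);
	/* bits 1 and 5 are set afterwards */
}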
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c index c9966713282a..4735caad26ed 100644 --- a/net/sunrpc/cache.c +++ b/net/sunrpc/cache.c | |||
@@ -98,7 +98,7 @@ struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail, | |||
98 | 98 | ||
99 | return new; | 99 | return new; |
100 | } | 100 | } |
101 | EXPORT_SYMBOL(sunrpc_cache_lookup); | 101 | EXPORT_SYMBOL_GPL(sunrpc_cache_lookup); |
102 | 102 | ||
103 | 103 | ||
104 | static void queue_loose(struct cache_detail *detail, struct cache_head *ch); | 104 | static void queue_loose(struct cache_detail *detail, struct cache_head *ch); |
@@ -173,7 +173,7 @@ struct cache_head *sunrpc_cache_update(struct cache_detail *detail, | |||
173 | cache_put(old, detail); | 173 | cache_put(old, detail); |
174 | return tmp; | 174 | return tmp; |
175 | } | 175 | } |
176 | EXPORT_SYMBOL(sunrpc_cache_update); | 176 | EXPORT_SYMBOL_GPL(sunrpc_cache_update); |
177 | 177 | ||
178 | static int cache_make_upcall(struct cache_detail *detail, struct cache_head *h); | 178 | static int cache_make_upcall(struct cache_detail *detail, struct cache_head *h); |
179 | /* | 179 | /* |
@@ -245,7 +245,7 @@ int cache_check(struct cache_detail *detail, | |||
245 | cache_put(h, detail); | 245 | cache_put(h, detail); |
246 | return rv; | 246 | return rv; |
247 | } | 247 | } |
248 | EXPORT_SYMBOL(cache_check); | 248 | EXPORT_SYMBOL_GPL(cache_check); |
249 | 249 | ||
250 | /* | 250 | /* |
251 | * caches need to be periodically cleaned. | 251 | * caches need to be periodically cleaned. |
@@ -373,7 +373,7 @@ int cache_register(struct cache_detail *cd) | |||
373 | schedule_delayed_work(&cache_cleaner, 0); | 373 | schedule_delayed_work(&cache_cleaner, 0); |
374 | return 0; | 374 | return 0; |
375 | } | 375 | } |
376 | EXPORT_SYMBOL(cache_register); | 376 | EXPORT_SYMBOL_GPL(cache_register); |
377 | 377 | ||
378 | void cache_unregister(struct cache_detail *cd) | 378 | void cache_unregister(struct cache_detail *cd) |
379 | { | 379 | { |
@@ -399,7 +399,7 @@ void cache_unregister(struct cache_detail *cd) | |||
399 | out: | 399 | out: |
400 | printk(KERN_ERR "nfsd: failed to unregister %s cache\n", cd->name); | 400 | printk(KERN_ERR "nfsd: failed to unregister %s cache\n", cd->name); |
401 | } | 401 | } |
402 | EXPORT_SYMBOL(cache_unregister); | 402 | EXPORT_SYMBOL_GPL(cache_unregister); |
403 | 403 | ||
404 | /* clean cache tries to find something to clean | 404 | /* clean cache tries to find something to clean |
405 | * and cleans it. | 405 | * and cleans it. |
@@ -514,7 +514,7 @@ void cache_flush(void) | |||
514 | while (cache_clean() != -1) | 514 | while (cache_clean() != -1) |
515 | cond_resched(); | 515 | cond_resched(); |
516 | } | 516 | } |
517 | EXPORT_SYMBOL(cache_flush); | 517 | EXPORT_SYMBOL_GPL(cache_flush); |
518 | 518 | ||
519 | void cache_purge(struct cache_detail *detail) | 519 | void cache_purge(struct cache_detail *detail) |
520 | { | 520 | { |
@@ -523,7 +523,7 @@ void cache_purge(struct cache_detail *detail) | |||
523 | cache_flush(); | 523 | cache_flush(); |
524 | detail->flush_time = 1; | 524 | detail->flush_time = 1; |
525 | } | 525 | } |
526 | EXPORT_SYMBOL(cache_purge); | 526 | EXPORT_SYMBOL_GPL(cache_purge); |
527 | 527 | ||
528 | 528 | ||
529 | /* | 529 | /* |
@@ -988,7 +988,7 @@ void qword_add(char **bpp, int *lp, char *str) | |||
988 | *bpp = bp; | 988 | *bpp = bp; |
989 | *lp = len; | 989 | *lp = len; |
990 | } | 990 | } |
991 | EXPORT_SYMBOL(qword_add); | 991 | EXPORT_SYMBOL_GPL(qword_add); |
992 | 992 | ||
993 | void qword_addhex(char **bpp, int *lp, char *buf, int blen) | 993 | void qword_addhex(char **bpp, int *lp, char *buf, int blen) |
994 | { | 994 | { |
@@ -1017,7 +1017,7 @@ void qword_addhex(char **bpp, int *lp, char *buf, int blen) | |||
1017 | *bpp = bp; | 1017 | *bpp = bp; |
1018 | *lp = len; | 1018 | *lp = len; |
1019 | } | 1019 | } |
1020 | EXPORT_SYMBOL(qword_addhex); | 1020 | EXPORT_SYMBOL_GPL(qword_addhex); |
1021 | 1021 | ||
1022 | static void warn_no_listener(struct cache_detail *detail) | 1022 | static void warn_no_listener(struct cache_detail *detail) |
1023 | { | 1023 | { |
@@ -1140,7 +1140,7 @@ int qword_get(char **bpp, char *dest, int bufsize) | |||
1140 | *dest = '\0'; | 1140 | *dest = '\0'; |
1141 | return len; | 1141 | return len; |
1142 | } | 1142 | } |
1143 | EXPORT_SYMBOL(qword_get); | 1143 | EXPORT_SYMBOL_GPL(qword_get); |
1144 | 1144 | ||
1145 | 1145 | ||
1146 | /* | 1146 | /* |
diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c index 50b049c6598a..085372ef4feb 100644 --- a/net/sunrpc/stats.c +++ b/net/sunrpc/stats.c | |||
@@ -106,7 +106,7 @@ void svc_seq_show(struct seq_file *seq, const struct svc_stat *statp) { | |||
106 | seq_putc(seq, '\n'); | 106 | seq_putc(seq, '\n'); |
107 | } | 107 | } |
108 | } | 108 | } |
109 | EXPORT_SYMBOL(svc_seq_show); | 109 | EXPORT_SYMBOL_GPL(svc_seq_show); |
110 | 110 | ||
111 | /** | 111 | /** |
112 | * rpc_alloc_iostats - allocate an rpc_iostats structure | 112 | * rpc_alloc_iostats - allocate an rpc_iostats structure |
@@ -249,14 +249,14 @@ svc_proc_register(struct svc_stat *statp, const struct file_operations *fops) | |||
249 | { | 249 | { |
250 | return do_register(statp->program->pg_name, statp, fops); | 250 | return do_register(statp->program->pg_name, statp, fops); |
251 | } | 251 | } |
252 | EXPORT_SYMBOL(svc_proc_register); | 252 | EXPORT_SYMBOL_GPL(svc_proc_register); |
253 | 253 | ||
254 | void | 254 | void |
255 | svc_proc_unregister(const char *name) | 255 | svc_proc_unregister(const char *name) |
256 | { | 256 | { |
257 | remove_proc_entry(name, proc_net_rpc); | 257 | remove_proc_entry(name, proc_net_rpc); |
258 | } | 258 | } |
259 | EXPORT_SYMBOL(svc_proc_unregister); | 259 | EXPORT_SYMBOL_GPL(svc_proc_unregister); |
260 | 260 | ||
261 | void | 261 | void |
262 | rpc_proc_init(void) | 262 | rpc_proc_init(void) |
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index 54c98d876847..c51fed4d1af1 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c | |||
@@ -431,7 +431,7 @@ svc_create(struct svc_program *prog, unsigned int bufsize, | |||
431 | { | 431 | { |
432 | return __svc_create(prog, bufsize, /*npools*/1, family, shutdown); | 432 | return __svc_create(prog, bufsize, /*npools*/1, family, shutdown); |
433 | } | 433 | } |
434 | EXPORT_SYMBOL(svc_create); | 434 | EXPORT_SYMBOL_GPL(svc_create); |
435 | 435 | ||
436 | struct svc_serv * | 436 | struct svc_serv * |
437 | svc_create_pooled(struct svc_program *prog, unsigned int bufsize, | 437 | svc_create_pooled(struct svc_program *prog, unsigned int bufsize, |
@@ -450,7 +450,7 @@ svc_create_pooled(struct svc_program *prog, unsigned int bufsize, | |||
450 | 450 | ||
451 | return serv; | 451 | return serv; |
452 | } | 452 | } |
453 | EXPORT_SYMBOL(svc_create_pooled); | 453 | EXPORT_SYMBOL_GPL(svc_create_pooled); |
454 | 454 | ||
455 | /* | 455 | /* |
456 | * Destroy an RPC service. Should be called with appropriate locking to | 456 | * Destroy an RPC service. Should be called with appropriate locking to |
@@ -492,7 +492,7 @@ svc_destroy(struct svc_serv *serv) | |||
492 | kfree(serv->sv_pools); | 492 | kfree(serv->sv_pools); |
493 | kfree(serv); | 493 | kfree(serv); |
494 | } | 494 | } |
495 | EXPORT_SYMBOL(svc_destroy); | 495 | EXPORT_SYMBOL_GPL(svc_destroy); |
496 | 496 | ||
497 | /* | 497 | /* |
498 | * Allocate an RPC server's buffer space. | 498 | * Allocate an RPC server's buffer space. |
@@ -567,7 +567,7 @@ out_thread: | |||
567 | out_enomem: | 567 | out_enomem: |
568 | return ERR_PTR(-ENOMEM); | 568 | return ERR_PTR(-ENOMEM); |
569 | } | 569 | } |
570 | EXPORT_SYMBOL(svc_prepare_thread); | 570 | EXPORT_SYMBOL_GPL(svc_prepare_thread); |
571 | 571 | ||
572 | /* | 572 | /* |
573 | * Choose a pool in which to create a new thread, for svc_set_num_threads | 573 | * Choose a pool in which to create a new thread, for svc_set_num_threads |
@@ -689,7 +689,7 @@ svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs) | |||
689 | 689 | ||
690 | return error; | 690 | return error; |
691 | } | 691 | } |
692 | EXPORT_SYMBOL(svc_set_num_threads); | 692 | EXPORT_SYMBOL_GPL(svc_set_num_threads); |
693 | 693 | ||
694 | /* | 694 | /* |
695 | * Called from a server thread as it's exiting. Caller must hold the BKL or | 695 | * Called from a server thread as it's exiting. Caller must hold the BKL or |
@@ -717,7 +717,7 @@ svc_exit_thread(struct svc_rqst *rqstp) | |||
717 | if (serv) | 717 | if (serv) |
718 | svc_destroy(serv); | 718 | svc_destroy(serv); |
719 | } | 719 | } |
720 | EXPORT_SYMBOL(svc_exit_thread); | 720 | EXPORT_SYMBOL_GPL(svc_exit_thread); |
721 | 721 | ||
722 | #ifdef CONFIG_SUNRPC_REGISTER_V4 | 722 | #ifdef CONFIG_SUNRPC_REGISTER_V4 |
723 | 723 | ||
@@ -1231,7 +1231,7 @@ err_bad: | |||
1231 | svc_putnl(resv, ntohl(rpc_stat)); | 1231 | svc_putnl(resv, ntohl(rpc_stat)); |
1232 | goto sendit; | 1232 | goto sendit; |
1233 | } | 1233 | } |
1234 | EXPORT_SYMBOL(svc_process); | 1234 | EXPORT_SYMBOL_GPL(svc_process); |
1235 | 1235 | ||
1236 | /* | 1236 | /* |
1237 | * Return (transport-specific) limit on the rpc payload. | 1237 | * Return (transport-specific) limit on the rpc payload. |
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index bf5b5cdafebf..e588df5d6b34 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c | |||
@@ -440,7 +440,7 @@ void svc_reserve(struct svc_rqst *rqstp, int space) | |||
440 | svc_xprt_enqueue(xprt); | 440 | svc_xprt_enqueue(xprt); |
441 | } | 441 | } |
442 | } | 442 | } |
443 | EXPORT_SYMBOL(svc_reserve); | 443 | EXPORT_SYMBOL_GPL(svc_reserve); |
444 | 444 | ||
445 | static void svc_xprt_release(struct svc_rqst *rqstp) | 445 | static void svc_xprt_release(struct svc_rqst *rqstp) |
446 | { | 446 | { |
@@ -448,6 +448,9 @@ static void svc_xprt_release(struct svc_rqst *rqstp) | |||
448 | 448 | ||
449 | rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp); | 449 | rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp); |
450 | 450 | ||
451 | kfree(rqstp->rq_deferred); | ||
452 | rqstp->rq_deferred = NULL; | ||
453 | |||
451 | svc_free_res_pages(rqstp); | 454 | svc_free_res_pages(rqstp); |
452 | rqstp->rq_res.page_len = 0; | 455 | rqstp->rq_res.page_len = 0; |
453 | rqstp->rq_res.page_base = 0; | 456 | rqstp->rq_res.page_base = 0; |
@@ -498,7 +501,7 @@ void svc_wake_up(struct svc_serv *serv) | |||
498 | spin_unlock_bh(&pool->sp_lock); | 501 | spin_unlock_bh(&pool->sp_lock); |
499 | } | 502 | } |
500 | } | 503 | } |
501 | EXPORT_SYMBOL(svc_wake_up); | 504 | EXPORT_SYMBOL_GPL(svc_wake_up); |
502 | 505 | ||
503 | int svc_port_is_privileged(struct sockaddr *sin) | 506 | int svc_port_is_privileged(struct sockaddr *sin) |
504 | { | 507 | { |
@@ -515,8 +518,10 @@ int svc_port_is_privileged(struct sockaddr *sin) | |||
515 | } | 518 | } |
516 | 519 | ||
517 | /* | 520 | /* |
518 | * Make sure that we don't have too many active connections. If we | 521 | * Make sure that we don't have too many active connections. If we have, |
519 | * have, something must be dropped. | 522 | * something must be dropped. It's not clear what will happen if we allow |
523 | * "too many" connections, but when dealing with network-facing software, | ||
524 | * we have to code defensively. Here we do that by imposing hard limits. | ||
520 | * | 525 | * |
521 | * There's no point in trying to do random drop here for DoS | 526 | * There's no point in trying to do random drop here for DoS |
522 | * prevention. The NFS client does 1 reconnect in 15 seconds. An | 527 | * prevention. The NFS client does 1 reconnect in 15 seconds. An |
@@ -525,19 +530,27 @@ int svc_port_is_privileged(struct sockaddr *sin) | |||
525 | * The only somewhat efficient mechanism would be if drop old | 530 | * The only somewhat efficient mechanism would be if drop old |
526 | * connections from the same IP first. But right now we don't even | 531 | * connections from the same IP first. But right now we don't even |
527 | * record the client IP in svc_sock. | 532 | * record the client IP in svc_sock. |
533 | * | ||
534 | * Single-threaded services that expect a lot of clients will probably | ||
535 | * need to set sv_maxconn to override the default value, which is based | ||
536 | * on the number of threads. | ||
528 | */ | 537 | */ |
529 | static void svc_check_conn_limits(struct svc_serv *serv) | 538 | static void svc_check_conn_limits(struct svc_serv *serv) |
530 | { | 539 | { |
531 | if (serv->sv_tmpcnt > (serv->sv_nrthreads+3)*20) { | 540 | unsigned int limit = serv->sv_maxconn ? serv->sv_maxconn : |
541 | (serv->sv_nrthreads+3) * 20; | ||
542 | |||
543 | if (serv->sv_tmpcnt > limit) { | ||
532 | struct svc_xprt *xprt = NULL; | 544 | struct svc_xprt *xprt = NULL; |
533 | spin_lock_bh(&serv->sv_lock); | 545 | spin_lock_bh(&serv->sv_lock); |
534 | if (!list_empty(&serv->sv_tempsocks)) { | 546 | if (!list_empty(&serv->sv_tempsocks)) { |
535 | if (net_ratelimit()) { | 547 | if (net_ratelimit()) { |
536 | /* Try to help the admin */ | 548 | /* Try to help the admin */ |
537 | printk(KERN_NOTICE "%s: too many open " | 549 | printk(KERN_NOTICE "%s: too many open " |
538 | "connections, consider increasing the " | 550 | "connections, consider increasing %s\n", |
539 | "number of nfsd threads\n", | 551 | serv->sv_name, serv->sv_maxconn ? |
540 | serv->sv_name); | 552 | "the max number of connections." : |
553 | "the number of threads."); | ||
541 | } | 554 | } |
542 | /* | 555 | /* |
543 | * Always select the oldest connection. It's not fair, | 556 | * Always select the oldest connection. It's not fair, |
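For illustration (not part of the patch): with, say, 8 server threads and sv_maxconn left at 0, the limit above works out to (8 + 3) * 20 = 220 temporary connections; an administrator who sets sv_maxconn = 1024 gets exactly 1024, independent of the thread count.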
@@ -730,7 +743,7 @@ int svc_recv(struct svc_rqst *rqstp, long timeout) | |||
730 | serv->sv_stats->netcnt++; | 743 | serv->sv_stats->netcnt++; |
731 | return len; | 744 | return len; |
732 | } | 745 | } |
733 | EXPORT_SYMBOL(svc_recv); | 746 | EXPORT_SYMBOL_GPL(svc_recv); |
734 | 747 | ||
735 | /* | 748 | /* |
736 | * Drop request | 749 | * Drop request |
@@ -740,7 +753,7 @@ void svc_drop(struct svc_rqst *rqstp) | |||
740 | dprintk("svc: xprt %p dropped request\n", rqstp->rq_xprt); | 753 | dprintk("svc: xprt %p dropped request\n", rqstp->rq_xprt); |
741 | svc_xprt_release(rqstp); | 754 | svc_xprt_release(rqstp); |
742 | } | 755 | } |
743 | EXPORT_SYMBOL(svc_drop); | 756 | EXPORT_SYMBOL_GPL(svc_drop); |
744 | 757 | ||
745 | /* | 758 | /* |
746 | * Return reply to client. | 759 | * Return reply to client. |
@@ -837,6 +850,11 @@ static void svc_age_temp_xprts(unsigned long closure) | |||
837 | void svc_delete_xprt(struct svc_xprt *xprt) | 850 | void svc_delete_xprt(struct svc_xprt *xprt) |
838 | { | 851 | { |
839 | struct svc_serv *serv = xprt->xpt_server; | 852 | struct svc_serv *serv = xprt->xpt_server; |
853 | struct svc_deferred_req *dr; | ||
854 | |||
855 | /* Only do this once */ | ||
856 | if (test_and_set_bit(XPT_DEAD, &xprt->xpt_flags)) | ||
857 | return; | ||
840 | 858 | ||
841 | dprintk("svc: svc_delete_xprt(%p)\n", xprt); | 859 | dprintk("svc: svc_delete_xprt(%p)\n", xprt); |
842 | xprt->xpt_ops->xpo_detach(xprt); | 860 | xprt->xpt_ops->xpo_detach(xprt); |
@@ -851,12 +869,16 @@ void svc_delete_xprt(struct svc_xprt *xprt) | |||
851 | * while still attached to a queue, the queue itself | 869 | * while still attached to a queue, the queue itself |
852 | * is about to be destroyed (in svc_destroy). | 870 | * is about to be destroyed (in svc_destroy). |
853 | */ | 871 | */ |
854 | if (!test_and_set_bit(XPT_DEAD, &xprt->xpt_flags)) { | 872 | if (test_bit(XPT_TEMP, &xprt->xpt_flags)) |
855 | BUG_ON(atomic_read(&xprt->xpt_ref.refcount) < 2); | 873 | serv->sv_tmpcnt--; |
856 | if (test_bit(XPT_TEMP, &xprt->xpt_flags)) | 874 | |
857 | serv->sv_tmpcnt--; | 875 | for (dr = svc_deferred_dequeue(xprt); dr; |
876 | dr = svc_deferred_dequeue(xprt)) { | ||
858 | svc_xprt_put(xprt); | 877 | svc_xprt_put(xprt); |
878 | kfree(dr); | ||
859 | } | 879 | } |
880 | |||
881 | svc_xprt_put(xprt); | ||
860 | spin_unlock_bh(&serv->sv_lock); | 882 | spin_unlock_bh(&serv->sv_lock); |
861 | } | 883 | } |
862 | 884 | ||
@@ -902,17 +924,19 @@ static void svc_revisit(struct cache_deferred_req *dreq, int too_many) | |||
902 | container_of(dreq, struct svc_deferred_req, handle); | 924 | container_of(dreq, struct svc_deferred_req, handle); |
903 | struct svc_xprt *xprt = dr->xprt; | 925 | struct svc_xprt *xprt = dr->xprt; |
904 | 926 | ||
905 | if (too_many) { | 927 | spin_lock(&xprt->xpt_lock); |
928 | set_bit(XPT_DEFERRED, &xprt->xpt_flags); | ||
929 | if (too_many || test_bit(XPT_DEAD, &xprt->xpt_flags)) { | ||
930 | spin_unlock(&xprt->xpt_lock); | ||
931 | dprintk("revisit canceled\n"); | ||
906 | svc_xprt_put(xprt); | 932 | svc_xprt_put(xprt); |
907 | kfree(dr); | 933 | kfree(dr); |
908 | return; | 934 | return; |
909 | } | 935 | } |
910 | dprintk("revisit queued\n"); | 936 | dprintk("revisit queued\n"); |
911 | dr->xprt = NULL; | 937 | dr->xprt = NULL; |
912 | spin_lock(&xprt->xpt_lock); | ||
913 | list_add(&dr->handle.recent, &xprt->xpt_deferred); | 938 | list_add(&dr->handle.recent, &xprt->xpt_deferred); |
914 | spin_unlock(&xprt->xpt_lock); | 939 | spin_unlock(&xprt->xpt_lock); |
915 | set_bit(XPT_DEFERRED, &xprt->xpt_flags); | ||
916 | svc_xprt_enqueue(xprt); | 940 | svc_xprt_enqueue(xprt); |
917 | svc_xprt_put(xprt); | 941 | svc_xprt_put(xprt); |
918 | } | 942 | } |
diff --git a/net/sunrpc/svcauth.c b/net/sunrpc/svcauth.c index 8a73cbb16052..e64109b02aee 100644 --- a/net/sunrpc/svcauth.c +++ b/net/sunrpc/svcauth.c | |||
@@ -57,13 +57,13 @@ svc_authenticate(struct svc_rqst *rqstp, __be32 *authp) | |||
57 | rqstp->rq_authop = aops; | 57 | rqstp->rq_authop = aops; |
58 | return aops->accept(rqstp, authp); | 58 | return aops->accept(rqstp, authp); |
59 | } | 59 | } |
60 | EXPORT_SYMBOL(svc_authenticate); | 60 | EXPORT_SYMBOL_GPL(svc_authenticate); |
61 | 61 | ||
62 | int svc_set_client(struct svc_rqst *rqstp) | 62 | int svc_set_client(struct svc_rqst *rqstp) |
63 | { | 63 | { |
64 | return rqstp->rq_authop->set_client(rqstp); | 64 | return rqstp->rq_authop->set_client(rqstp); |
65 | } | 65 | } |
66 | EXPORT_SYMBOL(svc_set_client); | 66 | EXPORT_SYMBOL_GPL(svc_set_client); |
67 | 67 | ||
68 | /* A request, which was authenticated, has now executed. | 68 | /* A request, which was authenticated, has now executed. |
69 | * Time to finalise the credentials and verifier | 69 | * Time to finalise the credentials and verifier |
@@ -95,7 +95,7 @@ svc_auth_register(rpc_authflavor_t flavor, struct auth_ops *aops) | |||
95 | spin_unlock(&authtab_lock); | 95 | spin_unlock(&authtab_lock); |
96 | return rv; | 96 | return rv; |
97 | } | 97 | } |
98 | EXPORT_SYMBOL(svc_auth_register); | 98 | EXPORT_SYMBOL_GPL(svc_auth_register); |
99 | 99 | ||
100 | void | 100 | void |
101 | svc_auth_unregister(rpc_authflavor_t flavor) | 101 | svc_auth_unregister(rpc_authflavor_t flavor) |
@@ -105,7 +105,7 @@ svc_auth_unregister(rpc_authflavor_t flavor) | |||
105 | authtab[flavor] = NULL; | 105 | authtab[flavor] = NULL; |
106 | spin_unlock(&authtab_lock); | 106 | spin_unlock(&authtab_lock); |
107 | } | 107 | } |
108 | EXPORT_SYMBOL(svc_auth_unregister); | 108 | EXPORT_SYMBOL_GPL(svc_auth_unregister); |
109 | 109 | ||
110 | /************************************************** | 110 | /************************************************** |
111 | * 'auth_domains' are stored in a hash table indexed by name. | 111 | * 'auth_domains' are stored in a hash table indexed by name. |
@@ -132,7 +132,7 @@ void auth_domain_put(struct auth_domain *dom) | |||
132 | spin_unlock(&auth_domain_lock); | 132 | spin_unlock(&auth_domain_lock); |
133 | } | 133 | } |
134 | } | 134 | } |
135 | EXPORT_SYMBOL(auth_domain_put); | 135 | EXPORT_SYMBOL_GPL(auth_domain_put); |
136 | 136 | ||
137 | struct auth_domain * | 137 | struct auth_domain * |
138 | auth_domain_lookup(char *name, struct auth_domain *new) | 138 | auth_domain_lookup(char *name, struct auth_domain *new) |
@@ -157,10 +157,10 @@ auth_domain_lookup(char *name, struct auth_domain *new) | |||
157 | spin_unlock(&auth_domain_lock); | 157 | spin_unlock(&auth_domain_lock); |
158 | return new; | 158 | return new; |
159 | } | 159 | } |
160 | EXPORT_SYMBOL(auth_domain_lookup); | 160 | EXPORT_SYMBOL_GPL(auth_domain_lookup); |
161 | 161 | ||
162 | struct auth_domain *auth_domain_find(char *name) | 162 | struct auth_domain *auth_domain_find(char *name) |
163 | { | 163 | { |
164 | return auth_domain_lookup(name, NULL); | 164 | return auth_domain_lookup(name, NULL); |
165 | } | 165 | } |
166 | EXPORT_SYMBOL(auth_domain_find); | 166 | EXPORT_SYMBOL_GPL(auth_domain_find); |
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c index 82240e6127b2..5c865e2d299e 100644 --- a/net/sunrpc/svcauth_unix.c +++ b/net/sunrpc/svcauth_unix.c | |||
@@ -64,7 +64,7 @@ struct auth_domain *unix_domain_find(char *name) | |||
64 | rv = auth_domain_lookup(name, &new->h); | 64 | rv = auth_domain_lookup(name, &new->h); |
65 | } | 65 | } |
66 | } | 66 | } |
67 | EXPORT_SYMBOL(unix_domain_find); | 67 | EXPORT_SYMBOL_GPL(unix_domain_find); |
68 | 68 | ||
69 | static void svcauth_unix_domain_release(struct auth_domain *dom) | 69 | static void svcauth_unix_domain_release(struct auth_domain *dom) |
70 | { | 70 | { |
@@ -358,7 +358,7 @@ int auth_unix_add_addr(struct in6_addr *addr, struct auth_domain *dom) | |||
358 | else | 358 | else |
359 | return -ENOMEM; | 359 | return -ENOMEM; |
360 | } | 360 | } |
361 | EXPORT_SYMBOL(auth_unix_add_addr); | 361 | EXPORT_SYMBOL_GPL(auth_unix_add_addr); |
362 | 362 | ||
363 | int auth_unix_forget_old(struct auth_domain *dom) | 363 | int auth_unix_forget_old(struct auth_domain *dom) |
364 | { | 364 | { |
@@ -370,7 +370,7 @@ int auth_unix_forget_old(struct auth_domain *dom) | |||
370 | udom->addr_changes++; | 370 | udom->addr_changes++; |
371 | return 0; | 371 | return 0; |
372 | } | 372 | } |
373 | EXPORT_SYMBOL(auth_unix_forget_old); | 373 | EXPORT_SYMBOL_GPL(auth_unix_forget_old); |
374 | 374 | ||
375 | struct auth_domain *auth_unix_lookup(struct in6_addr *addr) | 375 | struct auth_domain *auth_unix_lookup(struct in6_addr *addr) |
376 | { | 376 | { |
@@ -395,13 +395,13 @@ struct auth_domain *auth_unix_lookup(struct in6_addr *addr) | |||
395 | cache_put(&ipm->h, &ip_map_cache); | 395 | cache_put(&ipm->h, &ip_map_cache); |
396 | return rv; | 396 | return rv; |
397 | } | 397 | } |
398 | EXPORT_SYMBOL(auth_unix_lookup); | 398 | EXPORT_SYMBOL_GPL(auth_unix_lookup); |
399 | 399 | ||
400 | void svcauth_unix_purge(void) | 400 | void svcauth_unix_purge(void) |
401 | { | 401 | { |
402 | cache_purge(&ip_map_cache); | 402 | cache_purge(&ip_map_cache); |
403 | } | 403 | } |
404 | EXPORT_SYMBOL(svcauth_unix_purge); | 404 | EXPORT_SYMBOL_GPL(svcauth_unix_purge); |
405 | 405 | ||
406 | static inline struct ip_map * | 406 | static inline struct ip_map * |
407 | ip_map_cached_get(struct svc_rqst *rqstp) | 407 | ip_map_cached_get(struct svc_rqst *rqstp) |
@@ -714,7 +714,7 @@ svcauth_unix_set_client(struct svc_rqst *rqstp) | |||
714 | return SVC_OK; | 714 | return SVC_OK; |
715 | } | 715 | } |
716 | 716 | ||
717 | EXPORT_SYMBOL(svcauth_unix_set_client); | 717 | EXPORT_SYMBOL_GPL(svcauth_unix_set_client); |
718 | 718 | ||
719 | static int | 719 | static int |
720 | svcauth_null_accept(struct svc_rqst *rqstp, __be32 *authp) | 720 | svcauth_null_accept(struct svc_rqst *rqstp, __be32 *authp) |
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index ef3238d665ee..5763e6460fea 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c | |||
@@ -59,6 +59,7 @@ static void svc_udp_data_ready(struct sock *, int); | |||
59 | static int svc_udp_recvfrom(struct svc_rqst *); | 59 | static int svc_udp_recvfrom(struct svc_rqst *); |
60 | static int svc_udp_sendto(struct svc_rqst *); | 60 | static int svc_udp_sendto(struct svc_rqst *); |
61 | static void svc_sock_detach(struct svc_xprt *); | 61 | static void svc_sock_detach(struct svc_xprt *); |
62 | static void svc_tcp_sock_detach(struct svc_xprt *); | ||
62 | static void svc_sock_free(struct svc_xprt *); | 63 | static void svc_sock_free(struct svc_xprt *); |
63 | 64 | ||
64 | static struct svc_xprt *svc_create_socket(struct svc_serv *, int, | 65 | static struct svc_xprt *svc_create_socket(struct svc_serv *, int, |
@@ -102,7 +103,6 @@ static void svc_reclassify_socket(struct socket *sock) | |||
102 | static void svc_release_skb(struct svc_rqst *rqstp) | 103 | static void svc_release_skb(struct svc_rqst *rqstp) |
103 | { | 104 | { |
104 | struct sk_buff *skb = rqstp->rq_xprt_ctxt; | 105 | struct sk_buff *skb = rqstp->rq_xprt_ctxt; |
105 | struct svc_deferred_req *dr = rqstp->rq_deferred; | ||
106 | 106 | ||
107 | if (skb) { | 107 | if (skb) { |
108 | struct svc_sock *svsk = | 108 | struct svc_sock *svsk = |
@@ -112,10 +112,6 @@ static void svc_release_skb(struct svc_rqst *rqstp) | |||
112 | dprintk("svc: service %p, releasing skb %p\n", rqstp, skb); | 112 | dprintk("svc: service %p, releasing skb %p\n", rqstp, skb); |
113 | skb_free_datagram(svsk->sk_sk, skb); | 113 | skb_free_datagram(svsk->sk_sk, skb); |
114 | } | 114 | } |
115 | if (dr) { | ||
116 | rqstp->rq_deferred = NULL; | ||
117 | kfree(dr); | ||
118 | } | ||
119 | } | 115 | } |
120 | 116 | ||
121 | union svc_pktinfo_u { | 117 | union svc_pktinfo_u { |
@@ -289,7 +285,7 @@ svc_sock_names(char *buf, struct svc_serv *serv, char *toclose) | |||
289 | return -ENOENT; | 285 | return -ENOENT; |
290 | return len; | 286 | return len; |
291 | } | 287 | } |
292 | EXPORT_SYMBOL(svc_sock_names); | 288 | EXPORT_SYMBOL_GPL(svc_sock_names); |
293 | 289 | ||
294 | /* | 290 | /* |
295 | * Check input queue length | 291 | * Check input queue length |
@@ -1017,7 +1013,7 @@ static struct svc_xprt_ops svc_tcp_ops = { | |||
1017 | .xpo_recvfrom = svc_tcp_recvfrom, | 1013 | .xpo_recvfrom = svc_tcp_recvfrom, |
1018 | .xpo_sendto = svc_tcp_sendto, | 1014 | .xpo_sendto = svc_tcp_sendto, |
1019 | .xpo_release_rqst = svc_release_skb, | 1015 | .xpo_release_rqst = svc_release_skb, |
1020 | .xpo_detach = svc_sock_detach, | 1016 | .xpo_detach = svc_tcp_sock_detach, |
1021 | .xpo_free = svc_sock_free, | 1017 | .xpo_free = svc_sock_free, |
1022 | .xpo_prep_reply_hdr = svc_tcp_prep_reply_hdr, | 1018 | .xpo_prep_reply_hdr = svc_tcp_prep_reply_hdr, |
1023 | .xpo_has_wspace = svc_tcp_has_wspace, | 1019 | .xpo_has_wspace = svc_tcp_has_wspace, |
@@ -1101,7 +1097,7 @@ void svc_sock_update_bufs(struct svc_serv *serv) | |||
1101 | } | 1097 | } |
1102 | spin_unlock_bh(&serv->sv_lock); | 1098 | spin_unlock_bh(&serv->sv_lock); |
1103 | } | 1099 | } |
1104 | EXPORT_SYMBOL(svc_sock_update_bufs); | 1100 | EXPORT_SYMBOL_GPL(svc_sock_update_bufs); |
1105 | 1101 | ||
1106 | /* | 1102 | /* |
1107 | * Initialize socket for RPC use and create svc_sock struct | 1103 | * Initialize socket for RPC use and create svc_sock struct |
@@ -1287,6 +1283,24 @@ static void svc_sock_detach(struct svc_xprt *xprt) | |||
1287 | sk->sk_state_change = svsk->sk_ostate; | 1283 | sk->sk_state_change = svsk->sk_ostate; |
1288 | sk->sk_data_ready = svsk->sk_odata; | 1284 | sk->sk_data_ready = svsk->sk_odata; |
1289 | sk->sk_write_space = svsk->sk_owspace; | 1285 | sk->sk_write_space = svsk->sk_owspace; |
1286 | |||
1287 | if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) | ||
1288 | wake_up_interruptible(sk->sk_sleep); | ||
1289 | } | ||
1290 | |||
1291 | /* | ||
1292 | * Disconnect the socket, and reset the callbacks | ||
1293 | */ | ||
1294 | static void svc_tcp_sock_detach(struct svc_xprt *xprt) | ||
1295 | { | ||
1296 | struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt); | ||
1297 | |||
1298 | dprintk("svc: svc_tcp_sock_detach(%p)\n", svsk); | ||
1299 | |||
1300 | svc_sock_detach(xprt); | ||
1301 | |||
1302 | if (!test_bit(XPT_LISTENER, &xprt->xpt_flags)) | ||
1303 | kernel_sock_shutdown(svsk->sk_sock, SHUT_RDWR); | ||
1290 | } | 1304 | } |
1291 | 1305 | ||
1292 | /* | 1306 | /* |
diff --git a/net/wimax/Kconfig b/net/wimax/Kconfig new file mode 100644 index 000000000000..18495cdcd10d --- /dev/null +++ b/net/wimax/Kconfig | |||
@@ -0,0 +1,52 @@ | |||
1 | # | ||
2 | # WiMAX LAN device configuration | ||
3 | # | ||
4 | # Note the ugly 'depends on' on WIMAX: that disallows RFKILL to be a | ||
5 | # module if WIMAX is to be linked in. The WiMAX code is done in such a | ||
6 | # way that it doesn't require an explicit dependency on RFKILL in | ||
7 | # case an embedded system wants to rip it out. | ||
8 | # | ||
9 | # As well, enablement of the RFKILL code means we need the INPUT layer | ||
10 | # support to inject events coming from hw rfkill switches. That | ||
11 | # dependency could be killed if input.h provided appropriate means to | ||
12 | # work when input is disabled. | ||
13 | |||
14 | comment "WiMAX Wireless Broadband support requires CONFIG_INPUT enabled" | ||
15 | depends on INPUT = n && RFKILL != n | ||
16 | |||
17 | menuconfig WIMAX | ||
18 | tristate "WiMAX Wireless Broadband support" | ||
19 | depends on (y && RFKILL != m) || m | ||
20 | depends on (INPUT && RFKILL != n) || RFKILL = n | ||
21 | help | ||
22 | |||
23 | Select to configure support for devices that provide | ||
24 | wireless broadband connectivity using the WiMAX protocol | ||
25 | (IEEE 802.16). | ||
26 | |||
27 | Please note that most of these devices require signing up | ||
28 | for a service plan with a provider. | ||
29 | |||
30 | The different WiMAX drivers can be enabled in the menu entry | ||
31 | |||
32 | Device Drivers > Network device support > WiMAX Wireless | ||
33 | Broadband devices | ||
34 | |||
35 | If unsure, it is safe to select M (module). | ||
36 | |||
37 | config WIMAX_DEBUG_LEVEL | ||
38 | int "WiMAX debug level" | ||
39 | depends on WIMAX | ||
40 | default 8 | ||
41 | help | ||
42 | |||
43 | Select the maximum debug verbosity level to be compiled into | ||
44 | the WiMAX stack code. | ||
45 | |||
46 | By default, debug messages are disabled at runtime and can | ||
47 | be selectively enabled for different parts of the code using | ||
48 | the sysfs debug-levels file. | ||
49 | |||
50 | If set at zero, this will compile out all the debug code. | ||
51 | |||
52 | It is recommended that it is left at 8. | ||
diff --git a/net/wimax/Makefile b/net/wimax/Makefile new file mode 100644 index 000000000000..5b80b941c2c9 --- /dev/null +++ b/net/wimax/Makefile | |||
@@ -0,0 +1,13 @@ | |||
1 | |||
2 | obj-$(CONFIG_WIMAX) += wimax.o | ||
3 | |||
4 | wimax-y := \ | ||
5 | id-table.o \ | ||
6 | op-msg.o \ | ||
7 | op-reset.o \ | ||
8 | op-rfkill.o \ | ||
9 | stack.o | ||
10 | |||
11 | wimax-$(CONFIG_DEBUG_FS) += debugfs.o | ||
12 | |||
13 | |||
diff --git a/net/wimax/debug-levels.h b/net/wimax/debug-levels.h new file mode 100644 index 000000000000..1c29123a3aa9 --- /dev/null +++ b/net/wimax/debug-levels.h | |||
@@ -0,0 +1,42 @@ | |||
1 | /* | ||
2 | * Linux WiMAX Stack | ||
3 | * Debug levels control file for the wimax module | ||
4 | * | ||
5 | * | ||
6 | * Copyright (C) 2007-2008 Intel Corporation <linux-wimax@intel.com> | ||
7 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License version | ||
11 | * 2 as published by the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
21 | * 02110-1301, USA. | ||
22 | */ | ||
23 | #ifndef __debug_levels__h__ | ||
24 | #define __debug_levels__h__ | ||
25 | |||
26 | /* Maximum compile and run time debug level for all submodules */ | ||
27 | #define D_MODULENAME wimax | ||
28 | #define D_MASTER CONFIG_WIMAX_DEBUG_LEVEL | ||
29 | |||
30 | #include <linux/wimax/debug.h> | ||
31 | |||
32 | /* List of all the enabled modules */ | ||
33 | enum d_module { | ||
34 | D_SUBMODULE_DECLARE(debugfs), | ||
35 | D_SUBMODULE_DECLARE(id_table), | ||
36 | D_SUBMODULE_DECLARE(op_msg), | ||
37 | D_SUBMODULE_DECLARE(op_reset), | ||
38 | D_SUBMODULE_DECLARE(op_rfkill), | ||
39 | D_SUBMODULE_DECLARE(stack), | ||
40 | }; | ||
41 | |||
42 | #endif /* #ifndef __debug_levels__h__ */ | ||
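debug-levels.h centralizes the compile-time ceiling (D_MASTER, taken from CONFIG_WIMAX_DEBUG_LEVEL) and the list of submodules; each source file then selects its own submodule before including it, as debugfs.c and op-msg.c below do. A minimal sketch of what a submodule source file looks like under this framework (the function body is hypothetical):

/* some wimax submodule .c file -- sketch only */
#include <linux/wimax.h>
#include "wimax-internal.h"

#define D_SUBMODULE op_reset		/* must be one of the enum d_module entries above */
#include "debug-levels.h"

static void example_op(struct device *dev)
{
	d_fnstart(3, dev, "(dev %p)\n", dev);
	d_printf(1, dev, "doing something interesting\n");
	d_fnend(3, dev, "(dev %p) = void\n", dev);
}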
diff --git a/net/wimax/debugfs.c b/net/wimax/debugfs.c new file mode 100644 index 000000000000..87cf4430079c --- /dev/null +++ b/net/wimax/debugfs.c | |||
@@ -0,0 +1,90 @@ | |||
1 | /* | ||
2 | * Linux WiMAX | ||
3 | * Debugfs support | ||
4 | * | ||
5 | * | ||
6 | * Copyright (C) 2005-2006 Intel Corporation <linux-wimax@intel.com> | ||
7 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License version | ||
11 | * 2 as published by the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
21 | * 02110-1301, USA. | ||
22 | */ | ||
23 | #include <linux/debugfs.h> | ||
24 | #include <linux/wimax.h> | ||
25 | #include "wimax-internal.h" | ||
26 | |||
27 | #define D_SUBMODULE debugfs | ||
28 | #include "debug-levels.h" | ||
29 | |||
30 | |||
31 | /* Debug framework control of debug levels */ | ||
32 | struct d_level D_LEVEL[] = { | ||
33 | D_SUBMODULE_DEFINE(debugfs), | ||
34 | D_SUBMODULE_DEFINE(id_table), | ||
35 | D_SUBMODULE_DEFINE(op_msg), | ||
36 | D_SUBMODULE_DEFINE(op_reset), | ||
37 | D_SUBMODULE_DEFINE(op_rfkill), | ||
38 | D_SUBMODULE_DEFINE(stack), | ||
39 | }; | ||
40 | size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL); | ||
41 | |||
42 | #define __debugfs_register(prefix, name, parent) \ | ||
43 | do { \ | ||
44 | result = d_level_register_debugfs(prefix, name, parent); \ | ||
45 | if (result < 0) \ | ||
46 | goto error; \ | ||
47 | } while (0) | ||
48 | |||
49 | |||
50 | int wimax_debugfs_add(struct wimax_dev *wimax_dev) | ||
51 | { | ||
52 | int result; | ||
53 | struct net_device *net_dev = wimax_dev->net_dev; | ||
54 | struct device *dev = net_dev->dev.parent; | ||
55 | struct dentry *dentry; | ||
56 | char buf[128]; | ||
57 | |||
58 | snprintf(buf, sizeof(buf), "wimax:%s", net_dev->name); | ||
59 | dentry = debugfs_create_dir(buf, NULL); | ||
60 | result = PTR_ERR(dentry); | ||
61 | if (IS_ERR(dentry)) { | ||
62 | if (result == -ENODEV) | ||
63 | result = 0; /* No debugfs support */ | ||
64 | else | ||
65 | dev_err(dev, "Can't create debugfs dentry: %d\n", | ||
66 | result); | ||
67 | goto out; | ||
68 | } | ||
69 | wimax_dev->debugfs_dentry = dentry; | ||
70 | __debugfs_register("wimax_dl_", debugfs, dentry); | ||
71 | __debugfs_register("wimax_dl_", id_table, dentry); | ||
72 | __debugfs_register("wimax_dl_", op_msg, dentry); | ||
73 | __debugfs_register("wimax_dl_", op_reset, dentry); | ||
74 | __debugfs_register("wimax_dl_", op_rfkill, dentry); | ||
75 | __debugfs_register("wimax_dl_", stack, dentry); | ||
76 | result = 0; | ||
77 | out: | ||
78 | return result; | ||
79 | |||
80 | error: | ||
81 | debugfs_remove_recursive(wimax_dev->debugfs_dentry); | ||
82 | return result; | ||
83 | } | ||
84 | |||
85 | void wimax_debugfs_rm(struct wimax_dev *wimax_dev) | ||
86 | { | ||
87 | debugfs_remove_recursive(wimax_dev->debugfs_dentry); | ||
88 | } | ||
89 | |||
90 | |||
diff --git a/net/wimax/id-table.c b/net/wimax/id-table.c new file mode 100644 index 000000000000..5e685f7eda90 --- /dev/null +++ b/net/wimax/id-table.c | |||
@@ -0,0 +1,144 @@ | |||
1 | /* | ||
2 | * Linux WiMAX | ||
2 | * Mapping of generic netlink family IDs to net devices | ||
4 | * | ||
5 | * | ||
6 | * Copyright (C) 2005-2006 Intel Corporation <linux-wimax@intel.com> | ||
7 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License version | ||
11 | * 2 as published by the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
21 | * 02110-1301, USA. | ||
22 | * | ||
23 | * | ||
24 | * We assign a single generic netlink family ID to each device (to | ||
25 | * simplify lookup). | ||
26 | * | ||
27 | * We need a way to map family ID to a wimax_dev pointer. | ||
28 | * | ||
29 | * The idea is to use a very simple lookup. Using a netlink attribute | ||
30 | * with (for example) the interface name implies a heavier search over | ||
31 | * all the network devices; seemed kind of a waste given that we know | ||
32 | * we are looking for a WiMAX device and that most systems will have | ||
33 | * just a single WiMAX adapter. | ||
34 | * | ||
35 | * We put all the WiMAX devices in the system in a linked list and | ||
36 | * match the generic netlink family ID against the list. | ||
37 | * | ||
38 | * By using a linked list, the case of a single adapter in the system | ||
39 | * becomes (almost) no overhead, while still working for many more. If | ||
40 | * it ever goes beyond two, I'll be surprised. | ||
41 | */ | ||
42 | #include <linux/device.h> | ||
43 | #include <net/genetlink.h> | ||
44 | #include <linux/netdevice.h> | ||
45 | #include <linux/list.h> | ||
46 | #include <linux/wimax.h> | ||
47 | #include "wimax-internal.h" | ||
48 | |||
49 | |||
50 | #define D_SUBMODULE id_table | ||
51 | #include "debug-levels.h" | ||
52 | |||
53 | |||
54 | static DEFINE_SPINLOCK(wimax_id_table_lock); | ||
55 | static struct list_head wimax_id_table = LIST_HEAD_INIT(wimax_id_table); | ||
56 | |||
57 | |||
58 | /* | ||
59 | * wimax_id_table_add - add a generic netlink family ID / wimax_dev mapping | ||
60 | * | ||
61 | * @wimax_dev: WiMAX device descriptor to associate to the Generic | ||
62 | * Netlink family ID. | ||
63 | * | ||
64 | * Add the device to the list used to map generic netlink family | ||
65 | * IDs to wimax_dev pointers. | ||
66 | */ | ||
67 | void wimax_id_table_add(struct wimax_dev *wimax_dev) | ||
68 | { | ||
69 | d_fnstart(3, NULL, "(wimax_dev %p)\n", wimax_dev); | ||
70 | spin_lock(&wimax_id_table_lock); | ||
71 | list_add(&wimax_dev->id_table_node, &wimax_id_table); | ||
72 | spin_unlock(&wimax_id_table_lock); | ||
73 | d_fnend(3, NULL, "(wimax_dev %p)\n", wimax_dev); | ||
74 | } | ||
75 | |||
76 | |||
77 | /* | ||
78 | * wimax_dev_get_by_genl_info - look up a wimax_dev from the generic netlink info | ||
79 | * | ||
80 | * The generic netlink family ID has been filled out in the | ||
81 | * nlmsghdr->nlmsg_type field, so we pull it from there, look it up in | ||
82 | * the mapping table and reference the wimax_dev. | ||
83 | * | ||
84 | * When done, the reference should be dropped with | ||
85 | * 'dev_put(wimax_dev->net_dev)'. | ||
86 | */ | ||
87 | struct wimax_dev *wimax_dev_get_by_genl_info( | ||
88 | struct genl_info *info, int ifindex) | ||
89 | { | ||
90 | struct wimax_dev *wimax_dev = NULL; | ||
91 | |||
92 | d_fnstart(3, NULL, "(info %p ifindex %d)\n", info, ifindex); | ||
93 | spin_lock(&wimax_id_table_lock); | ||
94 | list_for_each_entry(wimax_dev, &wimax_id_table, id_table_node) { | ||
95 | if (wimax_dev->net_dev->ifindex == ifindex) { | ||
96 | dev_hold(wimax_dev->net_dev); | ||
97 | break; | ||
98 | } | ||
99 | } | ||
100 | if (wimax_dev == NULL) | ||
101 | d_printf(1, NULL, "wimax: no devices found with ifindex %d\n", | ||
102 | ifindex); | ||
103 | spin_unlock(&wimax_id_table_lock); | ||
104 | d_fnend(3, NULL, "(info %p ifindex %d) = %p\n", | ||
105 | info, ifindex, wimax_dev); | ||
106 | return wimax_dev; | ||
107 | } | ||
108 | |||
109 | |||
110 | /* | ||
111 | * wimax_id_table_rm - Remove a generic netlink family ID / wimax_dev mapping | ||
112 | * | ||
113 | * @wimax_dev: WiMAX device descriptor whose mapping to remove from the table | ||
114 | */ | ||
115 | void wimax_id_table_rm(struct wimax_dev *wimax_dev) | ||
116 | { | ||
117 | spin_lock(&wimax_id_table_lock); | ||
118 | list_del_init(&wimax_dev->id_table_node); | ||
119 | spin_unlock(&wimax_id_table_lock); | ||
120 | } | ||
121 | |||
122 | |||
123 | /* | ||
124 | * Release the gennetlink family id / mapping table | ||
125 | * | ||
126 | * On debug, verify that the table is empty upon removal. We want the | ||
127 | * code always compiled, to ensure it doesn't bit rot. It will be | ||
128 | * compiled out if CONFIG_BUG is disabled. | ||
129 | */ | ||
130 | void wimax_id_table_release(void) | ||
131 | { | ||
132 | struct wimax_dev *wimax_dev; | ||
133 | |||
134 | #ifndef CONFIG_BUG | ||
135 | return; | ||
136 | #endif | ||
137 | spin_lock(&wimax_id_table_lock); | ||
138 | list_for_each_entry(wimax_dev, &wimax_id_table, id_table_node) { | ||
139 | printk(KERN_ERR "BUG: %s wimax_dev %p ifindex %d not cleared\n", | ||
140 | __func__, wimax_dev, wimax_dev->net_dev->ifindex); | ||
141 | WARN_ON(1); | ||
142 | } | ||
143 | spin_unlock(&wimax_id_table_lock); | ||
144 | } | ||
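The lookup above hands back a wimax_dev with a reference held on its net_device, which the caller must drop when done (see the comment in wimax_dev_get_by_genl_info()). A minimal sketch of the intended calling pattern; the surrounding handler and its ifindex source are hypothetical.

static int my_op_on_device(struct genl_info *info, int ifindex)
{
	struct wimax_dev *wimax_dev;

	wimax_dev = wimax_dev_get_by_genl_info(info, ifindex);
	if (wimax_dev == NULL)
		return -ENODEV;

	/* ... operate on the device ... */

	dev_put(wimax_dev->net_dev);	/* pairs with the dev_hold() in the lookup */
	return 0;
}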
diff --git a/net/wimax/op-msg.c b/net/wimax/op-msg.c new file mode 100644 index 000000000000..cb3b4ad53683 --- /dev/null +++ b/net/wimax/op-msg.c | |||
@@ -0,0 +1,421 @@ | |||
1 | /* | ||
2 | * Linux WiMAX | ||
3 | * Generic messaging interface between userspace and driver/device | ||
4 | * | ||
5 | * | ||
6 | * Copyright (C) 2007-2008 Intel Corporation <linux-wimax@intel.com> | ||
7 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License version | ||
11 | * 2 as published by the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
21 | * 02110-1301, USA. | ||
22 | * | ||
23 | * | ||
24 | * This implements a direct communication channel between user space and | ||
25 | * the driver/device, by which free form messages can be sent back and | ||
26 | * forth. | ||
27 | * | ||
28 | * This is intended for device-specific features, vendor quirks, etc. | ||
29 | * | ||
30 | * See include/net/wimax.h | ||
31 | * | ||
32 | * GENERIC NETLINK ENCODING AND CAPACITY | ||
33 | * | ||
34 | * A destination "pipe name" is added to each message; it is up to the | ||
35 | * drivers to assign or use those names (if using them at all). | ||
36 | * | ||
37 | * Messages are encoded as a binary netlink attribute using nla_put() | ||
38 | * using type NLA_UNSPEC (as some versions of libnl still in | ||
39 | * deployment don't yet understand NLA_BINARY). | ||
40 | * | ||
41 | * The maximum capacity of this transport is PAGESIZE per message (so | ||
42 | * the actual payload will be a bit smaller depending on the | ||
43 | * netlink/generic netlink attributes and headers). | ||
44 | * | ||
45 | * RECEPTION OF MESSAGES | ||
46 | * | ||
47 | * When a message is received from user space, it is passed verbatim | ||
48 | * to the driver calling wimax_dev->op_msg_from_user(). The return | ||
49 | * value from this function is passed back to user space as an ack | ||
50 | * over the generic netlink protocol. | ||
51 | * | ||
52 | * The stack doesn't do any processing or interpretation of these | ||
53 | * messages. | ||
54 | * | ||
55 | * SENDING MESSAGES | ||
56 | * | ||
57 | * Messages can be sent with wimax_msg(). | ||
58 | * | ||
59 | * If the message delivery needs to happen on a different context to | ||
60 | * that of its creation, wimax_msg_alloc() can be used to get a | ||
61 | * pointer to the message that can be delivered later on with | ||
62 | * wimax_msg_send(). | ||
63 | * | ||
64 | * ROADMAP | ||
65 | * | ||
66 | * wimax_gnl_doit_msg_from_user() Process a message from user space | ||
67 | * wimax_dev_get_by_genl_info() | ||
68 | * wimax_dev->op_msg_from_user() Delivery of message to the driver | ||
69 | * | ||
70 | * wimax_msg() Send a message to user space | ||
71 | * wimax_msg_alloc() | ||
72 | * wimax_msg_send() | ||
73 | */ | ||
74 | #include <linux/device.h> | ||
75 | #include <net/genetlink.h> | ||
76 | #include <linux/netdevice.h> | ||
77 | #include <linux/wimax.h> | ||
78 | #include <linux/security.h> | ||
79 | #include "wimax-internal.h" | ||
80 | |||
81 | |||
82 | #define D_SUBMODULE op_msg | ||
83 | #include "debug-levels.h" | ||
84 | |||
85 | |||
86 | /** | ||
87 | * wimax_msg_alloc - Create a new skb for sending a message to userspace | ||
88 | * | ||
89 | * @wimax_dev: WiMAX device descriptor | ||
90 | * @pipe_name: "named pipe" the message will be sent to | ||
91 | * @msg: pointer to the message data to send | ||
92 | * @size: size of the message to send (in bytes), including the header. | ||
93 | * @gfp_flags: flags for memory allocation. | ||
94 | * | ||
95 | * Returns: pointer to the new skb if ok, ERR_PTR() encoded negative errno on error | ||
96 | * | ||
97 | * Description: | ||
98 | * | ||
99 | * Allocates an skb that will contain the message to send to user | ||
100 | * space over the messaging pipe and initializes it, copying the | ||
101 | * payload. | ||
102 | * | ||
103 | * Once this call is done, you can deliver it with | ||
104 | * wimax_msg_send(). | ||
105 | * | ||
106 | * IMPORTANT: | ||
107 | * | ||
108 | * Don't use skb_push()/skb_pull()/skb_reserve() on the skb, as | ||
109 | * wimax_msg_send() depends on skb->data being placed at the | ||
110 | * beginning of the user message. | ||
111 | */ | ||
112 | struct sk_buff *wimax_msg_alloc(struct wimax_dev *wimax_dev, | ||
113 | const char *pipe_name, | ||
114 | const void *msg, size_t size, | ||
115 | gfp_t gfp_flags) | ||
116 | { | ||
117 | int result; | ||
118 | struct device *dev = wimax_dev->net_dev->dev.parent; | ||
119 | size_t msg_size; | ||
120 | void *genl_msg; | ||
121 | struct sk_buff *skb; | ||
122 | |||
123 | msg_size = nla_total_size(size) | ||
124 | + nla_total_size(sizeof(u32)) | ||
125 | + (pipe_name ? nla_total_size(strlen(pipe_name)) : 0); | ||
126 | result = -ENOMEM; | ||
127 | skb = genlmsg_new(msg_size, gfp_flags); | ||
128 | if (skb == NULL) | ||
129 | goto error_new; | ||
130 | genl_msg = genlmsg_put(skb, 0, 0, &wimax_gnl_family, | ||
131 | 0, WIMAX_GNL_OP_MSG_TO_USER); | ||
132 | if (genl_msg == NULL) { | ||
133 | dev_err(dev, "no memory to create generic netlink message\n"); | ||
134 | goto error_genlmsg_put; | ||
135 | } | ||
136 | result = nla_put_u32(skb, WIMAX_GNL_MSG_IFIDX, | ||
137 | wimax_dev->net_dev->ifindex); | ||
138 | if (result < 0) { | ||
139 | dev_err(dev, "no memory to add ifindex attribute\n"); | ||
140 | goto error_nla_put; | ||
141 | } | ||
142 | if (pipe_name) { | ||
143 | result = nla_put_string(skb, WIMAX_GNL_MSG_PIPE_NAME, | ||
144 | pipe_name); | ||
145 | if (result < 0) { | ||
146 | dev_err(dev, "no memory to add pipe_name attribute\n"); | ||
147 | goto error_nla_put; | ||
148 | } | ||
149 | } | ||
150 | result = nla_put(skb, WIMAX_GNL_MSG_DATA, size, msg); | ||
151 | if (result < 0) { | ||
152 | dev_err(dev, "no memory to add payload in attribute\n"); | ||
153 | goto error_nla_put; | ||
154 | } | ||
155 | genlmsg_end(skb, genl_msg); | ||
156 | return skb; | ||
157 | |||
158 | error_nla_put: | ||
159 | error_genlmsg_put: | ||
160 | error_new: | ||
161 | nlmsg_free(skb); | ||
162 | return ERR_PTR(result); | ||
163 | |||
164 | } | ||
165 | EXPORT_SYMBOL_GPL(wimax_msg_alloc); | ||
166 | |||
167 | |||
168 | /** | ||
169 | * wimax_msg_data_len - Return a pointer and size of a message's payload | ||
170 | * | ||
171 | * @msg: Pointer to a message created with wimax_msg_alloc() | ||
172 | * @size: Pointer to where to store the message's size | ||
173 | * | ||
174 | * Returns the pointer to the message data. | ||
175 | */ | ||
176 | const void *wimax_msg_data_len(struct sk_buff *msg, size_t *size) | ||
177 | { | ||
178 | struct nlmsghdr *nlh = (void *) msg->head; | ||
179 | struct nlattr *nla; | ||
180 | |||
181 | nla = nlmsg_find_attr(nlh, sizeof(struct genlmsghdr), | ||
182 | WIMAX_GNL_MSG_DATA); | ||
183 | if (nla == NULL) { | ||
184 | printk(KERN_ERR "Cannot find attribute WIMAX_GNL_MSG_DATA\n"); | ||
185 | return NULL; | ||
186 | } | ||
187 | *size = nla_len(nla); | ||
188 | return nla_data(nla); | ||
189 | } | ||
190 | EXPORT_SYMBOL_GPL(wimax_msg_data_len); | ||
191 | |||
192 | |||
193 | /** | ||
194 | * wimax_msg_data - Return a pointer to a message's payload | ||
195 | * | ||
196 | * @msg: Pointer to a message created with wimax_msg_alloc() | ||
197 | */ | ||
198 | const void *wimax_msg_data(struct sk_buff *msg) | ||
199 | { | ||
200 | struct nlmsghdr *nlh = (void *) msg->head; | ||
201 | struct nlattr *nla; | ||
202 | |||
203 | nla = nlmsg_find_attr(nlh, sizeof(struct genlmsghdr), | ||
204 | WIMAX_GNL_MSG_DATA); | ||
205 | if (nla == NULL) { | ||
206 | printk(KERN_ERR "Cannot find attribute WIMAX_GNL_MSG_DATA\n"); | ||
207 | return NULL; | ||
208 | } | ||
209 | return nla_data(nla); | ||
210 | } | ||
211 | EXPORT_SYMBOL_GPL(wimax_msg_data); | ||
212 | |||
213 | |||
214 | /** | ||
215 | * wimax_msg_len - Return a message's payload length | ||
216 | * | ||
217 | * @msg: Pointer to a message created with wimax_msg_alloc() | ||
218 | */ | ||
219 | ssize_t wimax_msg_len(struct sk_buff *msg) | ||
220 | { | ||
221 | struct nlmsghdr *nlh = (void *) msg->head; | ||
222 | struct nlattr *nla; | ||
223 | |||
224 | nla = nlmsg_find_attr(nlh, sizeof(struct genlmsghdr), | ||
225 | WIMAX_GNL_MSG_DATA); | ||
226 | if (nla == NULL) { | ||
227 | printk(KERN_ERR "Cannot find attribute WIMAX_GNL_MSG_DATA\n"); | ||
228 | return -EINVAL; | ||
229 | } | ||
230 | return nla_len(nla); | ||
231 | } | ||
232 | EXPORT_SYMBOL_GPL(wimax_msg_len); | ||
233 | |||
234 | |||
235 | /** | ||
236 | * wimax_msg_send - Send a pre-allocated message to user space | ||
237 | * | ||
238 | * @wimax_dev: WiMAX device descriptor | ||
239 | * | ||
240 | * @skb: &struct sk_buff returned by wimax_msg_alloc(). Note the | ||
241 | * ownership of @skb is transferred to this function. | ||
242 | * | ||
243 | * Returns: 0 if ok, < 0 errno code on error | ||
244 | * | ||
245 | * Description: | ||
246 | * | ||
247 | * Sends a free-form message that was preallocated with | ||
248 | * wimax_msg_alloc() and filled up. | ||
249 | * | ||
250 | * Assumes that once you pass an skb to this function for sending, it | ||
251 | * owns it and will release it when done (on success). | ||
252 | * | ||
253 | * IMPORTANT: | ||
254 | * | ||
255 | * Don't use skb_push()/skb_pull()/skb_reserve() on the skb, as | ||
256 | * wimax_msg_send() depends on skb->data being placed at the | ||
257 | * beginning of the user message. | ||
258 | */ | ||
259 | int wimax_msg_send(struct wimax_dev *wimax_dev, struct sk_buff *skb) | ||
260 | { | ||
261 | int result; | ||
262 | struct device *dev = wimax_dev->net_dev->dev.parent; | ||
263 | void *msg = skb->data; | ||
264 | size_t size = skb->len; | ||
265 | might_sleep(); | ||
266 | |||
267 | d_printf(1, dev, "CTX: wimax msg, %zu bytes\n", size); | ||
268 | d_dump(2, dev, msg, size); | ||
269 | result = genlmsg_multicast(skb, 0, wimax_gnl_mcg.id, GFP_KERNEL); | ||
270 | d_printf(1, dev, "CTX: genl multicast result %d\n", result); | ||
271 | if (result == -ESRCH) /* Nobody connected, ignore it */ | ||
272 | result = 0; /* btw, the skb is freed already */ | ||
273 | return result; | ||
274 | } | ||
275 | EXPORT_SYMBOL_GPL(wimax_msg_send); | ||
276 | |||
277 | |||
278 | /** | ||
279 | * wimax_msg - Send a message to user space | ||
280 | * | ||
281 | * @wimax_dev: WiMAX device descriptor (properly referenced) | ||
282 | * @pipe_name: "named pipe" the message will be sent to | ||
283 | * @buf: pointer to the message to send. | ||
284 | * @size: size of the buffer pointed to by @buf (in bytes). | ||
285 | * @gfp_flags: flags for memory allocation. | ||
286 | * | ||
287 | * Returns: %0 if ok, negative errno code on error. | ||
288 | * | ||
289 | * Description: | ||
290 | * | ||
291 | * Sends a free-form message to user space on the device @wimax_dev. | ||
292 | * | ||
293 | * NOTES: | ||
294 | * | ||
295 | * Once the message is passed to this function, the skb built from | ||
296 | * @buf is owned here and released when done (unless an error is returned). | ||
297 | */ | ||
298 | int wimax_msg(struct wimax_dev *wimax_dev, const char *pipe_name, | ||
299 | const void *buf, size_t size, gfp_t gfp_flags) | ||
300 | { | ||
301 | int result = -ENOMEM; | ||
302 | struct sk_buff *skb; | ||
303 | |||
304 | skb = wimax_msg_alloc(wimax_dev, pipe_name, buf, size, gfp_flags); | ||
305 | if (skb == NULL) | ||
306 | goto error_msg_new; | ||
307 | result = wimax_msg_send(wimax_dev, skb); | ||
308 | error_msg_new: | ||
309 | return result; | ||
310 | } | ||
311 | EXPORT_SYMBOL_GPL(wimax_msg); | ||
312 | |||
313 | |||
314 | static const | ||
315 | struct nla_policy wimax_gnl_msg_policy[WIMAX_GNL_ATTR_MAX + 1] = { | ||
316 | [WIMAX_GNL_MSG_IFIDX] = { | ||
317 | .type = NLA_U32, | ||
318 | }, | ||
319 | [WIMAX_GNL_MSG_DATA] = { | ||
320 | .type = NLA_UNSPEC, /* libnl doesn't grok BINARY yet */ | ||
321 | }, | ||
322 | }; | ||
323 | |||
324 | |||
325 | /* | ||
326 | * Relays a message from user space to the driver | ||
327 | * | ||
328 | * The skb is passed to the driver-specific function with the netlink | ||
329 | * and generic netlink headers already stripped. | ||
330 | * | ||
331 | * This call will block while handling/relaying the message. | ||
332 | */ | ||
333 | static | ||
334 | int wimax_gnl_doit_msg_from_user(struct sk_buff *skb, struct genl_info *info) | ||
335 | { | ||
336 | int result, ifindex; | ||
337 | struct wimax_dev *wimax_dev; | ||
338 | struct device *dev; | ||
339 | struct nlmsghdr *nlh = info->nlhdr; | ||
340 | char *pipe_name; | ||
341 | void *msg_buf; | ||
342 | size_t msg_len; | ||
343 | |||
344 | might_sleep(); | ||
345 | d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info); | ||
346 | result = -ENODEV; | ||
347 | if (info->attrs[WIMAX_GNL_MSG_IFIDX] == NULL) { | ||
348 | printk(KERN_ERR "WIMAX_GNL_MSG_FROM_USER: can't find IFIDX " | ||
349 | "attribute\n"); | ||
350 | goto error_no_wimax_dev; | ||
351 | } | ||
352 | ifindex = nla_get_u32(info->attrs[WIMAX_GNL_MSG_IFIDX]); | ||
353 | wimax_dev = wimax_dev_get_by_genl_info(info, ifindex); | ||
354 | if (wimax_dev == NULL) | ||
355 | goto error_no_wimax_dev; | ||
356 | dev = wimax_dev_to_dev(wimax_dev); | ||
357 | |||
358 | /* Unpack arguments */ | ||
359 | result = -EINVAL; | ||
360 | if (info->attrs[WIMAX_GNL_MSG_DATA] == NULL) { | ||
361 | dev_err(dev, "WIMAX_GNL_MSG_FROM_USER: can't find MSG_DATA " | ||
362 | "attribute\n"); | ||
363 | goto error_no_data; | ||
364 | } | ||
365 | msg_buf = nla_data(info->attrs[WIMAX_GNL_MSG_DATA]); | ||
366 | msg_len = nla_len(info->attrs[WIMAX_GNL_MSG_DATA]); | ||
367 | |||
368 | if (info->attrs[WIMAX_GNL_MSG_PIPE_NAME] == NULL) | ||
369 | pipe_name = NULL; | ||
370 | else { | ||
371 | struct nlattr *attr = info->attrs[WIMAX_GNL_MSG_PIPE_NAME]; | ||
372 | size_t attr_len = nla_len(attr); | ||
373 | /* libnl-1.1 does not yet support NLA_NUL_STRING */ | ||
374 | result = -ENOMEM; | ||
375 | pipe_name = kstrndup(nla_data(attr), attr_len + 1, GFP_KERNEL); | ||
376 | if (pipe_name == NULL) | ||
377 | goto error_alloc; | ||
378 | pipe_name[attr_len] = 0; | ||
379 | } | ||
380 | mutex_lock(&wimax_dev->mutex); | ||
381 | result = wimax_dev_is_ready(wimax_dev); | ||
382 | if (result < 0) | ||
383 | goto error_not_ready; | ||
384 | result = -ENOSYS; | ||
385 | if (wimax_dev->op_msg_from_user == NULL) | ||
386 | goto error_noop; | ||
387 | |||
388 | d_printf(1, dev, | ||
389 | "CRX: nlmsghdr len %u type %u flags 0x%04x seq 0x%x pid %u\n", | ||
390 | nlh->nlmsg_len, nlh->nlmsg_type, nlh->nlmsg_flags, | ||
391 | nlh->nlmsg_seq, nlh->nlmsg_pid); | ||
392 | d_printf(1, dev, "CRX: wimax message %zu bytes\n", msg_len); | ||
393 | d_dump(2, dev, msg_buf, msg_len); | ||
394 | |||
395 | result = wimax_dev->op_msg_from_user(wimax_dev, pipe_name, | ||
396 | msg_buf, msg_len, info); | ||
397 | error_noop: | ||
398 | error_not_ready: | ||
399 | mutex_unlock(&wimax_dev->mutex); | ||
400 | error_alloc: | ||
401 | kfree(pipe_name); | ||
402 | error_no_data: | ||
403 | dev_put(wimax_dev->net_dev); | ||
404 | error_no_wimax_dev: | ||
405 | d_fnend(3, NULL, "(skb %p info %p) = %d\n", skb, info, result); | ||
406 | return result; | ||
407 | } | ||
408 | |||
409 | |||
410 | /* | ||
411 | * Generic Netlink glue | ||
412 | */ | ||
413 | |||
414 | struct genl_ops wimax_gnl_msg_from_user = { | ||
415 | .cmd = WIMAX_GNL_OP_MSG_FROM_USER, | ||
416 | .flags = GENL_ADMIN_PERM, | ||
417 | .policy = wimax_gnl_msg_policy, | ||
418 | .doit = wimax_gnl_doit_msg_from_user, | ||
419 | .dumpit = NULL, | ||
420 | }; | ||
421 | |||
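
The op-msg.c interface above composes into a simple driver-side pattern. The following sketch is not part of the patch: it shows how a hypothetical driver (the "mydrv" helpers and the "mydrv-events" pipe name are made up) might deliver a report to user space, either in one call with wimax_msg() or with the wimax_msg_alloc()/wimax_msg_send() pair when the message is staged ahead of time.

/*
 * Illustrative sketch only -- not part of this patch. "mydrv" and the
 * "mydrv-events" pipe name are hypothetical.
 */
#include <linux/err.h>
#include <linux/skbuff.h>
#include <net/wimax.h>

/* Push a device report to user space in one shot. */
static int mydrv_report_event(struct wimax_dev *wimax_dev,
			      const void *event, size_t size)
{
	return wimax_msg(wimax_dev, "mydrv-events", event, size, GFP_KERNEL);
}

/* Same thing, but staging the message before delivering it. */
static int mydrv_report_event_prealloc(struct wimax_dev *wimax_dev,
				       const void *event, size_t size)
{
	struct sk_buff *skb;

	skb = wimax_msg_alloc(wimax_dev, "mydrv-events", event, size,
			      GFP_KERNEL);
	if (IS_ERR(skb))
		return PTR_ERR(skb);
	/* wimax_msg_send() takes ownership of skb from here on. */
	return wimax_msg_send(wimax_dev, skb);
}
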
diff --git a/net/wimax/op-reset.c b/net/wimax/op-reset.c new file mode 100644 index 000000000000..ca269178c4d4 --- /dev/null +++ b/net/wimax/op-reset.c | |||
@@ -0,0 +1,143 @@ | |||
1 | /* | ||
2 | * Linux WiMAX | ||
3 | * Implement and export a method for resetting a WiMAX device | ||
4 | * | ||
5 | * | ||
6 | * Copyright (C) 2008 Intel Corporation <linux-wimax@intel.com> | ||
7 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License version | ||
11 | * 2 as published by the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
21 | * 02110-1301, USA. | ||
22 | * | ||
23 | * | ||
24 | * This implements a simple synchronous call to reset a WiMAX device. | ||
25 | * | ||
26 | * Resets aim at being warm, keeping the device handles active; | ||
27 | * however, when that fails, it falls back to a cold reset (that will | ||
28 | * disconnect and reconnect the device). | ||
29 | */ | ||
30 | |||
31 | #include <net/wimax.h> | ||
32 | #include <net/genetlink.h> | ||
33 | #include <linux/wimax.h> | ||
34 | #include <linux/security.h> | ||
35 | #include "wimax-internal.h" | ||
36 | |||
37 | #define D_SUBMODULE op_reset | ||
38 | #include "debug-levels.h" | ||
39 | |||
40 | |||
41 | /** | ||
42 | * wimax_reset - Reset a WiMAX device | ||
43 | * | ||
44 | * @wimax_dev: WiMAX device descriptor | ||
45 | * | ||
46 | * Returns: | ||
47 | * | ||
48 | * %0 if ok and a warm reset was done (the device still exists in | ||
49 | * the system). | ||
50 | * | ||
51 | * -%ENODEV if a cold/bus reset had to be done (device has | ||
52 | * disconnected and reconnected, so current handle is not valid | ||
53 | * any more). | ||
54 | * | ||
55 | * -%EINVAL if the device is not even registered. | ||
56 | * | ||
57 | * Any other negative error code shall be considered as | ||
58 | * non-recoverable. | ||
59 | * | ||
60 | * Description: | ||
61 | * | ||
62 | * Called when wanting to reset the device for any reason. Device is | ||
63 | * taken back to power on status. | ||
64 | * | ||
65 | * This call blocks; on successful return, the device has completed the | ||
66 | * reset process and is ready to operate. | ||
67 | */ | ||
68 | int wimax_reset(struct wimax_dev *wimax_dev) | ||
69 | { | ||
70 | int result = -EINVAL; | ||
71 | struct device *dev = wimax_dev_to_dev(wimax_dev); | ||
72 | enum wimax_st state; | ||
73 | |||
74 | might_sleep(); | ||
75 | d_fnstart(3, dev, "(wimax_dev %p)\n", wimax_dev); | ||
76 | mutex_lock(&wimax_dev->mutex); | ||
77 | dev_hold(wimax_dev->net_dev); | ||
78 | state = wimax_dev->state; | ||
79 | mutex_unlock(&wimax_dev->mutex); | ||
80 | |||
81 | if (state >= WIMAX_ST_DOWN) { | ||
82 | mutex_lock(&wimax_dev->mutex_reset); | ||
83 | result = wimax_dev->op_reset(wimax_dev); | ||
84 | mutex_unlock(&wimax_dev->mutex_reset); | ||
85 | } | ||
86 | dev_put(wimax_dev->net_dev); | ||
87 | |||
88 | d_fnend(3, dev, "(wimax_dev %p) = %d\n", wimax_dev, result); | ||
89 | return result; | ||
90 | } | ||
91 | EXPORT_SYMBOL(wimax_reset); | ||
92 | |||
93 | |||
94 | static const | ||
95 | struct nla_policy wimax_gnl_reset_policy[WIMAX_GNL_ATTR_MAX + 1] = { | ||
96 | [WIMAX_GNL_RESET_IFIDX] = { | ||
97 | .type = NLA_U32, | ||
98 | }, | ||
99 | }; | ||
100 | |||
101 | |||
102 | /* | ||
103 | * Exporting to user space over generic netlink | ||
104 | * | ||
105 | * Parse the reset command from user space, return error code. | ||
106 | * | ||
107 | * No attributes. | ||
108 | */ | ||
109 | static | ||
110 | int wimax_gnl_doit_reset(struct sk_buff *skb, struct genl_info *info) | ||
111 | { | ||
112 | int result, ifindex; | ||
113 | struct wimax_dev *wimax_dev; | ||
114 | struct device *dev; | ||
115 | |||
116 | d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info); | ||
117 | result = -ENODEV; | ||
118 | if (info->attrs[WIMAX_GNL_RESET_IFIDX] == NULL) { | ||
119 | printk(KERN_ERR "WIMAX_GNL_OP_RFKILL: can't find IFIDX " | ||
120 | "attribute\n"); | ||
121 | goto error_no_wimax_dev; | ||
122 | } | ||
123 | ifindex = nla_get_u32(info->attrs[WIMAX_GNL_RESET_IFIDX]); | ||
124 | wimax_dev = wimax_dev_get_by_genl_info(info, ifindex); | ||
125 | if (wimax_dev == NULL) | ||
126 | goto error_no_wimax_dev; | ||
127 | dev = wimax_dev_to_dev(wimax_dev); | ||
128 | /* Execute the operation and send the result back to user space */ | ||
129 | result = wimax_reset(wimax_dev); | ||
130 | dev_put(wimax_dev->net_dev); | ||
131 | error_no_wimax_dev: | ||
132 | d_fnend(3, NULL, "(skb %p info %p) = %d\n", skb, info, result); | ||
133 | return result; | ||
134 | } | ||
135 | |||
136 | |||
137 | struct genl_ops wimax_gnl_reset = { | ||
138 | .cmd = WIMAX_GNL_OP_RESET, | ||
139 | .flags = GENL_ADMIN_PERM, | ||
140 | .policy = wimax_gnl_reset_policy, | ||
141 | .doit = wimax_gnl_doit_reset, | ||
142 | .dumpit = NULL, | ||
143 | }; | ||
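
As a usage note, the return codes of wimax_reset() distinguish a warm reset (0, handles still valid) from a cold/bus reset (-ENODEV, device re-enumerated). Below is a hedged sketch of a hypothetical in-driver recovery helper; it is not part of the patch.

/* Sketch only; mydrv_recover() is a hypothetical driver helper. */
#include <net/wimax.h>

static int mydrv_recover(struct wimax_dev *wimax_dev)
{
	int result = wimax_reset(wimax_dev);	/* blocks until the reset completes */

	if (result == -ENODEV) {
		/*
		 * Cold/bus reset: the device disconnected and reconnected,
		 * so the current handles are stale; let the reprobe path
		 * rebuild the state instead of retrying here.
		 */
		return 0;
	}
	return result;	/* 0: warm reset done; other negatives: non-recoverable */
}
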
diff --git a/net/wimax/op-rfkill.c b/net/wimax/op-rfkill.c new file mode 100644 index 000000000000..2b75aee04217 --- /dev/null +++ b/net/wimax/op-rfkill.c | |||
@@ -0,0 +1,532 @@ | |||
1 | /* | ||
2 | * Linux WiMAX | ||
3 | * RF-kill framework integration | ||
4 | * | ||
5 | * | ||
6 | * Copyright (C) 2008 Intel Corporation <linux-wimax@intel.com> | ||
7 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License version | ||
11 | * 2 as published by the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
21 | * 02110-1301, USA. | ||
22 | * | ||
23 | * | ||
24 | * This integrates into the Linux Kernel rfkill subsystem so that the | ||
25 | * drivers just have to do the bare minimal work, which is providing a | ||
26 | * method to set the software RF-Kill switch and to report changes in | ||
27 | * the software and hardware switch status. | ||
28 | * | ||
29 | * A non-polled generic rfkill device is embedded into the WiMAX | ||
30 | * subsystem's representation of a device. | ||
31 | * | ||
32 | * FIXME: Need polled support? use a timer or add the implementation | ||
33 | * to the stack. | ||
34 | * | ||
35 | * All device drivers have to do is after wimax_dev_init(), call | ||
36 | * wimax_report_rfkill_hw() and wimax_report_rfkill_sw() to update | ||
37 | * initial state and then every time it changes. See wimax.h:struct | ||
38 | * wimax_dev for more information. | ||
39 | * | ||
40 | * ROADMAP | ||
41 | * | ||
42 | * wimax_gnl_doit_rfkill() User space calling wimax_rfkill() | ||
43 | * wimax_rfkill() Kernel calling wimax_rfkill() | ||
44 | * __wimax_rf_toggle_radio() | ||
45 | * | ||
46 | * wimax_rfkill_toggle_radio() RF-Kill subsystem calling | ||
47 | * __wimax_rf_toggle_radio() | ||
48 | * | ||
49 | * __wimax_rf_toggle_radio() | ||
50 | * wimax_dev->op_rfkill_sw_toggle() Driver backend | ||
51 | * __wimax_state_change() | ||
52 | * | ||
53 | * wimax_report_rfkill_sw() Driver reports state change | ||
54 | * __wimax_state_change() | ||
55 | * | ||
56 | * wimax_report_rfkill_hw() Driver reports state change | ||
57 | * __wimax_state_change() | ||
58 | * | ||
59 | * wimax_rfkill_add() Initialize/shutdown rfkill support | ||
60 | * wimax_rfkill_rm() [called by wimax_dev_add/rm()] | ||
61 | */ | ||
62 | |||
63 | #include <net/wimax.h> | ||
64 | #include <net/genetlink.h> | ||
65 | #include <linux/wimax.h> | ||
66 | #include <linux/security.h> | ||
67 | #include <linux/rfkill.h> | ||
68 | #include <linux/input.h> | ||
69 | #include "wimax-internal.h" | ||
70 | |||
71 | #define D_SUBMODULE op_rfkill | ||
72 | #include "debug-levels.h" | ||
73 | |||
74 | #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE) | ||
75 | |||
76 | |||
77 | /** | ||
78 | * wimax_report_rfkill_hw - Reports changes in the hardware RF switch | ||
79 | * | ||
80 | * @wimax_dev: WiMAX device descriptor | ||
81 | * | ||
82 | * @state: New state of the RF Kill switch. %WIMAX_RF_ON radio on, | ||
83 | * %WIMAX_RF_OFF radio off. | ||
84 | * | ||
85 | * When the device detects a change in the state of the hardware RF | ||
86 | * switch, it must call this function to let the WiMAX kernel stack | ||
87 | * know that the state has changed so it can be properly propagated. | ||
88 | * | ||
89 | * The WiMAX stack caches the state (the driver doesn't need to). As | ||
90 | * well, as the change is propagated it will come back as a request to | ||
91 | * change the software state to mirror the hardware state. | ||
92 | * | ||
93 | * If the device doesn't have a hardware kill switch, just report | ||
94 | * it on initialization as always on (%WIMAX_RF_ON, radio on). | ||
95 | */ | ||
96 | void wimax_report_rfkill_hw(struct wimax_dev *wimax_dev, | ||
97 | enum wimax_rf_state state) | ||
98 | { | ||
99 | int result; | ||
100 | struct device *dev = wimax_dev_to_dev(wimax_dev); | ||
101 | enum wimax_st wimax_state; | ||
102 | enum rfkill_state rfkill_state; | ||
103 | |||
104 | d_fnstart(3, dev, "(wimax_dev %p state %u)\n", wimax_dev, state); | ||
105 | BUG_ON(state == WIMAX_RF_QUERY); | ||
106 | BUG_ON(state != WIMAX_RF_ON && state != WIMAX_RF_OFF); | ||
107 | |||
108 | mutex_lock(&wimax_dev->mutex); | ||
109 | result = wimax_dev_is_ready(wimax_dev); | ||
110 | if (result < 0) | ||
111 | goto error_not_ready; | ||
112 | |||
113 | if (state != wimax_dev->rf_hw) { | ||
114 | wimax_dev->rf_hw = state; | ||
115 | rfkill_state = state == WIMAX_RF_ON ? | ||
116 | RFKILL_STATE_OFF : RFKILL_STATE_ON; | ||
117 | if (wimax_dev->rf_hw == WIMAX_RF_ON | ||
118 | && wimax_dev->rf_sw == WIMAX_RF_ON) | ||
119 | wimax_state = WIMAX_ST_READY; | ||
120 | else | ||
121 | wimax_state = WIMAX_ST_RADIO_OFF; | ||
122 | __wimax_state_change(wimax_dev, wimax_state); | ||
123 | input_report_key(wimax_dev->rfkill_input, KEY_WIMAX, | ||
124 | rfkill_state); | ||
125 | } | ||
126 | error_not_ready: | ||
127 | mutex_unlock(&wimax_dev->mutex); | ||
128 | d_fnend(3, dev, "(wimax_dev %p state %u) = void [%d]\n", | ||
129 | wimax_dev, state, result); | ||
130 | } | ||
131 | EXPORT_SYMBOL_GPL(wimax_report_rfkill_hw); | ||
132 | |||
133 | |||
134 | /** | ||
135 | * wimax_report_rfkill_sw - Reports changes in the software RF switch | ||
136 | * | ||
137 | * @wimax_dev: WiMAX device descriptor | ||
138 | * | ||
139 | * @state: New state of the RF kill switch. %WIMAX_RF_ON radio on, | ||
140 | * %WIMAX_RF_OFF radio off. | ||
141 | * | ||
142 | * Reports changes in the software RF switch state to the WiMAX | ||
143 | * stack. | ||
144 | * | ||
145 | * The main use is during initialization, so the driver can query the | ||
146 | * device for its current software radio kill switch state and feed it | ||
147 | * to the system. | ||
148 | * | ||
149 | * In principle the device is not expected to change the software | ||
150 | * state by itself; in practice it can happen, as the device might | ||
151 | * decide to switch (in software) the radio off for different reasons. | ||
152 | */ | ||
153 | void wimax_report_rfkill_sw(struct wimax_dev *wimax_dev, | ||
154 | enum wimax_rf_state state) | ||
155 | { | ||
156 | int result; | ||
157 | struct device *dev = wimax_dev_to_dev(wimax_dev); | ||
158 | enum wimax_st wimax_state; | ||
159 | |||
160 | d_fnstart(3, dev, "(wimax_dev %p state %u)\n", wimax_dev, state); | ||
161 | BUG_ON(state == WIMAX_RF_QUERY); | ||
162 | BUG_ON(state != WIMAX_RF_ON && state != WIMAX_RF_OFF); | ||
163 | |||
164 | mutex_lock(&wimax_dev->mutex); | ||
165 | result = wimax_dev_is_ready(wimax_dev); | ||
166 | if (result < 0) | ||
167 | goto error_not_ready; | ||
168 | |||
169 | if (state != wimax_dev->rf_sw) { | ||
170 | wimax_dev->rf_sw = state; | ||
171 | if (wimax_dev->rf_hw == WIMAX_RF_ON | ||
172 | && wimax_dev->rf_sw == WIMAX_RF_ON) | ||
173 | wimax_state = WIMAX_ST_READY; | ||
174 | else | ||
175 | wimax_state = WIMAX_ST_RADIO_OFF; | ||
176 | __wimax_state_change(wimax_dev, wimax_state); | ||
177 | } | ||
178 | error_not_ready: | ||
179 | mutex_unlock(&wimax_dev->mutex); | ||
180 | d_fnend(3, dev, "(wimax_dev %p state %u) = void [%d]\n", | ||
181 | wimax_dev, state, result); | ||
182 | } | ||
183 | EXPORT_SYMBOL_GPL(wimax_report_rfkill_sw); | ||
184 | |||
185 | |||
186 | /* | ||
187 | * Callback for the RF Kill toggle operation | ||
188 | * | ||
189 | * This function is called by: | ||
190 | * | ||
191 | * - The rfkill subsystem when the RF-Kill key is pressed in the | ||
192 | * hardware and the driver notifies through | ||
193 | * wimax_report_rfkill_hw(). The rfkill subsystem ends up calling back | ||
194 | * here so the software RF Kill switch state is changed to reflect | ||
195 | * the hardware switch state. | ||
196 | * | ||
197 | * - When the user sets the state through sysfs' rfkill/state file | ||
198 | * | ||
199 | * - When the user calls wimax_rfkill(). | ||
200 | * | ||
201 | * This call blocks! | ||
202 | * | ||
203 | * WARNING! When we call rfkill_unregister(), this will be called with | ||
204 | * state 0! | ||
205 | * | ||
206 | * WARNING: wimax_dev must be locked | ||
207 | */ | ||
208 | static | ||
209 | int __wimax_rf_toggle_radio(struct wimax_dev *wimax_dev, | ||
210 | enum wimax_rf_state state) | ||
211 | { | ||
212 | int result = 0; | ||
213 | struct device *dev = wimax_dev_to_dev(wimax_dev); | ||
214 | enum wimax_st wimax_state; | ||
215 | |||
216 | might_sleep(); | ||
217 | d_fnstart(3, dev, "(wimax_dev %p state %u)\n", wimax_dev, state); | ||
218 | if (wimax_dev->rf_sw == state) | ||
219 | goto out_no_change; | ||
220 | if (wimax_dev->op_rfkill_sw_toggle != NULL) | ||
221 | result = wimax_dev->op_rfkill_sw_toggle(wimax_dev, state); | ||
222 | else if (state == WIMAX_RF_OFF) /* No op? can't turn off */ | ||
223 | result = -ENXIO; | ||
224 | else /* No op? can turn on */ | ||
225 | result = 0; /* should never happen tho */ | ||
226 | if (result >= 0) { | ||
227 | result = 0; | ||
228 | wimax_dev->rf_sw = state; | ||
229 | wimax_state = state == WIMAX_RF_ON ? | ||
230 | WIMAX_ST_READY : WIMAX_ST_RADIO_OFF; | ||
231 | __wimax_state_change(wimax_dev, wimax_state); | ||
232 | } | ||
233 | out_no_change: | ||
234 | d_fnend(3, dev, "(wimax_dev %p state %u) = %d\n", | ||
235 | wimax_dev, state, result); | ||
236 | return result; | ||
237 | } | ||
238 | |||
239 | |||
240 | /* | ||
241 | * Translate from rfkill state to wimax state | ||
242 | * | ||
243 | * NOTE: Special state handling rules here | ||
244 | * | ||
245 | * Just pretend the call didn't happen if we are in a state where | ||
246 | * we know for sure it cannot be handled (WIMAX_ST_DOWN or | ||
247 | * __WIMAX_ST_QUIESCING). rfkill() needs it to register and | ||
248 | * unregister, as it will run this path. | ||
249 | * | ||
250 | * NOTE: This call will block until the operation is completed. | ||
251 | */ | ||
252 | static | ||
253 | int wimax_rfkill_toggle_radio(void *data, enum rfkill_state state) | ||
254 | { | ||
255 | int result; | ||
256 | struct wimax_dev *wimax_dev = data; | ||
257 | struct device *dev = wimax_dev_to_dev(wimax_dev); | ||
258 | enum wimax_rf_state rf_state; | ||
259 | |||
260 | d_fnstart(3, dev, "(wimax_dev %p state %u)\n", wimax_dev, state); | ||
261 | switch (state) { | ||
262 | case RFKILL_STATE_ON: | ||
263 | rf_state = WIMAX_RF_OFF; | ||
264 | break; | ||
265 | case RFKILL_STATE_OFF: | ||
266 | rf_state = WIMAX_RF_ON; | ||
267 | break; | ||
268 | default: | ||
269 | BUG(); | ||
270 | } | ||
271 | mutex_lock(&wimax_dev->mutex); | ||
272 | if (wimax_dev->state <= __WIMAX_ST_QUIESCING) | ||
273 | result = 0; /* just pretend it didn't happen */ | ||
274 | else | ||
275 | result = __wimax_rf_toggle_radio(wimax_dev, rf_state); | ||
276 | mutex_unlock(&wimax_dev->mutex); | ||
277 | d_fnend(3, dev, "(wimax_dev %p state %u) = %d\n", | ||
278 | wimax_dev, state, result); | ||
279 | return result; | ||
280 | } | ||
281 | |||
282 | |||
283 | /** | ||
284 | * wimax_rfkill - Set the software RF switch state for a WiMAX device | ||
285 | * | ||
286 | * @wimax_dev: WiMAX device descriptor | ||
287 | * | ||
288 | * @state: New RF state. | ||
289 | * | ||
290 | * Returns: | ||
291 | * | ||
292 | * >= 0 toggle state if ok, < 0 errno code on error. The toggle state | ||
293 | * is returned as a bitmap, bit 0 being the hardware RF state, bit 1 | ||
294 | * the software RF state. | ||
295 | * | ||
296 | * 0 means disabled (%WIMAX_RF_ON, radio on), 1 means enabled | ||
297 | * (%WIMAX_RF_OFF, radio off). | ||
298 | * | ||
299 | * Description: | ||
300 | * | ||
301 | * Called when the user wants to request the WiMAX radio to be | ||
302 | * switched on (%WIMAX_RF_ON) or off (%WIMAX_RF_OFF). With | ||
303 | * %WIMAX_RF_QUERY, just the current state is returned. | ||
304 | * | ||
305 | * NOTE: | ||
306 | * | ||
307 | * This call will block until the operation is complete. | ||
308 | */ | ||
309 | int wimax_rfkill(struct wimax_dev *wimax_dev, enum wimax_rf_state state) | ||
310 | { | ||
311 | int result; | ||
312 | struct device *dev = wimax_dev_to_dev(wimax_dev); | ||
313 | |||
314 | d_fnstart(3, dev, "(wimax_dev %p state %u)\n", wimax_dev, state); | ||
315 | mutex_lock(&wimax_dev->mutex); | ||
316 | result = wimax_dev_is_ready(wimax_dev); | ||
317 | if (result < 0) | ||
318 | goto error_not_ready; | ||
319 | switch (state) { | ||
320 | case WIMAX_RF_ON: | ||
321 | case WIMAX_RF_OFF: | ||
322 | result = __wimax_rf_toggle_radio(wimax_dev, state); | ||
323 | if (result < 0) | ||
324 | goto error; | ||
325 | break; | ||
326 | case WIMAX_RF_QUERY: | ||
327 | break; | ||
328 | default: | ||
329 | result = -EINVAL; | ||
330 | goto error; | ||
331 | } | ||
332 | result = wimax_dev->rf_sw << 1 | wimax_dev->rf_hw; | ||
333 | error: | ||
334 | error_not_ready: | ||
335 | mutex_unlock(&wimax_dev->mutex); | ||
336 | d_fnend(3, dev, "(wimax_dev %p state %u) = %d\n", | ||
337 | wimax_dev, state, result); | ||
338 | return result; | ||
339 | } | ||
340 | EXPORT_SYMBOL(wimax_rfkill); | ||
341 | |||
342 | |||
343 | /* | ||
344 | * Register a new WiMAX device's RF Kill support | ||
345 | * | ||
346 | * WARNING: wimax_dev->mutex must be unlocked | ||
347 | */ | ||
348 | int wimax_rfkill_add(struct wimax_dev *wimax_dev) | ||
349 | { | ||
350 | int result; | ||
351 | struct rfkill *rfkill; | ||
352 | struct input_dev *input_dev; | ||
353 | struct device *dev = wimax_dev_to_dev(wimax_dev); | ||
354 | |||
355 | d_fnstart(3, dev, "(wimax_dev %p)\n", wimax_dev); | ||
356 | /* Initialize RF Kill */ | ||
357 | result = -ENOMEM; | ||
358 | rfkill = rfkill_allocate(dev, RFKILL_TYPE_WIMAX); | ||
359 | if (rfkill == NULL) | ||
360 | goto error_rfkill_allocate; | ||
361 | wimax_dev->rfkill = rfkill; | ||
362 | |||
363 | rfkill->name = wimax_dev->name; | ||
364 | rfkill->state = RFKILL_STATE_OFF; | ||
365 | rfkill->data = wimax_dev; | ||
366 | rfkill->toggle_radio = wimax_rfkill_toggle_radio; | ||
367 | rfkill->user_claim_unsupported = 1; | ||
368 | |||
369 | /* Initialize the input device for the hw key */ | ||
370 | input_dev = input_allocate_device(); | ||
371 | if (input_dev == NULL) | ||
372 | goto error_input_allocate; | ||
373 | wimax_dev->rfkill_input = input_dev; | ||
374 | d_printf(1, dev, "rfkill %p input %p\n", rfkill, input_dev); | ||
375 | |||
376 | input_dev->name = wimax_dev->name; | ||
377 | /* FIXME: get a real device bus ID and stuff? do we care? */ | ||
378 | input_dev->id.bustype = BUS_HOST; | ||
379 | input_dev->id.vendor = 0xffff; | ||
380 | input_dev->evbit[0] = BIT(EV_KEY); | ||
381 | set_bit(KEY_WIMAX, input_dev->keybit); | ||
382 | |||
383 | /* Register both */ | ||
384 | result = input_register_device(wimax_dev->rfkill_input); | ||
385 | if (result < 0) | ||
386 | goto error_input_register; | ||
387 | result = rfkill_register(wimax_dev->rfkill); | ||
388 | if (result < 0) | ||
389 | goto error_rfkill_register; | ||
390 | |||
391 | /* If there is no SW toggle op, SW RFKill is always on */ | ||
392 | if (wimax_dev->op_rfkill_sw_toggle == NULL) | ||
393 | wimax_dev->rf_sw = WIMAX_RF_ON; | ||
394 | |||
395 | d_fnend(3, dev, "(wimax_dev %p) = 0\n", wimax_dev); | ||
396 | return 0; | ||
397 | |||
398 | /* if rfkill_register() succeeds, can't use rfkill_free() any | ||
399 | * more, only rfkill_unregister() [it owns the refcount]; with | ||
400 | * the input device we have the same issue--hence the if. */ | ||
401 | error_rfkill_register: | ||
402 | input_unregister_device(wimax_dev->rfkill_input); | ||
403 | wimax_dev->rfkill_input = NULL; | ||
404 | error_input_register: | ||
405 | if (wimax_dev->rfkill_input) | ||
406 | input_free_device(wimax_dev->rfkill_input); | ||
407 | error_input_allocate: | ||
408 | rfkill_free(wimax_dev->rfkill); | ||
409 | error_rfkill_allocate: | ||
410 | d_fnend(3, dev, "(wimax_dev %p) = %d\n", wimax_dev, result); | ||
411 | return result; | ||
412 | } | ||
413 | |||
414 | |||
415 | /* | ||
416 | * Deregister a WiMAX device's RF Kill support | ||
417 | * | ||
418 | * Ick, we can't call rfkill_free() after rfkill_unregister()...oh | ||
419 | * well. | ||
420 | * | ||
421 | * WARNING: wimax_dev->mutex must be unlocked | ||
422 | */ | ||
423 | void wimax_rfkill_rm(struct wimax_dev *wimax_dev) | ||
424 | { | ||
425 | struct device *dev = wimax_dev_to_dev(wimax_dev); | ||
426 | d_fnstart(3, dev, "(wimax_dev %p)\n", wimax_dev); | ||
427 | rfkill_unregister(wimax_dev->rfkill); /* frees */ | ||
428 | input_unregister_device(wimax_dev->rfkill_input); | ||
429 | d_fnend(3, dev, "(wimax_dev %p)\n", wimax_dev); | ||
430 | } | ||
431 | |||
432 | |||
433 | #else /* #ifdef CONFIG_RFKILL */ | ||
434 | |||
435 | void wimax_report_rfkill_hw(struct wimax_dev *wimax_dev, | ||
436 | enum wimax_rf_state state) | ||
437 | { | ||
438 | } | ||
439 | EXPORT_SYMBOL_GPL(wimax_report_rfkill_hw); | ||
440 | |||
441 | void wimax_report_rfkill_sw(struct wimax_dev *wimax_dev, | ||
442 | enum wimax_rf_state state) | ||
443 | { | ||
444 | } | ||
445 | EXPORT_SYMBOL_GPL(wimax_report_rfkill_sw); | ||
446 | |||
447 | int wimax_rfkill(struct wimax_dev *wimax_dev, | ||
448 | enum wimax_rf_state state) | ||
449 | { | ||
450 | return WIMAX_RF_ON << 1 | WIMAX_RF_ON; | ||
451 | } | ||
452 | EXPORT_SYMBOL_GPL(wimax_rfkill); | ||
453 | |||
454 | int wimax_rfkill_add(struct wimax_dev *wimax_dev) | ||
455 | { | ||
456 | return 0; | ||
457 | } | ||
458 | |||
459 | void wimax_rfkill_rm(struct wimax_dev *wimax_dev) | ||
460 | { | ||
461 | } | ||
462 | |||
463 | #endif /* #ifdef CONFIG_RFKILL */ | ||
464 | |||
465 | |||
466 | /* | ||
467 | * Exporting to user space over generic netlink | ||
468 | * | ||
469 | * Parse the rfkill command from user space, return a combination | ||
470 | * value that describe the states of the different toggles. | ||
471 | * | ||
472 | * Only one attribute: the new state requested (on, off or no change, | ||
473 | * just query). | ||
474 | */ | ||
475 | |||
476 | static const | ||
477 | struct nla_policy wimax_gnl_rfkill_policy[WIMAX_GNL_ATTR_MAX + 1] = { | ||
478 | [WIMAX_GNL_RFKILL_IFIDX] = { | ||
479 | .type = NLA_U32, | ||
480 | }, | ||
481 | [WIMAX_GNL_RFKILL_STATE] = { | ||
482 | .type = NLA_U32 /* enum wimax_rf_state */ | ||
483 | }, | ||
484 | }; | ||
485 | |||
486 | |||
487 | static | ||
488 | int wimax_gnl_doit_rfkill(struct sk_buff *skb, struct genl_info *info) | ||
489 | { | ||
490 | int result, ifindex; | ||
491 | struct wimax_dev *wimax_dev; | ||
492 | struct device *dev; | ||
493 | enum wimax_rf_state new_state; | ||
494 | |||
495 | d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info); | ||
496 | result = -ENODEV; | ||
497 | if (info->attrs[WIMAX_GNL_RFKILL_IFIDX] == NULL) { | ||
498 | printk(KERN_ERR "WIMAX_GNL_OP_RFKILL: can't find IFIDX " | ||
499 | "attribute\n"); | ||
500 | goto error_no_wimax_dev; | ||
501 | } | ||
502 | ifindex = nla_get_u32(info->attrs[WIMAX_GNL_RFKILL_IFIDX]); | ||
503 | wimax_dev = wimax_dev_get_by_genl_info(info, ifindex); | ||
504 | if (wimax_dev == NULL) | ||
505 | goto error_no_wimax_dev; | ||
506 | dev = wimax_dev_to_dev(wimax_dev); | ||
507 | result = -EINVAL; | ||
508 | if (info->attrs[WIMAX_GNL_RFKILL_STATE] == NULL) { | ||
509 | dev_err(dev, "WIMAX_GNL_RFKILL: can't find RFKILL_STATE " | ||
510 | "attribute\n"); | ||
511 | goto error_no_pid; | ||
512 | } | ||
513 | new_state = nla_get_u32(info->attrs[WIMAX_GNL_RFKILL_STATE]); | ||
514 | |||
515 | /* Execute the operation and send the result back to user space */ | ||
516 | result = wimax_rfkill(wimax_dev, new_state); | ||
517 | error_no_pid: | ||
518 | dev_put(wimax_dev->net_dev); | ||
519 | error_no_wimax_dev: | ||
520 | d_fnend(3, NULL, "(skb %p info %p) = %d\n", skb, info, result); | ||
521 | return result; | ||
522 | } | ||
523 | |||
524 | |||
525 | struct genl_ops wimax_gnl_rfkill = { | ||
526 | .cmd = WIMAX_GNL_OP_RFKILL, | ||
527 | .flags = GENL_ADMIN_PERM, | ||
528 | .policy = wimax_gnl_rfkill_policy, | ||
529 | .doit = wimax_gnl_doit_rfkill, | ||
530 | .dumpit = NULL, | ||
531 | }; | ||
532 | |||
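
On the driver side of this rfkill glue, a device reports its initial switch states right after wimax_dev_init(), and any kernel caller can query the combined state with %WIMAX_RF_QUERY. The sketch below is not part of the patch; the "mydrv" helpers are hypothetical, and the bitmap encoding is the one described in the wimax_rfkill() kernel-doc above.

/* Sketch only; the "mydrv" helpers are hypothetical. */
#include <linux/kernel.h>
#include <net/wimax.h>

/* Feed the initial switch states to the stack right after wimax_dev_init(). */
static void mydrv_rfkill_setup(struct wimax_dev *wimax_dev, bool hw_killed)
{
	/* A device without a hardware kill switch just reports always-on. */
	wimax_report_rfkill_hw(wimax_dev,
			       hw_killed ? WIMAX_RF_OFF : WIMAX_RF_ON);
	wimax_report_rfkill_sw(wimax_dev, WIMAX_RF_ON);
}

/* Read back the combined state without changing anything. */
static void mydrv_rfkill_log(struct wimax_dev *wimax_dev)
{
	int states = wimax_rfkill(wimax_dev, WIMAX_RF_QUERY);

	if (states < 0)
		return;
	/* bit 0: hardware switch, bit 1: software switch (see kernel-doc above) */
	pr_info("rfkill hw bit %d, sw bit %d\n", states & 1, (states >> 1) & 1);
}
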
diff --git a/net/wimax/stack.c b/net/wimax/stack.c new file mode 100644 index 000000000000..d4da92f8981a --- /dev/null +++ b/net/wimax/stack.c | |||
@@ -0,0 +1,599 @@ | |||
1 | /* | ||
2 | * Linux WiMAX | ||
3 | * Initialization, addition and removal of wimax devices | ||
4 | * | ||
5 | * | ||
6 | * Copyright (C) 2005-2006 Intel Corporation <linux-wimax@intel.com> | ||
7 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License version | ||
11 | * 2 as published by the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
21 | * 02110-1301, USA. | ||
22 | * | ||
23 | * | ||
24 | * This implements: | ||
25 | * | ||
26 | * - basic life cycle of 'struct wimax_dev' [wimax_dev_*()]; on | ||
27 | * addition/registration initialize all subfields and allocate | ||
28 | * generic netlink resources for user space communication. On | ||
29 | * removal/unregistration, undo all that. | ||
30 | * | ||
31 | * - device state machine [wimax_state_change()] and support to send | ||
32 | * reports to user space when the state changes | ||
33 | * [wimax_gnl_re_state_change*()]. | ||
34 | * | ||
35 | * See include/net/wimax.h for rationales and design. | ||
36 | * | ||
37 | * ROADMAP | ||
38 | * | ||
39 | * [__]wimax_state_change() Called by drivers to update device's state | ||
40 | * wimax_gnl_re_state_change_alloc() | ||
41 | * wimax_gnl_re_state_change_send() | ||
42 | * | ||
43 | * wimax_dev_init() Init a device | ||
44 | * wimax_dev_add() Register | ||
45 | * wimax_rfkill_add() | ||
46 | * wimax_gnl_add() Register all the generic netlink resources. | ||
47 | * wimax_id_table_add() | ||
48 | * wimax_dev_rm() Unregister | ||
49 | * wimax_id_table_rm() | ||
50 | * wimax_gnl_rm() | ||
51 | * wimax_rfkill_rm() | ||
52 | */ | ||
53 | #include <linux/device.h> | ||
54 | #include <net/genetlink.h> | ||
55 | #include <linux/netdevice.h> | ||
56 | #include <linux/wimax.h> | ||
57 | #include "wimax-internal.h" | ||
58 | |||
59 | |||
60 | #define D_SUBMODULE stack | ||
61 | #include "debug-levels.h" | ||
62 | |||
63 | /* | ||
64 | * Authoritative source for the RE_STATE_CHANGE attribute policy | ||
65 | * | ||
66 | * We don't really use it here, but /me likes to keep the definition | ||
67 | * close to where the data is generated. | ||
68 | */ | ||
69 | /* | ||
70 | static const | ||
71 | struct nla_policy wimax_gnl_re_status_change[WIMAX_GNL_ATTR_MAX + 1] = { | ||
72 | [WIMAX_GNL_STCH_STATE_OLD] = { .type = NLA_U8 }, | ||
73 | [WIMAX_GNL_STCH_STATE_NEW] = { .type = NLA_U8 }, | ||
74 | }; | ||
75 | */ | ||
76 | |||
77 | |||
78 | /* | ||
79 | * Allocate a Report State Change message | ||
80 | * | ||
81 | * @header: save it, you need it for _send() | ||
82 | * | ||
83 | * Creates and fills a basic state change message; different code | ||
84 | * paths can then add more attributes to the message as needed. | ||
85 | * | ||
86 | * Use wimax_gnl_re_state_change_send() to send the returned skb. | ||
87 | * | ||
88 | * Returns: skb with the genl message if ok, IS_ERR() ptr on error | ||
89 | * with an errno code. | ||
90 | */ | ||
91 | static | ||
92 | struct sk_buff *wimax_gnl_re_state_change_alloc( | ||
93 | struct wimax_dev *wimax_dev, | ||
94 | enum wimax_st new_state, enum wimax_st old_state, | ||
95 | void **header) | ||
96 | { | ||
97 | int result; | ||
98 | struct device *dev = wimax_dev_to_dev(wimax_dev); | ||
99 | void *data; | ||
100 | struct sk_buff *report_skb; | ||
101 | |||
102 | d_fnstart(3, dev, "(wimax_dev %p new_state %u old_state %u)\n", | ||
103 | wimax_dev, new_state, old_state); | ||
104 | result = -ENOMEM; | ||
105 | report_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); | ||
106 | if (report_skb == NULL) { | ||
107 | dev_err(dev, "RE_STCH: can't create message\n"); | ||
108 | goto error_new; | ||
109 | } | ||
110 | data = genlmsg_put(report_skb, 0, wimax_gnl_mcg.id, &wimax_gnl_family, | ||
111 | 0, WIMAX_GNL_RE_STATE_CHANGE); | ||
112 | if (data == NULL) { | ||
113 | dev_err(dev, "RE_STCH: can't put data into message\n"); | ||
114 | goto error_put; | ||
115 | } | ||
116 | *header = data; | ||
117 | |||
118 | result = nla_put_u8(report_skb, WIMAX_GNL_STCH_STATE_OLD, old_state); | ||
119 | if (result < 0) { | ||
120 | dev_err(dev, "RE_STCH: Error adding OLD attr: %d\n", result); | ||
121 | goto error_put; | ||
122 | } | ||
123 | result = nla_put_u8(report_skb, WIMAX_GNL_STCH_STATE_NEW, new_state); | ||
124 | if (result < 0) { | ||
125 | dev_err(dev, "RE_STCH: Error adding NEW attr: %d\n", result); | ||
126 | goto error_put; | ||
127 | } | ||
128 | result = nla_put_u32(report_skb, WIMAX_GNL_STCH_IFIDX, | ||
129 | wimax_dev->net_dev->ifindex); | ||
130 | if (result < 0) { | ||
131 | dev_err(dev, "RE_STCH: Error adding IFINDEX attribute\n"); | ||
132 | goto error_put; | ||
133 | } | ||
134 | d_fnend(3, dev, "(wimax_dev %p new_state %u old_state %u) = %p\n", | ||
135 | wimax_dev, new_state, old_state, report_skb); | ||
136 | return report_skb; | ||
137 | |||
138 | error_put: | ||
139 | nlmsg_free(report_skb); | ||
140 | error_new: | ||
141 | d_fnend(3, dev, "(wimax_dev %p new_state %u old_state %u) = %d\n", | ||
142 | wimax_dev, new_state, old_state, result); | ||
143 | return ERR_PTR(result); | ||
144 | } | ||
145 | |||
146 | |||
147 | /* | ||
148 | * Send a Report State Change message (as created with _alloc). | ||
149 | * | ||
150 | * @report_skb: as returned by wimax_gnl_re_state_change_alloc() | ||
151 | * @header: as returned by wimax_gnl_re_state_change_alloc() | ||
152 | * | ||
153 | * Returns: 0 if ok, < 0 errno code on error. | ||
154 | * | ||
155 | * If the message is NULL, pretend it didn't happen. | ||
156 | */ | ||
157 | static | ||
158 | int wimax_gnl_re_state_change_send( | ||
159 | struct wimax_dev *wimax_dev, struct sk_buff *report_skb, | ||
160 | void *header) | ||
161 | { | ||
162 | int result = 0; | ||
163 | struct device *dev = wimax_dev_to_dev(wimax_dev); | ||
164 | d_fnstart(3, dev, "(wimax_dev %p report_skb %p)\n", | ||
165 | wimax_dev, report_skb); | ||
166 | if (report_skb == NULL) | ||
167 | goto out; | ||
168 | genlmsg_end(report_skb, header); | ||
169 | result = genlmsg_multicast(report_skb, 0, wimax_gnl_mcg.id, GFP_KERNEL); | ||
170 | if (result == -ESRCH) /* Nobody connected, ignore it */ | ||
171 | result = 0; /* btw, the skb is freed already */ | ||
172 | if (result < 0) { | ||
173 | dev_err(dev, "RE_STCH: Error sending: %d\n", result); | ||
174 | nlmsg_free(report_skb); | ||
175 | } | ||
176 | out: | ||
177 | d_fnend(3, dev, "(wimax_dev %p report_skb %p) = %d\n", | ||
178 | wimax_dev, report_skb, result); | ||
179 | return result; | ||
180 | } | ||
181 | |||
182 | |||
183 | static | ||
184 | void __check_new_state(enum wimax_st old_state, enum wimax_st new_state, | ||
185 | unsigned allowed_states_bm) | ||
186 | { | ||
187 | if (WARN_ON(((1 << new_state) & allowed_states_bm) == 0)) { | ||
188 | printk(KERN_ERR "SW BUG! Forbidden state change %u -> %u\n", | ||
189 | old_state, new_state); | ||
190 | } | ||
191 | } | ||
192 | |||
193 | |||
194 | /* | ||
195 | * Set the current state of a WiMAX device [unlocked version of | ||
196 | * wimax_state_change(); the caller must hold wimax_dev->mutex]. | ||
197 | */ | ||
198 | void __wimax_state_change(struct wimax_dev *wimax_dev, enum wimax_st new_state) | ||
199 | { | ||
200 | struct device *dev = wimax_dev_to_dev(wimax_dev); | ||
201 | enum wimax_st old_state = wimax_dev->state; | ||
202 | struct sk_buff *stch_skb; | ||
203 | void *header; | ||
204 | |||
205 | d_fnstart(3, dev, "(wimax_dev %p new_state %u [old %u])\n", | ||
206 | wimax_dev, new_state, old_state); | ||
207 | |||
208 | if (WARN_ON(new_state >= __WIMAX_ST_INVALID)) { | ||
209 | dev_err(dev, "SW BUG: requesting invalid state %u\n", | ||
210 | new_state); | ||
211 | goto out; | ||
212 | } | ||
213 | if (old_state == new_state) | ||
214 | goto out; | ||
215 | header = NULL; /* gcc complains? can't grok why */ | ||
216 | stch_skb = wimax_gnl_re_state_change_alloc( | ||
217 | wimax_dev, new_state, old_state, &header); | ||
218 | |||
219 | /* Verify the state transition and do exit-from-state actions */ | ||
220 | switch (old_state) { | ||
221 | case __WIMAX_ST_NULL: | ||
222 | __check_new_state(old_state, new_state, | ||
223 | 1 << WIMAX_ST_DOWN); | ||
224 | break; | ||
225 | case WIMAX_ST_DOWN: | ||
226 | __check_new_state(old_state, new_state, | ||
227 | 1 << __WIMAX_ST_QUIESCING | ||
228 | | 1 << WIMAX_ST_UNINITIALIZED | ||
229 | | 1 << WIMAX_ST_RADIO_OFF); | ||
230 | break; | ||
231 | case __WIMAX_ST_QUIESCING: | ||
232 | __check_new_state(old_state, new_state, 1 << WIMAX_ST_DOWN); | ||
233 | break; | ||
234 | case WIMAX_ST_UNINITIALIZED: | ||
235 | __check_new_state(old_state, new_state, | ||
236 | 1 << __WIMAX_ST_QUIESCING | ||
237 | | 1 << WIMAX_ST_RADIO_OFF); | ||
238 | break; | ||
239 | case WIMAX_ST_RADIO_OFF: | ||
240 | __check_new_state(old_state, new_state, | ||
241 | 1 << __WIMAX_ST_QUIESCING | ||
242 | | 1 << WIMAX_ST_READY); | ||
243 | break; | ||
244 | case WIMAX_ST_READY: | ||
245 | __check_new_state(old_state, new_state, | ||
246 | 1 << __WIMAX_ST_QUIESCING | ||
247 | | 1 << WIMAX_ST_RADIO_OFF | ||
248 | | 1 << WIMAX_ST_SCANNING | ||
249 | | 1 << WIMAX_ST_CONNECTING | ||
250 | | 1 << WIMAX_ST_CONNECTED); | ||
251 | break; | ||
252 | case WIMAX_ST_SCANNING: | ||
253 | __check_new_state(old_state, new_state, | ||
254 | 1 << __WIMAX_ST_QUIESCING | ||
255 | | 1 << WIMAX_ST_RADIO_OFF | ||
256 | | 1 << WIMAX_ST_READY | ||
257 | | 1 << WIMAX_ST_CONNECTING | ||
258 | | 1 << WIMAX_ST_CONNECTED); | ||
259 | break; | ||
260 | case WIMAX_ST_CONNECTING: | ||
261 | __check_new_state(old_state, new_state, | ||
262 | 1 << __WIMAX_ST_QUIESCING | ||
263 | | 1 << WIMAX_ST_RADIO_OFF | ||
264 | | 1 << WIMAX_ST_READY | ||
265 | | 1 << WIMAX_ST_SCANNING | ||
266 | | 1 << WIMAX_ST_CONNECTED); | ||
267 | break; | ||
268 | case WIMAX_ST_CONNECTED: | ||
269 | __check_new_state(old_state, new_state, | ||
270 | 1 << __WIMAX_ST_QUIESCING | ||
271 | | 1 << WIMAX_ST_RADIO_OFF | ||
272 | | 1 << WIMAX_ST_READY); | ||
273 | netif_tx_disable(wimax_dev->net_dev); | ||
274 | netif_carrier_off(wimax_dev->net_dev); | ||
275 | break; | ||
276 | case __WIMAX_ST_INVALID: | ||
277 | default: | ||
278 | dev_err(dev, "SW BUG: wimax_dev %p is in unknown state %u\n", | ||
279 | wimax_dev, wimax_dev->state); | ||
280 | WARN_ON(1); | ||
281 | goto out; | ||
282 | } | ||
283 | |||
284 | /* Execute the actions of entry to the new state */ | ||
285 | switch (new_state) { | ||
286 | case __WIMAX_ST_NULL: | ||
287 | dev_err(dev, "SW BUG: wimax_dev %p entering NULL state " | ||
288 | "from %u\n", wimax_dev, wimax_dev->state); | ||
289 | WARN_ON(1); /* Nobody can enter this state */ | ||
290 | break; | ||
291 | case WIMAX_ST_DOWN: | ||
292 | break; | ||
293 | case __WIMAX_ST_QUIESCING: | ||
294 | break; | ||
295 | case WIMAX_ST_UNINITIALIZED: | ||
296 | break; | ||
297 | case WIMAX_ST_RADIO_OFF: | ||
298 | break; | ||
299 | case WIMAX_ST_READY: | ||
300 | break; | ||
301 | case WIMAX_ST_SCANNING: | ||
302 | break; | ||
303 | case WIMAX_ST_CONNECTING: | ||
304 | break; | ||
305 | case WIMAX_ST_CONNECTED: | ||
306 | netif_carrier_on(wimax_dev->net_dev); | ||
307 | netif_wake_queue(wimax_dev->net_dev); | ||
308 | break; | ||
309 | case __WIMAX_ST_INVALID: | ||
310 | default: | ||
311 | BUG(); | ||
312 | } | ||
313 | __wimax_state_set(wimax_dev, new_state); | ||
314 | if (stch_skb) | ||
315 | wimax_gnl_re_state_change_send(wimax_dev, stch_skb, header); | ||
316 | out: | ||
317 | d_fnend(3, dev, "(wimax_dev %p new_state %u [old %u]) = void\n", | ||
318 | wimax_dev, new_state, old_state); | ||
319 | return; | ||
320 | } | ||
321 | |||
322 | |||
323 | /** | ||
324 | * wimax_state_change - Set the current state of a WiMAX device | ||
325 | * | ||
326 | * @wimax_dev: WiMAX device descriptor (properly referenced) | ||
327 | * @new_state: New state to switch to | ||
328 | * | ||
329 | * This implements the state changes for the wimax devices. It will | ||
330 | * | ||
331 | * - verify that the state transition is legal (for now it'll just | ||
332 | * print a warning if not) according to the table in | ||
333 | * linux/wimax.h's documentation for 'enum wimax_st'. | ||
334 | * | ||
335 | * - perform the actions needed for leaving the current state and | ||
336 | * whichever are needed for entering the new state. | ||
337 | * | ||
338 | * - issue a report to user space indicating the new state (and an | ||
339 | * optional payload with information about the new state). | ||
340 | * | ||
341 | * NOTE: this function takes @wimax_dev's mutex; the caller must not hold it | ||
342 | */ | ||
343 | void wimax_state_change(struct wimax_dev *wimax_dev, enum wimax_st new_state) | ||
344 | { | ||
345 | mutex_lock(&wimax_dev->mutex); | ||
346 | __wimax_state_change(wimax_dev, new_state); | ||
347 | mutex_unlock(&wimax_dev->mutex); | ||
348 | return; | ||
349 | } | ||
350 | EXPORT_SYMBOL_GPL(wimax_state_change); | ||
351 | |||
352 | |||
353 | /** | ||
354 | * wimax_state_get() - Return the current state of a WiMAX device | ||
355 | * | ||
356 | * @wimax_dev: WiMAX device descriptor | ||
357 | * | ||
358 | * Returns: Current state of the device according to its driver. | ||
359 | */ | ||
360 | enum wimax_st wimax_state_get(struct wimax_dev *wimax_dev) | ||
361 | { | ||
362 | enum wimax_st state; | ||
363 | mutex_lock(&wimax_dev->mutex); | ||
364 | state = wimax_dev->state; | ||
365 | mutex_unlock(&wimax_dev->mutex); | ||
366 | return state; | ||
367 | } | ||
368 | EXPORT_SYMBOL_GPL(wimax_state_get); | ||
369 | |||
370 | |||
371 | /** | ||
372 | * wimax_dev_init - initialize a newly allocated instance | ||
373 | * | ||
374 | * @wimax_dev: WiMAX device descriptor to initialize. | ||
375 | * | ||
376 | * Initializes fields of a freshly allocated @wimax_dev instance. This | ||
377 | * function assumes that after allocation, the memory occupied by | ||
378 | * @wimax_dev was zeroed. | ||
379 | */ | ||
380 | void wimax_dev_init(struct wimax_dev *wimax_dev) | ||
381 | { | ||
382 | INIT_LIST_HEAD(&wimax_dev->id_table_node); | ||
383 | __wimax_state_set(wimax_dev, WIMAX_ST_UNINITIALIZED); | ||
384 | mutex_init(&wimax_dev->mutex); | ||
385 | mutex_init(&wimax_dev->mutex_reset); | ||
386 | } | ||
387 | EXPORT_SYMBOL_GPL(wimax_dev_init); | ||
388 | |||
389 | /* | ||
390 | * This extern declaration is here because it's easier to keep track -- | ||
391 | * both declarations are lists of the same operations. | ||
392 | */ | ||
393 | extern struct genl_ops | ||
394 | wimax_gnl_msg_from_user, | ||
395 | wimax_gnl_reset, | ||
396 | wimax_gnl_rfkill; | ||
397 | |||
398 | static | ||
399 | struct genl_ops *wimax_gnl_ops[] = { | ||
400 | &wimax_gnl_msg_from_user, | ||
401 | &wimax_gnl_reset, | ||
402 | &wimax_gnl_rfkill, | ||
403 | }; | ||
404 | |||
405 | |||
406 | static | ||
407 | size_t wimax_addr_scnprint(char *addr_str, size_t addr_str_size, | ||
408 | unsigned char *addr, size_t addr_len) | ||
409 | { | ||
410 | unsigned cnt, total; | ||
411 | for (total = cnt = 0; cnt < addr_len; cnt++) | ||
412 | total += scnprintf(addr_str + total, addr_str_size - total, | ||
413 | "%02x%c", addr[cnt], | ||
414 | cnt == addr_len - 1 ? '\0' : ':'); | ||
415 | return total; | ||
416 | } | ||
417 | |||
418 | |||
419 | /** | ||
420 | * wimax_dev_add - Register a new WiMAX device | ||
421 | * | ||
422 | * @wimax_dev: WiMAX device descriptor (as embedded in your @net_dev's | ||
423 | * priv data). You must have called wimax_dev_init() on it before. | ||
424 | * | ||
425 | * @net_dev: net device the @wimax_dev is associated with. The | ||
426 | * function expects SET_NETDEV_DEV() and register_netdev() were | ||
427 | * already called on it. | ||
428 | * | ||
429 | * Registers the new WiMAX device, sets up the user-kernel control | ||
430 | * interface (generic netlink) and common WiMAX infrastructure. | ||
431 | * | ||
432 | * Note that the parts that will allow interaction with user space are | ||
433 | * set up at the very end, when the rest is in place, as once that | ||
434 | * happens, the driver might get user space control requests via | ||
435 | * netlink or from debugfs that might translate into calls into | ||
436 | * wimax_dev->op_*(). | ||
437 | */ | ||
438 | int wimax_dev_add(struct wimax_dev *wimax_dev, struct net_device *net_dev) | ||
439 | { | ||
440 | int result; | ||
441 | struct device *dev = net_dev->dev.parent; | ||
442 | char addr_str[32]; | ||
443 | |||
444 | d_fnstart(3, dev, "(wimax_dev %p net_dev %p)\n", wimax_dev, net_dev); | ||
445 | |||
446 | /* Do the RFKILL setup before locking, as RFKILL will call | ||
447 | * into our functions. */ | ||
448 | wimax_dev->net_dev = net_dev; | ||
449 | result = wimax_rfkill_add(wimax_dev); | ||
450 | if (result < 0) | ||
451 | goto error_rfkill_add; | ||
452 | |||
453 | /* Set up user-space interaction */ | ||
454 | mutex_lock(&wimax_dev->mutex); | ||
455 | wimax_id_table_add(wimax_dev); | ||
456 | result = wimax_debugfs_add(wimax_dev); | ||
457 | if (result < 0) { | ||
458 | dev_err(dev, "cannot initialize debugfs: %d\n", | ||
459 | result); | ||
460 | goto error_debugfs_add; | ||
461 | } | ||
462 | |||
463 | __wimax_state_set(wimax_dev, WIMAX_ST_DOWN); | ||
464 | mutex_unlock(&wimax_dev->mutex); | ||
465 | |||
466 | wimax_addr_scnprint(addr_str, sizeof(addr_str), | ||
467 | net_dev->dev_addr, net_dev->addr_len); | ||
468 | dev_err(dev, "WiMAX interface %s (%s) ready\n", | ||
469 | net_dev->name, addr_str); | ||
470 | d_fnend(3, dev, "(wimax_dev %p net_dev %p) = 0\n", wimax_dev, net_dev); | ||
471 | return 0; | ||
472 | |||
473 | error_debugfs_add: | ||
474 | wimax_id_table_rm(wimax_dev); | ||
475 | mutex_unlock(&wimax_dev->mutex); | ||
476 | wimax_rfkill_rm(wimax_dev); | ||
477 | error_rfkill_add: | ||
478 | d_fnend(3, dev, "(wimax_dev %p net_dev %p) = %d\n", | ||
479 | wimax_dev, net_dev, result); | ||
480 | return result; | ||
481 | } | ||
482 | EXPORT_SYMBOL_GPL(wimax_dev_add); | ||
483 | |||
484 | |||
485 | /** | ||
486 | * wimax_dev_rm - Unregister an existing WiMAX device | ||
487 | * | ||
488 | * @wimax_dev: WiMAX device descriptor | ||
489 | * | ||
490 | * Unregisters a WiMAX device previously registered for use with | ||
491 | * wimax_dev_add(). | ||
492 | * | ||
493 | * IMPORTANT! Must call before calling unregister_netdev(). | ||
494 | * | ||
495 | * After this function returns, no more user space control requests | ||
496 | * (via netlink or debugfs) will reach wimax_dev->op_*(). | ||
497 | * | ||
498 | * Reentrancy control is ensured by setting the state to | ||
499 | * %__WIMAX_ST_QUIESCING. rfkill operations coming through | ||
500 | * wimax_*rfkill*() will be stopped by the quiescing state; ops coming | ||
501 | * from the rfkill subsystem will be stopped by the support being | ||
502 | * removed by wimax_rfkill_rm(). | ||
503 | */ | ||
504 | void wimax_dev_rm(struct wimax_dev *wimax_dev) | ||
505 | { | ||
506 | d_fnstart(3, NULL, "(wimax_dev %p)\n", wimax_dev); | ||
507 | |||
508 | mutex_lock(&wimax_dev->mutex); | ||
509 | __wimax_state_change(wimax_dev, __WIMAX_ST_QUIESCING); | ||
510 | wimax_debugfs_rm(wimax_dev); | ||
511 | wimax_id_table_rm(wimax_dev); | ||
512 | __wimax_state_change(wimax_dev, WIMAX_ST_DOWN); | ||
513 | mutex_unlock(&wimax_dev->mutex); | ||
514 | wimax_rfkill_rm(wimax_dev); | ||
515 | d_fnend(3, NULL, "(wimax_dev %p) = void\n", wimax_dev); | ||
516 | } | ||
517 | EXPORT_SYMBOL_GPL(wimax_dev_rm); | ||
518 | |||
519 | struct genl_family wimax_gnl_family = { | ||
520 | .id = GENL_ID_GENERATE, | ||
521 | .name = "WiMAX", | ||
522 | .version = WIMAX_GNL_VERSION, | ||
523 | .hdrsize = 0, | ||
524 | .maxattr = WIMAX_GNL_ATTR_MAX, | ||
525 | }; | ||
526 | |||
527 | struct genl_multicast_group wimax_gnl_mcg = { | ||
528 | .name = "msg", | ||
529 | }; | ||
530 | |||
531 | |||
532 | |||
533 | /* Initialize the wimax stack */ | ||
534 | static | ||
535 | int __init wimax_subsys_init(void) | ||
536 | { | ||
537 | int result, cnt; | ||
538 | |||
539 | d_fnstart(4, NULL, "()\n"); | ||
540 | snprintf(wimax_gnl_family.name, sizeof(wimax_gnl_family.name), | ||
541 | "WiMAX"); | ||
542 | result = genl_register_family(&wimax_gnl_family); | ||
543 | if (unlikely(result < 0)) { | ||
544 | printk(KERN_ERR "cannot register generic netlink family: %d\n", | ||
545 | result); | ||
546 | goto error_register_family; | ||
547 | } | ||
548 | |||
549 | for (cnt = 0; cnt < ARRAY_SIZE(wimax_gnl_ops); cnt++) { | ||
550 | result = genl_register_ops(&wimax_gnl_family, | ||
551 | wimax_gnl_ops[cnt]); | ||
552 | d_printf(4, NULL, "registering generic netlink op code " | ||
553 | "%u: %d\n", wimax_gnl_ops[cnt]->cmd, result); | ||
554 | if (unlikely(result < 0)) { | ||
555 | printk(KERN_ERR "cannot register generic netlink op " | ||
556 | "code %u: %d\n", | ||
557 | wimax_gnl_ops[cnt]->cmd, result); | ||
558 | goto error_register_ops; | ||
559 | } | ||
560 | } | ||
561 | |||
562 | result = genl_register_mc_group(&wimax_gnl_family, &wimax_gnl_mcg); | ||
563 | if (result < 0) | ||
564 | goto error_mc_group; | ||
565 | d_fnend(4, NULL, "() = 0\n"); | ||
566 | return 0; | ||
567 | |||
568 | error_mc_group: | ||
569 | error_register_ops: | ||
570 | for (cnt--; cnt >= 0; cnt--) | ||
571 | genl_unregister_ops(&wimax_gnl_family, | ||
572 | wimax_gnl_ops[cnt]); | ||
573 | genl_unregister_family(&wimax_gnl_family); | ||
574 | error_register_family: | ||
575 | d_fnend(4, NULL, "() = %d\n", result); | ||
576 | return result; | ||
577 | |||
578 | } | ||
579 | module_init(wimax_subsys_init); | ||
580 | |||
581 | |||
582 | /* Shutdown the wimax stack */ | ||
583 | static | ||
584 | void __exit wimax_subsys_exit(void) | ||
585 | { | ||
586 | int cnt; | ||
587 | wimax_id_table_release(); | ||
588 | genl_unregister_mc_group(&wimax_gnl_family, &wimax_gnl_mcg); | ||
589 | for (cnt = ARRAY_SIZE(wimax_gnl_ops) - 1; cnt >= 0; cnt--) | ||
590 | genl_unregister_ops(&wimax_gnl_family, | ||
591 | wimax_gnl_ops[cnt]); | ||
592 | genl_unregister_family(&wimax_gnl_family); | ||
593 | } | ||
594 | module_exit(wimax_subsys_exit); | ||
595 | |||
596 | MODULE_AUTHOR("Intel Corporation <linux-wimax@intel.com>"); | ||
597 | MODULE_DESCRIPTION("Linux WiMAX stack"); | ||
598 | MODULE_LICENSE("GPL"); | ||
599 | |||
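
The registration order documented for wimax_dev_add() and wimax_dev_rm() above can be summarized with a sketch. It is not part of the patch; the "mydrv" structures and the probe/remove split are hypothetical. The order is: wimax_dev_init() on zeroed memory, then register_netdev(), then wimax_dev_add(); teardown calls wimax_dev_rm() before unregister_netdev().

/* Sketch only; "mydrv" names are hypothetical. */
#include <linux/netdevice.h>
#include <net/wimax.h>

struct mydrv_priv {
	struct wimax_dev wimax_dev;	/* embedded in the netdev's priv data */
	/* ... driver-private state ... */
};

static int mydrv_register(struct net_device *net_dev)
{
	struct mydrv_priv *priv = netdev_priv(net_dev);
	int result;

	wimax_dev_init(&priv->wimax_dev);	/* priv memory was zeroed on allocation */
	/* fill in op_msg_from_user, op_reset, op_rfkill_sw_toggle here */

	/* SET_NETDEV_DEV() is assumed done by the bus glue; register netdev first */
	result = register_netdev(net_dev);
	if (result < 0)
		return result;
	result = wimax_dev_add(&priv->wimax_dev, net_dev);
	if (result < 0)
		unregister_netdev(net_dev);
	return result;
}

static void mydrv_unregister(struct net_device *net_dev)
{
	struct mydrv_priv *priv = netdev_priv(net_dev);

	wimax_dev_rm(&priv->wimax_dev);	/* must precede unregister_netdev() */
	unregister_netdev(net_dev);
}
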
diff --git a/net/wimax/wimax-internal.h b/net/wimax/wimax-internal.h new file mode 100644 index 000000000000..1e743d214856 --- /dev/null +++ b/net/wimax/wimax-internal.h | |||
@@ -0,0 +1,91 @@ | |||
1 | /* | ||
2 | * Linux WiMAX | ||
3 | * Internal API for kernel space WiMAX stack | ||
4 | * | ||
5 | * | ||
6 | * Copyright (C) 2007 Intel Corporation <linux-wimax@intel.com> | ||
7 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License version | ||
11 | * 2 as published by the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
21 | * 02110-1301, USA. | ||
22 | * | ||
23 | * | ||
24 | * This header file is for declarations and definitions internal to | ||
25 | * the WiMAX stack. For public APIs and documentation, see | ||
26 | * include/net/wimax.h and include/linux/wimax.h. | ||
27 | */ | ||
28 | |||
29 | #ifndef __WIMAX_INTERNAL_H__ | ||
30 | #define __WIMAX_INTERNAL_H__ | ||
31 | #ifdef __KERNEL__ | ||
32 | |||
33 | #include <linux/device.h> | ||
34 | #include <net/wimax.h> | ||
35 | |||
36 | |||
37 | /* | ||
38 | * Decide if a (locked) device is ready for use | ||
39 | * | ||
40 |  * Before using the device structure, the caller must hold it locked | ||
41 |  * (wimax_dev->mutex). In addition, most operations need to call this | ||
42 |  * function to verify the device is in a state where it can be used. | ||
43 |  * | ||
44 |  * If it is not, an error code is returned; in that case the caller | ||
45 |  * should not touch the device any further and should simply unlock | ||
46 |  * it (a usage sketch follows at the end of this header). | ||
47 | */ | ||
48 | static inline __must_check | ||
49 | int wimax_dev_is_ready(struct wimax_dev *wimax_dev) | ||
50 | { | ||
51 | if (wimax_dev->state == __WIMAX_ST_NULL) | ||
52 | return -EINVAL; /* Device is not even registered! */ | ||
53 | if (wimax_dev->state == WIMAX_ST_DOWN) | ||
54 | return -ENOMEDIUM; | ||
55 | if (wimax_dev->state == __WIMAX_ST_QUIESCING) | ||
56 | return -ESHUTDOWN; | ||
57 | return 0; | ||
58 | } | ||
59 | |||
60 | |||
61 | static inline | ||
62 | void __wimax_state_set(struct wimax_dev *wimax_dev, enum wimax_st state) | ||
63 | { | ||
64 | wimax_dev->state = state; | ||
65 | } | ||
66 | extern void __wimax_state_change(struct wimax_dev *, enum wimax_st); | ||
67 | |||
68 | #ifdef CONFIG_DEBUG_FS | ||
69 | extern int wimax_debugfs_add(struct wimax_dev *); | ||
70 | extern void wimax_debugfs_rm(struct wimax_dev *); | ||
71 | #else | ||
72 | static inline int wimax_debugfs_add(struct wimax_dev *wimax_dev) | ||
73 | { | ||
74 | return 0; | ||
75 | } | ||
76 | static inline void wimax_debugfs_rm(struct wimax_dev *wimax_dev) {} | ||
77 | #endif | ||
78 | |||
79 | extern void wimax_id_table_add(struct wimax_dev *); | ||
80 | extern struct wimax_dev *wimax_dev_get_by_genl_info(struct genl_info *, int); | ||
81 | extern void wimax_id_table_rm(struct wimax_dev *); | ||
82 | extern void wimax_id_table_release(void); | ||
83 | |||
84 | extern int wimax_rfkill_add(struct wimax_dev *); | ||
85 | extern void wimax_rfkill_rm(struct wimax_dev *); | ||
86 | |||
87 | extern struct genl_family wimax_gnl_family; | ||
88 | extern struct genl_multicast_group wimax_gnl_mcg; | ||
89 | |||
90 | #endif /* #ifdef __KERNEL__ */ | ||
91 | #endif /* #ifndef __WIMAX_INTERNAL_H__ */ | ||
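As the wimax_dev_is_ready() comment above describes, callers take wimax_dev->mutex, check readiness, and bail out without touching the device on error. A minimal sketch of that pattern; example_op is a hypothetical operation, not part of the stack:

static int example_op(struct wimax_dev *wimax_dev)
{
	int result;

	mutex_lock(&wimax_dev->mutex);
	result = wimax_dev_is_ready(wimax_dev);
	if (result < 0)
		goto out_unlock;	/* not usable: just unlock and return */
	/* ... device is registered and up: do the real work here ... */
out_unlock:
	mutex_unlock(&wimax_dev->mutex);
	return result;
}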
diff --git a/net/wireless/wext.c b/net/wireless/wext.c index e49a2d1ef1e4..cb6a5bb85d80 100644 --- a/net/wireless/wext.c +++ b/net/wireless/wext.c | |||
@@ -1055,8 +1055,8 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr, | |||
1055 | return private(dev, iwr, cmd, info, handler); | 1055 | return private(dev, iwr, cmd, info, handler); |
1056 | } | 1056 | } |
1057 | /* Old driver API : call driver ioctl handler */ | 1057 | /* Old driver API : call driver ioctl handler */ |
1058 | if (dev->do_ioctl) | 1058 | if (dev->netdev_ops->ndo_do_ioctl) |
1059 | return dev->do_ioctl(dev, ifr, cmd); | 1059 | return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd); |
1060 | return -EOPNOTSUPP; | 1060 | return -EOPNOTSUPP; |
1061 | } | 1061 | } |
1062 | 1062 | ||
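The hunk above is part of the net_device_ops conversion: the per-device do_ioctl pointer is gone, so wireless extensions now reach the handler through dev->netdev_ops->ndo_do_ioctl. On the driver side the corresponding change looks roughly like this sketch (example_* names are hypothetical):

static int example_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	/* private/wireless ioctls are dispatched here */
	return -EOPNOTSUPP;
}

static const struct net_device_ops example_netdev_ops = {
	.ndo_do_ioctl	= example_do_ioctl,
	/* .ndo_open, .ndo_stop, .ndo_start_xmit, ... */
};

	/* in the driver's probe/setup path: */
	dev->netdev_ops = &example_netdev_ops;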