Diffstat (limited to 'net')
78 files changed, 3583 insertions, 807 deletions
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index dd86a1dc4cd0..6c1323940263 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -3,46 +3,35 @@ | |||
3 | #include <linux/if_vlan.h> | 3 | #include <linux/if_vlan.h> |
4 | #include "vlan.h" | 4 | #include "vlan.h" |
5 | 5 | ||
6 | struct vlan_hwaccel_cb { | ||
7 | struct net_device *dev; | ||
8 | }; | ||
9 | |||
10 | static inline struct vlan_hwaccel_cb *vlan_hwaccel_cb(struct sk_buff *skb) | ||
11 | { | ||
12 | return (struct vlan_hwaccel_cb *)skb->cb; | ||
13 | } | ||
14 | |||
15 | /* VLAN rx hw acceleration helper. This acts like netif_{rx,receive_skb}(). */ | 6 | /* VLAN rx hw acceleration helper. This acts like netif_{rx,receive_skb}(). */ |
16 | int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp, | 7 | int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp, |
17 | u16 vlan_tci, int polling) | 8 | u16 vlan_tci, int polling) |
18 | { | 9 | { |
19 | struct vlan_hwaccel_cb *cb = vlan_hwaccel_cb(skb); | 10 | if (skb_bond_should_drop(skb)) |
20 | 11 | goto drop; | |
21 | if (skb_bond_should_drop(skb)) { | ||
22 | dev_kfree_skb_any(skb); | ||
23 | return NET_RX_DROP; | ||
24 | } | ||
25 | 12 | ||
26 | skb->vlan_tci = vlan_tci; | 13 | skb->vlan_tci = vlan_tci; |
27 | cb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK); | 14 | skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK); |
15 | |||
16 | if (!skb->dev) | ||
17 | goto drop; | ||
28 | 18 | ||
29 | return (polling ? netif_receive_skb(skb) : netif_rx(skb)); | 19 | return (polling ? netif_receive_skb(skb) : netif_rx(skb)); |
20 | |||
21 | drop: | ||
22 | dev_kfree_skb_any(skb); | ||
23 | return NET_RX_DROP; | ||
30 | } | 24 | } |
31 | EXPORT_SYMBOL(__vlan_hwaccel_rx); | 25 | EXPORT_SYMBOL(__vlan_hwaccel_rx); |
32 | 26 | ||
33 | int vlan_hwaccel_do_receive(struct sk_buff *skb) | 27 | int vlan_hwaccel_do_receive(struct sk_buff *skb) |
34 | { | 28 | { |
35 | struct vlan_hwaccel_cb *cb = vlan_hwaccel_cb(skb); | 29 | struct net_device *dev = skb->dev; |
36 | struct net_device *dev = cb->dev; | ||
37 | struct net_device_stats *stats; | 30 | struct net_device_stats *stats; |
38 | 31 | ||
32 | skb->dev = vlan_dev_info(dev)->real_dev; | ||
39 | netif_nit_deliver(skb); | 33 | netif_nit_deliver(skb); |
40 | 34 | ||
41 | if (dev == NULL) { | ||
42 | kfree_skb(skb); | ||
43 | return -1; | ||
44 | } | ||
45 | |||
46 | skb->dev = dev; | 35 | skb->dev = dev; |
47 | skb->priority = vlan_get_ingress_priority(dev, skb->vlan_tci); | 36 | skb->priority = vlan_get_ingress_priority(dev, skb->vlan_tci); |
48 | skb->vlan_tci = 0; | 37 | skb->vlan_tci = 0; |
@@ -80,3 +69,79 @@ u16 vlan_dev_vlan_id(const struct net_device *dev) | |||
80 | return vlan_dev_info(dev)->vlan_id; | 69 | return vlan_dev_info(dev)->vlan_id; |
81 | } | 70 | } |
82 | EXPORT_SYMBOL_GPL(vlan_dev_vlan_id); | 71 | EXPORT_SYMBOL_GPL(vlan_dev_vlan_id); |
72 | |||
73 | static int vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp, | ||
74 | unsigned int vlan_tci, struct sk_buff *skb) | ||
75 | { | ||
76 | struct sk_buff *p; | ||
77 | |||
78 | if (skb_bond_should_drop(skb)) | ||
79 | goto drop; | ||
80 | |||
81 | skb->vlan_tci = vlan_tci; | ||
82 | skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK); | ||
83 | |||
84 | if (!skb->dev) | ||
85 | goto drop; | ||
86 | |||
87 | for (p = napi->gro_list; p; p = p->next) { | ||
88 | NAPI_GRO_CB(p)->same_flow = p->dev == skb->dev; | ||
89 | NAPI_GRO_CB(p)->flush = 0; | ||
90 | } | ||
91 | |||
92 | return dev_gro_receive(napi, skb); | ||
93 | |||
94 | drop: | ||
95 | return 2; | ||
96 | } | ||
97 | |||
98 | int vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp, | ||
99 | unsigned int vlan_tci, struct sk_buff *skb) | ||
100 | { | ||
101 | int err = NET_RX_SUCCESS; | ||
102 | |||
103 | switch (vlan_gro_common(napi, grp, vlan_tci, skb)) { | ||
104 | case -1: | ||
105 | return netif_receive_skb(skb); | ||
106 | |||
107 | case 2: | ||
108 | err = NET_RX_DROP; | ||
109 | /* fall through */ | ||
110 | |||
111 | case 1: | ||
112 | kfree_skb(skb); | ||
113 | break; | ||
114 | } | ||
115 | |||
116 | return err; | ||
117 | } | ||
118 | EXPORT_SYMBOL(vlan_gro_receive); | ||
119 | |||
120 | int vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp, | ||
121 | unsigned int vlan_tci, struct napi_gro_fraginfo *info) | ||
122 | { | ||
123 | struct sk_buff *skb = napi_fraginfo_skb(napi, info); | ||
124 | int err = NET_RX_DROP; | ||
125 | |||
126 | if (!skb) | ||
127 | goto out; | ||
128 | |||
129 | err = NET_RX_SUCCESS; | ||
130 | |||
131 | switch (vlan_gro_common(napi, grp, vlan_tci, skb)) { | ||
132 | case -1: | ||
133 | return netif_receive_skb(skb); | ||
134 | |||
135 | case 2: | ||
136 | err = NET_RX_DROP; | ||
137 | /* fall through */ | ||
138 | |||
139 | case 1: | ||
140 | napi_reuse_skb(napi, skb); | ||
141 | break; | ||
142 | } | ||
143 | |||
144 | out: | ||
145 | return err; | ||
146 | } | ||
147 | EXPORT_SYMBOL(vlan_gro_frags); | ||
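The two helpers exported above, vlan_gro_receive() and vlan_gro_frags(), are meant to be called from a driver's NAPI poll path when the hardware has already stripped the 802.1Q tag. A minimal sketch of such a caller follows; the adapter structure, its fields and the tag_present flag are illustrative assumptions, not part of this patch:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>

/* Hypothetical per-adapter state for the sketch below. */
struct example_adapter {
	struct net_device *netdev;
	struct napi_struct napi;
	struct vlan_group *vlgrp;	/* typically cached from the driver's VLAN registration hook */
};

/* RX completion for one frame; vlan_tag/tag_present would come from the
 * hardware descriptor in a real driver. */
static void example_receive_skb(struct example_adapter *adapter,
				struct sk_buff *skb,
				u16 vlan_tag, bool tag_present)
{
	skb->protocol = eth_type_trans(skb, adapter->netdev);

	if (adapter->vlgrp && tag_present)
		/* tag stripped by hardware: let the helper above look up
		 * the VLAN device and feed the frame into GRO */
		vlan_gro_receive(&adapter->napi, adapter->vlgrp,
				 vlan_tag, skb);
	else
		napi_gro_receive(&adapter->napi, skb);
}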
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 89a3bbdfca3f..4a19acd3a32b 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -546,6 +546,18 @@ static int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
546 | return err; | 546 | return err; |
547 | } | 547 | } |
548 | 548 | ||
549 | static int vlan_dev_neigh_setup(struct net_device *dev, struct neigh_parms *pa) | ||
550 | { | ||
551 | struct net_device *real_dev = vlan_dev_info(dev)->real_dev; | ||
552 | const struct net_device_ops *ops = real_dev->netdev_ops; | ||
553 | int err = 0; | ||
554 | |||
555 | if (netif_device_present(real_dev) && ops->ndo_neigh_setup) | ||
556 | err = ops->ndo_neigh_setup(dev, pa); | ||
557 | |||
558 | return err; | ||
559 | } | ||
560 | |||
549 | static void vlan_dev_change_rx_flags(struct net_device *dev, int change) | 561 | static void vlan_dev_change_rx_flags(struct net_device *dev, int change) |
550 | { | 562 | { |
551 | struct net_device *real_dev = vlan_dev_info(dev)->real_dev; | 563 | struct net_device *real_dev = vlan_dev_info(dev)->real_dev; |
@@ -713,6 +725,7 @@ static const struct net_device_ops vlan_netdev_ops = { | |||
713 | .ndo_set_multicast_list = vlan_dev_set_rx_mode, | 725 | .ndo_set_multicast_list = vlan_dev_set_rx_mode, |
714 | .ndo_change_rx_flags = vlan_dev_change_rx_flags, | 726 | .ndo_change_rx_flags = vlan_dev_change_rx_flags, |
715 | .ndo_do_ioctl = vlan_dev_ioctl, | 727 | .ndo_do_ioctl = vlan_dev_ioctl, |
728 | .ndo_neigh_setup = vlan_dev_neigh_setup, | ||
716 | }; | 729 | }; |
717 | 730 | ||
718 | static const struct net_device_ops vlan_netdev_accel_ops = { | 731 | static const struct net_device_ops vlan_netdev_accel_ops = { |
@@ -728,6 +741,7 @@ static const struct net_device_ops vlan_netdev_accel_ops = { | |||
728 | .ndo_set_multicast_list = vlan_dev_set_rx_mode, | 741 | .ndo_set_multicast_list = vlan_dev_set_rx_mode, |
729 | .ndo_change_rx_flags = vlan_dev_change_rx_flags, | 742 | .ndo_change_rx_flags = vlan_dev_change_rx_flags, |
730 | .ndo_do_ioctl = vlan_dev_ioctl, | 743 | .ndo_do_ioctl = vlan_dev_ioctl, |
744 | .ndo_neigh_setup = vlan_dev_neigh_setup, | ||
731 | }; | 745 | }; |
732 | 746 | ||
733 | void vlan_setup(struct net_device *dev) | 747 | void vlan_setup(struct net_device *dev) |
diff --git a/net/Kconfig b/net/Kconfig
index 6ec2cce7c167..bf2776018f71 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -254,6 +254,8 @@ source "net/mac80211/Kconfig" | |||
254 | 254 | ||
255 | endif # WIRELESS | 255 | endif # WIRELESS |
256 | 256 | ||
257 | source "net/wimax/Kconfig" | ||
258 | |||
257 | source "net/rfkill/Kconfig" | 259 | source "net/rfkill/Kconfig" |
258 | source "net/9p/Kconfig" | 260 | source "net/9p/Kconfig" |
259 | 261 | ||
diff --git a/net/Makefile b/net/Makefile
index ba4460432b7c..0fcce89d7169 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -63,3 +63,4 @@ endif | |||
63 | ifeq ($(CONFIG_NET),y) | 63 | ifeq ($(CONFIG_NET),y) |
64 | obj-$(CONFIG_SYSCTL) += sysctl_net.o | 64 | obj-$(CONFIG_SYSCTL) += sysctl_net.o |
65 | endif | 65 | endif |
66 | obj-$(CONFIG_WIMAX) += wimax/ | ||
diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c
index b03ff58e9308..89f99d3beb60 100644
--- a/net/appletalk/aarp.c
+++ b/net/appletalk/aarp.c
@@ -443,13 +443,14 @@ static void aarp_send_probe_phase1(struct atalk_iface *iface) | |||
443 | { | 443 | { |
444 | struct ifreq atreq; | 444 | struct ifreq atreq; |
445 | struct sockaddr_at *sa = (struct sockaddr_at *)&atreq.ifr_addr; | 445 | struct sockaddr_at *sa = (struct sockaddr_at *)&atreq.ifr_addr; |
446 | const struct net_device_ops *ops = iface->dev->netdev_ops; | ||
446 | 447 | ||
447 | sa->sat_addr.s_node = iface->address.s_node; | 448 | sa->sat_addr.s_node = iface->address.s_node; |
448 | sa->sat_addr.s_net = ntohs(iface->address.s_net); | 449 | sa->sat_addr.s_net = ntohs(iface->address.s_net); |
449 | 450 | ||
450 | /* We pass the Net:Node to the drivers/cards by a Device ioctl. */ | 451 | /* We pass the Net:Node to the drivers/cards by a Device ioctl. */ |
451 | if (!(iface->dev->do_ioctl(iface->dev, &atreq, SIOCSIFADDR))) { | 452 | if (!(ops->ndo_do_ioctl(iface->dev, &atreq, SIOCSIFADDR))) { |
452 | (void)iface->dev->do_ioctl(iface->dev, &atreq, SIOCGIFADDR); | 453 | ops->ndo_do_ioctl(iface->dev, &atreq, SIOCGIFADDR); |
453 | if (iface->address.s_net != htons(sa->sat_addr.s_net) || | 454 | if (iface->address.s_net != htons(sa->sat_addr.s_net) || |
454 | iface->address.s_node != sa->sat_addr.s_node) | 455 | iface->address.s_node != sa->sat_addr.s_node) |
455 | iface->status |= ATIF_PROBE_FAIL; | 456 | iface->status |= ATIF_PROBE_FAIL; |
diff --git a/net/bluetooth/bnep/bnep.h b/net/bluetooth/bnep/bnep.h
index d20f8a40f36e..0d9e506f5d5a 100644
--- a/net/bluetooth/bnep/bnep.h
+++ b/net/bluetooth/bnep/bnep.h
@@ -165,7 +165,6 @@ struct bnep_session { | |||
165 | 165 | ||
166 | struct socket *sock; | 166 | struct socket *sock; |
167 | struct net_device *dev; | 167 | struct net_device *dev; |
168 | struct net_device_stats stats; | ||
169 | }; | 168 | }; |
170 | 169 | ||
171 | void bnep_net_setup(struct net_device *dev); | 170 | void bnep_net_setup(struct net_device *dev); |
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 70fea8bdb4e5..52a6ce0d772b 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -306,7 +306,7 @@ static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb) | |||
306 | struct sk_buff *nskb; | 306 | struct sk_buff *nskb; |
307 | u8 type; | 307 | u8 type; |
308 | 308 | ||
309 | s->stats.rx_bytes += skb->len; | 309 | dev->stats.rx_bytes += skb->len; |
310 | 310 | ||
311 | type = *(u8 *) skb->data; skb_pull(skb, 1); | 311 | type = *(u8 *) skb->data; skb_pull(skb, 1); |
312 | 312 | ||
@@ -343,7 +343,7 @@ static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb) | |||
343 | * may not be modified and because of the alignment requirements. */ | 343 | * may not be modified and because of the alignment requirements. */ |
344 | nskb = alloc_skb(2 + ETH_HLEN + skb->len, GFP_KERNEL); | 344 | nskb = alloc_skb(2 + ETH_HLEN + skb->len, GFP_KERNEL); |
345 | if (!nskb) { | 345 | if (!nskb) { |
346 | s->stats.rx_dropped++; | 346 | dev->stats.rx_dropped++; |
347 | kfree_skb(skb); | 347 | kfree_skb(skb); |
348 | return -ENOMEM; | 348 | return -ENOMEM; |
349 | } | 349 | } |
@@ -378,14 +378,14 @@ static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb) | |||
378 | skb_copy_from_linear_data(skb, __skb_put(nskb, skb->len), skb->len); | 378 | skb_copy_from_linear_data(skb, __skb_put(nskb, skb->len), skb->len); |
379 | kfree_skb(skb); | 379 | kfree_skb(skb); |
380 | 380 | ||
381 | s->stats.rx_packets++; | 381 | dev->stats.rx_packets++; |
382 | nskb->ip_summed = CHECKSUM_NONE; | 382 | nskb->ip_summed = CHECKSUM_NONE; |
383 | nskb->protocol = eth_type_trans(nskb, dev); | 383 | nskb->protocol = eth_type_trans(nskb, dev); |
384 | netif_rx_ni(nskb); | 384 | netif_rx_ni(nskb); |
385 | return 0; | 385 | return 0; |
386 | 386 | ||
387 | badframe: | 387 | badframe: |
388 | s->stats.rx_errors++; | 388 | dev->stats.rx_errors++; |
389 | kfree_skb(skb); | 389 | kfree_skb(skb); |
390 | return 0; | 390 | return 0; |
391 | } | 391 | } |
@@ -448,8 +448,8 @@ send: | |||
448 | kfree_skb(skb); | 448 | kfree_skb(skb); |
449 | 449 | ||
450 | if (len > 0) { | 450 | if (len > 0) { |
451 | s->stats.tx_bytes += len; | 451 | s->dev->stats.tx_bytes += len; |
452 | s->stats.tx_packets++; | 452 | s->dev->stats.tx_packets++; |
453 | return 0; | 453 | return 0; |
454 | } | 454 | } |
455 | 455 | ||
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index f897da6e0444..d7a0e9722def 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -55,12 +55,6 @@ static int bnep_net_close(struct net_device *dev) | |||
55 | return 0; | 55 | return 0; |
56 | } | 56 | } |
57 | 57 | ||
58 | static struct net_device_stats *bnep_net_get_stats(struct net_device *dev) | ||
59 | { | ||
60 | struct bnep_session *s = netdev_priv(dev); | ||
61 | return &s->stats; | ||
62 | } | ||
63 | |||
64 | static void bnep_net_set_mc_list(struct net_device *dev) | 58 | static void bnep_net_set_mc_list(struct net_device *dev) |
65 | { | 59 | { |
66 | #ifdef CONFIG_BT_BNEP_MC_FILTER | 60 | #ifdef CONFIG_BT_BNEP_MC_FILTER |
@@ -128,11 +122,6 @@ static void bnep_net_timeout(struct net_device *dev) | |||
128 | netif_wake_queue(dev); | 122 | netif_wake_queue(dev); |
129 | } | 123 | } |
130 | 124 | ||
131 | static int bnep_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | ||
132 | { | ||
133 | return -EINVAL; | ||
134 | } | ||
135 | |||
136 | #ifdef CONFIG_BT_BNEP_MC_FILTER | 125 | #ifdef CONFIG_BT_BNEP_MC_FILTER |
137 | static inline int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s) | 126 | static inline int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s) |
138 | { | 127 | { |
@@ -217,6 +206,18 @@ static int bnep_net_xmit(struct sk_buff *skb, struct net_device *dev) | |||
217 | return 0; | 206 | return 0; |
218 | } | 207 | } |
219 | 208 | ||
209 | static const struct net_device_ops bnep_netdev_ops = { | ||
210 | .ndo_open = bnep_net_open, | ||
211 | .ndo_stop = bnep_net_close, | ||
212 | .ndo_start_xmit = bnep_net_xmit, | ||
213 | .ndo_validate_addr = eth_validate_addr, | ||
214 | .ndo_set_multicast_list = bnep_net_set_mc_list, | ||
215 | .ndo_set_mac_address = bnep_net_set_mac_addr, | ||
216 | .ndo_tx_timeout = bnep_net_timeout, | ||
217 | .ndo_change_mtu = eth_change_mtu, | ||
218 | |||
219 | }; | ||
220 | |||
220 | void bnep_net_setup(struct net_device *dev) | 221 | void bnep_net_setup(struct net_device *dev) |
221 | { | 222 | { |
222 | 223 | ||
@@ -224,15 +225,7 @@ void bnep_net_setup(struct net_device *dev) | |||
224 | dev->addr_len = ETH_ALEN; | 225 | dev->addr_len = ETH_ALEN; |
225 | 226 | ||
226 | ether_setup(dev); | 227 | ether_setup(dev); |
227 | 228 | dev->netdev_ops = &bnep_netdev_ops; | |
228 | dev->open = bnep_net_open; | ||
229 | dev->stop = bnep_net_close; | ||
230 | dev->hard_start_xmit = bnep_net_xmit; | ||
231 | dev->get_stats = bnep_net_get_stats; | ||
232 | dev->do_ioctl = bnep_net_ioctl; | ||
233 | dev->set_mac_address = bnep_net_set_mac_addr; | ||
234 | dev->set_multicast_list = bnep_net_set_mc_list; | ||
235 | 229 | ||
236 | dev->watchdog_timeo = HZ * 2; | 230 | dev->watchdog_timeo = HZ * 2; |
237 | dev->tx_timeout = bnep_net_timeout; | ||
238 | } | 231 | } |
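The bnep conversion above is an instance of the generic net_device_ops migration that appears in several files of this series (vlan_dev.c, aarp.c, bnep): per-field assignments on struct net_device are replaced by a single const ops table referenced from dev->netdev_ops. A condensed, hypothetical example of the same pattern, with placeholder foo_* callbacks:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

static int foo_open(struct net_device *dev)  { return 0; }
static int foo_close(struct net_device *dev) { return 0; }

static int foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	dev_kfree_skb(skb);		/* dummy transmit path */
	return 0;
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_open		= foo_open,
	.ndo_stop		= foo_close,
	.ndo_start_xmit		= foo_start_xmit,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= eth_change_mtu,
};

static void foo_setup(struct net_device *dev)
{
	ether_setup(dev);
	/* replaces the old dev->open, dev->stop, dev->hard_start_xmit, ... */
	dev->netdev_ops = &foo_netdev_ops;
	dev->watchdog_timeo = 2 * HZ;
}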
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 3dadb338addd..fa417ca6cbe6 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -414,6 +414,12 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask, | |||
414 | * The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can | 414 | * The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can |
415 | * filter for error frames (CAN_ERR_FLAG bit set in mask). | 415 | * filter for error frames (CAN_ERR_FLAG bit set in mask). |
416 | * | 416 | * |
417 | * The provided pointer to the sk_buff is guaranteed to be valid as long as | ||
418 | * the callback function is running. The callback function must *not* free | ||
419 | * the given sk_buff while processing it's task. When the given sk_buff is | ||
420 | * needed after the end of the callback function it must be cloned inside | ||
421 | * the callback function with skb_clone(). | ||
422 | * | ||
417 | * Return: | 423 | * Return: |
418 | * 0 on success | 424 | * 0 on success |
419 | * -ENOMEM on missing cache mem to create subscription entry | 425 | * -ENOMEM on missing cache mem to create subscription entry |
@@ -569,13 +575,8 @@ EXPORT_SYMBOL(can_rx_unregister); | |||
569 | 575 | ||
570 | static inline void deliver(struct sk_buff *skb, struct receiver *r) | 576 | static inline void deliver(struct sk_buff *skb, struct receiver *r) |
571 | { | 577 | { |
572 | struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC); | 578 | r->func(skb, r->data); |
573 | 579 | r->matches++; | |
574 | if (clone) { | ||
575 | clone->sk = skb->sk; | ||
576 | r->func(clone, r->data); | ||
577 | r->matches++; | ||
578 | } | ||
579 | } | 580 | } |
580 | 581 | ||
581 | static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb) | 582 | static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb) |
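With the per-receiver skb_clone() removed from deliver(), the ownership rule spelled out in the new comment above becomes binding for every callback passed to can_rx_register(): the skb belongs to af_can, must not be freed by the callback, and has to be cloned if it is needed after the callback returns (the raw.c hunk below does exactly that). A conforming callback might look like this; the context structure and queue are illustrative, not from this patch:

#include <linux/skbuff.h>
#include <linux/can/core.h>

/* Hypothetical private state handed to can_rx_register() as "data". */
struct example_ctx {
	struct sk_buff_head rx_queue;
};

static void example_can_rcv(struct sk_buff *skb, void *data)
{
	struct example_ctx *ctx = data;
	struct sk_buff *clone;

	/* keep a private copy for processing after the callback returns */
	clone = skb_clone(skb, GFP_ATOMIC);
	if (!clone)
		return;

	skb_queue_tail(&ctx->rx_queue, clone);
	/* the original skb is NOT freed here - af_can still owns it */
}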
diff --git a/net/can/bcm.c b/net/can/bcm.c
index da0d426c0ce4..1649c8ab2c2f 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -70,7 +70,7 @@ | |||
70 | 70 | ||
71 | #define CAN_BCM_VERSION CAN_VERSION | 71 | #define CAN_BCM_VERSION CAN_VERSION |
72 | static __initdata const char banner[] = KERN_INFO | 72 | static __initdata const char banner[] = KERN_INFO |
73 | "can: broadcast manager protocol (rev " CAN_BCM_VERSION ")\n"; | 73 | "can: broadcast manager protocol (rev " CAN_BCM_VERSION " t)\n"; |
74 | 74 | ||
75 | MODULE_DESCRIPTION("PF_CAN broadcast manager protocol"); | 75 | MODULE_DESCRIPTION("PF_CAN broadcast manager protocol"); |
76 | MODULE_LICENSE("Dual BSD/GPL"); | 76 | MODULE_LICENSE("Dual BSD/GPL"); |
@@ -90,6 +90,7 @@ struct bcm_op { | |||
90 | unsigned long frames_abs, frames_filtered; | 90 | unsigned long frames_abs, frames_filtered; |
91 | struct timeval ival1, ival2; | 91 | struct timeval ival1, ival2; |
92 | struct hrtimer timer, thrtimer; | 92 | struct hrtimer timer, thrtimer; |
93 | struct tasklet_struct tsklet, thrtsklet; | ||
93 | ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg; | 94 | ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg; |
94 | int rx_ifindex; | 95 | int rx_ifindex; |
95 | int count; | 96 | int count; |
@@ -341,6 +342,23 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head, | |||
341 | } | 342 | } |
342 | } | 343 | } |
343 | 344 | ||
345 | static void bcm_tx_timeout_tsklet(unsigned long data) | ||
346 | { | ||
347 | struct bcm_op *op = (struct bcm_op *)data; | ||
348 | struct bcm_msg_head msg_head; | ||
349 | |||
350 | /* create notification to user */ | ||
351 | msg_head.opcode = TX_EXPIRED; | ||
352 | msg_head.flags = op->flags; | ||
353 | msg_head.count = op->count; | ||
354 | msg_head.ival1 = op->ival1; | ||
355 | msg_head.ival2 = op->ival2; | ||
356 | msg_head.can_id = op->can_id; | ||
357 | msg_head.nframes = 0; | ||
358 | |||
359 | bcm_send_to_user(op, &msg_head, NULL, 0); | ||
360 | } | ||
361 | |||
344 | /* | 362 | /* |
345 | * bcm_tx_timeout_handler - performes cyclic CAN frame transmissions | 363 | * bcm_tx_timeout_handler - performes cyclic CAN frame transmissions |
346 | */ | 364 | */ |
@@ -352,20 +370,8 @@ static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer) | |||
352 | if (op->kt_ival1.tv64 && (op->count > 0)) { | 370 | if (op->kt_ival1.tv64 && (op->count > 0)) { |
353 | 371 | ||
354 | op->count--; | 372 | op->count--; |
355 | if (!op->count && (op->flags & TX_COUNTEVT)) { | 373 | if (!op->count && (op->flags & TX_COUNTEVT)) |
356 | struct bcm_msg_head msg_head; | 374 | tasklet_schedule(&op->tsklet); |
357 | |||
358 | /* create notification to user */ | ||
359 | msg_head.opcode = TX_EXPIRED; | ||
360 | msg_head.flags = op->flags; | ||
361 | msg_head.count = op->count; | ||
362 | msg_head.ival1 = op->ival1; | ||
363 | msg_head.ival2 = op->ival2; | ||
364 | msg_head.can_id = op->can_id; | ||
365 | msg_head.nframes = 0; | ||
366 | |||
367 | bcm_send_to_user(op, &msg_head, NULL, 0); | ||
368 | } | ||
369 | } | 375 | } |
370 | 376 | ||
371 | if (op->kt_ival1.tv64 && (op->count > 0)) { | 377 | if (op->kt_ival1.tv64 && (op->count > 0)) { |
@@ -402,6 +408,9 @@ static void bcm_rx_changed(struct bcm_op *op, struct can_frame *data) | |||
402 | if (op->frames_filtered > ULONG_MAX/100) | 408 | if (op->frames_filtered > ULONG_MAX/100) |
403 | op->frames_filtered = op->frames_abs = 0; | 409 | op->frames_filtered = op->frames_abs = 0; |
404 | 410 | ||
411 | /* this element is not throttled anymore */ | ||
412 | data->can_dlc &= (BCM_CAN_DLC_MASK|RX_RECV); | ||
413 | |||
405 | head.opcode = RX_CHANGED; | 414 | head.opcode = RX_CHANGED; |
406 | head.flags = op->flags; | 415 | head.flags = op->flags; |
407 | head.count = op->count; | 416 | head.count = op->count; |
@@ -420,37 +429,32 @@ static void bcm_rx_changed(struct bcm_op *op, struct can_frame *data) | |||
420 | */ | 429 | */ |
421 | static void bcm_rx_update_and_send(struct bcm_op *op, | 430 | static void bcm_rx_update_and_send(struct bcm_op *op, |
422 | struct can_frame *lastdata, | 431 | struct can_frame *lastdata, |
423 | struct can_frame *rxdata) | 432 | const struct can_frame *rxdata) |
424 | { | 433 | { |
425 | memcpy(lastdata, rxdata, CFSIZ); | 434 | memcpy(lastdata, rxdata, CFSIZ); |
426 | 435 | ||
427 | /* mark as used */ | 436 | /* mark as used and throttled by default */ |
428 | lastdata->can_dlc |= RX_RECV; | 437 | lastdata->can_dlc |= (RX_RECV|RX_THR); |
429 | 438 | ||
430 | /* throtteling mode inactive OR data update already on the run ? */ | 439 | /* throtteling mode inactive ? */ |
431 | if (!op->kt_ival2.tv64 || hrtimer_callback_running(&op->thrtimer)) { | 440 | if (!op->kt_ival2.tv64) { |
432 | /* send RX_CHANGED to the user immediately */ | 441 | /* send RX_CHANGED to the user immediately */ |
433 | bcm_rx_changed(op, rxdata); | 442 | bcm_rx_changed(op, lastdata); |
434 | return; | 443 | return; |
435 | } | 444 | } |
436 | 445 | ||
437 | if (hrtimer_active(&op->thrtimer)) { | 446 | /* with active throttling timer we are just done here */ |
438 | /* mark as 'throttled' */ | 447 | if (hrtimer_active(&op->thrtimer)) |
439 | lastdata->can_dlc |= RX_THR; | ||
440 | return; | 448 | return; |
441 | } | ||
442 | 449 | ||
443 | if (!op->kt_lastmsg.tv64) { | 450 | /* first receiption with enabled throttling mode */ |
444 | /* send first RX_CHANGED to the user immediately */ | 451 | if (!op->kt_lastmsg.tv64) |
445 | bcm_rx_changed(op, rxdata); | 452 | goto rx_changed_settime; |
446 | op->kt_lastmsg = ktime_get(); | ||
447 | return; | ||
448 | } | ||
449 | 453 | ||
454 | /* got a second frame inside a potential throttle period? */ | ||
450 | if (ktime_us_delta(ktime_get(), op->kt_lastmsg) < | 455 | if (ktime_us_delta(ktime_get(), op->kt_lastmsg) < |
451 | ktime_to_us(op->kt_ival2)) { | 456 | ktime_to_us(op->kt_ival2)) { |
452 | /* mark as 'throttled' and start timer */ | 457 | /* do not send the saved data - only start throttle timer */ |
453 | lastdata->can_dlc |= RX_THR; | ||
454 | hrtimer_start(&op->thrtimer, | 458 | hrtimer_start(&op->thrtimer, |
455 | ktime_add(op->kt_lastmsg, op->kt_ival2), | 459 | ktime_add(op->kt_lastmsg, op->kt_ival2), |
456 | HRTIMER_MODE_ABS); | 460 | HRTIMER_MODE_ABS); |
@@ -458,7 +462,8 @@ static void bcm_rx_update_and_send(struct bcm_op *op, | |||
458 | } | 462 | } |
459 | 463 | ||
460 | /* the gap was that big, that throttling was not needed here */ | 464 | /* the gap was that big, that throttling was not needed here */ |
461 | bcm_rx_changed(op, rxdata); | 465 | rx_changed_settime: |
466 | bcm_rx_changed(op, lastdata); | ||
462 | op->kt_lastmsg = ktime_get(); | 467 | op->kt_lastmsg = ktime_get(); |
463 | } | 468 | } |
464 | 469 | ||
@@ -467,7 +472,7 @@ static void bcm_rx_update_and_send(struct bcm_op *op, | |||
467 | * received data stored in op->last_frames[] | 472 | * received data stored in op->last_frames[] |
468 | */ | 473 | */ |
469 | static void bcm_rx_cmp_to_index(struct bcm_op *op, int index, | 474 | static void bcm_rx_cmp_to_index(struct bcm_op *op, int index, |
470 | struct can_frame *rxdata) | 475 | const struct can_frame *rxdata) |
471 | { | 476 | { |
472 | /* | 477 | /* |
473 | * no one uses the MSBs of can_dlc for comparation, | 478 | * no one uses the MSBs of can_dlc for comparation, |
@@ -511,14 +516,12 @@ static void bcm_rx_starttimer(struct bcm_op *op) | |||
511 | hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL); | 516 | hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL); |
512 | } | 517 | } |
513 | 518 | ||
514 | /* | 519 | static void bcm_rx_timeout_tsklet(unsigned long data) |
515 | * bcm_rx_timeout_handler - when the (cyclic) CAN frame receiption timed out | ||
516 | */ | ||
517 | static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer) | ||
518 | { | 520 | { |
519 | struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer); | 521 | struct bcm_op *op = (struct bcm_op *)data; |
520 | struct bcm_msg_head msg_head; | 522 | struct bcm_msg_head msg_head; |
521 | 523 | ||
524 | /* create notification to user */ | ||
522 | msg_head.opcode = RX_TIMEOUT; | 525 | msg_head.opcode = RX_TIMEOUT; |
523 | msg_head.flags = op->flags; | 526 | msg_head.flags = op->flags; |
524 | msg_head.count = op->count; | 527 | msg_head.count = op->count; |
@@ -528,6 +531,17 @@ static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer) | |||
528 | msg_head.nframes = 0; | 531 | msg_head.nframes = 0; |
529 | 532 | ||
530 | bcm_send_to_user(op, &msg_head, NULL, 0); | 533 | bcm_send_to_user(op, &msg_head, NULL, 0); |
534 | } | ||
535 | |||
536 | /* | ||
537 | * bcm_rx_timeout_handler - when the (cyclic) CAN frame receiption timed out | ||
538 | */ | ||
539 | static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer) | ||
540 | { | ||
541 | struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer); | ||
542 | |||
543 | /* schedule before NET_RX_SOFTIRQ */ | ||
544 | tasklet_hi_schedule(&op->tsklet); | ||
531 | 545 | ||
532 | /* no restart of the timer is done here! */ | 546 | /* no restart of the timer is done here! */ |
533 | 547 | ||
@@ -541,9 +555,25 @@ static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer) | |||
541 | } | 555 | } |
542 | 556 | ||
543 | /* | 557 | /* |
558 | * bcm_rx_do_flush - helper for bcm_rx_thr_flush | ||
559 | */ | ||
560 | static inline int bcm_rx_do_flush(struct bcm_op *op, int update, int index) | ||
561 | { | ||
562 | if ((op->last_frames) && (op->last_frames[index].can_dlc & RX_THR)) { | ||
563 | if (update) | ||
564 | bcm_rx_changed(op, &op->last_frames[index]); | ||
565 | return 1; | ||
566 | } | ||
567 | return 0; | ||
568 | } | ||
569 | |||
570 | /* | ||
544 | * bcm_rx_thr_flush - Check for throttled data and send it to the userspace | 571 | * bcm_rx_thr_flush - Check for throttled data and send it to the userspace |
572 | * | ||
573 | * update == 0 : just check if throttled data is available (any irq context) | ||
574 | * update == 1 : check and send throttled data to userspace (soft_irq context) | ||
545 | */ | 575 | */ |
546 | static int bcm_rx_thr_flush(struct bcm_op *op) | 576 | static int bcm_rx_thr_flush(struct bcm_op *op, int update) |
547 | { | 577 | { |
548 | int updated = 0; | 578 | int updated = 0; |
549 | 579 | ||
@@ -551,27 +581,25 @@ static int bcm_rx_thr_flush(struct bcm_op *op) | |||
551 | int i; | 581 | int i; |
552 | 582 | ||
553 | /* for MUX filter we start at index 1 */ | 583 | /* for MUX filter we start at index 1 */ |
554 | for (i = 1; i < op->nframes; i++) { | 584 | for (i = 1; i < op->nframes; i++) |
555 | if ((op->last_frames) && | 585 | updated += bcm_rx_do_flush(op, update, i); |
556 | (op->last_frames[i].can_dlc & RX_THR)) { | ||
557 | op->last_frames[i].can_dlc &= ~RX_THR; | ||
558 | bcm_rx_changed(op, &op->last_frames[i]); | ||
559 | updated++; | ||
560 | } | ||
561 | } | ||
562 | 586 | ||
563 | } else { | 587 | } else { |
564 | /* for RX_FILTER_ID and simple filter */ | 588 | /* for RX_FILTER_ID and simple filter */ |
565 | if (op->last_frames && (op->last_frames[0].can_dlc & RX_THR)) { | 589 | updated += bcm_rx_do_flush(op, update, 0); |
566 | op->last_frames[0].can_dlc &= ~RX_THR; | ||
567 | bcm_rx_changed(op, &op->last_frames[0]); | ||
568 | updated++; | ||
569 | } | ||
570 | } | 590 | } |
571 | 591 | ||
572 | return updated; | 592 | return updated; |
573 | } | 593 | } |
574 | 594 | ||
595 | static void bcm_rx_thr_tsklet(unsigned long data) | ||
596 | { | ||
597 | struct bcm_op *op = (struct bcm_op *)data; | ||
598 | |||
599 | /* push the changed data to the userspace */ | ||
600 | bcm_rx_thr_flush(op, 1); | ||
601 | } | ||
602 | |||
575 | /* | 603 | /* |
576 | * bcm_rx_thr_handler - the time for blocked content updates is over now: | 604 | * bcm_rx_thr_handler - the time for blocked content updates is over now: |
577 | * Check for throttled data and send it to the userspace | 605 | * Check for throttled data and send it to the userspace |
@@ -580,7 +608,9 @@ static enum hrtimer_restart bcm_rx_thr_handler(struct hrtimer *hrtimer) | |||
580 | { | 608 | { |
581 | struct bcm_op *op = container_of(hrtimer, struct bcm_op, thrtimer); | 609 | struct bcm_op *op = container_of(hrtimer, struct bcm_op, thrtimer); |
582 | 610 | ||
583 | if (bcm_rx_thr_flush(op)) { | 611 | tasklet_schedule(&op->thrtsklet); |
612 | |||
613 | if (bcm_rx_thr_flush(op, 0)) { | ||
584 | hrtimer_forward(hrtimer, ktime_get(), op->kt_ival2); | 614 | hrtimer_forward(hrtimer, ktime_get(), op->kt_ival2); |
585 | return HRTIMER_RESTART; | 615 | return HRTIMER_RESTART; |
586 | } else { | 616 | } else { |
@@ -596,29 +626,21 @@ static enum hrtimer_restart bcm_rx_thr_handler(struct hrtimer *hrtimer) | |||
596 | static void bcm_rx_handler(struct sk_buff *skb, void *data) | 626 | static void bcm_rx_handler(struct sk_buff *skb, void *data) |
597 | { | 627 | { |
598 | struct bcm_op *op = (struct bcm_op *)data; | 628 | struct bcm_op *op = (struct bcm_op *)data; |
599 | struct can_frame rxframe; | 629 | const struct can_frame *rxframe = (struct can_frame *)skb->data; |
600 | int i; | 630 | int i; |
601 | 631 | ||
602 | /* disable timeout */ | 632 | /* disable timeout */ |
603 | hrtimer_cancel(&op->timer); | 633 | hrtimer_cancel(&op->timer); |
604 | 634 | ||
605 | if (skb->len == sizeof(rxframe)) { | 635 | if (op->can_id != rxframe->can_id) |
606 | memcpy(&rxframe, skb->data, sizeof(rxframe)); | ||
607 | /* save rx timestamp */ | ||
608 | op->rx_stamp = skb->tstamp; | ||
609 | /* save originator for recvfrom() */ | ||
610 | op->rx_ifindex = skb->dev->ifindex; | ||
611 | /* update statistics */ | ||
612 | op->frames_abs++; | ||
613 | kfree_skb(skb); | ||
614 | |||
615 | } else { | ||
616 | kfree_skb(skb); | ||
617 | return; | 636 | return; |
618 | } | ||
619 | 637 | ||
620 | if (op->can_id != rxframe.can_id) | 638 | /* save rx timestamp */ |
621 | return; | 639 | op->rx_stamp = skb->tstamp; |
640 | /* save originator for recvfrom() */ | ||
641 | op->rx_ifindex = skb->dev->ifindex; | ||
642 | /* update statistics */ | ||
643 | op->frames_abs++; | ||
622 | 644 | ||
623 | if (op->flags & RX_RTR_FRAME) { | 645 | if (op->flags & RX_RTR_FRAME) { |
624 | /* send reply for RTR-request (placed in op->frames[0]) */ | 646 | /* send reply for RTR-request (placed in op->frames[0]) */ |
@@ -628,16 +650,14 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data) | |||
628 | 650 | ||
629 | if (op->flags & RX_FILTER_ID) { | 651 | if (op->flags & RX_FILTER_ID) { |
630 | /* the easiest case */ | 652 | /* the easiest case */ |
631 | bcm_rx_update_and_send(op, &op->last_frames[0], &rxframe); | 653 | bcm_rx_update_and_send(op, &op->last_frames[0], rxframe); |
632 | bcm_rx_starttimer(op); | 654 | goto rx_starttimer; |
633 | return; | ||
634 | } | 655 | } |
635 | 656 | ||
636 | if (op->nframes == 1) { | 657 | if (op->nframes == 1) { |
637 | /* simple compare with index 0 */ | 658 | /* simple compare with index 0 */ |
638 | bcm_rx_cmp_to_index(op, 0, &rxframe); | 659 | bcm_rx_cmp_to_index(op, 0, rxframe); |
639 | bcm_rx_starttimer(op); | 660 | goto rx_starttimer; |
640 | return; | ||
641 | } | 661 | } |
642 | 662 | ||
643 | if (op->nframes > 1) { | 663 | if (op->nframes > 1) { |
@@ -649,15 +669,17 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data) | |||
649 | */ | 669 | */ |
650 | 670 | ||
651 | for (i = 1; i < op->nframes; i++) { | 671 | for (i = 1; i < op->nframes; i++) { |
652 | if ((GET_U64(&op->frames[0]) & GET_U64(&rxframe)) == | 672 | if ((GET_U64(&op->frames[0]) & GET_U64(rxframe)) == |
653 | (GET_U64(&op->frames[0]) & | 673 | (GET_U64(&op->frames[0]) & |
654 | GET_U64(&op->frames[i]))) { | 674 | GET_U64(&op->frames[i]))) { |
655 | bcm_rx_cmp_to_index(op, i, &rxframe); | 675 | bcm_rx_cmp_to_index(op, i, rxframe); |
656 | break; | 676 | break; |
657 | } | 677 | } |
658 | } | 678 | } |
659 | bcm_rx_starttimer(op); | ||
660 | } | 679 | } |
680 | |||
681 | rx_starttimer: | ||
682 | bcm_rx_starttimer(op); | ||
661 | } | 683 | } |
662 | 684 | ||
663 | /* | 685 | /* |
@@ -681,6 +703,12 @@ static void bcm_remove_op(struct bcm_op *op) | |||
681 | hrtimer_cancel(&op->timer); | 703 | hrtimer_cancel(&op->timer); |
682 | hrtimer_cancel(&op->thrtimer); | 704 | hrtimer_cancel(&op->thrtimer); |
683 | 705 | ||
706 | if (op->tsklet.func) | ||
707 | tasklet_kill(&op->tsklet); | ||
708 | |||
709 | if (op->thrtsklet.func) | ||
710 | tasklet_kill(&op->thrtsklet); | ||
711 | |||
684 | if ((op->frames) && (op->frames != &op->sframe)) | 712 | if ((op->frames) && (op->frames != &op->sframe)) |
685 | kfree(op->frames); | 713 | kfree(op->frames); |
686 | 714 | ||
@@ -891,6 +919,10 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, | |||
891 | hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | 919 | hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
892 | op->timer.function = bcm_tx_timeout_handler; | 920 | op->timer.function = bcm_tx_timeout_handler; |
893 | 921 | ||
922 | /* initialize tasklet for tx countevent notification */ | ||
923 | tasklet_init(&op->tsklet, bcm_tx_timeout_tsklet, | ||
924 | (unsigned long) op); | ||
925 | |||
894 | /* currently unused in tx_ops */ | 926 | /* currently unused in tx_ops */ |
895 | hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | 927 | hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
896 | 928 | ||
@@ -1054,9 +1086,17 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, | |||
1054 | hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | 1086 | hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
1055 | op->timer.function = bcm_rx_timeout_handler; | 1087 | op->timer.function = bcm_rx_timeout_handler; |
1056 | 1088 | ||
1089 | /* initialize tasklet for rx timeout notification */ | ||
1090 | tasklet_init(&op->tsklet, bcm_rx_timeout_tsklet, | ||
1091 | (unsigned long) op); | ||
1092 | |||
1057 | hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | 1093 | hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
1058 | op->thrtimer.function = bcm_rx_thr_handler; | 1094 | op->thrtimer.function = bcm_rx_thr_handler; |
1059 | 1095 | ||
1096 | /* initialize tasklet for rx throttle handling */ | ||
1097 | tasklet_init(&op->thrtsklet, bcm_rx_thr_tsklet, | ||
1098 | (unsigned long) op); | ||
1099 | |||
1060 | /* add this bcm_op to the list of the rx_ops */ | 1100 | /* add this bcm_op to the list of the rx_ops */ |
1061 | list_add(&op->list, &bo->rx_ops); | 1101 | list_add(&op->list, &bo->rx_ops); |
1062 | 1102 | ||
@@ -1102,7 +1142,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, | |||
1102 | */ | 1142 | */ |
1103 | op->kt_lastmsg = ktime_set(0, 0); | 1143 | op->kt_lastmsg = ktime_set(0, 0); |
1104 | hrtimer_cancel(&op->thrtimer); | 1144 | hrtimer_cancel(&op->thrtimer); |
1105 | bcm_rx_thr_flush(op); | 1145 | bcm_rx_thr_flush(op, 1); |
1106 | } | 1146 | } |
1107 | 1147 | ||
1108 | if ((op->flags & STARTTIMER) && op->kt_ival1.tv64) | 1148 | if ((op->flags & STARTTIMER) && op->kt_ival1.tv64) |
diff --git a/net/can/raw.c b/net/can/raw.c
index 27aab63df467..0703cba4bf9f 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -99,13 +99,14 @@ static void raw_rcv(struct sk_buff *skb, void *data) | |||
99 | struct raw_sock *ro = raw_sk(sk); | 99 | struct raw_sock *ro = raw_sk(sk); |
100 | struct sockaddr_can *addr; | 100 | struct sockaddr_can *addr; |
101 | 101 | ||
102 | if (!ro->recv_own_msgs) { | 102 | /* check the received tx sock reference */ |
103 | /* check the received tx sock reference */ | 103 | if (!ro->recv_own_msgs && skb->sk == sk) |
104 | if (skb->sk == sk) { | 104 | return; |
105 | kfree_skb(skb); | 105 | |
106 | return; | 106 | /* clone the given skb to be able to enqueue it into the rcv queue */ |
107 | } | 107 | skb = skb_clone(skb, GFP_ATOMIC); |
108 | } | 108 | if (!skb) |
109 | return; | ||
109 | 110 | ||
110 | /* | 111 | /* |
111 | * Put the datagram to the queue so that raw_recvmsg() can | 112 | * Put the datagram to the queue so that raw_recvmsg() can |
diff --git a/net/core/dev.c b/net/core/dev.c
index ac55d84d6255..5f736f1ceeae 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -132,6 +132,9 @@ | |||
132 | /* Instead of increasing this, you should create a hash table. */ | 132 | /* Instead of increasing this, you should create a hash table. */ |
133 | #define MAX_GRO_SKBS 8 | 133 | #define MAX_GRO_SKBS 8 |
134 | 134 | ||
135 | /* This should be increased if a protocol with a bigger head is added. */ | ||
136 | #define GRO_MAX_HEAD (MAX_HEADER + 128) | ||
137 | |||
135 | /* | 138 | /* |
136 | * The list of packet types we will receive (as opposed to discard) | 139 | * The list of packet types we will receive (as opposed to discard) |
137 | * and the routines to invoke. | 140 | * and the routines to invoke. |
@@ -2326,7 +2329,7 @@ static int napi_gro_complete(struct sk_buff *skb) | |||
2326 | struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK]; | 2329 | struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK]; |
2327 | int err = -ENOENT; | 2330 | int err = -ENOENT; |
2328 | 2331 | ||
2329 | if (!skb_shinfo(skb)->frag_list) | 2332 | if (NAPI_GRO_CB(skb)->count == 1) |
2330 | goto out; | 2333 | goto out; |
2331 | 2334 | ||
2332 | rcu_read_lock(); | 2335 | rcu_read_lock(); |
@@ -2346,6 +2349,7 @@ static int napi_gro_complete(struct sk_buff *skb) | |||
2346 | } | 2349 | } |
2347 | 2350 | ||
2348 | out: | 2351 | out: |
2352 | skb_shinfo(skb)->gso_size = 0; | ||
2349 | __skb_push(skb, -skb_network_offset(skb)); | 2353 | __skb_push(skb, -skb_network_offset(skb)); |
2350 | return netif_receive_skb(skb); | 2354 | return netif_receive_skb(skb); |
2351 | } | 2355 | } |
@@ -2364,7 +2368,7 @@ void napi_gro_flush(struct napi_struct *napi) | |||
2364 | } | 2368 | } |
2365 | EXPORT_SYMBOL(napi_gro_flush); | 2369 | EXPORT_SYMBOL(napi_gro_flush); |
2366 | 2370 | ||
2367 | int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) | 2371 | int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) |
2368 | { | 2372 | { |
2369 | struct sk_buff **pp = NULL; | 2373 | struct sk_buff **pp = NULL; |
2370 | struct packet_type *ptype; | 2374 | struct packet_type *ptype; |
@@ -2373,6 +2377,7 @@ int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) | |||
2373 | int count = 0; | 2377 | int count = 0; |
2374 | int same_flow; | 2378 | int same_flow; |
2375 | int mac_len; | 2379 | int mac_len; |
2380 | int free; | ||
2376 | 2381 | ||
2377 | if (!(skb->dev->features & NETIF_F_GRO)) | 2382 | if (!(skb->dev->features & NETIF_F_GRO)) |
2378 | goto normal; | 2383 | goto normal; |
@@ -2389,14 +2394,18 @@ int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) | |||
2389 | skb->mac_len = mac_len; | 2394 | skb->mac_len = mac_len; |
2390 | NAPI_GRO_CB(skb)->same_flow = 0; | 2395 | NAPI_GRO_CB(skb)->same_flow = 0; |
2391 | NAPI_GRO_CB(skb)->flush = 0; | 2396 | NAPI_GRO_CB(skb)->flush = 0; |
2397 | NAPI_GRO_CB(skb)->free = 0; | ||
2392 | 2398 | ||
2393 | for (p = napi->gro_list; p; p = p->next) { | 2399 | for (p = napi->gro_list; p; p = p->next) { |
2394 | count++; | 2400 | count++; |
2395 | NAPI_GRO_CB(p)->same_flow = | 2401 | |
2396 | p->mac_len == mac_len && | 2402 | if (!NAPI_GRO_CB(p)->same_flow) |
2397 | !memcmp(skb_mac_header(p), skb_mac_header(skb), | 2403 | continue; |
2398 | mac_len); | 2404 | |
2399 | NAPI_GRO_CB(p)->flush = 0; | 2405 | if (p->mac_len != mac_len || |
2406 | memcmp(skb_mac_header(p), skb_mac_header(skb), | ||
2407 | mac_len)) | ||
2408 | NAPI_GRO_CB(p)->same_flow = 0; | ||
2400 | } | 2409 | } |
2401 | 2410 | ||
2402 | pp = ptype->gro_receive(&napi->gro_list, skb); | 2411 | pp = ptype->gro_receive(&napi->gro_list, skb); |
@@ -2408,6 +2417,7 @@ int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) | |||
2408 | goto normal; | 2417 | goto normal; |
2409 | 2418 | ||
2410 | same_flow = NAPI_GRO_CB(skb)->same_flow; | 2419 | same_flow = NAPI_GRO_CB(skb)->same_flow; |
2420 | free = NAPI_GRO_CB(skb)->free; | ||
2411 | 2421 | ||
2412 | if (pp) { | 2422 | if (pp) { |
2413 | struct sk_buff *nskb = *pp; | 2423 | struct sk_buff *nskb = *pp; |
@@ -2427,17 +2437,124 @@ int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) | |||
2427 | } | 2437 | } |
2428 | 2438 | ||
2429 | NAPI_GRO_CB(skb)->count = 1; | 2439 | NAPI_GRO_CB(skb)->count = 1; |
2440 | skb_shinfo(skb)->gso_size = skb->len; | ||
2430 | skb->next = napi->gro_list; | 2441 | skb->next = napi->gro_list; |
2431 | napi->gro_list = skb; | 2442 | napi->gro_list = skb; |
2432 | 2443 | ||
2433 | ok: | 2444 | ok: |
2434 | return NET_RX_SUCCESS; | 2445 | return free; |
2435 | 2446 | ||
2436 | normal: | 2447 | normal: |
2437 | return netif_receive_skb(skb); | 2448 | return -1; |
2449 | } | ||
2450 | EXPORT_SYMBOL(dev_gro_receive); | ||
2451 | |||
2452 | static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) | ||
2453 | { | ||
2454 | struct sk_buff *p; | ||
2455 | |||
2456 | for (p = napi->gro_list; p; p = p->next) { | ||
2457 | NAPI_GRO_CB(p)->same_flow = 1; | ||
2458 | NAPI_GRO_CB(p)->flush = 0; | ||
2459 | } | ||
2460 | |||
2461 | return dev_gro_receive(napi, skb); | ||
2462 | } | ||
2463 | |||
2464 | int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) | ||
2465 | { | ||
2466 | switch (__napi_gro_receive(napi, skb)) { | ||
2467 | case -1: | ||
2468 | return netif_receive_skb(skb); | ||
2469 | |||
2470 | case 1: | ||
2471 | kfree_skb(skb); | ||
2472 | break; | ||
2473 | } | ||
2474 | |||
2475 | return NET_RX_SUCCESS; | ||
2438 | } | 2476 | } |
2439 | EXPORT_SYMBOL(napi_gro_receive); | 2477 | EXPORT_SYMBOL(napi_gro_receive); |
2440 | 2478 | ||
2479 | void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb) | ||
2480 | { | ||
2481 | skb_shinfo(skb)->nr_frags = 0; | ||
2482 | |||
2483 | skb->len -= skb->data_len; | ||
2484 | skb->truesize -= skb->data_len; | ||
2485 | skb->data_len = 0; | ||
2486 | |||
2487 | __skb_pull(skb, skb_headlen(skb)); | ||
2488 | skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb)); | ||
2489 | |||
2490 | napi->skb = skb; | ||
2491 | } | ||
2492 | EXPORT_SYMBOL(napi_reuse_skb); | ||
2493 | |||
2494 | struct sk_buff *napi_fraginfo_skb(struct napi_struct *napi, | ||
2495 | struct napi_gro_fraginfo *info) | ||
2496 | { | ||
2497 | struct net_device *dev = napi->dev; | ||
2498 | struct sk_buff *skb = napi->skb; | ||
2499 | |||
2500 | napi->skb = NULL; | ||
2501 | |||
2502 | if (!skb) { | ||
2503 | skb = netdev_alloc_skb(dev, GRO_MAX_HEAD + NET_IP_ALIGN); | ||
2504 | if (!skb) | ||
2505 | goto out; | ||
2506 | |||
2507 | skb_reserve(skb, NET_IP_ALIGN); | ||
2508 | } | ||
2509 | |||
2510 | BUG_ON(info->nr_frags > MAX_SKB_FRAGS); | ||
2511 | skb_shinfo(skb)->nr_frags = info->nr_frags; | ||
2512 | memcpy(skb_shinfo(skb)->frags, info->frags, sizeof(info->frags)); | ||
2513 | |||
2514 | skb->data_len = info->len; | ||
2515 | skb->len += info->len; | ||
2516 | skb->truesize += info->len; | ||
2517 | |||
2518 | if (!pskb_may_pull(skb, ETH_HLEN)) { | ||
2519 | napi_reuse_skb(napi, skb); | ||
2520 | goto out; | ||
2521 | } | ||
2522 | |||
2523 | skb->protocol = eth_type_trans(skb, dev); | ||
2524 | |||
2525 | skb->ip_summed = info->ip_summed; | ||
2526 | skb->csum = info->csum; | ||
2527 | |||
2528 | out: | ||
2529 | return skb; | ||
2530 | } | ||
2531 | EXPORT_SYMBOL(napi_fraginfo_skb); | ||
2532 | |||
2533 | int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info) | ||
2534 | { | ||
2535 | struct sk_buff *skb = napi_fraginfo_skb(napi, info); | ||
2536 | int err = NET_RX_DROP; | ||
2537 | |||
2538 | if (!skb) | ||
2539 | goto out; | ||
2540 | |||
2541 | err = NET_RX_SUCCESS; | ||
2542 | |||
2543 | switch (__napi_gro_receive(napi, skb)) { | ||
2544 | case -1: | ||
2545 | return netif_receive_skb(skb); | ||
2546 | |||
2547 | case 0: | ||
2548 | goto out; | ||
2549 | } | ||
2550 | |||
2551 | napi_reuse_skb(napi, skb); | ||
2552 | |||
2553 | out: | ||
2554 | return err; | ||
2555 | } | ||
2556 | EXPORT_SYMBOL(napi_gro_frags); | ||
2557 | |||
2441 | static int process_backlog(struct napi_struct *napi, int quota) | 2558 | static int process_backlog(struct napi_struct *napi, int quota) |
2442 | { | 2559 | { |
2443 | int work = 0; | 2560 | int work = 0; |
@@ -2516,11 +2633,12 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi, | |||
2516 | { | 2633 | { |
2517 | INIT_LIST_HEAD(&napi->poll_list); | 2634 | INIT_LIST_HEAD(&napi->poll_list); |
2518 | napi->gro_list = NULL; | 2635 | napi->gro_list = NULL; |
2636 | napi->skb = NULL; | ||
2519 | napi->poll = poll; | 2637 | napi->poll = poll; |
2520 | napi->weight = weight; | 2638 | napi->weight = weight; |
2521 | list_add(&napi->dev_list, &dev->napi_list); | 2639 | list_add(&napi->dev_list, &dev->napi_list); |
2522 | #ifdef CONFIG_NETPOLL | ||
2523 | napi->dev = dev; | 2640 | napi->dev = dev; |
2641 | #ifdef CONFIG_NETPOLL | ||
2524 | spin_lock_init(&napi->poll_lock); | 2642 | spin_lock_init(&napi->poll_lock); |
2525 | napi->poll_owner = -1; | 2643 | napi->poll_owner = -1; |
2526 | #endif | 2644 | #endif |
@@ -2533,6 +2651,7 @@ void netif_napi_del(struct napi_struct *napi) | |||
2533 | struct sk_buff *skb, *next; | 2651 | struct sk_buff *skb, *next; |
2534 | 2652 | ||
2535 | list_del_init(&napi->dev_list); | 2653 | list_del_init(&napi->dev_list); |
2654 | kfree(napi->skb); | ||
2536 | 2655 | ||
2537 | for (skb = napi->gro_list; skb; skb = next) { | 2656 | for (skb = napi->gro_list; skb; skb = next) { |
2538 | next = skb->next; | 2657 | next = skb->next; |
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index b8d0abb26433..5110b359c758 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2594,6 +2594,17 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) | |||
2594 | 2594 | ||
2595 | if (skb_shinfo(p)->frag_list) | 2595 | if (skb_shinfo(p)->frag_list) |
2596 | goto merge; | 2596 | goto merge; |
2597 | else if (!skb_headlen(p) && !skb_headlen(skb) && | ||
2598 | skb_shinfo(p)->nr_frags + skb_shinfo(skb)->nr_frags < | ||
2599 | MAX_SKB_FRAGS) { | ||
2600 | memcpy(skb_shinfo(p)->frags + skb_shinfo(p)->nr_frags, | ||
2601 | skb_shinfo(skb)->frags, | ||
2602 | skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t)); | ||
2603 | |||
2604 | skb_shinfo(p)->nr_frags += skb_shinfo(skb)->nr_frags; | ||
2605 | NAPI_GRO_CB(skb)->free = 1; | ||
2606 | goto done; | ||
2607 | } | ||
2597 | 2608 | ||
2598 | headroom = skb_headroom(p); | 2609 | headroom = skb_headroom(p); |
2599 | nskb = netdev_alloc_skb(p->dev, headroom); | 2610 | nskb = netdev_alloc_skb(p->dev, headroom); |
@@ -2613,6 +2624,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) | |||
2613 | 2624 | ||
2614 | *NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p); | 2625 | *NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p); |
2615 | skb_shinfo(nskb)->frag_list = p; | 2626 | skb_shinfo(nskb)->frag_list = p; |
2627 | skb_shinfo(nskb)->gso_size = skb_shinfo(p)->gso_size; | ||
2616 | skb_header_release(p); | 2628 | skb_header_release(p); |
2617 | nskb->prev = p; | 2629 | nskb->prev = p; |
2618 | 2630 | ||
@@ -2627,11 +2639,12 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) | |||
2627 | p = nskb; | 2639 | p = nskb; |
2628 | 2640 | ||
2629 | merge: | 2641 | merge: |
2630 | NAPI_GRO_CB(p)->count++; | ||
2631 | p->prev->next = skb; | 2642 | p->prev->next = skb; |
2632 | p->prev = skb; | 2643 | p->prev = skb; |
2633 | skb_header_release(skb); | 2644 | skb_header_release(skb); |
2634 | 2645 | ||
2646 | done: | ||
2647 | NAPI_GRO_CB(p)->count++; | ||
2635 | p->data_len += skb->len; | 2648 | p->data_len += skb->len; |
2636 | p->truesize += skb->len; | 2649 | p->truesize += skb->len; |
2637 | p->len += skb->len; | 2650 | p->len += skb->len; |
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index 5dbfe5fdc0d6..8379496de82b 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -191,7 +191,7 @@ static int dcbnl_reply(u8 value, u8 event, u8 cmd, u8 attr, u32 pid, | |||
191 | return 0; | 191 | return 0; |
192 | nlmsg_failure: | 192 | nlmsg_failure: |
193 | err: | 193 | err: |
194 | kfree(dcbnl_skb); | 194 | kfree_skb(dcbnl_skb); |
195 | return ret; | 195 | return ret; |
196 | } | 196 | } |
197 | 197 | ||
@@ -272,7 +272,7 @@ static int dcbnl_getpfccfg(struct net_device *netdev, struct nlattr **tb, | |||
272 | return 0; | 272 | return 0; |
273 | nlmsg_failure: | 273 | nlmsg_failure: |
274 | err: | 274 | err: |
275 | kfree(dcbnl_skb); | 275 | kfree_skb(dcbnl_skb); |
276 | err_out: | 276 | err_out: |
277 | return -EINVAL; | 277 | return -EINVAL; |
278 | } | 278 | } |
@@ -314,7 +314,7 @@ static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlattr **tb, | |||
314 | 314 | ||
315 | nlmsg_failure: | 315 | nlmsg_failure: |
316 | err: | 316 | err: |
317 | kfree(dcbnl_skb); | 317 | kfree_skb(dcbnl_skb); |
318 | err_out: | 318 | err_out: |
319 | return -EINVAL; | 319 | return -EINVAL; |
320 | } | 320 | } |
@@ -380,7 +380,7 @@ static int dcbnl_getcap(struct net_device *netdev, struct nlattr **tb, | |||
380 | return 0; | 380 | return 0; |
381 | nlmsg_failure: | 381 | nlmsg_failure: |
382 | err: | 382 | err: |
383 | kfree(dcbnl_skb); | 383 | kfree_skb(dcbnl_skb); |
384 | err_out: | 384 | err_out: |
385 | return -EINVAL; | 385 | return -EINVAL; |
386 | } | 386 | } |
@@ -458,7 +458,7 @@ static int dcbnl_getnumtcs(struct net_device *netdev, struct nlattr **tb, | |||
458 | return 0; | 458 | return 0; |
459 | nlmsg_failure: | 459 | nlmsg_failure: |
460 | err: | 460 | err: |
461 | kfree(dcbnl_skb); | 461 | kfree_skb(dcbnl_skb); |
462 | err_out: | 462 | err_out: |
463 | return ret; | 463 | return ret; |
464 | } | 464 | } |
@@ -687,7 +687,7 @@ err_pg: | |||
687 | nla_nest_cancel(dcbnl_skb, pg_nest); | 687 | nla_nest_cancel(dcbnl_skb, pg_nest); |
688 | nlmsg_failure: | 688 | nlmsg_failure: |
689 | err: | 689 | err: |
690 | kfree(dcbnl_skb); | 690 | kfree_skb(dcbnl_skb); |
691 | err_out: | 691 | err_out: |
692 | ret = -EINVAL; | 692 | ret = -EINVAL; |
693 | return ret; | 693 | return ret; |
@@ -949,7 +949,7 @@ err_bcn: | |||
949 | nla_nest_cancel(dcbnl_skb, bcn_nest); | 949 | nla_nest_cancel(dcbnl_skb, bcn_nest); |
950 | nlmsg_failure: | 950 | nlmsg_failure: |
951 | err: | 951 | err: |
952 | kfree(dcbnl_skb); | 952 | kfree_skb(dcbnl_skb); |
953 | err_out: | 953 | err_out: |
954 | ret = -EINVAL; | 954 | ret = -EINVAL; |
955 | return ret; | 955 | return ret; |
diff --git a/net/dccp/Kconfig b/net/dccp/Kconfig
index 7aa2a7acc7ec..ad6dffd9070e 100644
--- a/net/dccp/Kconfig
+++ b/net/dccp/Kconfig
@@ -1,7 +1,6 @@ | |||
1 | menuconfig IP_DCCP | 1 | menuconfig IP_DCCP |
2 | tristate "The DCCP Protocol (EXPERIMENTAL)" | 2 | tristate "The DCCP Protocol (EXPERIMENTAL)" |
3 | depends on INET && EXPERIMENTAL | 3 | depends on INET && EXPERIMENTAL |
4 | select IP_DCCP_CCID2 | ||
5 | ---help--- | 4 | ---help--- |
6 | Datagram Congestion Control Protocol (RFC 4340) | 5 | Datagram Congestion Control Protocol (RFC 4340) |
7 | 6 | ||
@@ -25,9 +24,6 @@ config INET_DCCP_DIAG | |||
25 | def_tristate y if (IP_DCCP = y && INET_DIAG = y) | 24 | def_tristate y if (IP_DCCP = y && INET_DIAG = y) |
26 | def_tristate m | 25 | def_tristate m |
27 | 26 | ||
28 | config IP_DCCP_ACKVEC | ||
29 | bool | ||
30 | |||
31 | source "net/dccp/ccids/Kconfig" | 27 | source "net/dccp/ccids/Kconfig" |
32 | 28 | ||
33 | menu "DCCP Kernel Hacking" | 29 | menu "DCCP Kernel Hacking" |
diff --git a/net/dccp/Makefile b/net/dccp/Makefile
index f4f8793aafff..2991efcc8dea 100644
--- a/net/dccp/Makefile
+++ b/net/dccp/Makefile
@@ -2,14 +2,23 @@ obj-$(CONFIG_IP_DCCP) += dccp.o dccp_ipv4.o | |||
2 | 2 | ||
3 | dccp-y := ccid.o feat.o input.o minisocks.o options.o output.o proto.o timer.o | 3 | dccp-y := ccid.o feat.o input.o minisocks.o options.o output.o proto.o timer.o |
4 | 4 | ||
5 | # | ||
6 | # CCID algorithms to be used by dccp.ko | ||
7 | # | ||
8 | # CCID-2 is default (RFC 4340, p. 77) and has Ack Vectors as dependency | ||
9 | dccp-y += ccids/ccid2.o ackvec.o | ||
10 | dccp-$(CONFIG_IP_DCCP_CCID3) += ccids/ccid3.o | ||
11 | dccp-$(CONFIG_IP_DCCP_TFRC_LIB) += ccids/lib/tfrc.o \ | ||
12 | ccids/lib/tfrc_equation.o \ | ||
13 | ccids/lib/packet_history.o \ | ||
14 | ccids/lib/loss_interval.o | ||
15 | |||
5 | dccp_ipv4-y := ipv4.o | 16 | dccp_ipv4-y := ipv4.o |
6 | 17 | ||
7 | # build dccp_ipv6 as module whenever either IPv6 or DCCP is a module | 18 | # build dccp_ipv6 as module whenever either IPv6 or DCCP is a module |
8 | obj-$(subst y,$(CONFIG_IP_DCCP),$(CONFIG_IPV6)) += dccp_ipv6.o | 19 | obj-$(subst y,$(CONFIG_IP_DCCP),$(CONFIG_IPV6)) += dccp_ipv6.o |
9 | dccp_ipv6-y := ipv6.o | 20 | dccp_ipv6-y := ipv6.o |
10 | 21 | ||
11 | dccp-$(CONFIG_IP_DCCP_ACKVEC) += ackvec.o | ||
12 | |||
13 | obj-$(CONFIG_INET_DCCP_DIAG) += dccp_diag.o | 22 | obj-$(CONFIG_INET_DCCP_DIAG) += dccp_diag.o |
14 | obj-$(CONFIG_NET_DCCPPROBE) += dccp_probe.o | 23 | obj-$(CONFIG_NET_DCCPPROBE) += dccp_probe.o |
15 | 24 | ||
@@ -17,5 +26,3 @@ dccp-$(CONFIG_SYSCTL) += sysctl.o | |||
17 | 26 | ||
18 | dccp_diag-y := diag.o | 27 | dccp_diag-y := diag.o |
19 | dccp_probe-y := probe.o | 28 | dccp_probe-y := probe.o |
20 | |||
21 | obj-y += ccids/ | ||
diff --git a/net/dccp/ackvec.h b/net/dccp/ackvec.h
index 4ccee030524e..45f95e55f873 100644
--- a/net/dccp/ackvec.h
+++ b/net/dccp/ackvec.h
@@ -84,7 +84,6 @@ struct dccp_ackvec_record { | |||
84 | struct sock; | 84 | struct sock; |
85 | struct sk_buff; | 85 | struct sk_buff; |
86 | 86 | ||
87 | #ifdef CONFIG_IP_DCCP_ACKVEC | ||
88 | extern int dccp_ackvec_init(void); | 87 | extern int dccp_ackvec_init(void); |
89 | extern void dccp_ackvec_exit(void); | 88 | extern void dccp_ackvec_exit(void); |
90 | 89 | ||
@@ -106,52 +105,4 @@ static inline int dccp_ackvec_pending(const struct dccp_ackvec *av) | |||
106 | { | 105 | { |
107 | return av->av_vec_len; | 106 | return av->av_vec_len; |
108 | } | 107 | } |
109 | #else /* CONFIG_IP_DCCP_ACKVEC */ | ||
110 | static inline int dccp_ackvec_init(void) | ||
111 | { | ||
112 | return 0; | ||
113 | } | ||
114 | |||
115 | static inline void dccp_ackvec_exit(void) | ||
116 | { | ||
117 | } | ||
118 | |||
119 | static inline struct dccp_ackvec *dccp_ackvec_alloc(const gfp_t priority) | ||
120 | { | ||
121 | return NULL; | ||
122 | } | ||
123 | |||
124 | static inline void dccp_ackvec_free(struct dccp_ackvec *av) | ||
125 | { | ||
126 | } | ||
127 | |||
128 | static inline int dccp_ackvec_add(struct dccp_ackvec *av, const struct sock *sk, | ||
129 | const u64 ackno, const u8 state) | ||
130 | { | ||
131 | return -1; | ||
132 | } | ||
133 | |||
134 | static inline void dccp_ackvec_check_rcv_ackno(struct dccp_ackvec *av, | ||
135 | struct sock *sk, const u64 ackno) | ||
136 | { | ||
137 | } | ||
138 | |||
139 | static inline int dccp_ackvec_parse(struct sock *sk, const struct sk_buff *skb, | ||
140 | const u64 *ackno, const u8 opt, | ||
141 | const u8 *value, const u8 len) | ||
142 | { | ||
143 | return -1; | ||
144 | } | ||
145 | |||
146 | static inline int dccp_insert_option_ackvec(const struct sock *sk, | ||
147 | const struct sk_buff *skb) | ||
148 | { | ||
149 | return -1; | ||
150 | } | ||
151 | |||
152 | static inline int dccp_ackvec_pending(const struct dccp_ackvec *av) | ||
153 | { | ||
154 | return 0; | ||
155 | } | ||
156 | #endif /* CONFIG_IP_DCCP_ACKVEC */ | ||
157 | #endif /* _ACKVEC_H */ | 108 | #endif /* _ACKVEC_H */ |
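
Note on the hunk above: the removed #else branch supplied do-nothing inline stubs for the case where CONFIG_IP_DCCP_ACKVEC was compiled out; since the net/dccp/Makefile hunk above now always links ackvec.o into dccp.ko, those fallbacks became dead code. For readers unfamiliar with the idiom being deleted, here is a generic sketch of it (illustrative only; CONFIG_HAVE_FEATURE and the feature_* names are stand-ins, not the DCCP header):

    /* stub_idiom.c -- "inline no-op stubs when a feature is compiled out" */
    #include <stdio.h>

    #ifdef CONFIG_HAVE_FEATURE
    int feature_init(void);                     /* real implementation elsewhere */
    void feature_exit(void);
    #else
    static inline int feature_init(void) { return 0; }   /* pretend success */
    static inline void feature_exit(void) { }            /* no-op */
    #endif

    int main(void)
    {
        if (feature_init())
            return 1;
        puts("core code runs the same either way");
        feature_exit();
        return 0;
    }
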
diff --git a/net/dccp/ccid.c b/net/dccp/ccid.c index bcc643f992ae..f3e9ba1cfd01 100644 --- a/net/dccp/ccid.c +++ b/net/dccp/ccid.c | |||
@@ -12,56 +12,70 @@ | |||
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include "ccid.h" | 14 | #include "ccid.h" |
15 | #include "ccids/lib/tfrc.h" | ||
15 | 16 | ||
16 | static u8 builtin_ccids[] = { | 17 | static struct ccid_operations *ccids[] = { |
17 | DCCPC_CCID2, /* CCID2 is supported by default */ | 18 | &ccid2_ops, |
18 | #if defined(CONFIG_IP_DCCP_CCID3) || defined(CONFIG_IP_DCCP_CCID3_MODULE) | 19 | #ifdef CONFIG_IP_DCCP_CCID3 |
19 | DCCPC_CCID3, | 20 | &ccid3_ops, |
20 | #endif | 21 | #endif |
21 | }; | 22 | }; |
22 | 23 | ||
23 | static struct ccid_operations *ccids[CCID_MAX]; | 24 | static struct ccid_operations *ccid_by_number(const u8 id) |
24 | #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT) | ||
25 | static atomic_t ccids_lockct = ATOMIC_INIT(0); | ||
26 | static DEFINE_SPINLOCK(ccids_lock); | ||
27 | |||
28 | /* | ||
29 | * The strategy is: modifications ccids vector are short, do not sleep and | ||
30 | * veeery rare, but read access should be free of any exclusive locks. | ||
31 | */ | ||
32 | static void ccids_write_lock(void) | ||
33 | { | 25 | { |
34 | spin_lock(&ccids_lock); | 26 | int i; |
35 | while (atomic_read(&ccids_lockct) != 0) { | 27 | |
36 | spin_unlock(&ccids_lock); | 28 | for (i = 0; i < ARRAY_SIZE(ccids); i++) |
37 | yield(); | 29 | if (ccids[i]->ccid_id == id) |
38 | spin_lock(&ccids_lock); | 30 | return ccids[i]; |
39 | } | 31 | return NULL; |
40 | } | 32 | } |
41 | 33 | ||
42 | static inline void ccids_write_unlock(void) | 34 | /* check that up to @array_len members in @ccid_array are supported */ |
35 | bool ccid_support_check(u8 const *ccid_array, u8 array_len) | ||
43 | { | 36 | { |
44 | spin_unlock(&ccids_lock); | 37 | while (array_len > 0) |
38 | if (ccid_by_number(ccid_array[--array_len]) == NULL) | ||
39 | return false; | ||
40 | return true; | ||
45 | } | 41 | } |
46 | 42 | ||
47 | static inline void ccids_read_lock(void) | 43 | /** |
44 | * ccid_get_builtin_ccids - Populate a list of built-in CCIDs | ||
45 | * @ccid_array: pointer to copy into | ||
46 | * @array_len: value to return length into | ||
47 | * This function allocates memory - caller must see that it is freed after use. | ||
48 | */ | ||
49 | int ccid_get_builtin_ccids(u8 **ccid_array, u8 *array_len) | ||
48 | { | 50 | { |
49 | atomic_inc(&ccids_lockct); | 51 | *ccid_array = kmalloc(ARRAY_SIZE(ccids), gfp_any()); |
50 | smp_mb__after_atomic_inc(); | 52 | if (*ccid_array == NULL) |
51 | spin_unlock_wait(&ccids_lock); | 53 | return -ENOBUFS; |
54 | |||
55 | for (*array_len = 0; *array_len < ARRAY_SIZE(ccids); *array_len += 1) | ||
56 | (*ccid_array)[*array_len] = ccids[*array_len]->ccid_id; | ||
57 | return 0; | ||
52 | } | 58 | } |
53 | 59 | ||
54 | static inline void ccids_read_unlock(void) | 60 | int ccid_getsockopt_builtin_ccids(struct sock *sk, int len, |
61 | char __user *optval, int __user *optlen) | ||
55 | { | 62 | { |
56 | atomic_dec(&ccids_lockct); | 63 | u8 *ccid_array, array_len; |
57 | } | 64 | int err = 0; |
58 | 65 | ||
59 | #else | 66 | if (len < ARRAY_SIZE(ccids)) |
60 | #define ccids_write_lock() do { } while(0) | 67 | return -EINVAL; |
61 | #define ccids_write_unlock() do { } while(0) | 68 | |
62 | #define ccids_read_lock() do { } while(0) | 69 | if (ccid_get_builtin_ccids(&ccid_array, &array_len)) |
63 | #define ccids_read_unlock() do { } while(0) | 70 | return -ENOBUFS; |
64 | #endif | 71 | |
72 | if (put_user(array_len, optlen) || | ||
73 | copy_to_user(optval, ccid_array, array_len)) | ||
74 | err = -EFAULT; | ||
75 | |||
76 | kfree(ccid_array); | ||
77 | return err; | ||
78 | } | ||
65 | 79 | ||
66 | static struct kmem_cache *ccid_kmem_cache_create(int obj_size, const char *fmt,...) | 80 | static struct kmem_cache *ccid_kmem_cache_create(int obj_size, const char *fmt,...) |
67 | { | 81 | { |
@@ -93,48 +107,7 @@ static void ccid_kmem_cache_destroy(struct kmem_cache *slab) | |||
93 | } | 107 | } |
94 | } | 108 | } |
95 | 109 | ||
96 | /* check that up to @array_len members in @ccid_array are supported */ | 110 | static int ccid_activate(struct ccid_operations *ccid_ops) |
97 | bool ccid_support_check(u8 const *ccid_array, u8 array_len) | ||
98 | { | ||
99 | u8 i, j, found; | ||
100 | |||
101 | for (i = 0, found = 0; i < array_len; i++, found = 0) { | ||
102 | for (j = 0; !found && j < ARRAY_SIZE(builtin_ccids); j++) | ||
103 | found = (ccid_array[i] == builtin_ccids[j]); | ||
104 | if (!found) | ||
105 | return false; | ||
106 | } | ||
107 | return true; | ||
108 | } | ||
109 | |||
110 | /** | ||
111 | * ccid_get_builtin_ccids - Provide copy of `builtin' CCID array | ||
112 | * @ccid_array: pointer to copy into | ||
113 | * @array_len: value to return length into | ||
114 | * This function allocates memory - caller must see that it is freed after use. | ||
115 | */ | ||
116 | int ccid_get_builtin_ccids(u8 **ccid_array, u8 *array_len) | ||
117 | { | ||
118 | *ccid_array = kmemdup(builtin_ccids, sizeof(builtin_ccids), gfp_any()); | ||
119 | if (*ccid_array == NULL) | ||
120 | return -ENOBUFS; | ||
121 | *array_len = ARRAY_SIZE(builtin_ccids); | ||
122 | return 0; | ||
123 | } | ||
124 | |||
125 | int ccid_getsockopt_builtin_ccids(struct sock *sk, int len, | ||
126 | char __user *optval, int __user *optlen) | ||
127 | { | ||
128 | if (len < sizeof(builtin_ccids)) | ||
129 | return -EINVAL; | ||
130 | |||
131 | if (put_user(sizeof(builtin_ccids), optlen) || | ||
132 | copy_to_user(optval, builtin_ccids, sizeof(builtin_ccids))) | ||
133 | return -EFAULT; | ||
134 | return 0; | ||
135 | } | ||
136 | |||
137 | int ccid_register(struct ccid_operations *ccid_ops) | ||
138 | { | 111 | { |
139 | int err = -ENOBUFS; | 112 | int err = -ENOBUFS; |
140 | 113 | ||
@@ -152,79 +125,40 @@ int ccid_register(struct ccid_operations *ccid_ops) | |||
152 | if (ccid_ops->ccid_hc_tx_slab == NULL) | 125 | if (ccid_ops->ccid_hc_tx_slab == NULL) |
153 | goto out_free_rx_slab; | 126 | goto out_free_rx_slab; |
154 | 127 | ||
155 | ccids_write_lock(); | 128 | pr_info("CCID: Activated CCID %d (%s)\n", |
156 | err = -EEXIST; | ||
157 | if (ccids[ccid_ops->ccid_id] == NULL) { | ||
158 | ccids[ccid_ops->ccid_id] = ccid_ops; | ||
159 | err = 0; | ||
160 | } | ||
161 | ccids_write_unlock(); | ||
162 | if (err != 0) | ||
163 | goto out_free_tx_slab; | ||
164 | |||
165 | pr_info("CCID: Registered CCID %d (%s)\n", | ||
166 | ccid_ops->ccid_id, ccid_ops->ccid_name); | 129 | ccid_ops->ccid_id, ccid_ops->ccid_name); |
130 | err = 0; | ||
167 | out: | 131 | out: |
168 | return err; | 132 | return err; |
169 | out_free_tx_slab: | ||
170 | ccid_kmem_cache_destroy(ccid_ops->ccid_hc_tx_slab); | ||
171 | ccid_ops->ccid_hc_tx_slab = NULL; | ||
172 | goto out; | ||
173 | out_free_rx_slab: | 133 | out_free_rx_slab: |
174 | ccid_kmem_cache_destroy(ccid_ops->ccid_hc_rx_slab); | 134 | ccid_kmem_cache_destroy(ccid_ops->ccid_hc_rx_slab); |
175 | ccid_ops->ccid_hc_rx_slab = NULL; | 135 | ccid_ops->ccid_hc_rx_slab = NULL; |
176 | goto out; | 136 | goto out; |
177 | } | 137 | } |
178 | 138 | ||
179 | EXPORT_SYMBOL_GPL(ccid_register); | 139 | static void ccid_deactivate(struct ccid_operations *ccid_ops) |
180 | |||
181 | int ccid_unregister(struct ccid_operations *ccid_ops) | ||
182 | { | 140 | { |
183 | ccids_write_lock(); | ||
184 | ccids[ccid_ops->ccid_id] = NULL; | ||
185 | ccids_write_unlock(); | ||
186 | |||
187 | ccid_kmem_cache_destroy(ccid_ops->ccid_hc_tx_slab); | 141 | ccid_kmem_cache_destroy(ccid_ops->ccid_hc_tx_slab); |
188 | ccid_ops->ccid_hc_tx_slab = NULL; | 142 | ccid_ops->ccid_hc_tx_slab = NULL; |
189 | ccid_kmem_cache_destroy(ccid_ops->ccid_hc_rx_slab); | 143 | ccid_kmem_cache_destroy(ccid_ops->ccid_hc_rx_slab); |
190 | ccid_ops->ccid_hc_rx_slab = NULL; | 144 | ccid_ops->ccid_hc_rx_slab = NULL; |
191 | 145 | ||
192 | pr_info("CCID: Unregistered CCID %d (%s)\n", | 146 | pr_info("CCID: Deactivated CCID %d (%s)\n", |
193 | ccid_ops->ccid_id, ccid_ops->ccid_name); | 147 | ccid_ops->ccid_id, ccid_ops->ccid_name); |
194 | return 0; | ||
195 | } | 148 | } |
196 | 149 | ||
197 | EXPORT_SYMBOL_GPL(ccid_unregister); | 150 | struct ccid *ccid_new(const u8 id, struct sock *sk, bool rx) |
198 | |||
199 | struct ccid *ccid_new(unsigned char id, struct sock *sk, int rx, gfp_t gfp) | ||
200 | { | 151 | { |
201 | struct ccid_operations *ccid_ops; | 152 | struct ccid_operations *ccid_ops = ccid_by_number(id); |
202 | struct ccid *ccid = NULL; | 153 | struct ccid *ccid = NULL; |
203 | 154 | ||
204 | ccids_read_lock(); | ||
205 | #ifdef CONFIG_MODULES | ||
206 | if (ccids[id] == NULL) { | ||
207 | /* We only try to load if in process context */ | ||
208 | ccids_read_unlock(); | ||
209 | if (gfp & GFP_ATOMIC) | ||
210 | goto out; | ||
211 | request_module("net-dccp-ccid-%d", id); | ||
212 | ccids_read_lock(); | ||
213 | } | ||
214 | #endif | ||
215 | ccid_ops = ccids[id]; | ||
216 | if (ccid_ops == NULL) | 155 | if (ccid_ops == NULL) |
217 | goto out_unlock; | 156 | goto out; |
218 | |||
219 | if (!try_module_get(ccid_ops->ccid_owner)) | ||
220 | goto out_unlock; | ||
221 | |||
222 | ccids_read_unlock(); | ||
223 | 157 | ||
224 | ccid = kmem_cache_alloc(rx ? ccid_ops->ccid_hc_rx_slab : | 158 | ccid = kmem_cache_alloc(rx ? ccid_ops->ccid_hc_rx_slab : |
225 | ccid_ops->ccid_hc_tx_slab, gfp); | 159 | ccid_ops->ccid_hc_tx_slab, gfp_any()); |
226 | if (ccid == NULL) | 160 | if (ccid == NULL) |
227 | goto out_module_put; | 161 | goto out; |
228 | ccid->ccid_ops = ccid_ops; | 162 | ccid->ccid_ops = ccid_ops; |
229 | if (rx) { | 163 | if (rx) { |
230 | memset(ccid + 1, 0, ccid_ops->ccid_hc_rx_obj_size); | 164 | memset(ccid + 1, 0, ccid_ops->ccid_hc_rx_obj_size); |
@@ -239,53 +173,57 @@ struct ccid *ccid_new(unsigned char id, struct sock *sk, int rx, gfp_t gfp) | |||
239 | } | 173 | } |
240 | out: | 174 | out: |
241 | return ccid; | 175 | return ccid; |
242 | out_unlock: | ||
243 | ccids_read_unlock(); | ||
244 | goto out; | ||
245 | out_free_ccid: | 176 | out_free_ccid: |
246 | kmem_cache_free(rx ? ccid_ops->ccid_hc_rx_slab : | 177 | kmem_cache_free(rx ? ccid_ops->ccid_hc_rx_slab : |
247 | ccid_ops->ccid_hc_tx_slab, ccid); | 178 | ccid_ops->ccid_hc_tx_slab, ccid); |
248 | ccid = NULL; | 179 | ccid = NULL; |
249 | out_module_put: | ||
250 | module_put(ccid_ops->ccid_owner); | ||
251 | goto out; | 180 | goto out; |
252 | } | 181 | } |
253 | 182 | ||
254 | EXPORT_SYMBOL_GPL(ccid_new); | 183 | void ccid_hc_rx_delete(struct ccid *ccid, struct sock *sk) |
255 | |||
256 | static void ccid_delete(struct ccid *ccid, struct sock *sk, int rx) | ||
257 | { | 184 | { |
258 | struct ccid_operations *ccid_ops; | 185 | if (ccid != NULL) { |
259 | 186 | if (ccid->ccid_ops->ccid_hc_rx_exit != NULL) | |
260 | if (ccid == NULL) | 187 | ccid->ccid_ops->ccid_hc_rx_exit(sk); |
261 | return; | 188 | kmem_cache_free(ccid->ccid_ops->ccid_hc_rx_slab, ccid); |
262 | |||
263 | ccid_ops = ccid->ccid_ops; | ||
264 | if (rx) { | ||
265 | if (ccid_ops->ccid_hc_rx_exit != NULL) | ||
266 | ccid_ops->ccid_hc_rx_exit(sk); | ||
267 | kmem_cache_free(ccid_ops->ccid_hc_rx_slab, ccid); | ||
268 | } else { | ||
269 | if (ccid_ops->ccid_hc_tx_exit != NULL) | ||
270 | ccid_ops->ccid_hc_tx_exit(sk); | ||
271 | kmem_cache_free(ccid_ops->ccid_hc_tx_slab, ccid); | ||
272 | } | 189 | } |
273 | ccids_read_lock(); | ||
274 | if (ccids[ccid_ops->ccid_id] != NULL) | ||
275 | module_put(ccid_ops->ccid_owner); | ||
276 | ccids_read_unlock(); | ||
277 | } | 190 | } |
278 | 191 | ||
279 | void ccid_hc_rx_delete(struct ccid *ccid, struct sock *sk) | 192 | void ccid_hc_tx_delete(struct ccid *ccid, struct sock *sk) |
280 | { | 193 | { |
281 | ccid_delete(ccid, sk, 1); | 194 | if (ccid != NULL) { |
195 | if (ccid->ccid_ops->ccid_hc_tx_exit != NULL) | ||
196 | ccid->ccid_ops->ccid_hc_tx_exit(sk); | ||
197 | kmem_cache_free(ccid->ccid_ops->ccid_hc_tx_slab, ccid); | ||
198 | } | ||
282 | } | 199 | } |
283 | 200 | ||
284 | EXPORT_SYMBOL_GPL(ccid_hc_rx_delete); | 201 | int __init ccid_initialize_builtins(void) |
285 | |||
286 | void ccid_hc_tx_delete(struct ccid *ccid, struct sock *sk) | ||
287 | { | 202 | { |
288 | ccid_delete(ccid, sk, 0); | 203 | int i, err = tfrc_lib_init(); |
204 | |||
205 | if (err) | ||
206 | return err; | ||
207 | |||
208 | for (i = 0; i < ARRAY_SIZE(ccids); i++) { | ||
209 | err = ccid_activate(ccids[i]); | ||
210 | if (err) | ||
211 | goto unwind_registrations; | ||
212 | } | ||
213 | return 0; | ||
214 | |||
215 | unwind_registrations: | ||
216 | while(--i >= 0) | ||
217 | ccid_deactivate(ccids[i]); | ||
218 | tfrc_lib_exit(); | ||
219 | return err; | ||
289 | } | 220 | } |
290 | 221 | ||
291 | EXPORT_SYMBOL_GPL(ccid_hc_tx_delete); | 222 | void ccid_cleanup_builtins(void) |
223 | { | ||
224 | int i; | ||
225 | |||
226 | for (i = 0; i < ARRAY_SIZE(ccids); i++) | ||
227 | ccid_deactivate(ccids[i]); | ||
228 | tfrc_lib_exit(); | ||
229 | } | ||
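
The rewritten ccid.c above drops the runtime ccid_register()/ccid_unregister() table, its ccids_lockct reader/writer scheme and the request_module() path, and instead keeps a fixed build-time array of built-in CCIDs that ccid_by_number() scans linearly; ccid_support_check() and ccid_get_builtin_ccids() become walks over that same array. A minimal userspace sketch of this lookup-table pattern, with stand-in types and IDs rather than the kernel structures:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct ccid_ops { unsigned char id; const char *name; };

    /* build-time table of supported congestion-control IDs */
    static struct ccid_ops ccid2 = { 2, "TCP-like" };
    static struct ccid_ops ccid3 = { 3, "TCP-Friendly Rate Control" };
    static struct ccid_ops *ccids[] = { &ccid2, &ccid3 };

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    /* linear scan by numeric id; NULL means "not built in" */
    static struct ccid_ops *ccid_by_number(unsigned char id)
    {
        size_t i;

        for (i = 0; i < ARRAY_SIZE(ccids); i++)
            if (ccids[i]->id == id)
                return ccids[i];
        return NULL;
    }

    /* every id in the array must be supported, mirroring ccid_support_check() */
    static bool support_check(const unsigned char *arr, unsigned char len)
    {
        while (len > 0)
            if (ccid_by_number(arr[--len]) == NULL)
                return false;
        return true;
    }

    int main(void)
    {
        unsigned char want[] = { 2, 3 };

        printf("supported: %d\n", support_check(want, 2));
        return 0;
    }

The linear scan costs nothing here: the table holds at most a couple of entries and is only consulted at connection setup.
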
diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h index 18f69423a708..facedd20b531 100644 --- a/net/dccp/ccid.h +++ b/net/dccp/ccid.h | |||
@@ -29,7 +29,6 @@ struct tcp_info; | |||
29 | * @ccid_id: numerical CCID ID (up to %CCID_MAX, cf. table 5 in RFC 4340, 10.) | 29 | * @ccid_id: numerical CCID ID (up to %CCID_MAX, cf. table 5 in RFC 4340, 10.) |
30 | * @ccid_ccmps: the CCMPS including network/transport headers (0 when disabled) | 30 | * @ccid_ccmps: the CCMPS including network/transport headers (0 when disabled) |
31 | * @ccid_name: alphabetical identifier string for @ccid_id | 31 | * @ccid_name: alphabetical identifier string for @ccid_id |
32 | * @ccid_owner: module which implements/owns this CCID | ||
33 | * @ccid_hc_{r,t}x_slab: memory pool for the receiver/sender half-connection | 32 | * @ccid_hc_{r,t}x_slab: memory pool for the receiver/sender half-connection |
34 | * @ccid_hc_{r,t}x_obj_size: size of the receiver/sender half-connection socket | 33 | * @ccid_hc_{r,t}x_obj_size: size of the receiver/sender half-connection socket |
35 | * | 34 | * |
@@ -48,7 +47,6 @@ struct ccid_operations { | |||
48 | unsigned char ccid_id; | 47 | unsigned char ccid_id; |
49 | __u32 ccid_ccmps; | 48 | __u32 ccid_ccmps; |
50 | const char *ccid_name; | 49 | const char *ccid_name; |
51 | struct module *ccid_owner; | ||
52 | struct kmem_cache *ccid_hc_rx_slab, | 50 | struct kmem_cache *ccid_hc_rx_slab, |
53 | *ccid_hc_tx_slab; | 51 | *ccid_hc_tx_slab; |
54 | __u32 ccid_hc_rx_obj_size, | 52 | __u32 ccid_hc_rx_obj_size, |
@@ -90,8 +88,13 @@ struct ccid_operations { | |||
90 | int __user *optlen); | 88 | int __user *optlen); |
91 | }; | 89 | }; |
92 | 90 | ||
93 | extern int ccid_register(struct ccid_operations *ccid_ops); | 91 | extern struct ccid_operations ccid2_ops; |
94 | extern int ccid_unregister(struct ccid_operations *ccid_ops); | 92 | #ifdef CONFIG_IP_DCCP_CCID3 |
93 | extern struct ccid_operations ccid3_ops; | ||
94 | #endif | ||
95 | |||
96 | extern int ccid_initialize_builtins(void); | ||
97 | extern void ccid_cleanup_builtins(void); | ||
95 | 98 | ||
96 | struct ccid { | 99 | struct ccid { |
97 | struct ccid_operations *ccid_ops; | 100 | struct ccid_operations *ccid_ops; |
@@ -108,8 +111,7 @@ extern int ccid_get_builtin_ccids(u8 **ccid_array, u8 *array_len); | |||
108 | extern int ccid_getsockopt_builtin_ccids(struct sock *sk, int len, | 111 | extern int ccid_getsockopt_builtin_ccids(struct sock *sk, int len, |
109 | char __user *, int __user *); | 112 | char __user *, int __user *); |
110 | 113 | ||
111 | extern struct ccid *ccid_new(unsigned char id, struct sock *sk, int rx, | 114 | extern struct ccid *ccid_new(const u8 id, struct sock *sk, bool rx); |
112 | gfp_t gfp); | ||
113 | 115 | ||
114 | static inline int ccid_get_current_rx_ccid(struct dccp_sock *dp) | 116 | static inline int ccid_get_current_rx_ccid(struct dccp_sock *dp) |
115 | { | 117 | { |
diff --git a/net/dccp/ccids/Kconfig b/net/dccp/ccids/Kconfig index 12275943eab8..b28bf962edc3 100644 --- a/net/dccp/ccids/Kconfig +++ b/net/dccp/ccids/Kconfig | |||
@@ -1,80 +1,51 @@ | |||
1 | menu "DCCP CCIDs Configuration (EXPERIMENTAL)" | 1 | menu "DCCP CCIDs Configuration (EXPERIMENTAL)" |
2 | depends on EXPERIMENTAL | 2 | depends on EXPERIMENTAL |
3 | 3 | ||
4 | config IP_DCCP_CCID2 | ||
5 | tristate "CCID2 (TCP-Like) (EXPERIMENTAL)" | ||
6 | def_tristate IP_DCCP | ||
7 | select IP_DCCP_ACKVEC | ||
8 | ---help--- | ||
9 | CCID 2, TCP-like Congestion Control, denotes Additive Increase, | ||
10 | Multiplicative Decrease (AIMD) congestion control with behavior | ||
11 | modelled directly on TCP, including congestion window, slow start, | ||
12 | timeouts, and so forth [RFC 2581]. CCID 2 achieves maximum | ||
13 | bandwidth over the long term, consistent with the use of end-to-end | ||
14 | congestion control, but halves its congestion window in response to | ||
15 | each congestion event. This leads to the abrupt rate changes | ||
16 | typical of TCP. Applications should use CCID 2 if they prefer | ||
17 | maximum bandwidth utilization to steadiness of rate. This is often | ||
18 | the case for applications that are not playing their data directly | ||
19 | to the user. For example, a hypothetical application that | ||
20 | transferred files over DCCP, using application-level retransmissions | ||
21 | for lost packets, would prefer CCID 2 to CCID 3. On-line games may | ||
22 | also prefer CCID 2. See RFC 4341 for further details. | ||
23 | |||
24 | CCID2 is the default CCID used by DCCP. | ||
25 | |||
26 | config IP_DCCP_CCID2_DEBUG | 4 | config IP_DCCP_CCID2_DEBUG |
27 | bool "CCID2 debugging messages" | 5 | bool "CCID-2 debugging messages" |
28 | depends on IP_DCCP_CCID2 | 6 | ---help--- |
29 | ---help--- | 7 | Enable CCID-2 specific debugging messages. |
30 | Enable CCID2-specific debugging messages. | ||
31 | 8 | ||
32 | When compiling CCID2 as a module, this debugging output can | 9 | The debugging output can additionally be toggled by setting the |
33 | additionally be toggled by setting the ccid2_debug module | 10 | ccid2_debug parameter to 0 or 1. |
34 | parameter to 0 or 1. | ||
35 | 11 | ||
36 | If in doubt, say N. | 12 | If in doubt, say N. |
37 | 13 | ||
38 | config IP_DCCP_CCID3 | 14 | config IP_DCCP_CCID3 |
39 | tristate "CCID3 (TCP-Friendly) (EXPERIMENTAL)" | 15 | bool "CCID-3 (TCP-Friendly) (EXPERIMENTAL)" |
40 | def_tristate IP_DCCP | 16 | def_bool y if (IP_DCCP = y || IP_DCCP = m) |
41 | select IP_DCCP_TFRC_LIB | ||
42 | ---help--- | 17 | ---help--- |
43 | CCID 3 denotes TCP-Friendly Rate Control (TFRC), an equation-based | 18 | CCID-3 denotes TCP-Friendly Rate Control (TFRC), an equation-based |
44 | rate-controlled congestion control mechanism. TFRC is designed to | 19 | rate-controlled congestion control mechanism. TFRC is designed to |
45 | be reasonably fair when competing for bandwidth with TCP-like flows, | 20 | be reasonably fair when competing for bandwidth with TCP-like flows, |
46 | where a flow is "reasonably fair" if its sending rate is generally | 21 | where a flow is "reasonably fair" if its sending rate is generally |
47 | within a factor of two of the sending rate of a TCP flow under the | 22 | within a factor of two of the sending rate of a TCP flow under the |
48 | same conditions. However, TFRC has a much lower variation of | 23 | same conditions. However, TFRC has a much lower variation of |
49 | throughput over time compared with TCP, which makes CCID 3 more | 24 | throughput over time compared with TCP, which makes CCID-3 more |
50 | suitable than CCID 2 for applications such as streaming media where a | 25 | suitable than CCID-2 for applications such as streaming media where a |
51 | relatively smooth sending rate is of importance. | 26 | relatively smooth sending rate is of importance. |
52 | 27 | ||
53 | CCID 3 is further described in RFC 4342, | 28 | CCID-3 is further described in RFC 4342, |
54 | http://www.ietf.org/rfc/rfc4342.txt | 29 | http://www.ietf.org/rfc/rfc4342.txt |
55 | 30 | ||
56 | The TFRC congestion control algorithms were initially described in | 31 | The TFRC congestion control algorithms were initially described in |
57 | RFC 3448. | 32 | RFC 5348. |
58 | 33 | ||
59 | This text was extracted from RFC 4340 (sec. 10.2), | 34 | This text was extracted from RFC 4340 (sec. 10.2), |
60 | http://www.ietf.org/rfc/rfc4340.txt | 35 | http://www.ietf.org/rfc/rfc4340.txt |
61 | |||
62 | To compile this CCID as a module, choose M here: the module will be | ||
63 | called dccp_ccid3. | ||
64 | 36 | ||
65 | If in doubt, say M. | 37 | If in doubt, say N. |
66 | 38 | ||
67 | config IP_DCCP_CCID3_DEBUG | 39 | config IP_DCCP_CCID3_DEBUG |
68 | bool "CCID3 debugging messages" | 40 | bool "CCID-3 debugging messages" |
69 | depends on IP_DCCP_CCID3 | 41 | depends on IP_DCCP_CCID3 |
70 | ---help--- | 42 | ---help--- |
71 | Enable CCID3-specific debugging messages. | 43 | Enable CCID-3 specific debugging messages. |
72 | 44 | ||
73 | When compiling CCID3 as a module, this debugging output can | 45 | The debugging output can additionally be toggled by setting the |
74 | additionally be toggled by setting the ccid3_debug module | 46 | ccid3_debug parameter to 0 or 1. |
75 | parameter to 0 or 1. | ||
76 | 47 | ||
77 | If in doubt, say N. | 48 | If in doubt, say N. |
78 | 49 | ||
79 | config IP_DCCP_CCID3_RTO | 50 | config IP_DCCP_CCID3_RTO |
80 | int "Use higher bound for nofeedback timer" | 51 | int "Use higher bound for nofeedback timer" |
@@ -108,12 +79,8 @@ config IP_DCCP_CCID3_RTO | |||
108 | therefore not be performed on WANs. | 79 | therefore not be performed on WANs. |
109 | 80 | ||
110 | config IP_DCCP_TFRC_LIB | 81 | config IP_DCCP_TFRC_LIB |
111 | tristate | 82 | def_bool y if IP_DCCP_CCID3 |
112 | default n | ||
113 | 83 | ||
114 | config IP_DCCP_TFRC_DEBUG | 84 | config IP_DCCP_TFRC_DEBUG |
115 | bool | 85 | def_bool y if IP_DCCP_CCID3_DEBUG |
116 | depends on IP_DCCP_TFRC_LIB | ||
117 | default y if IP_DCCP_CCID3_DEBUG | ||
118 | |||
119 | endmenu | 86 | endmenu |
diff --git a/net/dccp/ccids/Makefile b/net/dccp/ccids/Makefile deleted file mode 100644 index 438f20bccff7..000000000000 --- a/net/dccp/ccids/Makefile +++ /dev/null | |||
@@ -1,9 +0,0 @@ | |||
1 | obj-$(CONFIG_IP_DCCP_CCID3) += dccp_ccid3.o | ||
2 | |||
3 | dccp_ccid3-y := ccid3.o | ||
4 | |||
5 | obj-$(CONFIG_IP_DCCP_CCID2) += dccp_ccid2.o | ||
6 | |||
7 | dccp_ccid2-y := ccid2.o | ||
8 | |||
9 | obj-y += lib/ | ||
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c index c9ea19a4d85e..d235294ace23 100644 --- a/net/dccp/ccids/ccid2.c +++ b/net/dccp/ccids/ccid2.c | |||
@@ -768,10 +768,9 @@ static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
768 | } | 768 | } |
769 | } | 769 | } |
770 | 770 | ||
771 | static struct ccid_operations ccid2 = { | 771 | struct ccid_operations ccid2_ops = { |
772 | .ccid_id = DCCPC_CCID2, | 772 | .ccid_id = DCCPC_CCID2, |
773 | .ccid_name = "TCP-like", | 773 | .ccid_name = "TCP-like", |
774 | .ccid_owner = THIS_MODULE, | ||
775 | .ccid_hc_tx_obj_size = sizeof(struct ccid2_hc_tx_sock), | 774 | .ccid_hc_tx_obj_size = sizeof(struct ccid2_hc_tx_sock), |
776 | .ccid_hc_tx_init = ccid2_hc_tx_init, | 775 | .ccid_hc_tx_init = ccid2_hc_tx_init, |
777 | .ccid_hc_tx_exit = ccid2_hc_tx_exit, | 776 | .ccid_hc_tx_exit = ccid2_hc_tx_exit, |
@@ -784,22 +783,5 @@ static struct ccid_operations ccid2 = { | |||
784 | 783 | ||
785 | #ifdef CONFIG_IP_DCCP_CCID2_DEBUG | 784 | #ifdef CONFIG_IP_DCCP_CCID2_DEBUG |
786 | module_param(ccid2_debug, bool, 0644); | 785 | module_param(ccid2_debug, bool, 0644); |
787 | MODULE_PARM_DESC(ccid2_debug, "Enable debug messages"); | 786 | MODULE_PARM_DESC(ccid2_debug, "Enable CCID-2 debug messages"); |
788 | #endif | 787 | #endif |
789 | |||
790 | static __init int ccid2_module_init(void) | ||
791 | { | ||
792 | return ccid_register(&ccid2); | ||
793 | } | ||
794 | module_init(ccid2_module_init); | ||
795 | |||
796 | static __exit void ccid2_module_exit(void) | ||
797 | { | ||
798 | ccid_unregister(&ccid2); | ||
799 | } | ||
800 | module_exit(ccid2_module_exit); | ||
801 | |||
802 | MODULE_AUTHOR("Andrea Bittau <a.bittau@cs.ucl.ac.uk>"); | ||
803 | MODULE_DESCRIPTION("DCCP TCP-Like (CCID2) CCID"); | ||
804 | MODULE_LICENSE("GPL"); | ||
805 | MODULE_ALIAS("net-dccp-ccid-2"); | ||
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c index 3b8bd7ca6761..a27b7f4c19c5 100644 --- a/net/dccp/ccids/ccid3.c +++ b/net/dccp/ccids/ccid3.c | |||
@@ -940,10 +940,9 @@ static int ccid3_hc_rx_getsockopt(struct sock *sk, const int optname, int len, | |||
940 | return 0; | 940 | return 0; |
941 | } | 941 | } |
942 | 942 | ||
943 | static struct ccid_operations ccid3 = { | 943 | struct ccid_operations ccid3_ops = { |
944 | .ccid_id = DCCPC_CCID3, | 944 | .ccid_id = DCCPC_CCID3, |
945 | .ccid_name = "TCP-Friendly Rate Control", | 945 | .ccid_name = "TCP-Friendly Rate Control", |
946 | .ccid_owner = THIS_MODULE, | ||
947 | .ccid_hc_tx_obj_size = sizeof(struct ccid3_hc_tx_sock), | 946 | .ccid_hc_tx_obj_size = sizeof(struct ccid3_hc_tx_sock), |
948 | .ccid_hc_tx_init = ccid3_hc_tx_init, | 947 | .ccid_hc_tx_init = ccid3_hc_tx_init, |
949 | .ccid_hc_tx_exit = ccid3_hc_tx_exit, | 948 | .ccid_hc_tx_exit = ccid3_hc_tx_exit, |
@@ -964,23 +963,5 @@ static struct ccid_operations ccid3 = { | |||
964 | 963 | ||
965 | #ifdef CONFIG_IP_DCCP_CCID3_DEBUG | 964 | #ifdef CONFIG_IP_DCCP_CCID3_DEBUG |
966 | module_param(ccid3_debug, bool, 0644); | 965 | module_param(ccid3_debug, bool, 0644); |
967 | MODULE_PARM_DESC(ccid3_debug, "Enable debug messages"); | 966 | MODULE_PARM_DESC(ccid3_debug, "Enable CCID-3 debug messages"); |
968 | #endif | 967 | #endif |
969 | |||
970 | static __init int ccid3_module_init(void) | ||
971 | { | ||
972 | return ccid_register(&ccid3); | ||
973 | } | ||
974 | module_init(ccid3_module_init); | ||
975 | |||
976 | static __exit void ccid3_module_exit(void) | ||
977 | { | ||
978 | ccid_unregister(&ccid3); | ||
979 | } | ||
980 | module_exit(ccid3_module_exit); | ||
981 | |||
982 | MODULE_AUTHOR("Ian McDonald <ian.mcdonald@jandi.co.nz>, " | ||
983 | "Arnaldo Carvalho de Melo <acme@ghostprotocols.net>"); | ||
984 | MODULE_DESCRIPTION("DCCP TFRC CCID3 CCID"); | ||
985 | MODULE_LICENSE("GPL"); | ||
986 | MODULE_ALIAS("net-dccp-ccid-3"); | ||
diff --git a/net/dccp/ccids/lib/Makefile b/net/dccp/ccids/lib/Makefile deleted file mode 100644 index 68c93e3d89dc..000000000000 --- a/net/dccp/ccids/lib/Makefile +++ /dev/null | |||
@@ -1,3 +0,0 @@ | |||
1 | obj-$(CONFIG_IP_DCCP_TFRC_LIB) += dccp_tfrc_lib.o | ||
2 | |||
3 | dccp_tfrc_lib-y := tfrc.o tfrc_equation.o packet_history.o loss_interval.o | ||
diff --git a/net/dccp/ccids/lib/loss_interval.c b/net/dccp/ccids/lib/loss_interval.c index 5b3ce0688c5c..4d1e40127264 100644 --- a/net/dccp/ccids/lib/loss_interval.c +++ b/net/dccp/ccids/lib/loss_interval.c | |||
@@ -60,7 +60,6 @@ void tfrc_lh_cleanup(struct tfrc_loss_hist *lh) | |||
60 | lh->ring[LIH_INDEX(lh->counter)] = NULL; | 60 | lh->ring[LIH_INDEX(lh->counter)] = NULL; |
61 | } | 61 | } |
62 | } | 62 | } |
63 | EXPORT_SYMBOL_GPL(tfrc_lh_cleanup); | ||
64 | 63 | ||
65 | static void tfrc_lh_calc_i_mean(struct tfrc_loss_hist *lh) | 64 | static void tfrc_lh_calc_i_mean(struct tfrc_loss_hist *lh) |
66 | { | 65 | { |
@@ -121,7 +120,6 @@ u8 tfrc_lh_update_i_mean(struct tfrc_loss_hist *lh, struct sk_buff *skb) | |||
121 | 120 | ||
122 | return (lh->i_mean < old_i_mean); | 121 | return (lh->i_mean < old_i_mean); |
123 | } | 122 | } |
124 | EXPORT_SYMBOL_GPL(tfrc_lh_update_i_mean); | ||
125 | 123 | ||
126 | /* Determine if `new_loss' does begin a new loss interval [RFC 4342, 10.2] */ | 124 | /* Determine if `new_loss' does begin a new loss interval [RFC 4342, 10.2] */ |
127 | static inline u8 tfrc_lh_is_new_loss(struct tfrc_loss_interval *cur, | 125 | static inline u8 tfrc_lh_is_new_loss(struct tfrc_loss_interval *cur, |
@@ -169,7 +167,6 @@ int tfrc_lh_interval_add(struct tfrc_loss_hist *lh, struct tfrc_rx_hist *rh, | |||
169 | } | 167 | } |
170 | return 1; | 168 | return 1; |
171 | } | 169 | } |
172 | EXPORT_SYMBOL_GPL(tfrc_lh_interval_add); | ||
173 | 170 | ||
174 | int __init tfrc_li_init(void) | 171 | int __init tfrc_li_init(void) |
175 | { | 172 | { |
diff --git a/net/dccp/ccids/lib/packet_history.c b/net/dccp/ccids/lib/packet_history.c index 6cc108afdc3b..b7785b3581ec 100644 --- a/net/dccp/ccids/lib/packet_history.c +++ b/net/dccp/ccids/lib/packet_history.c | |||
@@ -94,7 +94,6 @@ int tfrc_tx_hist_add(struct tfrc_tx_hist_entry **headp, u64 seqno) | |||
94 | *headp = entry; | 94 | *headp = entry; |
95 | return 0; | 95 | return 0; |
96 | } | 96 | } |
97 | EXPORT_SYMBOL_GPL(tfrc_tx_hist_add); | ||
98 | 97 | ||
99 | void tfrc_tx_hist_purge(struct tfrc_tx_hist_entry **headp) | 98 | void tfrc_tx_hist_purge(struct tfrc_tx_hist_entry **headp) |
100 | { | 99 | { |
@@ -109,7 +108,6 @@ void tfrc_tx_hist_purge(struct tfrc_tx_hist_entry **headp) | |||
109 | 108 | ||
110 | *headp = NULL; | 109 | *headp = NULL; |
111 | } | 110 | } |
112 | EXPORT_SYMBOL_GPL(tfrc_tx_hist_purge); | ||
113 | 111 | ||
114 | u32 tfrc_tx_hist_rtt(struct tfrc_tx_hist_entry *head, const u64 seqno, | 112 | u32 tfrc_tx_hist_rtt(struct tfrc_tx_hist_entry *head, const u64 seqno, |
115 | const ktime_t now) | 113 | const ktime_t now) |
@@ -127,7 +125,6 @@ u32 tfrc_tx_hist_rtt(struct tfrc_tx_hist_entry *head, const u64 seqno, | |||
127 | 125 | ||
128 | return rtt; | 126 | return rtt; |
129 | } | 127 | } |
130 | EXPORT_SYMBOL_GPL(tfrc_tx_hist_rtt); | ||
131 | 128 | ||
132 | 129 | ||
133 | /* | 130 | /* |
@@ -172,7 +169,6 @@ void tfrc_rx_hist_add_packet(struct tfrc_rx_hist *h, | |||
172 | 169 | ||
173 | tfrc_rx_hist_entry_from_skb(entry, skb, ndp); | 170 | tfrc_rx_hist_entry_from_skb(entry, skb, ndp); |
174 | } | 171 | } |
175 | EXPORT_SYMBOL_GPL(tfrc_rx_hist_add_packet); | ||
176 | 172 | ||
177 | /* has the packet contained in skb been seen before? */ | 173 | /* has the packet contained in skb been seen before? */ |
178 | int tfrc_rx_hist_duplicate(struct tfrc_rx_hist *h, struct sk_buff *skb) | 174 | int tfrc_rx_hist_duplicate(struct tfrc_rx_hist *h, struct sk_buff *skb) |
@@ -189,7 +185,6 @@ int tfrc_rx_hist_duplicate(struct tfrc_rx_hist *h, struct sk_buff *skb) | |||
189 | 185 | ||
190 | return 0; | 186 | return 0; |
191 | } | 187 | } |
192 | EXPORT_SYMBOL_GPL(tfrc_rx_hist_duplicate); | ||
193 | 188 | ||
194 | static void tfrc_rx_hist_swap(struct tfrc_rx_hist *h, const u8 a, const u8 b) | 189 | static void tfrc_rx_hist_swap(struct tfrc_rx_hist *h, const u8 a, const u8 b) |
195 | { | 190 | { |
@@ -390,7 +385,6 @@ int tfrc_rx_handle_loss(struct tfrc_rx_hist *h, | |||
390 | } | 385 | } |
391 | return is_new_loss; | 386 | return is_new_loss; |
392 | } | 387 | } |
393 | EXPORT_SYMBOL_GPL(tfrc_rx_handle_loss); | ||
394 | 388 | ||
395 | int tfrc_rx_hist_alloc(struct tfrc_rx_hist *h) | 389 | int tfrc_rx_hist_alloc(struct tfrc_rx_hist *h) |
396 | { | 390 | { |
@@ -412,7 +406,6 @@ out_free: | |||
412 | } | 406 | } |
413 | return -ENOBUFS; | 407 | return -ENOBUFS; |
414 | } | 408 | } |
415 | EXPORT_SYMBOL_GPL(tfrc_rx_hist_alloc); | ||
416 | 409 | ||
417 | void tfrc_rx_hist_purge(struct tfrc_rx_hist *h) | 410 | void tfrc_rx_hist_purge(struct tfrc_rx_hist *h) |
418 | { | 411 | { |
@@ -424,7 +417,6 @@ void tfrc_rx_hist_purge(struct tfrc_rx_hist *h) | |||
424 | h->ring[i] = NULL; | 417 | h->ring[i] = NULL; |
425 | } | 418 | } |
426 | } | 419 | } |
427 | EXPORT_SYMBOL_GPL(tfrc_rx_hist_purge); | ||
428 | 420 | ||
429 | /** | 421 | /** |
430 | * tfrc_rx_hist_rtt_last_s - reference entry to compute RTT samples against | 422 | * tfrc_rx_hist_rtt_last_s - reference entry to compute RTT samples against |
@@ -495,4 +487,3 @@ keep_ref_for_next_time: | |||
495 | 487 | ||
496 | return sample; | 488 | return sample; |
497 | } | 489 | } |
498 | EXPORT_SYMBOL_GPL(tfrc_rx_hist_sample_rtt); | ||
diff --git a/net/dccp/ccids/lib/tfrc.c b/net/dccp/ccids/lib/tfrc.c index 185916218e07..60c412ccfeef 100644 --- a/net/dccp/ccids/lib/tfrc.c +++ b/net/dccp/ccids/lib/tfrc.c | |||
@@ -1,20 +1,18 @@ | |||
1 | /* | 1 | /* |
2 | * TFRC: main module holding the pieces of the TFRC library together | 2 | * TFRC library initialisation |
3 | * | 3 | * |
4 | * Copyright (c) 2007 The University of Aberdeen, Scotland, UK | 4 | * Copyright (c) 2007 The University of Aberdeen, Scotland, UK |
5 | * Copyright (c) 2007 Arnaldo Carvalho de Melo <acme@redhat.com> | 5 | * Copyright (c) 2007 Arnaldo Carvalho de Melo <acme@redhat.com> |
6 | */ | 6 | */ |
7 | #include <linux/module.h> | ||
8 | #include <linux/moduleparam.h> | ||
9 | #include "tfrc.h" | 7 | #include "tfrc.h" |
10 | 8 | ||
11 | #ifdef CONFIG_IP_DCCP_TFRC_DEBUG | 9 | #ifdef CONFIG_IP_DCCP_TFRC_DEBUG |
12 | int tfrc_debug; | 10 | int tfrc_debug; |
13 | module_param(tfrc_debug, bool, 0644); | 11 | module_param(tfrc_debug, bool, 0644); |
14 | MODULE_PARM_DESC(tfrc_debug, "Enable debug messages"); | 12 | MODULE_PARM_DESC(tfrc_debug, "Enable TFRC debug messages"); |
15 | #endif | 13 | #endif |
16 | 14 | ||
17 | static int __init tfrc_module_init(void) | 15 | int __init tfrc_lib_init(void) |
18 | { | 16 | { |
19 | int rc = tfrc_li_init(); | 17 | int rc = tfrc_li_init(); |
20 | 18 | ||
@@ -38,18 +36,9 @@ out: | |||
38 | return rc; | 36 | return rc; |
39 | } | 37 | } |
40 | 38 | ||
41 | static void __exit tfrc_module_exit(void) | 39 | void __exit tfrc_lib_exit(void) |
42 | { | 40 | { |
43 | tfrc_rx_packet_history_exit(); | 41 | tfrc_rx_packet_history_exit(); |
44 | tfrc_tx_packet_history_exit(); | 42 | tfrc_tx_packet_history_exit(); |
45 | tfrc_li_exit(); | 43 | tfrc_li_exit(); |
46 | } | 44 | } |
47 | |||
48 | module_init(tfrc_module_init); | ||
49 | module_exit(tfrc_module_exit); | ||
50 | |||
51 | MODULE_AUTHOR("Gerrit Renker <gerrit@erg.abdn.ac.uk>, " | ||
52 | "Ian McDonald <ian.mcdonald@jandi.co.nz>, " | ||
53 | "Arnaldo Carvalho de Melo <acme@redhat.com>"); | ||
54 | MODULE_DESCRIPTION("DCCP TFRC library"); | ||
55 | MODULE_LICENSE("GPL"); | ||
diff --git a/net/dccp/ccids/lib/tfrc.h b/net/dccp/ccids/lib/tfrc.h index ed9857527acf..e9720b143275 100644 --- a/net/dccp/ccids/lib/tfrc.h +++ b/net/dccp/ccids/lib/tfrc.h | |||
@@ -17,7 +17,8 @@ | |||
17 | #include <linux/types.h> | 17 | #include <linux/types.h> |
18 | #include <linux/math64.h> | 18 | #include <linux/math64.h> |
19 | #include "../../dccp.h" | 19 | #include "../../dccp.h" |
20 | /* internal includes that this module exports: */ | 20 | |
21 | /* internal includes that this library exports: */ | ||
21 | #include "loss_interval.h" | 22 | #include "loss_interval.h" |
22 | #include "packet_history.h" | 23 | #include "packet_history.h" |
23 | 24 | ||
@@ -66,4 +67,12 @@ extern void tfrc_rx_packet_history_exit(void); | |||
66 | 67 | ||
67 | extern int tfrc_li_init(void); | 68 | extern int tfrc_li_init(void); |
68 | extern void tfrc_li_exit(void); | 69 | extern void tfrc_li_exit(void); |
70 | |||
71 | #ifdef CONFIG_IP_DCCP_TFRC_LIB | ||
72 | extern int tfrc_lib_init(void); | ||
73 | extern void tfrc_lib_exit(void); | ||
74 | #else | ||
75 | #define tfrc_lib_init() (0) | ||
76 | #define tfrc_lib_exit() | ||
77 | #endif | ||
69 | #endif /* _TFRC_H_ */ | 78 | #endif /* _TFRC_H_ */ |
diff --git a/net/dccp/ccids/lib/tfrc_equation.c b/net/dccp/ccids/lib/tfrc_equation.c index 2f20a29cffe4..c5d3a9e5a5a4 100644 --- a/net/dccp/ccids/lib/tfrc_equation.c +++ b/net/dccp/ccids/lib/tfrc_equation.c | |||
@@ -659,8 +659,6 @@ u32 tfrc_calc_x(u16 s, u32 R, u32 p) | |||
659 | return scaled_div32(result, f); | 659 | return scaled_div32(result, f); |
660 | } | 660 | } |
661 | 661 | ||
662 | EXPORT_SYMBOL_GPL(tfrc_calc_x); | ||
663 | |||
664 | /** | 662 | /** |
665 | * tfrc_calc_x_reverse_lookup - try to find p given f(p) | 663 | * tfrc_calc_x_reverse_lookup - try to find p given f(p) |
666 | * | 664 | * |
@@ -693,5 +691,3 @@ u32 tfrc_calc_x_reverse_lookup(u32 fvalue) | |||
693 | index = tfrc_binsearch(fvalue, 0); | 691 | index = tfrc_binsearch(fvalue, 0); |
694 | return (index + 1) * 1000000 / TFRC_CALC_X_ARRSIZE; | 692 | return (index + 1) * 1000000 / TFRC_CALC_X_ARRSIZE; |
695 | } | 693 | } |
696 | |||
697 | EXPORT_SYMBOL_GPL(tfrc_calc_x_reverse_lookup); | ||
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h index 0bc4c9a02e19..f2230fc168e1 100644 --- a/net/dccp/dccp.h +++ b/net/dccp/dccp.h | |||
@@ -432,10 +432,8 @@ static inline int dccp_ack_pending(const struct sock *sk) | |||
432 | { | 432 | { |
433 | const struct dccp_sock *dp = dccp_sk(sk); | 433 | const struct dccp_sock *dp = dccp_sk(sk); |
434 | return dp->dccps_timestamp_echo != 0 || | 434 | return dp->dccps_timestamp_echo != 0 || |
435 | #ifdef CONFIG_IP_DCCP_ACKVEC | ||
436 | (dp->dccps_hc_rx_ackvec != NULL && | 435 | (dp->dccps_hc_rx_ackvec != NULL && |
437 | dccp_ackvec_pending(dp->dccps_hc_rx_ackvec)) || | 436 | dccp_ackvec_pending(dp->dccps_hc_rx_ackvec)) || |
438 | #endif | ||
439 | inet_csk_ack_scheduled(sk); | 437 | inet_csk_ack_scheduled(sk); |
440 | } | 438 | } |
441 | 439 | ||
diff --git a/net/dccp/feat.c b/net/dccp/feat.c index 30f9fb76b921..4152308958ab 100644 --- a/net/dccp/feat.c +++ b/net/dccp/feat.c | |||
@@ -34,7 +34,7 @@ | |||
34 | static int dccp_hdlr_ccid(struct sock *sk, u64 ccid, bool rx) | 34 | static int dccp_hdlr_ccid(struct sock *sk, u64 ccid, bool rx) |
35 | { | 35 | { |
36 | struct dccp_sock *dp = dccp_sk(sk); | 36 | struct dccp_sock *dp = dccp_sk(sk); |
37 | struct ccid *new_ccid = ccid_new(ccid, sk, rx, gfp_any()); | 37 | struct ccid *new_ccid = ccid_new(ccid, sk, rx); |
38 | 38 | ||
39 | if (new_ccid == NULL) | 39 | if (new_ccid == NULL) |
40 | return -ENOMEM; | 40 | return -ENOMEM; |
@@ -1214,8 +1214,6 @@ const char *dccp_feat_typename(const u8 type) | |||
1214 | return NULL; | 1214 | return NULL; |
1215 | } | 1215 | } |
1216 | 1216 | ||
1217 | EXPORT_SYMBOL_GPL(dccp_feat_typename); | ||
1218 | |||
1219 | const char *dccp_feat_name(const u8 feat) | 1217 | const char *dccp_feat_name(const u8 feat) |
1220 | { | 1218 | { |
1221 | static const char *feature_names[] = { | 1219 | static const char *feature_names[] = { |
@@ -1240,6 +1238,4 @@ const char *dccp_feat_name(const u8 feat) | |||
1240 | 1238 | ||
1241 | return feature_names[feat]; | 1239 | return feature_names[feat]; |
1242 | } | 1240 | } |
1243 | |||
1244 | EXPORT_SYMBOL_GPL(dccp_feat_name); | ||
1245 | #endif /* CONFIG_IP_DCCP_DEBUG */ | 1241 | #endif /* CONFIG_IP_DCCP_DEBUG */ |
diff --git a/net/dccp/input.c b/net/dccp/input.c index 5eb443f656c1..7648f316310f 100644 --- a/net/dccp/input.c +++ b/net/dccp/input.c | |||
@@ -741,5 +741,3 @@ u32 dccp_sample_rtt(struct sock *sk, long delta) | |||
741 | 741 | ||
742 | return delta; | 742 | return delta; |
743 | } | 743 | } |
744 | |||
745 | EXPORT_SYMBOL_GPL(dccp_sample_rtt); | ||
diff --git a/net/dccp/proto.c b/net/dccp/proto.c index 1747ccae8e8d..945b4d5d23b3 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c | |||
@@ -1118,9 +1118,15 @@ static int __init dccp_init(void) | |||
1118 | if (rc) | 1118 | if (rc) |
1119 | goto out_ackvec_exit; | 1119 | goto out_ackvec_exit; |
1120 | 1120 | ||
1121 | rc = ccid_initialize_builtins(); | ||
1122 | if (rc) | ||
1123 | goto out_sysctl_exit; | ||
1124 | |||
1121 | dccp_timestamping_init(); | 1125 | dccp_timestamping_init(); |
1122 | out: | 1126 | out: |
1123 | return rc; | 1127 | return rc; |
1128 | out_sysctl_exit: | ||
1129 | dccp_sysctl_exit(); | ||
1124 | out_ackvec_exit: | 1130 | out_ackvec_exit: |
1125 | dccp_ackvec_exit(); | 1131 | dccp_ackvec_exit(); |
1126 | out_free_dccp_mib: | 1132 | out_free_dccp_mib: |
@@ -1143,6 +1149,7 @@ out_free_percpu: | |||
1143 | 1149 | ||
1144 | static void __exit dccp_fini(void) | 1150 | static void __exit dccp_fini(void) |
1145 | { | 1151 | { |
1152 | ccid_cleanup_builtins(); | ||
1146 | dccp_mib_exit(); | 1153 | dccp_mib_exit(); |
1147 | free_pages((unsigned long)dccp_hashinfo.bhash, | 1154 | free_pages((unsigned long)dccp_hashinfo.bhash, |
1148 | get_order(dccp_hashinfo.bhash_size * | 1155 | get_order(dccp_hashinfo.bhash_size * |
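
dccp_init() above gains one extra initialisation step (ccid_initialize_builtins) together with a matching out_sysctl_exit label, following the usual kernel convention of unwinding the already-completed steps in reverse order when a later step fails. A small userspace sketch of that goto-based unwind shape (the step_* names are illustrative, not the DCCP functions):

    #include <stdio.h>

    static int step_a_init(void)  { puts("a up");    return 0; }
    static void step_a_exit(void) { puts("a down"); }
    static int step_b_init(void)  { puts("b up");    return 0; }
    static void step_b_exit(void) { puts("b down"); }
    static int step_c_init(void)  { puts("c fails"); return -1; }

    static int subsystem_init(void)
    {
        int rc;

        rc = step_a_init();
        if (rc)
            goto out;
        rc = step_b_init();
        if (rc)
            goto out_a;
        rc = step_c_init();
        if (rc)
            goto out_b;             /* unwind in reverse order */
    out:
        return rc;
    out_b:
        step_b_exit();
    out_a:
        step_a_exit();
        goto out;
    }

    int main(void) { return subsystem_init() ? 1 : 0; }
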
diff --git a/net/dsa/slave.c b/net/dsa/slave.c index a3a410d20da0..a68fd79e9eca 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c | |||
@@ -286,6 +286,42 @@ static const struct ethtool_ops dsa_slave_ethtool_ops = { | |||
286 | .get_sset_count = dsa_slave_get_sset_count, | 286 | .get_sset_count = dsa_slave_get_sset_count, |
287 | }; | 287 | }; |
288 | 288 | ||
289 | #ifdef CONFIG_NET_DSA_TAG_DSA | ||
290 | static const struct net_device_ops dsa_netdev_ops = { | ||
291 | .ndo_open = dsa_slave_open, | ||
292 | .ndo_stop = dsa_slave_close, | ||
293 | .ndo_start_xmit = dsa_xmit, | ||
294 | .ndo_change_rx_flags = dsa_slave_change_rx_flags, | ||
295 | .ndo_set_rx_mode = dsa_slave_set_rx_mode, | ||
296 | .ndo_set_multicast_list = dsa_slave_set_rx_mode, | ||
297 | .ndo_set_mac_address = dsa_slave_set_mac_address, | ||
298 | .ndo_do_ioctl = dsa_slave_ioctl, | ||
299 | }; | ||
300 | #endif | ||
301 | #ifdef CONFIG_NET_DSA_TAG_EDSA | ||
302 | static const struct net_device_ops edsa_netdev_ops = { | ||
303 | .ndo_open = dsa_slave_open, | ||
304 | .ndo_stop = dsa_slave_close, | ||
305 | .ndo_start_xmit = edsa_xmit, | ||
306 | .ndo_change_rx_flags = dsa_slave_change_rx_flags, | ||
307 | .ndo_set_rx_mode = dsa_slave_set_rx_mode, | ||
308 | .ndo_set_multicast_list = dsa_slave_set_rx_mode, | ||
309 | .ndo_set_mac_address = dsa_slave_set_mac_address, | ||
310 | .ndo_do_ioctl = dsa_slave_ioctl, | ||
311 | }; | ||
312 | #endif | ||
313 | #ifdef CONFIG_NET_DSA_TAG_TRAILER | ||
314 | static const struct net_device_ops trailer_netdev_ops = { | ||
315 | .ndo_open = dsa_slave_open, | ||
316 | .ndo_stop = dsa_slave_close, | ||
317 | .ndo_start_xmit = trailer_xmit, | ||
318 | .ndo_change_rx_flags = dsa_slave_change_rx_flags, | ||
319 | .ndo_set_rx_mode = dsa_slave_set_rx_mode, | ||
320 | .ndo_set_multicast_list = dsa_slave_set_rx_mode, | ||
321 | .ndo_set_mac_address = dsa_slave_set_mac_address, | ||
322 | .ndo_do_ioctl = dsa_slave_ioctl, | ||
323 | }; | ||
324 | #endif | ||
289 | 325 | ||
290 | /* slave device setup *******************************************************/ | 326 | /* slave device setup *******************************************************/ |
291 | struct net_device * | 327 | struct net_device * |
@@ -306,32 +342,27 @@ dsa_slave_create(struct dsa_switch *ds, struct device *parent, | |||
306 | SET_ETHTOOL_OPS(slave_dev, &dsa_slave_ethtool_ops); | 342 | SET_ETHTOOL_OPS(slave_dev, &dsa_slave_ethtool_ops); |
307 | memcpy(slave_dev->dev_addr, master->dev_addr, ETH_ALEN); | 343 | memcpy(slave_dev->dev_addr, master->dev_addr, ETH_ALEN); |
308 | slave_dev->tx_queue_len = 0; | 344 | slave_dev->tx_queue_len = 0; |
345 | |||
309 | switch (ds->tag_protocol) { | 346 | switch (ds->tag_protocol) { |
310 | #ifdef CONFIG_NET_DSA_TAG_DSA | 347 | #ifdef CONFIG_NET_DSA_TAG_DSA |
311 | case htons(ETH_P_DSA): | 348 | case htons(ETH_P_DSA): |
312 | slave_dev->hard_start_xmit = dsa_xmit; | 349 | slave_dev->netdev_ops = &dsa_netdev_ops; |
313 | break; | 350 | break; |
314 | #endif | 351 | #endif |
315 | #ifdef CONFIG_NET_DSA_TAG_EDSA | 352 | #ifdef CONFIG_NET_DSA_TAG_EDSA |
316 | case htons(ETH_P_EDSA): | 353 | case htons(ETH_P_EDSA): |
317 | slave_dev->hard_start_xmit = edsa_xmit; | 354 | slave_dev->netdev_ops = &edsa_netdev_ops; |
318 | break; | 355 | break; |
319 | #endif | 356 | #endif |
320 | #ifdef CONFIG_NET_DSA_TAG_TRAILER | 357 | #ifdef CONFIG_NET_DSA_TAG_TRAILER |
321 | case htons(ETH_P_TRAILER): | 358 | case htons(ETH_P_TRAILER): |
322 | slave_dev->hard_start_xmit = trailer_xmit; | 359 | slave_dev->netdev_ops = &trailer_netdev_ops; |
323 | break; | 360 | break; |
324 | #endif | 361 | #endif |
325 | default: | 362 | default: |
326 | BUG(); | 363 | BUG(); |
327 | } | 364 | } |
328 | slave_dev->open = dsa_slave_open; | 365 | |
329 | slave_dev->stop = dsa_slave_close; | ||
330 | slave_dev->change_rx_flags = dsa_slave_change_rx_flags; | ||
331 | slave_dev->set_rx_mode = dsa_slave_set_rx_mode; | ||
332 | slave_dev->set_multicast_list = dsa_slave_set_rx_mode; | ||
333 | slave_dev->set_mac_address = dsa_slave_set_mac_address; | ||
334 | slave_dev->do_ioctl = dsa_slave_ioctl; | ||
335 | SET_NETDEV_DEV(slave_dev, parent); | 366 | SET_NETDEV_DEV(slave_dev, parent); |
336 | slave_dev->vlan_features = master->vlan_features; | 367 | slave_dev->vlan_features = master->vlan_features; |
337 | 368 | ||
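
The DSA slave conversion above replaces per-field assignments on the net_device (open, stop, hard_start_xmit, ...) with a choice between several const net_device_ops tables, one per tagging protocol, selected in the existing switch on ds->tag_protocol. A reduced sketch of that "pick a read-only ops table per variant" approach, using generic types rather than the kernel's net_device/net_device_ops:

    #include <stdio.h>

    struct device;

    struct dev_ops {
        int (*open)(struct device *);
        int (*xmit)(struct device *, const char *);
    };

    static int generic_open(struct device *d) { (void)d; return 0; }
    static int dsa_xmit(struct device *d, const char *p)  { (void)d; printf("DSA tag: %s\n", p);  return 0; }
    static int edsa_xmit(struct device *d, const char *p) { (void)d; printf("EDSA tag: %s\n", p); return 0; }

    /* one shared, read-only ops table per tagging flavour */
    static const struct dev_ops dsa_ops  = { .open = generic_open, .xmit = dsa_xmit };
    static const struct dev_ops edsa_ops = { .open = generic_open, .xmit = edsa_xmit };

    struct device { const struct dev_ops *ops; };

    static void device_setup(struct device *d, int proto)
    {
        switch (proto) {
        case 1:  d->ops = &dsa_ops;  break;     /* cf. ETH_P_DSA */
        default: d->ops = &edsa_ops; break;     /* cf. ETH_P_EDSA */
        }
    }

    int main(void)
    {
        struct device d;

        device_setup(&d, 1);
        d.ops->open(&d);
        return d.ops->xmit(&d, "hello");
    }
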
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c index e52799047a5f..6bb2635b5ded 100644 --- a/net/ipv4/cipso_ipv4.c +++ b/net/ipv4/cipso_ipv4.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #include <linux/spinlock.h> | 38 | #include <linux/spinlock.h> |
39 | #include <linux/string.h> | 39 | #include <linux/string.h> |
40 | #include <linux/jhash.h> | 40 | #include <linux/jhash.h> |
41 | #include <linux/audit.h> | ||
41 | #include <net/ip.h> | 42 | #include <net/ip.h> |
42 | #include <net/icmp.h> | 43 | #include <net/icmp.h> |
43 | #include <net/tcp.h> | 44 | #include <net/tcp.h> |
@@ -449,6 +450,7 @@ static struct cipso_v4_doi *cipso_v4_doi_search(u32 doi) | |||
449 | /** | 450 | /** |
450 | * cipso_v4_doi_add - Add a new DOI to the CIPSO protocol engine | 451 | * cipso_v4_doi_add - Add a new DOI to the CIPSO protocol engine |
451 | * @doi_def: the DOI structure | 452 | * @doi_def: the DOI structure |
453 | * @audit_info: NetLabel audit information | ||
452 | * | 454 | * |
453 | * Description: | 455 | * Description: |
454 | * The caller defines a new DOI for use by the CIPSO engine and calls this | 456 | * The caller defines a new DOI for use by the CIPSO engine and calls this |
@@ -458,50 +460,78 @@ static struct cipso_v4_doi *cipso_v4_doi_search(u32 doi) | |||
458 | * zero on success and non-zero on failure. | 460 | * zero on success and non-zero on failure. |
459 | * | 461 | * |
460 | */ | 462 | */ |
461 | int cipso_v4_doi_add(struct cipso_v4_doi *doi_def) | 463 | int cipso_v4_doi_add(struct cipso_v4_doi *doi_def, |
464 | struct netlbl_audit *audit_info) | ||
462 | { | 465 | { |
466 | int ret_val = -EINVAL; | ||
463 | u32 iter; | 467 | u32 iter; |
468 | u32 doi; | ||
469 | u32 doi_type; | ||
470 | struct audit_buffer *audit_buf; | ||
471 | |||
472 | doi = doi_def->doi; | ||
473 | doi_type = doi_def->type; | ||
464 | 474 | ||
465 | if (doi_def == NULL || doi_def->doi == CIPSO_V4_DOI_UNKNOWN) | 475 | if (doi_def == NULL || doi_def->doi == CIPSO_V4_DOI_UNKNOWN) |
466 | return -EINVAL; | 476 | goto doi_add_return; |
467 | for (iter = 0; iter < CIPSO_V4_TAG_MAXCNT; iter++) { | 477 | for (iter = 0; iter < CIPSO_V4_TAG_MAXCNT; iter++) { |
468 | switch (doi_def->tags[iter]) { | 478 | switch (doi_def->tags[iter]) { |
469 | case CIPSO_V4_TAG_RBITMAP: | 479 | case CIPSO_V4_TAG_RBITMAP: |
470 | break; | 480 | break; |
471 | case CIPSO_V4_TAG_RANGE: | 481 | case CIPSO_V4_TAG_RANGE: |
472 | if (doi_def->type != CIPSO_V4_MAP_PASS) | ||
473 | return -EINVAL; | ||
474 | break; | ||
475 | case CIPSO_V4_TAG_INVALID: | ||
476 | if (iter == 0) | ||
477 | return -EINVAL; | ||
478 | break; | ||
479 | case CIPSO_V4_TAG_ENUM: | 482 | case CIPSO_V4_TAG_ENUM: |
480 | if (doi_def->type != CIPSO_V4_MAP_PASS) | 483 | if (doi_def->type != CIPSO_V4_MAP_PASS) |
481 | return -EINVAL; | 484 | goto doi_add_return; |
482 | break; | 485 | break; |
483 | case CIPSO_V4_TAG_LOCAL: | 486 | case CIPSO_V4_TAG_LOCAL: |
484 | if (doi_def->type != CIPSO_V4_MAP_LOCAL) | 487 | if (doi_def->type != CIPSO_V4_MAP_LOCAL) |
485 | return -EINVAL; | 488 | goto doi_add_return; |
489 | break; | ||
490 | case CIPSO_V4_TAG_INVALID: | ||
491 | if (iter == 0) | ||
492 | goto doi_add_return; | ||
486 | break; | 493 | break; |
487 | default: | 494 | default: |
488 | return -EINVAL; | 495 | goto doi_add_return; |
489 | } | 496 | } |
490 | } | 497 | } |
491 | 498 | ||
492 | atomic_set(&doi_def->refcount, 1); | 499 | atomic_set(&doi_def->refcount, 1); |
493 | 500 | ||
494 | spin_lock(&cipso_v4_doi_list_lock); | 501 | spin_lock(&cipso_v4_doi_list_lock); |
495 | if (cipso_v4_doi_search(doi_def->doi) != NULL) | 502 | if (cipso_v4_doi_search(doi_def->doi) != NULL) { |
496 | goto doi_add_failure; | 503 | spin_unlock(&cipso_v4_doi_list_lock); |
504 | ret_val = -EEXIST; | ||
505 | goto doi_add_return; | ||
506 | } | ||
497 | list_add_tail_rcu(&doi_def->list, &cipso_v4_doi_list); | 507 | list_add_tail_rcu(&doi_def->list, &cipso_v4_doi_list); |
498 | spin_unlock(&cipso_v4_doi_list_lock); | 508 | spin_unlock(&cipso_v4_doi_list_lock); |
509 | ret_val = 0; | ||
499 | 510 | ||
500 | return 0; | 511 | doi_add_return: |
512 | audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_ADD, audit_info); | ||
513 | if (audit_buf != NULL) { | ||
514 | const char *type_str; | ||
515 | switch (doi_type) { | ||
516 | case CIPSO_V4_MAP_TRANS: | ||
517 | type_str = "trans"; | ||
518 | break; | ||
519 | case CIPSO_V4_MAP_PASS: | ||
520 | type_str = "pass"; | ||
521 | break; | ||
522 | case CIPSO_V4_MAP_LOCAL: | ||
523 | type_str = "local"; | ||
524 | break; | ||
525 | default: | ||
526 | type_str = "(unknown)"; | ||
527 | } | ||
528 | audit_log_format(audit_buf, | ||
529 | " cipso_doi=%u cipso_type=%s res=%u", | ||
530 | doi, type_str, ret_val == 0 ? 1 : 0); | ||
531 | audit_log_end(audit_buf); | ||
532 | } | ||
501 | 533 | ||
502 | doi_add_failure: | 534 | return ret_val; |
503 | spin_unlock(&cipso_v4_doi_list_lock); | ||
504 | return -EEXIST; | ||
505 | } | 535 | } |
506 | 536 | ||
507 | /** | 537 | /** |
@@ -559,25 +589,39 @@ static void cipso_v4_doi_free_rcu(struct rcu_head *entry) | |||
559 | */ | 589 | */ |
560 | int cipso_v4_doi_remove(u32 doi, struct netlbl_audit *audit_info) | 590 | int cipso_v4_doi_remove(u32 doi, struct netlbl_audit *audit_info) |
561 | { | 591 | { |
592 | int ret_val; | ||
562 | struct cipso_v4_doi *doi_def; | 593 | struct cipso_v4_doi *doi_def; |
594 | struct audit_buffer *audit_buf; | ||
563 | 595 | ||
564 | spin_lock(&cipso_v4_doi_list_lock); | 596 | spin_lock(&cipso_v4_doi_list_lock); |
565 | doi_def = cipso_v4_doi_search(doi); | 597 | doi_def = cipso_v4_doi_search(doi); |
566 | if (doi_def == NULL) { | 598 | if (doi_def == NULL) { |
567 | spin_unlock(&cipso_v4_doi_list_lock); | 599 | spin_unlock(&cipso_v4_doi_list_lock); |
568 | return -ENOENT; | 600 | ret_val = -ENOENT; |
601 | goto doi_remove_return; | ||
569 | } | 602 | } |
570 | if (!atomic_dec_and_test(&doi_def->refcount)) { | 603 | if (!atomic_dec_and_test(&doi_def->refcount)) { |
571 | spin_unlock(&cipso_v4_doi_list_lock); | 604 | spin_unlock(&cipso_v4_doi_list_lock); |
572 | return -EBUSY; | 605 | ret_val = -EBUSY; |
606 | goto doi_remove_return; | ||
573 | } | 607 | } |
574 | list_del_rcu(&doi_def->list); | 608 | list_del_rcu(&doi_def->list); |
575 | spin_unlock(&cipso_v4_doi_list_lock); | 609 | spin_unlock(&cipso_v4_doi_list_lock); |
576 | 610 | ||
577 | cipso_v4_cache_invalidate(); | 611 | cipso_v4_cache_invalidate(); |
578 | call_rcu(&doi_def->rcu, cipso_v4_doi_free_rcu); | 612 | call_rcu(&doi_def->rcu, cipso_v4_doi_free_rcu); |
613 | ret_val = 0; | ||
614 | |||
615 | doi_remove_return: | ||
616 | audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_DEL, audit_info); | ||
617 | if (audit_buf != NULL) { | ||
618 | audit_log_format(audit_buf, | ||
619 | " cipso_doi=%u res=%u", | ||
620 | doi, ret_val == 0 ? 1 : 0); | ||
621 | audit_log_end(audit_buf); | ||
622 | } | ||
579 | 623 | ||
580 | return 0; | 624 | return ret_val; |
581 | } | 625 | } |
582 | 626 | ||
583 | /** | 627 | /** |
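
Both cipso_v4_doi_add() and cipso_v4_doi_remove() above are reshaped around a single exit label so that a NetLabel audit record is written for every outcome, with res=1 on success and res=0 on failure. A compact illustration of that "audit on every exit path" structure (the audit_result() helper is a stand-in for netlbl_audit_start()/audit_log_format()/audit_log_end(), and the error values are simplified):

    #include <stdio.h>

    /* stand-in for the NetLabel audit helpers */
    static void audit_result(const char *op, unsigned int doi, int ret)
    {
        printf("audit: op=%s cipso_doi=%u res=%u\n", op, doi, ret == 0 ? 1 : 0);
    }

    static int doi_known(unsigned int doi) { return doi == 3; }

    static int doi_remove(unsigned int doi)
    {
        int ret = -1;                   /* pessimistic default, cf. ret_val */

        if (!doi_known(doi))
            goto out;                   /* -ENOENT in the real code */
        /* ... drop from the list, invalidate caches ... */
        ret = 0;
    out:
        audit_result("cipso_del", doi, ret);    /* logged on every path */
        return ret;
    }

    int main(void)
    {
        doi_remove(3);
        doi_remove(7);
        return 0;
    }
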
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 9b275abc8eb9..ce572f9dff02 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -580,10 +580,6 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos, | |||
580 | else if (!ret) { | 580 | else if (!ret) { |
581 | if (spliced) | 581 | if (spliced) |
582 | break; | 582 | break; |
583 | if (flags & SPLICE_F_NONBLOCK) { | ||
584 | ret = -EAGAIN; | ||
585 | break; | ||
586 | } | ||
587 | if (sock_flag(sk, SOCK_DONE)) | 583 | if (sock_flag(sk, SOCK_DONE)) |
588 | break; | 584 | break; |
589 | if (sk->sk_err) { | 585 | if (sk->sk_err) { |
@@ -2518,9 +2514,7 @@ found: | |||
2518 | flush |= memcmp(th + 1, th2 + 1, thlen - sizeof(*th)); | 2514 | flush |= memcmp(th + 1, th2 + 1, thlen - sizeof(*th)); |
2519 | 2515 | ||
2520 | total = p->len; | 2516 | total = p->len; |
2521 | mss = total; | 2517 | mss = skb_shinfo(p)->gso_size; |
2522 | if (skb_shinfo(p)->frag_list) | ||
2523 | mss = skb_shinfo(p)->frag_list->len; | ||
2524 | 2518 | ||
2525 | flush |= skb->len > mss || skb->len <= 0; | 2519 | flush |= skb->len > mss || skb->len <= 0; |
2526 | flush |= ntohl(th2->seq) + total != ntohl(th->seq); | 2520 | flush |= ntohl(th2->seq) + total != ntohl(th->seq); |
@@ -2547,6 +2541,7 @@ out: | |||
2547 | 2541 | ||
2548 | return pp; | 2542 | return pp; |
2549 | } | 2543 | } |
2544 | EXPORT_SYMBOL(tcp_gro_receive); | ||
2550 | 2545 | ||
2551 | int tcp_gro_complete(struct sk_buff *skb) | 2546 | int tcp_gro_complete(struct sk_buff *skb) |
2552 | { | 2547 | { |
@@ -2556,7 +2551,6 @@ int tcp_gro_complete(struct sk_buff *skb) | |||
2556 | skb->csum_offset = offsetof(struct tcphdr, check); | 2551 | skb->csum_offset = offsetof(struct tcphdr, check); |
2557 | skb->ip_summed = CHECKSUM_PARTIAL; | 2552 | skb->ip_summed = CHECKSUM_PARTIAL; |
2558 | 2553 | ||
2559 | skb_shinfo(skb)->gso_size = skb_shinfo(skb)->frag_list->len; | ||
2560 | skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count; | 2554 | skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count; |
2561 | 2555 | ||
2562 | if (th->cwr) | 2556 | if (th->cwr) |
@@ -2564,6 +2558,7 @@ int tcp_gro_complete(struct sk_buff *skb) | |||
2564 | 2558 | ||
2565 | return 0; | 2559 | return 0; |
2566 | } | 2560 | } |
2561 | EXPORT_SYMBOL(tcp_gro_complete); | ||
2567 | 2562 | ||
2568 | #ifdef CONFIG_TCP_MD5SIG | 2563 | #ifdef CONFIG_TCP_MD5SIG |
2569 | static unsigned long tcp_md5sig_users; | 2564 | static unsigned long tcp_md5sig_users; |
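
In the tcp_gro_receive() hunk above, the per-flow MSS is now taken from skb_shinfo(p)->gso_size instead of being inferred from frag_list lengths, but the merge criterion itself is unchanged: a segment is only coalesced if it is non-empty, no larger than the MSS, and continues the held packet's sequence space. A simplified userspace rendering of that test (conceptual only; the kernel's flush computation also compares headers and flags):

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    /* decide whether an incoming segment can be merged into a held GRO packet */
    static bool tcp_can_merge(uint32_t held_seq, uint32_t held_len, uint16_t mss,
                              uint32_t seq, uint32_t len)
    {
        if (len == 0 || len > mss)          /* runt or over-sized segment */
            return false;
        if (held_seq + held_len != seq)     /* not the next in-order bytes */
            return false;
        return true;
    }

    int main(void)
    {
        /* held packet: seq 1000, 2920 bytes of payload, mss 1460 */
        printf("%d\n", tcp_can_merge(1000, 2920, 1460, 3920, 1460));  /* 1 */
        printf("%d\n", tcp_can_merge(1000, 2920, 1460, 4000, 1460));  /* 0 */
        return 0;
    }
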
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 437b750b98fd..94f74f5b0cbf 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c | |||
@@ -672,8 +672,7 @@ int ipv6_opt_accepted(struct sock *sk, struct sk_buff *skb) | |||
672 | 672 | ||
673 | EXPORT_SYMBOL_GPL(ipv6_opt_accepted); | 673 | EXPORT_SYMBOL_GPL(ipv6_opt_accepted); |
674 | 674 | ||
675 | static struct inet6_protocol *ipv6_gso_pull_exthdrs(struct sk_buff *skb, | 675 | static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto) |
676 | int proto) | ||
677 | { | 676 | { |
678 | struct inet6_protocol *ops = NULL; | 677 | struct inet6_protocol *ops = NULL; |
679 | 678 | ||
@@ -704,7 +703,7 @@ static struct inet6_protocol *ipv6_gso_pull_exthdrs(struct sk_buff *skb, | |||
704 | __skb_pull(skb, len); | 703 | __skb_pull(skb, len); |
705 | } | 704 | } |
706 | 705 | ||
707 | return ops; | 706 | return proto; |
708 | } | 707 | } |
709 | 708 | ||
710 | static int ipv6_gso_send_check(struct sk_buff *skb) | 709 | static int ipv6_gso_send_check(struct sk_buff *skb) |
@@ -721,7 +720,9 @@ static int ipv6_gso_send_check(struct sk_buff *skb) | |||
721 | err = -EPROTONOSUPPORT; | 720 | err = -EPROTONOSUPPORT; |
722 | 721 | ||
723 | rcu_read_lock(); | 722 | rcu_read_lock(); |
724 | ops = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr); | 723 | ops = rcu_dereference(inet6_protos[ |
724 | ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr)]); | ||
725 | |||
725 | if (likely(ops && ops->gso_send_check)) { | 726 | if (likely(ops && ops->gso_send_check)) { |
726 | skb_reset_transport_header(skb); | 727 | skb_reset_transport_header(skb); |
727 | err = ops->gso_send_check(skb); | 728 | err = ops->gso_send_check(skb); |
@@ -757,7 +758,9 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features) | |||
757 | segs = ERR_PTR(-EPROTONOSUPPORT); | 758 | segs = ERR_PTR(-EPROTONOSUPPORT); |
758 | 759 | ||
759 | rcu_read_lock(); | 760 | rcu_read_lock(); |
760 | ops = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr); | 761 | ops = rcu_dereference(inet6_protos[ |
762 | ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr)]); | ||
763 | |||
761 | if (likely(ops && ops->gso_segment)) { | 764 | if (likely(ops && ops->gso_segment)) { |
762 | skb_reset_transport_header(skb); | 765 | skb_reset_transport_header(skb); |
763 | segs = ops->gso_segment(skb, features); | 766 | segs = ops->gso_segment(skb, features); |
@@ -777,11 +780,105 @@ out: | |||
777 | return segs; | 780 | return segs; |
778 | } | 781 | } |
779 | 782 | ||
783 | struct ipv6_gro_cb { | ||
784 | struct napi_gro_cb napi; | ||
785 | int proto; | ||
786 | }; | ||
787 | |||
788 | #define IPV6_GRO_CB(skb) ((struct ipv6_gro_cb *)(skb)->cb) | ||
789 | |||
790 | static struct sk_buff **ipv6_gro_receive(struct sk_buff **head, | ||
791 | struct sk_buff *skb) | ||
792 | { | ||
793 | struct inet6_protocol *ops; | ||
794 | struct sk_buff **pp = NULL; | ||
795 | struct sk_buff *p; | ||
796 | struct ipv6hdr *iph; | ||
797 | unsigned int nlen; | ||
798 | int flush = 1; | ||
799 | int proto; | ||
800 | |||
801 | if (unlikely(!pskb_may_pull(skb, sizeof(*iph)))) | ||
802 | goto out; | ||
803 | |||
804 | iph = ipv6_hdr(skb); | ||
805 | __skb_pull(skb, sizeof(*iph)); | ||
806 | |||
807 | flush += ntohs(iph->payload_len) != skb->len; | ||
808 | |||
809 | rcu_read_lock(); | ||
810 | proto = ipv6_gso_pull_exthdrs(skb, iph->nexthdr); | ||
811 | IPV6_GRO_CB(skb)->proto = proto; | ||
812 | ops = rcu_dereference(inet6_protos[proto]); | ||
813 | if (!ops || !ops->gro_receive) | ||
814 | goto out_unlock; | ||
815 | |||
816 | flush--; | ||
817 | skb_reset_transport_header(skb); | ||
818 | nlen = skb_network_header_len(skb); | ||
819 | |||
820 | for (p = *head; p; p = p->next) { | ||
821 | struct ipv6hdr *iph2; | ||
822 | |||
823 | if (!NAPI_GRO_CB(p)->same_flow) | ||
824 | continue; | ||
825 | |||
826 | iph2 = ipv6_hdr(p); | ||
827 | |||
828 | /* All fields must match except length. */ | ||
829 | if (nlen != skb_network_header_len(p) || | ||
830 | memcmp(iph, iph2, offsetof(struct ipv6hdr, payload_len)) || | ||
831 | memcmp(&iph->nexthdr, &iph2->nexthdr, | ||
832 | nlen - offsetof(struct ipv6hdr, nexthdr))) { | ||
833 | NAPI_GRO_CB(p)->same_flow = 0; | ||
834 | continue; | ||
835 | } | ||
836 | |||
837 | NAPI_GRO_CB(p)->flush |= flush; | ||
838 | } | ||
839 | |||
840 | NAPI_GRO_CB(skb)->flush |= flush; | ||
841 | |||
842 | pp = ops->gro_receive(head, skb); | ||
843 | |||
844 | out_unlock: | ||
845 | rcu_read_unlock(); | ||
846 | |||
847 | out: | ||
848 | NAPI_GRO_CB(skb)->flush |= flush; | ||
849 | |||
850 | return pp; | ||
851 | } | ||
852 | |||
853 | static int ipv6_gro_complete(struct sk_buff *skb) | ||
854 | { | ||
855 | struct inet6_protocol *ops; | ||
856 | struct ipv6hdr *iph = ipv6_hdr(skb); | ||
857 | int err = -ENOSYS; | ||
858 | |||
859 | iph->payload_len = htons(skb->len - skb_network_offset(skb) - | ||
860 | sizeof(*iph)); | ||
861 | |||
862 | rcu_read_lock(); | ||
863 | ops = rcu_dereference(inet6_protos[IPV6_GRO_CB(skb)->proto]); | ||
864 | if (WARN_ON(!ops || !ops->gro_complete)) | ||
865 | goto out_unlock; | ||
866 | |||
867 | err = ops->gro_complete(skb); | ||
868 | |||
869 | out_unlock: | ||
870 | rcu_read_unlock(); | ||
871 | |||
872 | return err; | ||
873 | } | ||
874 | |||
780 | static struct packet_type ipv6_packet_type = { | 875 | static struct packet_type ipv6_packet_type = { |
781 | .type = __constant_htons(ETH_P_IPV6), | 876 | .type = __constant_htons(ETH_P_IPV6), |
782 | .func = ipv6_rcv, | 877 | .func = ipv6_rcv, |
783 | .gso_send_check = ipv6_gso_send_check, | 878 | .gso_send_check = ipv6_gso_send_check, |
784 | .gso_segment = ipv6_gso_segment, | 879 | .gso_segment = ipv6_gso_segment, |
880 | .gro_receive = ipv6_gro_receive, | ||
881 | .gro_complete = ipv6_gro_complete, | ||
785 | }; | 882 | }; |
786 | 883 | ||
787 | static int __init ipv6_packet_init(void) | 884 | static int __init ipv6_packet_init(void) |
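
Editor's note: the ipv6_gro_receive() hunk above decides whether two held packets belong to the same flow by comparing everything in the IPv6 header except payload_len. Below is a self-contained userspace sketch of that comparison; the struct is a simplified stand-in for the kernel's struct ipv6hdr (the real header uses bitfields and a flow-label array), and hdr_len stands for skb_network_header_len(), i.e. the base header plus any pulled extension headers.

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Simplified stand-in for struct ipv6hdr: 4 bytes of version/traffic
 * class/flow label, then payload length, next header, hop limit and
 * the two addresses. Layout is an assumption for illustration only. */
struct ipv6hdr_demo {
        uint32_t vtc_flow;
        uint16_t payload_len;
        uint8_t  nexthdr;
        uint8_t  hop_limit;
        uint8_t  saddr[16];
        uint8_t  daddr[16];
};

/* Same test as the GRO loop above: every field before payload_len must
 * match, and every byte from nexthdr to the end of the network header
 * must match, so only payload_len itself may differ between packets. */
static int same_ipv6_flow(const void *h1, const void *h2, size_t hdr_len)
{
        const struct ipv6hdr_demo *a = h1, *b = h2;

        return memcmp(a, b, offsetof(struct ipv6hdr_demo, payload_len)) == 0 &&
               memcmp(&a->nexthdr, &b->nexthdr,
                      hdr_len - offsetof(struct ipv6hdr_demo, nexthdr)) == 0;
}
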
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index eeeaad2e8b5c..40f324655e24 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c | |||
@@ -404,7 +404,7 @@ sticky_done: | |||
404 | else if (optlen < sizeof(struct in6_pktinfo) || optval == NULL) | 404 | else if (optlen < sizeof(struct in6_pktinfo) || optval == NULL) |
405 | goto e_inval; | 405 | goto e_inval; |
406 | 406 | ||
407 | if (copy_from_user(&pkt, optval, optlen)) { | 407 | if (copy_from_user(&pkt, optval, sizeof(struct in6_pktinfo))) { |
408 | retv = -EFAULT; | 408 | retv = -EFAULT; |
409 | break; | 409 | break; |
410 | } | 410 | } |
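
Editor's note: the one-line ipv6_sockglue.c change above is worth spelling out. optlen has only been checked as a lower bound (at least sizeof(struct in6_pktinfo)), so copying optlen bytes could write past the on-stack pkt if a caller passes a larger value; bounding the copy by the destination's size closes that off. A kernel-context fragment of the corrected pattern (not standalone; pkt, optval, optlen and the surrounding setsockopt switch are as in the function above):

struct in6_pktinfo pkt;

/* optlen is only guaranteed to be >= sizeof(pkt); never use it as the
 * copy length for a fixed-size destination. */
if (optlen < sizeof(struct in6_pktinfo) || optval == NULL)
        goto e_inval;

if (copy_from_user(&pkt, optval, sizeof(pkt))) {
        retv = -EFAULT;
        break;
}
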
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 18c486cf4987..c4a59824ac2c 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -627,6 +627,9 @@ static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, struct in6_addr *dad | |||
627 | rt = ip6_rt_copy(ort); | 627 | rt = ip6_rt_copy(ort); |
628 | 628 | ||
629 | if (rt) { | 629 | if (rt) { |
630 | struct neighbour *neigh; | ||
631 | int attempts = !in_softirq(); | ||
632 | |||
630 | if (!(rt->rt6i_flags&RTF_GATEWAY)) { | 633 | if (!(rt->rt6i_flags&RTF_GATEWAY)) { |
631 | if (rt->rt6i_dst.plen != 128 && | 634 | if (rt->rt6i_dst.plen != 128 && |
632 | ipv6_addr_equal(&rt->rt6i_dst.addr, daddr)) | 635 | ipv6_addr_equal(&rt->rt6i_dst.addr, daddr)) |
@@ -646,7 +649,35 @@ static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, struct in6_addr *dad | |||
646 | } | 649 | } |
647 | #endif | 650 | #endif |
648 | 651 | ||
649 | rt->rt6i_nexthop = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway); | 652 | retry: |
653 | neigh = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway); | ||
654 | if (IS_ERR(neigh)) { | ||
655 | struct net *net = dev_net(rt->rt6i_dev); | ||
656 | int saved_rt_min_interval = | ||
657 | net->ipv6.sysctl.ip6_rt_gc_min_interval; | ||
658 | int saved_rt_elasticity = | ||
659 | net->ipv6.sysctl.ip6_rt_gc_elasticity; | ||
660 | |||
661 | if (attempts-- > 0) { | ||
662 | net->ipv6.sysctl.ip6_rt_gc_elasticity = 1; | ||
663 | net->ipv6.sysctl.ip6_rt_gc_min_interval = 0; | ||
664 | |||
665 | ip6_dst_gc(net->ipv6.ip6_dst_ops); | ||
666 | |||
667 | net->ipv6.sysctl.ip6_rt_gc_elasticity = | ||
668 | saved_rt_elasticity; | ||
669 | net->ipv6.sysctl.ip6_rt_gc_min_interval = | ||
670 | saved_rt_min_interval; | ||
671 | goto retry; | ||
672 | } | ||
673 | |||
674 | if (net_ratelimit()) | ||
675 | printk(KERN_WARNING | ||
676 | "Neighbour table overflow.\n"); | ||
677 | dst_free(&rt->u.dst); | ||
678 | return NULL; | ||
679 | } | ||
680 | rt->rt6i_nexthop = neigh; | ||
650 | 681 | ||
651 | } | 682 | } |
652 | 683 | ||
@@ -945,8 +976,11 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev, | |||
945 | dev_hold(dev); | 976 | dev_hold(dev); |
946 | if (neigh) | 977 | if (neigh) |
947 | neigh_hold(neigh); | 978 | neigh_hold(neigh); |
948 | else | 979 | else { |
949 | neigh = ndisc_get_neigh(dev, addr); | 980 | neigh = ndisc_get_neigh(dev, addr); |
981 | if (IS_ERR(neigh)) | ||
982 | neigh = NULL; | ||
983 | } | ||
950 | 984 | ||
951 | rt->rt6i_dev = dev; | 985 | rt->rt6i_dev = dev; |
952 | rt->rt6i_idev = idev; | 986 | rt->rt6i_idev = idev; |
@@ -1887,6 +1921,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev, | |||
1887 | { | 1921 | { |
1888 | struct net *net = dev_net(idev->dev); | 1922 | struct net *net = dev_net(idev->dev); |
1889 | struct rt6_info *rt = ip6_dst_alloc(net->ipv6.ip6_dst_ops); | 1923 | struct rt6_info *rt = ip6_dst_alloc(net->ipv6.ip6_dst_ops); |
1924 | struct neighbour *neigh; | ||
1890 | 1925 | ||
1891 | if (rt == NULL) | 1926 | if (rt == NULL) |
1892 | return ERR_PTR(-ENOMEM); | 1927 | return ERR_PTR(-ENOMEM); |
@@ -1909,11 +1944,18 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev, | |||
1909 | rt->rt6i_flags |= RTF_ANYCAST; | 1944 | rt->rt6i_flags |= RTF_ANYCAST; |
1910 | else | 1945 | else |
1911 | rt->rt6i_flags |= RTF_LOCAL; | 1946 | rt->rt6i_flags |= RTF_LOCAL; |
1912 | rt->rt6i_nexthop = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway); | 1947 | neigh = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway); |
1913 | if (rt->rt6i_nexthop == NULL) { | 1948 | if (IS_ERR(neigh)) { |
1914 | dst_free(&rt->u.dst); | 1949 | dst_free(&rt->u.dst); |
1915 | return ERR_PTR(-ENOMEM); | 1950 | |
1951 | /* We are casting this because that is the return | ||
1952 | * value type. But an errno encoded pointer is the | ||
1953 | * same regardless of the underlying pointer type, | ||
1954 | * and that's what we are returning. So this is OK. | ||
1955 | */ | ||
1956 | return (struct rt6_info *) neigh; | ||
1916 | } | 1957 | } |
1958 | rt->rt6i_nexthop = neigh; | ||
1917 | 1959 | ||
1918 | ipv6_addr_copy(&rt->rt6i_dst.addr, addr); | 1960 | ipv6_addr_copy(&rt->rt6i_dst.addr, addr); |
1919 | rt->rt6i_dst.plen = 128; | 1961 | rt->rt6i_dst.plen = 128; |
@@ -2710,7 +2752,7 @@ int __init ip6_route_init(void) | |||
2710 | kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0, | 2752 | kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0, |
2711 | SLAB_HWCACHE_ALIGN, NULL); | 2753 | SLAB_HWCACHE_ALIGN, NULL); |
2712 | if (!ip6_dst_ops_template.kmem_cachep) | 2754 | if (!ip6_dst_ops_template.kmem_cachep) |
2713 | goto out;; | 2755 | goto out; |
2714 | 2756 | ||
2715 | ret = register_pernet_subsys(&ip6_route_net_ops); | 2757 | ret = register_pernet_subsys(&ip6_route_net_ops); |
2716 | if (ret) | 2758 | if (ret) |
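
Editor's note: several of the route.c hunks above hinge on ndisc_get_neigh() now returning an errno encoded in the pointer value rather than NULL, which is why IS_ERR() checks appear and why the cast in addrconf_dst_alloc() is safe. Below is a self-contained userspace re-implementation of that idiom (simplified; the kernel's versions live in <linux/err.h>, and the lowercase names mark this as a demo).

#include <stdio.h>

/* The last 4095 values of the address space are reserved for encoding
 * errnos, so an error "pointer" is never a valid object address and the
 * encoding survives casts between unrelated pointer types. */
#define MAX_ERRNO       4095

static inline void *err_ptr(long error)
{
        return (void *)error;
}

static inline long ptr_err(const void *ptr)
{
        return (long)ptr;
}

static inline int is_err(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
        void *neigh = err_ptr(-12);     /* pretend the lookup failed with -ENOMEM */
        struct rt6_info_demo { int dummy; } *rt;

        if (is_err(neigh)) {
                /* same trick as addrconf_dst_alloc(): the errno encoding is
                 * identical no matter which pointer type carries it */
                rt = (struct rt6_info_demo *)neigh;
                printf("error %ld\n", ptr_err(rt));
        }
        return 0;
}
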
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c index 9048fe7e7ea7..a031034720b4 100644 --- a/net/ipv6/sysctl_net_ipv6.c +++ b/net/ipv6/sysctl_net_ipv6.c | |||
@@ -128,7 +128,7 @@ static struct ctl_table_header *ip6_header; | |||
128 | 128 | ||
129 | int ipv6_sysctl_register(void) | 129 | int ipv6_sysctl_register(void) |
130 | { | 130 | { |
131 | int err = -ENOMEM;; | 131 | int err = -ENOMEM; |
132 | 132 | ||
133 | ip6_header = register_net_sysctl_rotable(net_ipv6_ctl_path, ipv6_table); | 133 | ip6_header = register_net_sysctl_rotable(net_ipv6_ctl_path, ipv6_table); |
134 | if (ip6_header == NULL) | 134 | if (ip6_header == NULL) |
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 71cd70951d7d..e5b85d45bee8 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -101,7 +101,7 @@ static void tcp_v6_hash(struct sock *sk) | |||
101 | } | 101 | } |
102 | } | 102 | } |
103 | 103 | ||
104 | static __inline__ __sum16 tcp_v6_check(struct tcphdr *th, int len, | 104 | static __inline__ __sum16 tcp_v6_check(int len, |
105 | struct in6_addr *saddr, | 105 | struct in6_addr *saddr, |
106 | struct in6_addr *daddr, | 106 | struct in6_addr *daddr, |
107 | __wsum base) | 107 | __wsum base) |
@@ -501,7 +501,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req) | |||
501 | if (skb) { | 501 | if (skb) { |
502 | struct tcphdr *th = tcp_hdr(skb); | 502 | struct tcphdr *th = tcp_hdr(skb); |
503 | 503 | ||
504 | th->check = tcp_v6_check(th, skb->len, | 504 | th->check = tcp_v6_check(skb->len, |
505 | &treq->loc_addr, &treq->rmt_addr, | 505 | &treq->loc_addr, &treq->rmt_addr, |
506 | csum_partial(th, skb->len, skb->csum)); | 506 | csum_partial(th, skb->len, skb->csum)); |
507 | 507 | ||
@@ -942,6 +942,41 @@ static int tcp_v6_gso_send_check(struct sk_buff *skb) | |||
942 | return 0; | 942 | return 0; |
943 | } | 943 | } |
944 | 944 | ||
945 | struct sk_buff **tcp6_gro_receive(struct sk_buff **head, struct sk_buff *skb) | ||
946 | { | ||
947 | struct ipv6hdr *iph = ipv6_hdr(skb); | ||
948 | |||
949 | switch (skb->ip_summed) { | ||
950 | case CHECKSUM_COMPLETE: | ||
951 | if (!tcp_v6_check(skb->len, &iph->saddr, &iph->daddr, | ||
952 | skb->csum)) { | ||
953 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
954 | break; | ||
955 | } | ||
956 | |||
957 | /* fall through */ | ||
958 | case CHECKSUM_NONE: | ||
959 | NAPI_GRO_CB(skb)->flush = 1; | ||
960 | return NULL; | ||
961 | } | ||
962 | |||
963 | return tcp_gro_receive(head, skb); | ||
964 | } | ||
965 | EXPORT_SYMBOL(tcp6_gro_receive); | ||
966 | |||
967 | int tcp6_gro_complete(struct sk_buff *skb) | ||
968 | { | ||
969 | struct ipv6hdr *iph = ipv6_hdr(skb); | ||
970 | struct tcphdr *th = tcp_hdr(skb); | ||
971 | |||
972 | th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb), | ||
973 | &iph->saddr, &iph->daddr, 0); | ||
974 | skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; | ||
975 | |||
976 | return tcp_gro_complete(skb); | ||
977 | } | ||
978 | EXPORT_SYMBOL(tcp6_gro_complete); | ||
979 | |||
945 | static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win, | 980 | static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win, |
946 | u32 ts, struct tcp_md5sig_key *key, int rst) | 981 | u32 ts, struct tcp_md5sig_key *key, int rst) |
947 | { | 982 | { |
@@ -1429,14 +1464,14 @@ out: | |||
1429 | static __sum16 tcp_v6_checksum_init(struct sk_buff *skb) | 1464 | static __sum16 tcp_v6_checksum_init(struct sk_buff *skb) |
1430 | { | 1465 | { |
1431 | if (skb->ip_summed == CHECKSUM_COMPLETE) { | 1466 | if (skb->ip_summed == CHECKSUM_COMPLETE) { |
1432 | if (!tcp_v6_check(tcp_hdr(skb), skb->len, &ipv6_hdr(skb)->saddr, | 1467 | if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr, |
1433 | &ipv6_hdr(skb)->daddr, skb->csum)) { | 1468 | &ipv6_hdr(skb)->daddr, skb->csum)) { |
1434 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 1469 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
1435 | return 0; | 1470 | return 0; |
1436 | } | 1471 | } |
1437 | } | 1472 | } |
1438 | 1473 | ||
1439 | skb->csum = ~csum_unfold(tcp_v6_check(tcp_hdr(skb), skb->len, | 1474 | skb->csum = ~csum_unfold(tcp_v6_check(skb->len, |
1440 | &ipv6_hdr(skb)->saddr, | 1475 | &ipv6_hdr(skb)->saddr, |
1441 | &ipv6_hdr(skb)->daddr, 0)); | 1476 | &ipv6_hdr(skb)->daddr, 0)); |
1442 | 1477 | ||
@@ -2062,6 +2097,8 @@ static struct inet6_protocol tcpv6_protocol = { | |||
2062 | .err_handler = tcp_v6_err, | 2097 | .err_handler = tcp_v6_err, |
2063 | .gso_send_check = tcp_v6_gso_send_check, | 2098 | .gso_send_check = tcp_v6_gso_send_check, |
2064 | .gso_segment = tcp_tso_segment, | 2099 | .gso_segment = tcp_tso_segment, |
2100 | .gro_receive = tcp6_gro_receive, | ||
2101 | .gro_complete = tcp6_gro_complete, | ||
2065 | .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, | 2102 | .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, |
2066 | }; | 2103 | }; |
2067 | 2104 | ||
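
Editor's note: tcp6_gro_receive() above validates a CHECKSUM_COMPLETE packet by folding the RFC 2460 pseudo-header into the accumulated sum, and tcp6_gro_complete() re-derives th->check from the same pseudo-header before handing off to tcp_gro_complete(). The following self-contained userspace sketch shows that pseudo-header checksum; it is a from-scratch illustration, not the kernel's csum_* implementation, and assumes the whole TCP segment sits in one contiguous buffer.

#include <stdint.h>
#include <stddef.h>
#include <netinet/in.h>

/* Accumulate a buffer into a one's-complement sum, treating the data as
 * big-endian 16-bit words; a trailing odd byte is padded with zero, so
 * only the last buffer passed in may have odd length. */
static uint32_t csum_accumulate(uint32_t sum, const void *buf, size_t len)
{
        const uint8_t *p = buf;

        while (len > 1) {
                sum += ((uint32_t)p[0] << 8) | p[1];
                p += 2;
                len -= 2;
        }
        if (len)
                sum += (uint32_t)p[0] << 8;
        return sum;
}

static uint16_t csum_finish(uint32_t sum)
{
        while (sum >> 16)
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
}

/* RFC 2460 upper-layer checksum for TCP over IPv6: source address,
 * destination address, upper-layer length, three zero bytes and the
 * next-header value (6), followed by the TCP header and payload.
 * Recomputing this over a received segment with its Checksum field
 * left in place should yield 0. */
uint16_t tcp6_checksum(const struct in6_addr *src, const struct in6_addr *dst,
                       const void *tcp_seg, size_t tcp_len)
{
        uint8_t ph_tail[8] = { 0 };
        uint32_t sum = 0;

        ph_tail[0] = (uint8_t)(tcp_len >> 24);
        ph_tail[1] = (uint8_t)(tcp_len >> 16);
        ph_tail[2] = (uint8_t)(tcp_len >> 8);
        ph_tail[3] = (uint8_t)tcp_len;
        ph_tail[7] = 6;                 /* IPPROTO_TCP */

        sum = csum_accumulate(sum, src, sizeof(*src));
        sum = csum_accumulate(sum, dst, sizeof(*dst));
        sum = csum_accumulate(sum, ph_tail, sizeof(ph_tail));
        sum = csum_accumulate(sum, tcp_seg, tcp_len);
        return csum_finish(sum);
}
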
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index af3192d2a5a3..eb8a2a0b6eb7 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c | |||
@@ -494,7 +494,21 @@ static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr, | |||
494 | if (err) { | 494 | if (err) { |
495 | iucv_path_free(iucv->path); | 495 | iucv_path_free(iucv->path); |
496 | iucv->path = NULL; | 496 | iucv->path = NULL; |
497 | err = -ECONNREFUSED; | 497 | switch (err) { |
498 | case 0x0b: /* Target communicator is not logged on */ | ||
499 | err = -ENETUNREACH; | ||
500 | break; | ||
501 | case 0x0d: /* Max connections for this guest exceeded */ | ||
502 | case 0x0e: /* Max connections for target guest exceeded */ | ||
503 | err = -EAGAIN; | ||
504 | break; | ||
505 | case 0x0f: /* Missing IUCV authorization */ | ||
506 | err = -EACCES; | ||
507 | break; | ||
508 | default: | ||
509 | err = -ECONNREFUSED; | ||
510 | break; | ||
511 | } | ||
498 | goto done; | 512 | goto done; |
499 | } | 513 | } |
500 | 514 | ||
@@ -507,6 +521,13 @@ static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr, | |||
507 | release_sock(sk); | 521 | release_sock(sk); |
508 | return -ECONNREFUSED; | 522 | return -ECONNREFUSED; |
509 | } | 523 | } |
524 | |||
525 | if (err) { | ||
526 | iucv_path_sever(iucv->path, NULL); | ||
527 | iucv_path_free(iucv->path); | ||
528 | iucv->path = NULL; | ||
529 | } | ||
530 | |||
510 | done: | 531 | done: |
511 | release_sock(sk); | 532 | release_sock(sk); |
512 | return err; | 533 | return err; |
@@ -1021,12 +1042,14 @@ static int iucv_callback_connreq(struct iucv_path *path, | |||
1021 | ASCEBC(user_data, sizeof(user_data)); | 1042 | ASCEBC(user_data, sizeof(user_data)); |
1022 | if (sk->sk_state != IUCV_LISTEN) { | 1043 | if (sk->sk_state != IUCV_LISTEN) { |
1023 | err = iucv_path_sever(path, user_data); | 1044 | err = iucv_path_sever(path, user_data); |
1045 | iucv_path_free(path); | ||
1024 | goto fail; | 1046 | goto fail; |
1025 | } | 1047 | } |
1026 | 1048 | ||
1027 | /* Check for backlog size */ | 1049 | /* Check for backlog size */ |
1028 | if (sk_acceptq_is_full(sk)) { | 1050 | if (sk_acceptq_is_full(sk)) { |
1029 | err = iucv_path_sever(path, user_data); | 1051 | err = iucv_path_sever(path, user_data); |
1052 | iucv_path_free(path); | ||
1030 | goto fail; | 1053 | goto fail; |
1031 | } | 1054 | } |
1032 | 1055 | ||
@@ -1034,6 +1057,7 @@ static int iucv_callback_connreq(struct iucv_path *path, | |||
1034 | nsk = iucv_sock_alloc(NULL, SOCK_STREAM, GFP_ATOMIC); | 1057 | nsk = iucv_sock_alloc(NULL, SOCK_STREAM, GFP_ATOMIC); |
1035 | if (!nsk) { | 1058 | if (!nsk) { |
1036 | err = iucv_path_sever(path, user_data); | 1059 | err = iucv_path_sever(path, user_data); |
1060 | iucv_path_free(path); | ||
1037 | goto fail; | 1061 | goto fail; |
1038 | } | 1062 | } |
1039 | 1063 | ||
@@ -1057,6 +1081,8 @@ static int iucv_callback_connreq(struct iucv_path *path, | |||
1057 | err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk); | 1081 | err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk); |
1058 | if (err) { | 1082 | if (err) { |
1059 | err = iucv_path_sever(path, user_data); | 1083 | err = iucv_path_sever(path, user_data); |
1084 | iucv_path_free(path); | ||
1085 | iucv_sock_kill(nsk); | ||
1060 | goto fail; | 1086 | goto fail; |
1061 | } | 1087 | } |
1062 | 1088 | ||
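
Editor's note: the connect path above now distinguishes the IUCV CONNECT return codes instead of collapsing everything to -ECONNREFUSED. Pulled out as a standalone helper for clarity (a sketch only; the actual patch keeps the switch inline in iucv_sock_connect(), and the hex values are the codes reported by the z/VM IUCV facility):

#include <errno.h>

static int iucv_connect_rc_to_errno(int rc)
{
        switch (rc) {
        case 0x0b:      /* target communicator is not logged on */
                return -ENETUNREACH;
        case 0x0d:      /* max connections for this guest exceeded */
        case 0x0e:      /* max connections for target guest exceeded */
                return -EAGAIN;
        case 0x0f:      /* missing IUCV authorization */
                return -EACCES;
        default:
                return -ECONNREFUSED;
        }
}
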
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c index 8f57d4f4328a..a35240f61ec3 100644 --- a/net/iucv/iucv.c +++ b/net/iucv/iucv.c | |||
@@ -50,7 +50,6 @@ | |||
50 | #include <asm/ebcdic.h> | 50 | #include <asm/ebcdic.h> |
51 | #include <asm/io.h> | 51 | #include <asm/io.h> |
52 | #include <asm/s390_ext.h> | 52 | #include <asm/s390_ext.h> |
53 | #include <asm/s390_rdev.h> | ||
54 | #include <asm/smp.h> | 53 | #include <asm/smp.h> |
55 | 54 | ||
56 | /* | 55 | /* |
@@ -517,6 +516,7 @@ static int iucv_enable(void) | |||
517 | size_t alloc_size; | 516 | size_t alloc_size; |
518 | int cpu, rc; | 517 | int cpu, rc; |
519 | 518 | ||
519 | get_online_cpus(); | ||
520 | rc = -ENOMEM; | 520 | rc = -ENOMEM; |
521 | alloc_size = iucv_max_pathid * sizeof(struct iucv_path); | 521 | alloc_size = iucv_max_pathid * sizeof(struct iucv_path); |
522 | iucv_path_table = kzalloc(alloc_size, GFP_KERNEL); | 522 | iucv_path_table = kzalloc(alloc_size, GFP_KERNEL); |
@@ -524,19 +524,17 @@ static int iucv_enable(void) | |||
524 | goto out; | 524 | goto out; |
525 | /* Declare per cpu buffers. */ | 525 | /* Declare per cpu buffers. */ |
526 | rc = -EIO; | 526 | rc = -EIO; |
527 | get_online_cpus(); | ||
528 | for_each_online_cpu(cpu) | 527 | for_each_online_cpu(cpu) |
529 | smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1); | 528 | smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1); |
530 | if (cpus_empty(iucv_buffer_cpumask)) | 529 | if (cpus_empty(iucv_buffer_cpumask)) |
531 | /* No cpu could declare an iucv buffer. */ | 530 | /* No cpu could declare an iucv buffer. */ |
532 | goto out_path; | 531 | goto out; |
533 | put_online_cpus(); | 532 | put_online_cpus(); |
534 | return 0; | 533 | return 0; |
535 | |||
536 | out_path: | ||
537 | put_online_cpus(); | ||
538 | kfree(iucv_path_table); | ||
539 | out: | 534 | out: |
535 | kfree(iucv_path_table); | ||
536 | iucv_path_table = NULL; | ||
537 | put_online_cpus(); | ||
540 | return rc; | 538 | return rc; |
541 | } | 539 | } |
542 | 540 | ||
@@ -551,8 +549,9 @@ static void iucv_disable(void) | |||
551 | { | 549 | { |
552 | get_online_cpus(); | 550 | get_online_cpus(); |
553 | on_each_cpu(iucv_retrieve_cpu, NULL, 1); | 551 | on_each_cpu(iucv_retrieve_cpu, NULL, 1); |
554 | put_online_cpus(); | ||
555 | kfree(iucv_path_table); | 552 | kfree(iucv_path_table); |
553 | iucv_path_table = NULL; | ||
554 | put_online_cpus(); | ||
556 | } | 555 | } |
557 | 556 | ||
558 | static int __cpuinit iucv_cpu_notify(struct notifier_block *self, | 557 | static int __cpuinit iucv_cpu_notify(struct notifier_block *self, |
@@ -589,10 +588,14 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self, | |||
589 | case CPU_ONLINE_FROZEN: | 588 | case CPU_ONLINE_FROZEN: |
590 | case CPU_DOWN_FAILED: | 589 | case CPU_DOWN_FAILED: |
591 | case CPU_DOWN_FAILED_FROZEN: | 590 | case CPU_DOWN_FAILED_FROZEN: |
591 | if (!iucv_path_table) | ||
592 | break; | ||
592 | smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1); | 593 | smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1); |
593 | break; | 594 | break; |
594 | case CPU_DOWN_PREPARE: | 595 | case CPU_DOWN_PREPARE: |
595 | case CPU_DOWN_PREPARE_FROZEN: | 596 | case CPU_DOWN_PREPARE_FROZEN: |
597 | if (!iucv_path_table) | ||
598 | break; | ||
596 | cpumask = iucv_buffer_cpumask; | 599 | cpumask = iucv_buffer_cpumask; |
597 | cpu_clear(cpu, cpumask); | 600 | cpu_clear(cpu, cpumask); |
598 | if (cpus_empty(cpumask)) | 601 | if (cpus_empty(cpumask)) |
@@ -1692,7 +1695,7 @@ static int __init iucv_init(void) | |||
1692 | rc = register_external_interrupt(0x4000, iucv_external_interrupt); | 1695 | rc = register_external_interrupt(0x4000, iucv_external_interrupt); |
1693 | if (rc) | 1696 | if (rc) |
1694 | goto out; | 1697 | goto out; |
1695 | iucv_root = s390_root_dev_register("iucv"); | 1698 | iucv_root = root_device_register("iucv"); |
1696 | if (IS_ERR(iucv_root)) { | 1699 | if (IS_ERR(iucv_root)) { |
1697 | rc = PTR_ERR(iucv_root); | 1700 | rc = PTR_ERR(iucv_root); |
1698 | goto out_int; | 1701 | goto out_int; |
@@ -1736,7 +1739,7 @@ out_free: | |||
1736 | kfree(iucv_irq_data[cpu]); | 1739 | kfree(iucv_irq_data[cpu]); |
1737 | iucv_irq_data[cpu] = NULL; | 1740 | iucv_irq_data[cpu] = NULL; |
1738 | } | 1741 | } |
1739 | s390_root_dev_unregister(iucv_root); | 1742 | root_device_unregister(iucv_root); |
1740 | out_int: | 1743 | out_int: |
1741 | unregister_external_interrupt(0x4000, iucv_external_interrupt); | 1744 | unregister_external_interrupt(0x4000, iucv_external_interrupt); |
1742 | out: | 1745 | out: |
@@ -1766,7 +1769,7 @@ static void __exit iucv_exit(void) | |||
1766 | kfree(iucv_irq_data[cpu]); | 1769 | kfree(iucv_irq_data[cpu]); |
1767 | iucv_irq_data[cpu] = NULL; | 1770 | iucv_irq_data[cpu] = NULL; |
1768 | } | 1771 | } |
1769 | s390_root_dev_unregister(iucv_root); | 1772 | root_device_unregister(iucv_root); |
1770 | bus_unregister(&iucv_bus); | 1773 | bus_unregister(&iucv_bus); |
1771 | unregister_external_interrupt(0x4000, iucv_external_interrupt); | 1774 | unregister_external_interrupt(0x4000, iucv_external_interrupt); |
1772 | } | 1775 | } |
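
Editor's note: the iucv_enable()/iucv_disable() rework above widens the get_online_cpus()/put_online_cpus() window so that iucv_path_table is allocated, published and torn down entirely under the CPU-hotplug lock, and the notifier bails out when the table is absent. A condensed kernel-context sketch of that pattern (not standalone; the demo_* names are placeholders, not real kernel symbols):

#include <linux/cpu.h>
#include <linux/slab.h>

static void *demo_table;        /* stands in for iucv_path_table */

static int demo_enable(void)
{
        int rc = -ENOMEM;

        get_online_cpus();      /* hold off hotplug while the table is published */
        demo_table = kzalloc(PAGE_SIZE, GFP_KERNEL);
        if (!demo_table)
                goto out;
        /* per-CPU setup would run here, e.g. via smp_call_function_single() */
        put_online_cpus();
        return 0;
out:
        kfree(demo_table);
        demo_table = NULL;      /* the hotplug notifier checks for NULL and skips work */
        put_online_cpus();
        return rc;
}

static void demo_disable(void)
{
        get_online_cpus();
        /* per-CPU teardown would run here */
        kfree(demo_table);
        demo_table = NULL;
        put_online_cpus();
}
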
diff --git a/net/netlabel/netlabel_cipso_v4.c b/net/netlabel/netlabel_cipso_v4.c index fff32b70efa9..bf1ab1a6790d 100644 --- a/net/netlabel/netlabel_cipso_v4.c +++ b/net/netlabel/netlabel_cipso_v4.c | |||
@@ -130,6 +130,7 @@ static int netlbl_cipsov4_add_common(struct genl_info *info, | |||
130 | /** | 130 | /** |
131 | * netlbl_cipsov4_add_std - Adds a CIPSO V4 DOI definition | 131 | * netlbl_cipsov4_add_std - Adds a CIPSO V4 DOI definition |
132 | * @info: the Generic NETLINK info block | 132 | * @info: the Generic NETLINK info block |
133 | * @audit_info: NetLabel audit information | ||
133 | * | 134 | * |
134 | * Description: | 135 | * Description: |
135 | * Create a new CIPSO_V4_MAP_TRANS DOI definition based on the given ADD | 136 | * Create a new CIPSO_V4_MAP_TRANS DOI definition based on the given ADD |
@@ -137,7 +138,8 @@ static int netlbl_cipsov4_add_common(struct genl_info *info, | |||
137 | * non-zero on error. | 138 | * non-zero on error. |
138 | * | 139 | * |
139 | */ | 140 | */ |
140 | static int netlbl_cipsov4_add_std(struct genl_info *info) | 141 | static int netlbl_cipsov4_add_std(struct genl_info *info, |
142 | struct netlbl_audit *audit_info) | ||
141 | { | 143 | { |
142 | int ret_val = -EINVAL; | 144 | int ret_val = -EINVAL; |
143 | struct cipso_v4_doi *doi_def = NULL; | 145 | struct cipso_v4_doi *doi_def = NULL; |
@@ -316,7 +318,7 @@ static int netlbl_cipsov4_add_std(struct genl_info *info) | |||
316 | } | 318 | } |
317 | } | 319 | } |
318 | 320 | ||
319 | ret_val = cipso_v4_doi_add(doi_def); | 321 | ret_val = cipso_v4_doi_add(doi_def, audit_info); |
320 | if (ret_val != 0) | 322 | if (ret_val != 0) |
321 | goto add_std_failure; | 323 | goto add_std_failure; |
322 | return 0; | 324 | return 0; |
@@ -330,6 +332,7 @@ add_std_failure: | |||
330 | /** | 332 | /** |
331 | * netlbl_cipsov4_add_pass - Adds a CIPSO V4 DOI definition | 333 | * netlbl_cipsov4_add_pass - Adds a CIPSO V4 DOI definition |
332 | * @info: the Generic NETLINK info block | 334 | * @info: the Generic NETLINK info block |
335 | * @audit_info: NetLabel audit information | ||
333 | * | 336 | * |
334 | * Description: | 337 | * Description: |
335 | * Create a new CIPSO_V4_MAP_PASS DOI definition based on the given ADD message | 338 | * Create a new CIPSO_V4_MAP_PASS DOI definition based on the given ADD message |
@@ -337,7 +340,8 @@ add_std_failure: | |||
337 | * error. | 340 | * error. |
338 | * | 341 | * |
339 | */ | 342 | */ |
340 | static int netlbl_cipsov4_add_pass(struct genl_info *info) | 343 | static int netlbl_cipsov4_add_pass(struct genl_info *info, |
344 | struct netlbl_audit *audit_info) | ||
341 | { | 345 | { |
342 | int ret_val; | 346 | int ret_val; |
343 | struct cipso_v4_doi *doi_def = NULL; | 347 | struct cipso_v4_doi *doi_def = NULL; |
@@ -354,7 +358,7 @@ static int netlbl_cipsov4_add_pass(struct genl_info *info) | |||
354 | if (ret_val != 0) | 358 | if (ret_val != 0) |
355 | goto add_pass_failure; | 359 | goto add_pass_failure; |
356 | 360 | ||
357 | ret_val = cipso_v4_doi_add(doi_def); | 361 | ret_val = cipso_v4_doi_add(doi_def, audit_info); |
358 | if (ret_val != 0) | 362 | if (ret_val != 0) |
359 | goto add_pass_failure; | 363 | goto add_pass_failure; |
360 | return 0; | 364 | return 0; |
@@ -367,6 +371,7 @@ add_pass_failure: | |||
367 | /** | 371 | /** |
368 | * netlbl_cipsov4_add_local - Adds a CIPSO V4 DOI definition | 372 | * netlbl_cipsov4_add_local - Adds a CIPSO V4 DOI definition |
369 | * @info: the Generic NETLINK info block | 373 | * @info: the Generic NETLINK info block |
374 | * @audit_info: NetLabel audit information | ||
370 | * | 375 | * |
371 | * Description: | 376 | * Description: |
372 | * Create a new CIPSO_V4_MAP_LOCAL DOI definition based on the given ADD | 377 | * Create a new CIPSO_V4_MAP_LOCAL DOI definition based on the given ADD |
@@ -374,7 +379,8 @@ add_pass_failure: | |||
374 | * non-zero on error. | 379 | * non-zero on error. |
375 | * | 380 | * |
376 | */ | 381 | */ |
377 | static int netlbl_cipsov4_add_local(struct genl_info *info) | 382 | static int netlbl_cipsov4_add_local(struct genl_info *info, |
383 | struct netlbl_audit *audit_info) | ||
378 | { | 384 | { |
379 | int ret_val; | 385 | int ret_val; |
380 | struct cipso_v4_doi *doi_def = NULL; | 386 | struct cipso_v4_doi *doi_def = NULL; |
@@ -391,7 +397,7 @@ static int netlbl_cipsov4_add_local(struct genl_info *info) | |||
391 | if (ret_val != 0) | 397 | if (ret_val != 0) |
392 | goto add_local_failure; | 398 | goto add_local_failure; |
393 | 399 | ||
394 | ret_val = cipso_v4_doi_add(doi_def); | 400 | ret_val = cipso_v4_doi_add(doi_def, audit_info); |
395 | if (ret_val != 0) | 401 | if (ret_val != 0) |
396 | goto add_local_failure; | 402 | goto add_local_failure; |
397 | return 0; | 403 | return 0; |
@@ -415,48 +421,31 @@ static int netlbl_cipsov4_add(struct sk_buff *skb, struct genl_info *info) | |||
415 | 421 | ||
416 | { | 422 | { |
417 | int ret_val = -EINVAL; | 423 | int ret_val = -EINVAL; |
418 | u32 type; | ||
419 | u32 doi; | ||
420 | const char *type_str = "(unknown)"; | 424 | const char *type_str = "(unknown)"; |
421 | struct audit_buffer *audit_buf; | ||
422 | struct netlbl_audit audit_info; | 425 | struct netlbl_audit audit_info; |
423 | 426 | ||
424 | if (!info->attrs[NLBL_CIPSOV4_A_DOI] || | 427 | if (!info->attrs[NLBL_CIPSOV4_A_DOI] || |
425 | !info->attrs[NLBL_CIPSOV4_A_MTYPE]) | 428 | !info->attrs[NLBL_CIPSOV4_A_MTYPE]) |
426 | return -EINVAL; | 429 | return -EINVAL; |
427 | 430 | ||
428 | doi = nla_get_u32(info->attrs[NLBL_CIPSOV4_A_DOI]); | ||
429 | netlbl_netlink_auditinfo(skb, &audit_info); | 431 | netlbl_netlink_auditinfo(skb, &audit_info); |
430 | 432 | switch (nla_get_u32(info->attrs[NLBL_CIPSOV4_A_MTYPE])) { | |
431 | type = nla_get_u32(info->attrs[NLBL_CIPSOV4_A_MTYPE]); | ||
432 | switch (type) { | ||
433 | case CIPSO_V4_MAP_TRANS: | 433 | case CIPSO_V4_MAP_TRANS: |
434 | type_str = "trans"; | 434 | type_str = "trans"; |
435 | ret_val = netlbl_cipsov4_add_std(info); | 435 | ret_val = netlbl_cipsov4_add_std(info, &audit_info); |
436 | break; | 436 | break; |
437 | case CIPSO_V4_MAP_PASS: | 437 | case CIPSO_V4_MAP_PASS: |
438 | type_str = "pass"; | 438 | type_str = "pass"; |
439 | ret_val = netlbl_cipsov4_add_pass(info); | 439 | ret_val = netlbl_cipsov4_add_pass(info, &audit_info); |
440 | break; | 440 | break; |
441 | case CIPSO_V4_MAP_LOCAL: | 441 | case CIPSO_V4_MAP_LOCAL: |
442 | type_str = "local"; | 442 | type_str = "local"; |
443 | ret_val = netlbl_cipsov4_add_local(info); | 443 | ret_val = netlbl_cipsov4_add_local(info, &audit_info); |
444 | break; | 444 | break; |
445 | } | 445 | } |
446 | if (ret_val == 0) | 446 | if (ret_val == 0) |
447 | atomic_inc(&netlabel_mgmt_protocount); | 447 | atomic_inc(&netlabel_mgmt_protocount); |
448 | 448 | ||
449 | audit_buf = netlbl_audit_start_common(AUDIT_MAC_CIPSOV4_ADD, | ||
450 | &audit_info); | ||
451 | if (audit_buf != NULL) { | ||
452 | audit_log_format(audit_buf, | ||
453 | " cipso_doi=%u cipso_type=%s res=%u", | ||
454 | doi, | ||
455 | type_str, | ||
456 | ret_val == 0 ? 1 : 0); | ||
457 | audit_log_end(audit_buf); | ||
458 | } | ||
459 | |||
460 | return ret_val; | 449 | return ret_val; |
461 | } | 450 | } |
462 | 451 | ||
@@ -725,9 +714,7 @@ static int netlbl_cipsov4_remove_cb(struct netlbl_dom_map *entry, void *arg) | |||
725 | static int netlbl_cipsov4_remove(struct sk_buff *skb, struct genl_info *info) | 714 | static int netlbl_cipsov4_remove(struct sk_buff *skb, struct genl_info *info) |
726 | { | 715 | { |
727 | int ret_val = -EINVAL; | 716 | int ret_val = -EINVAL; |
728 | u32 doi = 0; | ||
729 | struct netlbl_domhsh_walk_arg cb_arg; | 717 | struct netlbl_domhsh_walk_arg cb_arg; |
730 | struct audit_buffer *audit_buf; | ||
731 | struct netlbl_audit audit_info; | 718 | struct netlbl_audit audit_info; |
732 | u32 skip_bkt = 0; | 719 | u32 skip_bkt = 0; |
733 | u32 skip_chain = 0; | 720 | u32 skip_chain = 0; |
@@ -735,29 +722,17 @@ static int netlbl_cipsov4_remove(struct sk_buff *skb, struct genl_info *info) | |||
735 | if (!info->attrs[NLBL_CIPSOV4_A_DOI]) | 722 | if (!info->attrs[NLBL_CIPSOV4_A_DOI]) |
736 | return -EINVAL; | 723 | return -EINVAL; |
737 | 724 | ||
738 | doi = nla_get_u32(info->attrs[NLBL_CIPSOV4_A_DOI]); | ||
739 | netlbl_netlink_auditinfo(skb, &audit_info); | 725 | netlbl_netlink_auditinfo(skb, &audit_info); |
740 | 726 | cb_arg.doi = nla_get_u32(info->attrs[NLBL_CIPSOV4_A_DOI]); | |
741 | cb_arg.doi = doi; | ||
742 | cb_arg.audit_info = &audit_info; | 727 | cb_arg.audit_info = &audit_info; |
743 | ret_val = netlbl_domhsh_walk(&skip_bkt, &skip_chain, | 728 | ret_val = netlbl_domhsh_walk(&skip_bkt, &skip_chain, |
744 | netlbl_cipsov4_remove_cb, &cb_arg); | 729 | netlbl_cipsov4_remove_cb, &cb_arg); |
745 | if (ret_val == 0 || ret_val == -ENOENT) { | 730 | if (ret_val == 0 || ret_val == -ENOENT) { |
746 | ret_val = cipso_v4_doi_remove(doi, &audit_info); | 731 | ret_val = cipso_v4_doi_remove(cb_arg.doi, &audit_info); |
747 | if (ret_val == 0) | 732 | if (ret_val == 0) |
748 | atomic_dec(&netlabel_mgmt_protocount); | 733 | atomic_dec(&netlabel_mgmt_protocount); |
749 | } | 734 | } |
750 | 735 | ||
751 | audit_buf = netlbl_audit_start_common(AUDIT_MAC_CIPSOV4_DEL, | ||
752 | &audit_info); | ||
753 | if (audit_buf != NULL) { | ||
754 | audit_log_format(audit_buf, | ||
755 | " cipso_doi=%u res=%u", | ||
756 | doi, | ||
757 | ret_val == 0 ? 1 : 0); | ||
758 | audit_log_end(audit_buf); | ||
759 | } | ||
760 | |||
761 | return ret_val; | 736 | return ret_val; |
762 | } | 737 | } |
763 | 738 | ||
diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c index 5fadf10e5ddf..7a10bbe02c13 100644 --- a/net/netlabel/netlabel_domainhash.c +++ b/net/netlabel/netlabel_domainhash.c | |||
@@ -483,6 +483,73 @@ int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry, | |||
483 | } | 483 | } |
484 | 484 | ||
485 | /** | 485 | /** |
486 | * netlbl_domhsh_remove_af4 - Removes an address selector entry | ||
487 | * @domain: the domain | ||
488 | * @addr: IPv4 address | ||
489 | * @mask: IPv4 address mask | ||
490 | * @audit_info: NetLabel audit information | ||
491 | * | ||
492 | * Description: | ||
493 | * Removes an individual address selector from a domain mapping and potentially | ||
494 | * the entire mapping if it is empty. Returns zero on success, negative values | ||
495 | * on failure. | ||
496 | * | ||
497 | */ | ||
498 | int netlbl_domhsh_remove_af4(const char *domain, | ||
499 | const struct in_addr *addr, | ||
500 | const struct in_addr *mask, | ||
501 | struct netlbl_audit *audit_info) | ||
502 | { | ||
503 | struct netlbl_dom_map *entry_map; | ||
504 | struct netlbl_af4list *entry_addr; | ||
505 | struct netlbl_af4list *iter4; | ||
506 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
507 | struct netlbl_af6list *iter6; | ||
508 | #endif /* IPv6 */ | ||
509 | struct netlbl_domaddr4_map *entry; | ||
510 | |||
511 | rcu_read_lock(); | ||
512 | |||
513 | if (domain) | ||
514 | entry_map = netlbl_domhsh_search(domain); | ||
515 | else | ||
516 | entry_map = netlbl_domhsh_search_def(domain); | ||
517 | if (entry_map == NULL || entry_map->type != NETLBL_NLTYPE_ADDRSELECT) | ||
518 | goto remove_af4_failure; | ||
519 | |||
520 | spin_lock(&netlbl_domhsh_lock); | ||
521 | entry_addr = netlbl_af4list_remove(addr->s_addr, mask->s_addr, | ||
522 | &entry_map->type_def.addrsel->list4); | ||
523 | spin_unlock(&netlbl_domhsh_lock); | ||
524 | |||
525 | if (entry_addr == NULL) | ||
526 | goto remove_af4_failure; | ||
527 | netlbl_af4list_foreach_rcu(iter4, &entry_map->type_def.addrsel->list4) | ||
528 | goto remove_af4_single_addr; | ||
529 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
530 | netlbl_af6list_foreach_rcu(iter6, &entry_map->type_def.addrsel->list6) | ||
531 | goto remove_af4_single_addr; | ||
532 | #endif /* IPv6 */ | ||
533 | /* the domain mapping is empty so remove it from the mapping table */ | ||
534 | netlbl_domhsh_remove_entry(entry_map, audit_info); | ||
535 | |||
536 | remove_af4_single_addr: | ||
537 | rcu_read_unlock(); | ||
538 | /* yick, we can't use call_rcu here because we don't have a rcu head | ||
539 | * pointer but hopefully this should be a rare case so the pause | ||
540 | * shouldn't be a problem */ | ||
541 | synchronize_rcu(); | ||
542 | entry = netlbl_domhsh_addr4_entry(entry_addr); | ||
543 | cipso_v4_doi_putdef(entry->type_def.cipsov4); | ||
544 | kfree(entry); | ||
545 | return 0; | ||
546 | |||
547 | remove_af4_failure: | ||
548 | rcu_read_unlock(); | ||
549 | return -ENOENT; | ||
550 | } | ||
551 | |||
552 | /** | ||
486 | * netlbl_domhsh_remove - Removes an entry from the domain hash table | 553 | * netlbl_domhsh_remove - Removes an entry from the domain hash table |
487 | * @domain: the domain to remove | 554 | * @domain: the domain to remove |
488 | * @audit_info: NetLabel audit information | 555 | * @audit_info: NetLabel audit information |
diff --git a/net/netlabel/netlabel_domainhash.h b/net/netlabel/netlabel_domainhash.h index bfcb6763a1a1..0261dda3f2d2 100644 --- a/net/netlabel/netlabel_domainhash.h +++ b/net/netlabel/netlabel_domainhash.h | |||
@@ -90,6 +90,10 @@ int netlbl_domhsh_add_default(struct netlbl_dom_map *entry, | |||
90 | struct netlbl_audit *audit_info); | 90 | struct netlbl_audit *audit_info); |
91 | int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry, | 91 | int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry, |
92 | struct netlbl_audit *audit_info); | 92 | struct netlbl_audit *audit_info); |
93 | int netlbl_domhsh_remove_af4(const char *domain, | ||
94 | const struct in_addr *addr, | ||
95 | const struct in_addr *mask, | ||
96 | struct netlbl_audit *audit_info); | ||
93 | int netlbl_domhsh_remove(const char *domain, struct netlbl_audit *audit_info); | 97 | int netlbl_domhsh_remove(const char *domain, struct netlbl_audit *audit_info); |
94 | int netlbl_domhsh_remove_default(struct netlbl_audit *audit_info); | 98 | int netlbl_domhsh_remove_default(struct netlbl_audit *audit_info); |
95 | struct netlbl_dom_map *netlbl_domhsh_getentry(const char *domain); | 99 | struct netlbl_dom_map *netlbl_domhsh_getentry(const char *domain); |
diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c index b32eceb3ab0d..fd9229db075c 100644 --- a/net/netlabel/netlabel_kapi.c +++ b/net/netlabel/netlabel_kapi.c | |||
@@ -31,7 +31,10 @@ | |||
31 | #include <linux/init.h> | 31 | #include <linux/init.h> |
32 | #include <linux/types.h> | 32 | #include <linux/types.h> |
33 | #include <linux/audit.h> | 33 | #include <linux/audit.h> |
34 | #include <linux/in.h> | ||
35 | #include <linux/in6.h> | ||
34 | #include <net/ip.h> | 36 | #include <net/ip.h> |
37 | #include <net/ipv6.h> | ||
35 | #include <net/netlabel.h> | 38 | #include <net/netlabel.h> |
36 | #include <net/cipso_ipv4.h> | 39 | #include <net/cipso_ipv4.h> |
37 | #include <asm/bug.h> | 40 | #include <asm/bug.h> |
@@ -42,6 +45,7 @@ | |||
42 | #include "netlabel_cipso_v4.h" | 45 | #include "netlabel_cipso_v4.h" |
43 | #include "netlabel_user.h" | 46 | #include "netlabel_user.h" |
44 | #include "netlabel_mgmt.h" | 47 | #include "netlabel_mgmt.h" |
48 | #include "netlabel_addrlist.h" | ||
45 | 49 | ||
46 | /* | 50 | /* |
47 | * Configuration Functions | 51 | * Configuration Functions |
@@ -50,6 +54,9 @@ | |||
50 | /** | 54 | /** |
51 | * netlbl_cfg_map_del - Remove a NetLabel/LSM domain mapping | 55 | * netlbl_cfg_map_del - Remove a NetLabel/LSM domain mapping |
52 | * @domain: the domain mapping to remove | 56 | * @domain: the domain mapping to remove |
57 | * @family: address family | ||
58 | * @addr: IP address | ||
59 | * @mask: IP address mask | ||
53 | * @audit_info: NetLabel audit information | 60 | * @audit_info: NetLabel audit information |
54 | * | 61 | * |
55 | * Description: | 62 | * Description: |
@@ -58,14 +65,32 @@ | |||
58 | * values on failure. | 65 | * values on failure. |
59 | * | 66 | * |
60 | */ | 67 | */ |
61 | int netlbl_cfg_map_del(const char *domain, struct netlbl_audit *audit_info) | 68 | int netlbl_cfg_map_del(const char *domain, |
69 | u16 family, | ||
70 | const void *addr, | ||
71 | const void *mask, | ||
72 | struct netlbl_audit *audit_info) | ||
62 | { | 73 | { |
63 | return netlbl_domhsh_remove(domain, audit_info); | 74 | if (addr == NULL && mask == NULL) { |
75 | return netlbl_domhsh_remove(domain, audit_info); | ||
76 | } else if (addr != NULL && mask != NULL) { | ||
77 | switch (family) { | ||
78 | case AF_INET: | ||
79 | return netlbl_domhsh_remove_af4(domain, addr, mask, | ||
80 | audit_info); | ||
81 | default: | ||
82 | return -EPFNOSUPPORT; | ||
83 | } | ||
84 | } else | ||
85 | return -EINVAL; | ||
64 | } | 86 | } |
65 | 87 | ||
66 | /** | 88 | /** |
67 | * netlbl_cfg_unlbl_add_map - Add an unlabeled NetLabel/LSM domain mapping | 89 | * netlbl_cfg_unlbl_map_add - Add a new unlabeled mapping |
68 | * @domain: the domain mapping to add | 90 | * @domain: the domain mapping to add |
91 | * @family: address family | ||
92 | * @addr: IP address | ||
93 | * @mask: IP address mask | ||
69 | * @audit_info: NetLabel audit information | 94 | * @audit_info: NetLabel audit information |
70 | * | 95 | * |
71 | * Description: | 96 | * Description: |
@@ -74,11 +99,19 @@ int netlbl_cfg_map_del(const char *domain, struct netlbl_audit *audit_info) | |||
74 | * negative values on failure. | 99 | * negative values on failure. |
75 | * | 100 | * |
76 | */ | 101 | */ |
77 | int netlbl_cfg_unlbl_add_map(const char *domain, | 102 | int netlbl_cfg_unlbl_map_add(const char *domain, |
103 | u16 family, | ||
104 | const void *addr, | ||
105 | const void *mask, | ||
78 | struct netlbl_audit *audit_info) | 106 | struct netlbl_audit *audit_info) |
79 | { | 107 | { |
80 | int ret_val = -ENOMEM; | 108 | int ret_val = -ENOMEM; |
81 | struct netlbl_dom_map *entry; | 109 | struct netlbl_dom_map *entry; |
110 | struct netlbl_domaddr_map *addrmap = NULL; | ||
111 | struct netlbl_domaddr4_map *map4 = NULL; | ||
112 | struct netlbl_domaddr6_map *map6 = NULL; | ||
113 | const struct in_addr *addr4, *mask4; | ||
114 | const struct in6_addr *addr6, *mask6; | ||
82 | 115 | ||
83 | entry = kzalloc(sizeof(*entry), GFP_ATOMIC); | 116 | entry = kzalloc(sizeof(*entry), GFP_ATOMIC); |
84 | if (entry == NULL) | 117 | if (entry == NULL) |
@@ -86,49 +119,225 @@ int netlbl_cfg_unlbl_add_map(const char *domain, | |||
86 | if (domain != NULL) { | 119 | if (domain != NULL) { |
87 | entry->domain = kstrdup(domain, GFP_ATOMIC); | 120 | entry->domain = kstrdup(domain, GFP_ATOMIC); |
88 | if (entry->domain == NULL) | 121 | if (entry->domain == NULL) |
89 | goto cfg_unlbl_add_map_failure; | 122 | goto cfg_unlbl_map_add_failure; |
123 | } | ||
124 | |||
125 | if (addr == NULL && mask == NULL) | ||
126 | entry->type = NETLBL_NLTYPE_UNLABELED; | ||
127 | else if (addr != NULL && mask != NULL) { | ||
128 | addrmap = kzalloc(sizeof(*addrmap), GFP_ATOMIC); | ||
129 | if (addrmap == NULL) | ||
130 | goto cfg_unlbl_map_add_failure; | ||
131 | INIT_LIST_HEAD(&addrmap->list4); | ||
132 | INIT_LIST_HEAD(&addrmap->list6); | ||
133 | |||
134 | switch (family) { | ||
135 | case AF_INET: | ||
136 | addr4 = addr; | ||
137 | mask4 = mask; | ||
138 | map4 = kzalloc(sizeof(*map4), GFP_ATOMIC); | ||
139 | if (map4 == NULL) | ||
140 | goto cfg_unlbl_map_add_failure; | ||
141 | map4->type = NETLBL_NLTYPE_UNLABELED; | ||
142 | map4->list.addr = addr4->s_addr & mask4->s_addr; | ||
143 | map4->list.mask = mask4->s_addr; | ||
144 | map4->list.valid = 1; | ||
145 | ret_val = netlbl_af4list_add(&map4->list, | ||
146 | &addrmap->list4); | ||
147 | if (ret_val != 0) | ||
148 | goto cfg_unlbl_map_add_failure; | ||
149 | break; | ||
150 | case AF_INET6: | ||
151 | addr6 = addr; | ||
152 | mask6 = mask; | ||
153 | map6 = kzalloc(sizeof(*map6), GFP_ATOMIC); | ||
154 | if (map6 == NULL) | ||
155 | goto cfg_unlbl_map_add_failure; | ||
156 | map6->type = NETLBL_NLTYPE_UNLABELED; | ||
157 | ipv6_addr_copy(&map6->list.addr, addr6); | ||
158 | map6->list.addr.s6_addr32[0] &= mask6->s6_addr32[0]; | ||
159 | map6->list.addr.s6_addr32[1] &= mask6->s6_addr32[1]; | ||
160 | map6->list.addr.s6_addr32[2] &= mask6->s6_addr32[2]; | ||
161 | map6->list.addr.s6_addr32[3] &= mask6->s6_addr32[3]; | ||
162 | ipv6_addr_copy(&map6->list.mask, mask6); | ||
163 | map6->list.valid = 1; | ||
164 | ret_val = netlbl_af6list_add(&map6->list, | ||
165 | &addrmap->list6); | ||
166 | if (ret_val != 0) | ||
167 | goto cfg_unlbl_map_add_failure; | ||
168 | break; | ||
169 | default: | ||
170 | goto cfg_unlbl_map_add_failure; | ||
171 | break; | ||
172 | } | ||
173 | |||
174 | entry->type_def.addrsel = addrmap; | ||
175 | entry->type = NETLBL_NLTYPE_ADDRSELECT; | ||
176 | } else { | ||
177 | ret_val = -EINVAL; | ||
178 | goto cfg_unlbl_map_add_failure; | ||
90 | } | 179 | } |
91 | entry->type = NETLBL_NLTYPE_UNLABELED; | ||
92 | 180 | ||
93 | ret_val = netlbl_domhsh_add(entry, audit_info); | 181 | ret_val = netlbl_domhsh_add(entry, audit_info); |
94 | if (ret_val != 0) | 182 | if (ret_val != 0) |
95 | goto cfg_unlbl_add_map_failure; | 183 | goto cfg_unlbl_map_add_failure; |
96 | 184 | ||
97 | return 0; | 185 | return 0; |
98 | 186 | ||
99 | cfg_unlbl_add_map_failure: | 187 | cfg_unlbl_map_add_failure: |
100 | if (entry != NULL) | 188 | if (entry != NULL) |
101 | kfree(entry->domain); | 189 | kfree(entry->domain); |
102 | kfree(entry); | 190 | kfree(entry); |
191 | kfree(addrmap); | ||
192 | kfree(map4); | ||
193 | kfree(map6); | ||
103 | return ret_val; | 194 | return ret_val; |
104 | } | 195 | } |
105 | 196 | ||
197 | |||
198 | /** | ||
199 | * netlbl_cfg_unlbl_static_add - Adds a new static label | ||
200 | * @net: network namespace | ||
201 | * @dev_name: interface name | ||
202 | * @addr: IP address in network byte order (struct in[6]_addr) | ||
203 | * @mask: address mask in network byte order (struct in[6]_addr) | ||
204 | * @family: address family | ||
205 | * @secid: LSM secid value for the entry | ||
206 | * @audit_info: NetLabel audit information | ||
207 | * | ||
208 | * Description: | ||
209 | * Adds a new NetLabel static label to be used when protocol provided labels | ||
210 | * are not present on incoming traffic. If @dev_name is NULL then the default | ||
211 | * interface will be used. Returns zero on success, negative values on failure. | ||
212 | * | ||
213 | */ | ||
214 | int netlbl_cfg_unlbl_static_add(struct net *net, | ||
215 | const char *dev_name, | ||
216 | const void *addr, | ||
217 | const void *mask, | ||
218 | u16 family, | ||
219 | u32 secid, | ||
220 | struct netlbl_audit *audit_info) | ||
221 | { | ||
222 | u32 addr_len; | ||
223 | |||
224 | switch (family) { | ||
225 | case AF_INET: | ||
226 | addr_len = sizeof(struct in_addr); | ||
227 | break; | ||
228 | case AF_INET6: | ||
229 | addr_len = sizeof(struct in6_addr); | ||
230 | break; | ||
231 | default: | ||
232 | return -EPFNOSUPPORT; | ||
233 | } | ||
234 | |||
235 | return netlbl_unlhsh_add(net, | ||
236 | dev_name, addr, mask, addr_len, | ||
237 | secid, audit_info); | ||
238 | } | ||
239 | |||
240 | /** | ||
241 | * netlbl_cfg_unlbl_static_del - Removes an existing static label | ||
242 | * @net: network namespace | ||
243 | * @dev_name: interface name | ||
244 | * @addr: IP address in network byte order (struct in[6]_addr) | ||
245 | * @mask: address mask in network byte order (struct in[6]_addr) | ||
246 | * @family: address family | ||
247 | * @secid: LSM secid value for the entry | ||
248 | * @audit_info: NetLabel audit information | ||
249 | * | ||
250 | * Description: | ||
251 | * Removes an existing NetLabel static label used when protocol provided labels | ||
252 | * are not present on incoming traffic. If @dev_name is NULL then the default | ||
253 | * interface will be used. Returns zero on success, negative values on failure. | ||
254 | * | ||
255 | */ | ||
256 | int netlbl_cfg_unlbl_static_del(struct net *net, | ||
257 | const char *dev_name, | ||
258 | const void *addr, | ||
259 | const void *mask, | ||
260 | u16 family, | ||
261 | struct netlbl_audit *audit_info) | ||
262 | { | ||
263 | u32 addr_len; | ||
264 | |||
265 | switch (family) { | ||
266 | case AF_INET: | ||
267 | addr_len = sizeof(struct in_addr); | ||
268 | break; | ||
269 | case AF_INET6: | ||
270 | addr_len = sizeof(struct in6_addr); | ||
271 | break; | ||
272 | default: | ||
273 | return -EPFNOSUPPORT; | ||
274 | } | ||
275 | |||
276 | return netlbl_unlhsh_remove(net, | ||
277 | dev_name, addr, mask, addr_len, | ||
278 | audit_info); | ||
279 | } | ||
280 | |||
281 | /** | ||
282 | * netlbl_cfg_cipsov4_add - Add a new CIPSOv4 DOI definition | ||
283 | * @doi_def: CIPSO DOI definition | ||
284 | * @audit_info: NetLabel audit information | ||
285 | * | ||
286 | * Description: | ||
287 | * Add a new CIPSO DOI definition as defined by @doi_def. Returns zero on | ||
288 | * success and negative values on failure. | ||
289 | * | ||
290 | */ | ||
291 | int netlbl_cfg_cipsov4_add(struct cipso_v4_doi *doi_def, | ||
292 | struct netlbl_audit *audit_info) | ||
293 | { | ||
294 | return cipso_v4_doi_add(doi_def, audit_info); | ||
295 | } | ||
296 | |||
297 | /** | ||
298 | * netlbl_cfg_cipsov4_del - Remove an existing CIPSOv4 DOI definition | ||
299 | * @doi: CIPSO DOI | ||
300 | * @audit_info: NetLabel audit information | ||
301 | * | ||
302 | * Description: | ||
303 | * Remove an existing CIPSO DOI definition matching @doi. Unlike the add | ||
304 | * helper above, this function does not return a value. | ||
305 | * | ||
306 | */ | ||
307 | void netlbl_cfg_cipsov4_del(u32 doi, struct netlbl_audit *audit_info) | ||
308 | { | ||
309 | cipso_v4_doi_remove(doi, audit_info); | ||
310 | } | ||
311 | |||
106 | /** | 312 | /** |
107 | * netlbl_cfg_cipsov4_add_map - Add a new CIPSOv4 DOI definition and mapping | 313 | * netlbl_cfg_cipsov4_map_add - Add a new CIPSOv4 DOI mapping |
108 | * @doi_def: the DOI definition | 314 | * @doi: the CIPSO DOI |
109 | * @domain: the domain mapping to add | 315 | * @domain: the domain mapping to add |
316 | * @addr: IP address | ||
317 | * @mask: IP address mask | ||
110 | * @audit_info: NetLabel audit information | 318 | * @audit_info: NetLabel audit information |
111 | * | 319 | * |
112 | * Description: | 320 | * Description: |
113 | * Add a new CIPSOv4 DOI definition and NetLabel/LSM domain mapping for this | 321 | * Add a new NetLabel/LSM domain mapping for the given CIPSO DOI to the NetLabel |
114 | * new DOI definition to the NetLabel subsystem. A @domain value of NULL adds | 322 | * subsystem. A @domain value of NULL adds a new default domain mapping. |
115 | * a new default domain mapping. Returns zero on success, negative values on | 323 | * Returns zero on success, negative values on failure. |
116 | * failure. | ||
117 | * | 324 | * |
118 | */ | 325 | */ |
119 | int netlbl_cfg_cipsov4_add_map(struct cipso_v4_doi *doi_def, | 326 | int netlbl_cfg_cipsov4_map_add(u32 doi, |
120 | const char *domain, | 327 | const char *domain, |
328 | const struct in_addr *addr, | ||
329 | const struct in_addr *mask, | ||
121 | struct netlbl_audit *audit_info) | 330 | struct netlbl_audit *audit_info) |
122 | { | 331 | { |
123 | int ret_val = -ENOMEM; | 332 | int ret_val = -ENOMEM; |
124 | u32 doi; | 333 | struct cipso_v4_doi *doi_def; |
125 | u32 doi_type; | ||
126 | struct netlbl_dom_map *entry; | 334 | struct netlbl_dom_map *entry; |
127 | const char *type_str; | 335 | struct netlbl_domaddr_map *addrmap = NULL; |
128 | struct audit_buffer *audit_buf; | 336 | struct netlbl_domaddr4_map *addrinfo = NULL; |
129 | 337 | ||
130 | doi = doi_def->doi; | 338 | doi_def = cipso_v4_doi_getdef(doi); |
131 | doi_type = doi_def->type; | 339 | if (doi_def == NULL) |
340 | return -ENOENT; | ||
132 | 341 | ||
133 | entry = kzalloc(sizeof(*entry), GFP_ATOMIC); | 342 | entry = kzalloc(sizeof(*entry), GFP_ATOMIC); |
134 | if (entry == NULL) | 343 | if (entry == NULL) |
@@ -136,56 +345,52 @@ int netlbl_cfg_cipsov4_add_map(struct cipso_v4_doi *doi_def, | |||
136 | if (domain != NULL) { | 345 | if (domain != NULL) { |
137 | entry->domain = kstrdup(domain, GFP_ATOMIC); | 346 | entry->domain = kstrdup(domain, GFP_ATOMIC); |
138 | if (entry->domain == NULL) | 347 | if (entry->domain == NULL) |
139 | goto cfg_cipsov4_add_map_failure; | 348 | goto cfg_cipsov4_map_add_failure; |
140 | } | 349 | } |
141 | 350 | ||
142 | ret_val = cipso_v4_doi_add(doi_def); | 351 | if (addr == NULL && mask == NULL) { |
143 | if (ret_val != 0) | 352 | entry->type_def.cipsov4 = doi_def; |
144 | goto cfg_cipsov4_add_map_failure_remove_doi; | 353 | entry->type = NETLBL_NLTYPE_CIPSOV4; |
145 | entry->type = NETLBL_NLTYPE_CIPSOV4; | 354 | } else if (addr != NULL && mask != NULL) { |
146 | entry->type_def.cipsov4 = cipso_v4_doi_getdef(doi); | 355 | addrmap = kzalloc(sizeof(*addrmap), GFP_ATOMIC); |
147 | if (entry->type_def.cipsov4 == NULL) { | 356 | if (addrmap == NULL) |
148 | ret_val = -ENOENT; | 357 | goto cfg_cipsov4_map_add_failure; |
149 | goto cfg_cipsov4_add_map_failure_remove_doi; | 358 | INIT_LIST_HEAD(&addrmap->list4); |
359 | INIT_LIST_HEAD(&addrmap->list6); | ||
360 | |||
361 | addrinfo = kzalloc(sizeof(*addrinfo), GFP_ATOMIC); | ||
362 | if (addrinfo == NULL) | ||
363 | goto cfg_cipsov4_map_add_failure; | ||
364 | addrinfo->type_def.cipsov4 = doi_def; | ||
365 | addrinfo->type = NETLBL_NLTYPE_CIPSOV4; | ||
366 | addrinfo->list.addr = addr->s_addr & mask->s_addr; | ||
367 | addrinfo->list.mask = mask->s_addr; | ||
368 | addrinfo->list.valid = 1; | ||
369 | ret_val = netlbl_af4list_add(&addrinfo->list, &addrmap->list4); | ||
370 | if (ret_val != 0) | ||
371 | goto cfg_cipsov4_map_add_failure; | ||
372 | |||
373 | entry->type_def.addrsel = addrmap; | ||
374 | entry->type = NETLBL_NLTYPE_ADDRSELECT; | ||
375 | } else { | ||
376 | ret_val = -EINVAL; | ||
377 | goto cfg_cipsov4_map_add_failure; | ||
150 | } | 378 | } |
379 | |||
151 | ret_val = netlbl_domhsh_add(entry, audit_info); | 380 | ret_val = netlbl_domhsh_add(entry, audit_info); |
152 | if (ret_val != 0) | 381 | if (ret_val != 0) |
153 | goto cfg_cipsov4_add_map_failure_release_doi; | 382 | goto cfg_cipsov4_map_add_failure; |
154 | |||
155 | cfg_cipsov4_add_map_return: | ||
156 | audit_buf = netlbl_audit_start_common(AUDIT_MAC_CIPSOV4_ADD, | ||
157 | audit_info); | ||
158 | if (audit_buf != NULL) { | ||
159 | switch (doi_type) { | ||
160 | case CIPSO_V4_MAP_TRANS: | ||
161 | type_str = "trans"; | ||
162 | break; | ||
163 | case CIPSO_V4_MAP_PASS: | ||
164 | type_str = "pass"; | ||
165 | break; | ||
166 | case CIPSO_V4_MAP_LOCAL: | ||
167 | type_str = "local"; | ||
168 | break; | ||
169 | default: | ||
170 | type_str = "(unknown)"; | ||
171 | } | ||
172 | audit_log_format(audit_buf, | ||
173 | " cipso_doi=%u cipso_type=%s res=%u", | ||
174 | doi, type_str, ret_val == 0 ? 1 : 0); | ||
175 | audit_log_end(audit_buf); | ||
176 | } | ||
177 | 383 | ||
178 | return ret_val; | 384 | return 0; |
179 | 385 | ||
180 | cfg_cipsov4_add_map_failure_release_doi: | 386 | cfg_cipsov4_map_add_failure: |
181 | cipso_v4_doi_putdef(doi_def); | 387 | cipso_v4_doi_putdef(doi_def); |
182 | cfg_cipsov4_add_map_failure_remove_doi: | ||
183 | cipso_v4_doi_remove(doi, audit_info); | ||
184 | cfg_cipsov4_add_map_failure: | ||
185 | if (entry != NULL) | 388 | if (entry != NULL) |
186 | kfree(entry->domain); | 389 | kfree(entry->domain); |
187 | kfree(entry); | 390 | kfree(entry); |
188 | goto cfg_cipsov4_add_map_return; | 391 | kfree(addrmap); |
392 | kfree(addrinfo); | ||
393 | return ret_val; | ||
189 | } | 394 | } |
190 | 395 | ||
191 | /* | 396 | /* |
@@ -691,6 +896,28 @@ int netlbl_cache_add(const struct sk_buff *skb, | |||
691 | } | 896 | } |
692 | 897 | ||
693 | /* | 898 | /* |
899 | * Protocol Engine Functions | ||
900 | */ | ||
901 | |||
902 | /** | ||
903 | * netlbl_audit_start - Start an audit message | ||
904 | * @type: audit message type | ||
905 | * @audit_info: NetLabel audit information | ||
906 | * | ||
907 | * Description: | ||
908 | * Start an audit message using the type specified in @type and fill the audit | ||
909 | * message with some fields common to all NetLabel audit messages. This | ||
910 | * function should only be used by protocol engines, not LSMs. Returns a | ||
911 | * pointer to the audit buffer on success, NULL on failure. | ||
912 | * | ||
913 | */ | ||
914 | struct audit_buffer *netlbl_audit_start(int type, | ||
915 | struct netlbl_audit *audit_info) | ||
916 | { | ||
917 | return netlbl_audit_start_common(type, audit_info); | ||
918 | } | ||
919 | |||
920 | /* | ||
694 | * Setup Functions | 921 | * Setup Functions |
695 | */ | 922 | */ |
696 | 923 | ||
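
Editor's note: the reworked configuration calls above give LSMs finer-grained control than the old netlbl_cfg_cipsov4_add_map()/netlbl_cfg_map_del() pair. The kernel-context sketch below shows how a caller might use them, matching the signatures added in this file; the DOI value, domain name and subnet are invented for illustration and error handling is abbreviated.

#include <linux/in.h>
#include <linux/socket.h>
#include <net/netlabel.h>

static int demo_netlbl_setup(struct netlbl_audit *audit_info)
{
        struct in_addr addr = { .s_addr = htonl(0xc0000200) };  /* 192.0.2.0 */
        struct in_addr mask = { .s_addr = htonl(0xffffff00) };  /* /24 */
        int ret;

        /* Map the "demo" domain, but only for this subnet, onto an already
         * registered CIPSOv4 DOI (123 is purely hypothetical). */
        ret = netlbl_cfg_cipsov4_map_add(123, "demo", &addr, &mask, audit_info);
        if (ret != 0)
                return ret;

        /* Removing the same domain/address pair later takes the
         * netlbl_domhsh_remove_af4() path added in this patch. */
        return netlbl_cfg_map_del("demo", AF_INET, &addr, &mask, audit_info);
}
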
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c index 8c0308032178..f3c5c68c6848 100644 --- a/net/netlabel/netlabel_unlabeled.c +++ b/net/netlabel/netlabel_unlabeled.c | |||
@@ -450,13 +450,13 @@ add_iface_failure: | |||
450 | * success, negative values on failure. | 450 | * success, negative values on failure. |
451 | * | 451 | * |
452 | */ | 452 | */ |
453 | static int netlbl_unlhsh_add(struct net *net, | 453 | int netlbl_unlhsh_add(struct net *net, |
454 | const char *dev_name, | 454 | const char *dev_name, |
455 | const void *addr, | 455 | const void *addr, |
456 | const void *mask, | 456 | const void *mask, |
457 | u32 addr_len, | 457 | u32 addr_len, |
458 | u32 secid, | 458 | u32 secid, |
459 | struct netlbl_audit *audit_info) | 459 | struct netlbl_audit *audit_info) |
460 | { | 460 | { |
461 | int ret_val; | 461 | int ret_val; |
462 | int ifindex; | 462 | int ifindex; |
@@ -720,12 +720,12 @@ unlhsh_condremove_failure: | |||
720 | * Returns zero on success, negative values on failure. | 720 | * Returns zero on success, negative values on failure. |
721 | * | 721 | * |
722 | */ | 722 | */ |
723 | static int netlbl_unlhsh_remove(struct net *net, | 723 | int netlbl_unlhsh_remove(struct net *net, |
724 | const char *dev_name, | 724 | const char *dev_name, |
725 | const void *addr, | 725 | const void *addr, |
726 | const void *mask, | 726 | const void *mask, |
727 | u32 addr_len, | 727 | u32 addr_len, |
728 | struct netlbl_audit *audit_info) | 728 | struct netlbl_audit *audit_info) |
729 | { | 729 | { |
730 | int ret_val; | 730 | int ret_val; |
731 | struct net_device *dev; | 731 | struct net_device *dev; |
diff --git a/net/netlabel/netlabel_unlabeled.h b/net/netlabel/netlabel_unlabeled.h index 06b1301ac072..7aba63595137 100644 --- a/net/netlabel/netlabel_unlabeled.h +++ b/net/netlabel/netlabel_unlabeled.h | |||
@@ -221,6 +221,21 @@ int netlbl_unlabel_genl_init(void); | |||
221 | /* General Unlabeled init function */ | 221 | /* General Unlabeled init function */ |
222 | int netlbl_unlabel_init(u32 size); | 222 | int netlbl_unlabel_init(u32 size); |
223 | 223 | ||
224 | /* Static/Fallback label management functions */ | ||
225 | int netlbl_unlhsh_add(struct net *net, | ||
226 | const char *dev_name, | ||
227 | const void *addr, | ||
228 | const void *mask, | ||
229 | u32 addr_len, | ||
230 | u32 secid, | ||
231 | struct netlbl_audit *audit_info); | ||
232 | int netlbl_unlhsh_remove(struct net *net, | ||
233 | const char *dev_name, | ||
234 | const void *addr, | ||
235 | const void *mask, | ||
236 | u32 addr_len, | ||
237 | struct netlbl_audit *audit_info); | ||
238 | |||
224 | /* Process Unlabeled incoming network packets */ | 239 | /* Process Unlabeled incoming network packets */ |
225 | int netlbl_unlabel_getattr(const struct sk_buff *skb, | 240 | int netlbl_unlabel_getattr(const struct sk_buff *skb, |
226 | u16 family, | 241 | u16 family, |
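Exporting netlbl_unlhsh_add()/netlbl_unlhsh_remove() through this header lets other NetLabel code install or drop static/fallback labels directly. A minimal sketch of a caller inside net/netlabel/ (the header is local to that directory), assuming an IPv4 network and a secid already obtained from the LSM; the address, mask and function name are invented for illustration, and passing a NULL dev_name to select the catch-all (non-interface-specific) entry is an assumption about the hash layout:

	#include <linux/types.h>
	#include <asm/byteorder.h>
	#include <net/net_namespace.h>
	#include "netlabel_unlabeled.h"

	static int example_add_fallback(u32 secid, struct netlbl_audit *audit_info)
	{
		__be32 addr = htonl(0xc0a80000);	/* 192.168.0.0 */
		__be32 mask = htonl(0xffff0000);	/* /16 */

		/* NULL dev_name: entry is not tied to a specific interface */
		return netlbl_unlhsh_add(&init_net, NULL, &addr, &mask,
					 sizeof(addr), secid, audit_info);
	}
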
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c index 3e1191cecaf0..1d3dd30099df 100644 --- a/net/netlink/genetlink.c +++ b/net/netlink/genetlink.c | |||
@@ -225,6 +225,7 @@ void genl_unregister_mc_group(struct genl_family *family, | |||
225 | __genl_unregister_mc_group(family, grp); | 225 | __genl_unregister_mc_group(family, grp); |
226 | genl_unlock(); | 226 | genl_unlock(); |
227 | } | 227 | } |
228 | EXPORT_SYMBOL(genl_unregister_mc_group); | ||
228 | 229 | ||
229 | static void genl_unregister_mc_groups(struct genl_family *family) | 230 | static void genl_unregister_mc_groups(struct genl_family *family) |
230 | { | 231 | { |
diff --git a/net/phonet/pep-gprs.c b/net/phonet/pep-gprs.c index b0ceac2d6cd1..6a91a32a80c1 100644 --- a/net/phonet/pep-gprs.c +++ b/net/phonet/pep-gprs.c | |||
@@ -227,6 +227,13 @@ static int gprs_set_mtu(struct net_device *dev, int new_mtu) | |||
227 | return 0; | 227 | return 0; |
228 | } | 228 | } |
229 | 229 | ||
230 | static const struct net_device_ops gprs_netdev_ops = { | ||
231 | .ndo_open = gprs_open, | ||
232 | .ndo_stop = gprs_close, | ||
233 | .ndo_start_xmit = gprs_xmit, | ||
234 | .ndo_change_mtu = gprs_set_mtu, | ||
235 | }; | ||
236 | |||
230 | static void gprs_setup(struct net_device *dev) | 237 | static void gprs_setup(struct net_device *dev) |
231 | { | 238 | { |
232 | dev->features = NETIF_F_FRAGLIST; | 239 | dev->features = NETIF_F_FRAGLIST; |
@@ -237,11 +244,8 @@ static void gprs_setup(struct net_device *dev) | |||
237 | dev->addr_len = 0; | 244 | dev->addr_len = 0; |
238 | dev->tx_queue_len = 10; | 245 | dev->tx_queue_len = 10; |
239 | 246 | ||
247 | dev->netdev_ops = &gprs_netdev_ops; | ||
240 | dev->destructor = free_netdev; | 248 | dev->destructor = free_netdev; |
241 | dev->open = gprs_open; | ||
242 | dev->stop = gprs_close; | ||
243 | dev->hard_start_xmit = gprs_xmit; /* mandatory */ | ||
244 | dev->change_mtu = gprs_set_mtu; | ||
245 | } | 249 | } |
246 | 250 | ||
247 | /* | 251 | /* |
diff --git a/net/rfkill/rfkill.c b/net/rfkill/rfkill.c index 3c94f76d5525..3eaa39403c13 100644 --- a/net/rfkill/rfkill.c +++ b/net/rfkill/rfkill.c | |||
@@ -54,10 +54,10 @@ static unsigned long rfkill_states_lockdflt[BITS_TO_LONGS(RFKILL_TYPE_MAX)]; | |||
54 | static bool rfkill_epo_lock_active; | 54 | static bool rfkill_epo_lock_active; |
55 | 55 | ||
56 | 56 | ||
57 | #ifdef CONFIG_RFKILL_LEDS | ||
57 | static void rfkill_led_trigger(struct rfkill *rfkill, | 58 | static void rfkill_led_trigger(struct rfkill *rfkill, |
58 | enum rfkill_state state) | 59 | enum rfkill_state state) |
59 | { | 60 | { |
60 | #ifdef CONFIG_RFKILL_LEDS | ||
61 | struct led_trigger *led = &rfkill->led_trigger; | 61 | struct led_trigger *led = &rfkill->led_trigger; |
62 | 62 | ||
63 | if (!led->name) | 63 | if (!led->name) |
@@ -66,10 +66,8 @@ static void rfkill_led_trigger(struct rfkill *rfkill, | |||
66 | led_trigger_event(led, LED_OFF); | 66 | led_trigger_event(led, LED_OFF); |
67 | else | 67 | else |
68 | led_trigger_event(led, LED_FULL); | 68 | led_trigger_event(led, LED_FULL); |
69 | #endif /* CONFIG_RFKILL_LEDS */ | ||
70 | } | 69 | } |
71 | 70 | ||
72 | #ifdef CONFIG_RFKILL_LEDS | ||
73 | static void rfkill_led_trigger_activate(struct led_classdev *led) | 71 | static void rfkill_led_trigger_activate(struct led_classdev *led) |
74 | { | 72 | { |
75 | struct rfkill *rfkill = container_of(led->trigger, | 73 | struct rfkill *rfkill = container_of(led->trigger, |
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index 05d178008cbc..07372f60bee3 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c | |||
@@ -638,8 +638,9 @@ static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle, | |||
638 | break; | 638 | break; |
639 | 639 | ||
640 | n->next = *ins; | 640 | n->next = *ins; |
641 | wmb(); | 641 | tcf_tree_lock(tp); |
642 | *ins = n; | 642 | *ins = n; |
643 | tcf_tree_unlock(tp); | ||
643 | 644 | ||
644 | *arg = (unsigned long)n; | 645 | *arg = (unsigned long)n; |
645 | return 0; | 646 | return 0; |
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index f3965df00559..33133d27b539 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c | |||
@@ -435,7 +435,7 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt) | |||
435 | int i; | 435 | int i; |
436 | 436 | ||
437 | q->perturb_timer.function = sfq_perturbation; | 437 | q->perturb_timer.function = sfq_perturbation; |
438 | q->perturb_timer.data = (unsigned long)sch;; | 438 | q->perturb_timer.data = (unsigned long)sch; |
439 | init_timer_deferrable(&q->perturb_timer); | 439 | init_timer_deferrable(&q->perturb_timer); |
440 | 440 | ||
441 | for (i = 0; i < SFQ_HASH_DIVISOR; i++) | 441 | for (i = 0; i < SFQ_HASH_DIVISOR; i++) |
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c index cfc8e7caba62..ec697cebb63b 100644 --- a/net/sched/sch_teql.c +++ b/net/sched/sch_teql.c | |||
@@ -289,9 +289,9 @@ restart: | |||
289 | 289 | ||
290 | do { | 290 | do { |
291 | struct net_device *slave = qdisc_dev(q); | 291 | struct net_device *slave = qdisc_dev(q); |
292 | struct netdev_queue *slave_txq; | 292 | struct netdev_queue *slave_txq = netdev_get_tx_queue(slave, 0); |
293 | const struct net_device_ops *slave_ops = slave->netdev_ops; | ||
293 | 294 | ||
294 | slave_txq = netdev_get_tx_queue(slave, 0); | ||
295 | if (slave_txq->qdisc_sleeping != q) | 295 | if (slave_txq->qdisc_sleeping != q) |
296 | continue; | 296 | continue; |
297 | if (__netif_subqueue_stopped(slave, subq) || | 297 | if (__netif_subqueue_stopped(slave, subq) || |
@@ -305,7 +305,7 @@ restart: | |||
305 | if (__netif_tx_trylock(slave_txq)) { | 305 | if (__netif_tx_trylock(slave_txq)) { |
306 | if (!netif_tx_queue_stopped(slave_txq) && | 306 | if (!netif_tx_queue_stopped(slave_txq) && |
307 | !netif_tx_queue_frozen(slave_txq) && | 307 | !netif_tx_queue_frozen(slave_txq) && |
308 | slave->hard_start_xmit(skb, slave) == 0) { | 308 | slave_ops->ndo_start_xmit(skb, slave) == 0) { |
309 | __netif_tx_unlock(slave_txq); | 309 | __netif_tx_unlock(slave_txq); |
310 | master->slaves = NEXT_SLAVE(q); | 310 | master->slaves = NEXT_SLAVE(q); |
311 | netif_wake_queue(dev); | 311 | netif_wake_queue(dev); |
@@ -420,6 +420,14 @@ static int teql_master_mtu(struct net_device *dev, int new_mtu) | |||
420 | return 0; | 420 | return 0; |
421 | } | 421 | } |
422 | 422 | ||
423 | static const struct net_device_ops teql_netdev_ops = { | ||
424 | .ndo_open = teql_master_open, | ||
425 | .ndo_stop = teql_master_close, | ||
426 | .ndo_start_xmit = teql_master_xmit, | ||
427 | .ndo_get_stats = teql_master_stats, | ||
428 | .ndo_change_mtu = teql_master_mtu, | ||
429 | }; | ||
430 | |||
423 | static __init void teql_master_setup(struct net_device *dev) | 431 | static __init void teql_master_setup(struct net_device *dev) |
424 | { | 432 | { |
425 | struct teql_master *master = netdev_priv(dev); | 433 | struct teql_master *master = netdev_priv(dev); |
@@ -436,11 +444,7 @@ static __init void teql_master_setup(struct net_device *dev) | |||
436 | ops->destroy = teql_destroy; | 444 | ops->destroy = teql_destroy; |
437 | ops->owner = THIS_MODULE; | 445 | ops->owner = THIS_MODULE; |
438 | 446 | ||
439 | dev->open = teql_master_open; | 447 | dev->netdev_ops = &teql_netdev_ops; |
440 | dev->hard_start_xmit = teql_master_xmit; | ||
441 | dev->stop = teql_master_close; | ||
442 | dev->get_stats = teql_master_stats; | ||
443 | dev->change_mtu = teql_master_mtu; | ||
444 | dev->type = ARPHRD_VOID; | 448 | dev->type = ARPHRD_VOID; |
445 | dev->mtu = 1500; | 449 | dev->mtu = 1500; |
446 | dev->tx_queue_len = 100; | 450 | dev->tx_queue_len = 100; |
diff --git a/net/sctp/auth.c b/net/sctp/auth.c index 52db5f60daa0..56935bbc1496 100644 --- a/net/sctp/auth.c +++ b/net/sctp/auth.c | |||
@@ -141,8 +141,8 @@ void sctp_auth_destroy_keys(struct list_head *keys) | |||
141 | /* Compare two byte vectors as numbers. Return values | 141 | /* Compare two byte vectors as numbers. Return values |
142 | * are: | 142 | * are: |
143 | * 0 - vectors are equal | 143 | * 0 - vectors are equal |
144 | * < 0 - vector 1 is smaller then vector2 | 144 | * < 0 - vector 1 is smaller than vector2 |
145 | * > 0 - vector 1 is greater then vector2 | 145 | * > 0 - vector 1 is greater than vector2 |
146 | * | 146 | * |
147 | * Algorithm is: | 147 | * Algorithm is: |
148 | * This is performed by selecting the numerically smaller key vector... | 148 | * This is performed by selecting the numerically smaller key vector... |
@@ -489,7 +489,7 @@ int sctp_auth_init_hmacs(struct sctp_endpoint *ep, gfp_t gfp) | |||
489 | return 0; | 489 | return 0; |
490 | 490 | ||
491 | out_err: | 491 | out_err: |
492 | /* Clean up any successfull allocations */ | 492 | /* Clean up any successful allocations */ |
493 | sctp_auth_destroy_hmacs(ep->auth_hmacs); | 493 | sctp_auth_destroy_hmacs(ep->auth_hmacs); |
494 | return -ENOMEM; | 494 | return -ENOMEM; |
495 | } | 495 | } |
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 1c4e5d6c29c0..3a0cd075914f 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c | |||
@@ -4268,9 +4268,9 @@ nomem: | |||
4268 | 4268 | ||
4269 | /* | 4269 | /* |
4270 | * Handle a protocol violation when the chunk length is invalid. | 4270 | * Handle a protocol violation when the chunk length is invalid. |
4271 | * "Invalid" length is identified as smaller then the minimal length a | 4271 | * "Invalid" length is identified as smaller than the minimal length a |
4272 | * given chunk can be. For example, a SACK chunk has invalid length | 4272 | * given chunk can be. For example, a SACK chunk has invalid length |
4273 | * if it's length is set to be smaller then the size of sctp_sack_chunk_t. | 4273 | * if its length is set to be smaller than the size of sctp_sack_chunk_t. |
4274 | * | 4274 | * |
4275 | * We inform the other end by sending an ABORT with a Protocol Violation | 4275 | * We inform the other end by sending an ABORT with a Protocol Violation |
4276 | * error code. | 4276 | * error code. |
@@ -4300,7 +4300,7 @@ static sctp_disposition_t sctp_sf_violation_chunklen( | |||
4300 | 4300 | ||
4301 | /* | 4301 | /* |
4302 | * Handle a protocol violation when the parameter length is invalid. | 4302 | * Handle a protocol violation when the parameter length is invalid. |
4303 | * "Invalid" length is identified as smaller then the minimal length a | 4303 | * "Invalid" length is identified as smaller than the minimal length a |
4304 | * given parameter can be. | 4304 | * given parameter can be. |
4305 | */ | 4305 | */ |
4306 | static sctp_disposition_t sctp_sf_violation_paramlen( | 4306 | static sctp_disposition_t sctp_sf_violation_paramlen( |
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index b14a8f33e42d..ff0a8f88de04 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
@@ -2717,7 +2717,7 @@ static int sctp_setsockopt_associnfo(struct sock *sk, char __user *optval, int o | |||
2717 | paths++; | 2717 | paths++; |
2718 | } | 2718 | } |
2719 | 2719 | ||
2720 | /* Only validate asocmaxrxt if we have more then | 2720 | /* Only validate asocmaxrxt if we have more than |
2721 | * one path/transport. We do this because path | 2721 | * one path/transport. We do this because path |
2722 | * retransmissions are only counted when we have more | 2722 | * retransmissions are only counted when we have more |
2723 | * then one path. | 2723 | * then one path. |
diff --git a/net/sctp/tsnmap.c b/net/sctp/tsnmap.c index 35c73e82553a..9bd64565021a 100644 --- a/net/sctp/tsnmap.c +++ b/net/sctp/tsnmap.c | |||
@@ -227,7 +227,7 @@ void sctp_tsnmap_skip(struct sctp_tsnmap *map, __u32 tsn) | |||
227 | */ | 227 | */ |
228 | bitmap_zero(map->tsn_map, map->len); | 228 | bitmap_zero(map->tsn_map, map->len); |
229 | } else { | 229 | } else { |
230 | /* If the gap is smaller then the map size, | 230 | /* If the gap is smaller than the map size, |
231 | * shift the map by 'gap' bits and update further. | 231 | * shift the map by 'gap' bits and update further. |
232 | */ | 232 | */ |
233 | bitmap_shift_right(map->tsn_map, map->tsn_map, gap, map->len); | 233 | bitmap_shift_right(map->tsn_map, map->tsn_map, gap, map->len); |
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c index c9966713282a..4735caad26ed 100644 --- a/net/sunrpc/cache.c +++ b/net/sunrpc/cache.c | |||
@@ -98,7 +98,7 @@ struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail, | |||
98 | 98 | ||
99 | return new; | 99 | return new; |
100 | } | 100 | } |
101 | EXPORT_SYMBOL(sunrpc_cache_lookup); | 101 | EXPORT_SYMBOL_GPL(sunrpc_cache_lookup); |
102 | 102 | ||
103 | 103 | ||
104 | static void queue_loose(struct cache_detail *detail, struct cache_head *ch); | 104 | static void queue_loose(struct cache_detail *detail, struct cache_head *ch); |
@@ -173,7 +173,7 @@ struct cache_head *sunrpc_cache_update(struct cache_detail *detail, | |||
173 | cache_put(old, detail); | 173 | cache_put(old, detail); |
174 | return tmp; | 174 | return tmp; |
175 | } | 175 | } |
176 | EXPORT_SYMBOL(sunrpc_cache_update); | 176 | EXPORT_SYMBOL_GPL(sunrpc_cache_update); |
177 | 177 | ||
178 | static int cache_make_upcall(struct cache_detail *detail, struct cache_head *h); | 178 | static int cache_make_upcall(struct cache_detail *detail, struct cache_head *h); |
179 | /* | 179 | /* |
@@ -245,7 +245,7 @@ int cache_check(struct cache_detail *detail, | |||
245 | cache_put(h, detail); | 245 | cache_put(h, detail); |
246 | return rv; | 246 | return rv; |
247 | } | 247 | } |
248 | EXPORT_SYMBOL(cache_check); | 248 | EXPORT_SYMBOL_GPL(cache_check); |
249 | 249 | ||
250 | /* | 250 | /* |
251 | * caches need to be periodically cleaned. | 251 | * caches need to be periodically cleaned. |
@@ -373,7 +373,7 @@ int cache_register(struct cache_detail *cd) | |||
373 | schedule_delayed_work(&cache_cleaner, 0); | 373 | schedule_delayed_work(&cache_cleaner, 0); |
374 | return 0; | 374 | return 0; |
375 | } | 375 | } |
376 | EXPORT_SYMBOL(cache_register); | 376 | EXPORT_SYMBOL_GPL(cache_register); |
377 | 377 | ||
378 | void cache_unregister(struct cache_detail *cd) | 378 | void cache_unregister(struct cache_detail *cd) |
379 | { | 379 | { |
@@ -399,7 +399,7 @@ void cache_unregister(struct cache_detail *cd) | |||
399 | out: | 399 | out: |
400 | printk(KERN_ERR "nfsd: failed to unregister %s cache\n", cd->name); | 400 | printk(KERN_ERR "nfsd: failed to unregister %s cache\n", cd->name); |
401 | } | 401 | } |
402 | EXPORT_SYMBOL(cache_unregister); | 402 | EXPORT_SYMBOL_GPL(cache_unregister); |
403 | 403 | ||
404 | /* clean cache tries to find something to clean | 404 | /* clean cache tries to find something to clean |
405 | * and cleans it. | 405 | * and cleans it. |
@@ -514,7 +514,7 @@ void cache_flush(void) | |||
514 | while (cache_clean() != -1) | 514 | while (cache_clean() != -1) |
515 | cond_resched(); | 515 | cond_resched(); |
516 | } | 516 | } |
517 | EXPORT_SYMBOL(cache_flush); | 517 | EXPORT_SYMBOL_GPL(cache_flush); |
518 | 518 | ||
519 | void cache_purge(struct cache_detail *detail) | 519 | void cache_purge(struct cache_detail *detail) |
520 | { | 520 | { |
@@ -523,7 +523,7 @@ void cache_purge(struct cache_detail *detail) | |||
523 | cache_flush(); | 523 | cache_flush(); |
524 | detail->flush_time = 1; | 524 | detail->flush_time = 1; |
525 | } | 525 | } |
526 | EXPORT_SYMBOL(cache_purge); | 526 | EXPORT_SYMBOL_GPL(cache_purge); |
527 | 527 | ||
528 | 528 | ||
529 | /* | 529 | /* |
@@ -988,7 +988,7 @@ void qword_add(char **bpp, int *lp, char *str) | |||
988 | *bpp = bp; | 988 | *bpp = bp; |
989 | *lp = len; | 989 | *lp = len; |
990 | } | 990 | } |
991 | EXPORT_SYMBOL(qword_add); | 991 | EXPORT_SYMBOL_GPL(qword_add); |
992 | 992 | ||
993 | void qword_addhex(char **bpp, int *lp, char *buf, int blen) | 993 | void qword_addhex(char **bpp, int *lp, char *buf, int blen) |
994 | { | 994 | { |
@@ -1017,7 +1017,7 @@ void qword_addhex(char **bpp, int *lp, char *buf, int blen) | |||
1017 | *bpp = bp; | 1017 | *bpp = bp; |
1018 | *lp = len; | 1018 | *lp = len; |
1019 | } | 1019 | } |
1020 | EXPORT_SYMBOL(qword_addhex); | 1020 | EXPORT_SYMBOL_GPL(qword_addhex); |
1021 | 1021 | ||
1022 | static void warn_no_listener(struct cache_detail *detail) | 1022 | static void warn_no_listener(struct cache_detail *detail) |
1023 | { | 1023 | { |
@@ -1140,7 +1140,7 @@ int qword_get(char **bpp, char *dest, int bufsize) | |||
1140 | *dest = '\0'; | 1140 | *dest = '\0'; |
1141 | return len; | 1141 | return len; |
1142 | } | 1142 | } |
1143 | EXPORT_SYMBOL(qword_get); | 1143 | EXPORT_SYMBOL_GPL(qword_get); |
1144 | 1144 | ||
1145 | 1145 | ||
1146 | /* | 1146 | /* |
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 192453248870..577385a4a5dc 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c | |||
@@ -522,8 +522,6 @@ rpc_get_inode(struct super_block *sb, int mode) | |||
522 | if (!inode) | 522 | if (!inode) |
523 | return NULL; | 523 | return NULL; |
524 | inode->i_mode = mode; | 524 | inode->i_mode = mode; |
525 | inode->i_uid = inode->i_gid = 0; | ||
526 | inode->i_blocks = 0; | ||
527 | inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; | 525 | inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; |
528 | switch(mode & S_IFMT) { | 526 | switch(mode & S_IFMT) { |
529 | case S_IFDIR: | 527 | case S_IFDIR: |
diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c index 50b049c6598a..085372ef4feb 100644 --- a/net/sunrpc/stats.c +++ b/net/sunrpc/stats.c | |||
@@ -106,7 +106,7 @@ void svc_seq_show(struct seq_file *seq, const struct svc_stat *statp) { | |||
106 | seq_putc(seq, '\n'); | 106 | seq_putc(seq, '\n'); |
107 | } | 107 | } |
108 | } | 108 | } |
109 | EXPORT_SYMBOL(svc_seq_show); | 109 | EXPORT_SYMBOL_GPL(svc_seq_show); |
110 | 110 | ||
111 | /** | 111 | /** |
112 | * rpc_alloc_iostats - allocate an rpc_iostats structure | 112 | * rpc_alloc_iostats - allocate an rpc_iostats structure |
@@ -249,14 +249,14 @@ svc_proc_register(struct svc_stat *statp, const struct file_operations *fops) | |||
249 | { | 249 | { |
250 | return do_register(statp->program->pg_name, statp, fops); | 250 | return do_register(statp->program->pg_name, statp, fops); |
251 | } | 251 | } |
252 | EXPORT_SYMBOL(svc_proc_register); | 252 | EXPORT_SYMBOL_GPL(svc_proc_register); |
253 | 253 | ||
254 | void | 254 | void |
255 | svc_proc_unregister(const char *name) | 255 | svc_proc_unregister(const char *name) |
256 | { | 256 | { |
257 | remove_proc_entry(name, proc_net_rpc); | 257 | remove_proc_entry(name, proc_net_rpc); |
258 | } | 258 | } |
259 | EXPORT_SYMBOL(svc_proc_unregister); | 259 | EXPORT_SYMBOL_GPL(svc_proc_unregister); |
260 | 260 | ||
261 | void | 261 | void |
262 | rpc_proc_init(void) | 262 | rpc_proc_init(void) |
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index 54c98d876847..c51fed4d1af1 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c | |||
@@ -431,7 +431,7 @@ svc_create(struct svc_program *prog, unsigned int bufsize, | |||
431 | { | 431 | { |
432 | return __svc_create(prog, bufsize, /*npools*/1, family, shutdown); | 432 | return __svc_create(prog, bufsize, /*npools*/1, family, shutdown); |
433 | } | 433 | } |
434 | EXPORT_SYMBOL(svc_create); | 434 | EXPORT_SYMBOL_GPL(svc_create); |
435 | 435 | ||
436 | struct svc_serv * | 436 | struct svc_serv * |
437 | svc_create_pooled(struct svc_program *prog, unsigned int bufsize, | 437 | svc_create_pooled(struct svc_program *prog, unsigned int bufsize, |
@@ -450,7 +450,7 @@ svc_create_pooled(struct svc_program *prog, unsigned int bufsize, | |||
450 | 450 | ||
451 | return serv; | 451 | return serv; |
452 | } | 452 | } |
453 | EXPORT_SYMBOL(svc_create_pooled); | 453 | EXPORT_SYMBOL_GPL(svc_create_pooled); |
454 | 454 | ||
455 | /* | 455 | /* |
456 | * Destroy an RPC service. Should be called with appropriate locking to | 456 | * Destroy an RPC service. Should be called with appropriate locking to |
@@ -492,7 +492,7 @@ svc_destroy(struct svc_serv *serv) | |||
492 | kfree(serv->sv_pools); | 492 | kfree(serv->sv_pools); |
493 | kfree(serv); | 493 | kfree(serv); |
494 | } | 494 | } |
495 | EXPORT_SYMBOL(svc_destroy); | 495 | EXPORT_SYMBOL_GPL(svc_destroy); |
496 | 496 | ||
497 | /* | 497 | /* |
498 | * Allocate an RPC server's buffer space. | 498 | * Allocate an RPC server's buffer space. |
@@ -567,7 +567,7 @@ out_thread: | |||
567 | out_enomem: | 567 | out_enomem: |
568 | return ERR_PTR(-ENOMEM); | 568 | return ERR_PTR(-ENOMEM); |
569 | } | 569 | } |
570 | EXPORT_SYMBOL(svc_prepare_thread); | 570 | EXPORT_SYMBOL_GPL(svc_prepare_thread); |
571 | 571 | ||
572 | /* | 572 | /* |
573 | * Choose a pool in which to create a new thread, for svc_set_num_threads | 573 | * Choose a pool in which to create a new thread, for svc_set_num_threads |
@@ -689,7 +689,7 @@ svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs) | |||
689 | 689 | ||
690 | return error; | 690 | return error; |
691 | } | 691 | } |
692 | EXPORT_SYMBOL(svc_set_num_threads); | 692 | EXPORT_SYMBOL_GPL(svc_set_num_threads); |
693 | 693 | ||
694 | /* | 694 | /* |
695 | * Called from a server thread as it's exiting. Caller must hold the BKL or | 695 | * Called from a server thread as it's exiting. Caller must hold the BKL or |
@@ -717,7 +717,7 @@ svc_exit_thread(struct svc_rqst *rqstp) | |||
717 | if (serv) | 717 | if (serv) |
718 | svc_destroy(serv); | 718 | svc_destroy(serv); |
719 | } | 719 | } |
720 | EXPORT_SYMBOL(svc_exit_thread); | 720 | EXPORT_SYMBOL_GPL(svc_exit_thread); |
721 | 721 | ||
722 | #ifdef CONFIG_SUNRPC_REGISTER_V4 | 722 | #ifdef CONFIG_SUNRPC_REGISTER_V4 |
723 | 723 | ||
@@ -1231,7 +1231,7 @@ err_bad: | |||
1231 | svc_putnl(resv, ntohl(rpc_stat)); | 1231 | svc_putnl(resv, ntohl(rpc_stat)); |
1232 | goto sendit; | 1232 | goto sendit; |
1233 | } | 1233 | } |
1234 | EXPORT_SYMBOL(svc_process); | 1234 | EXPORT_SYMBOL_GPL(svc_process); |
1235 | 1235 | ||
1236 | /* | 1236 | /* |
1237 | * Return (transport-specific) limit on the rpc payload. | 1237 | * Return (transport-specific) limit on the rpc payload. |
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index bf5b5cdafebf..e588df5d6b34 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c | |||
@@ -440,7 +440,7 @@ void svc_reserve(struct svc_rqst *rqstp, int space) | |||
440 | svc_xprt_enqueue(xprt); | 440 | svc_xprt_enqueue(xprt); |
441 | } | 441 | } |
442 | } | 442 | } |
443 | EXPORT_SYMBOL(svc_reserve); | 443 | EXPORT_SYMBOL_GPL(svc_reserve); |
444 | 444 | ||
445 | static void svc_xprt_release(struct svc_rqst *rqstp) | 445 | static void svc_xprt_release(struct svc_rqst *rqstp) |
446 | { | 446 | { |
@@ -448,6 +448,9 @@ static void svc_xprt_release(struct svc_rqst *rqstp) | |||
448 | 448 | ||
449 | rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp); | 449 | rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp); |
450 | 450 | ||
451 | kfree(rqstp->rq_deferred); | ||
452 | rqstp->rq_deferred = NULL; | ||
453 | |||
451 | svc_free_res_pages(rqstp); | 454 | svc_free_res_pages(rqstp); |
452 | rqstp->rq_res.page_len = 0; | 455 | rqstp->rq_res.page_len = 0; |
453 | rqstp->rq_res.page_base = 0; | 456 | rqstp->rq_res.page_base = 0; |
@@ -498,7 +501,7 @@ void svc_wake_up(struct svc_serv *serv) | |||
498 | spin_unlock_bh(&pool->sp_lock); | 501 | spin_unlock_bh(&pool->sp_lock); |
499 | } | 502 | } |
500 | } | 503 | } |
501 | EXPORT_SYMBOL(svc_wake_up); | 504 | EXPORT_SYMBOL_GPL(svc_wake_up); |
502 | 505 | ||
503 | int svc_port_is_privileged(struct sockaddr *sin) | 506 | int svc_port_is_privileged(struct sockaddr *sin) |
504 | { | 507 | { |
@@ -515,8 +518,10 @@ int svc_port_is_privileged(struct sockaddr *sin) | |||
515 | } | 518 | } |
516 | 519 | ||
517 | /* | 520 | /* |
518 | * Make sure that we don't have too many active connections. If we | 521 | * Make sure that we don't have too many active connections. If we have, |
519 | * have, something must be dropped. | 522 | * something must be dropped. It's not clear what will happen if we allow |
523 | * "too many" connections, but when dealing with network-facing software, | ||
524 | * we have to code defensively. Here we do that by imposing hard limits. | ||
520 | * | 525 | * |
521 | * There's no point in trying to do random drop here for DoS | 526 | * There's no point in trying to do random drop here for DoS |
522 | * prevention. The NFS clients does 1 reconnect in 15 seconds. An | 527 | * prevention. The NFS clients does 1 reconnect in 15 seconds. An |
@@ -525,19 +530,27 @@ int svc_port_is_privileged(struct sockaddr *sin) | |||
525 | * The only somewhat efficient mechanism would be if drop old | 530 | * The only somewhat efficient mechanism would be if drop old |
526 | * connections from the same IP first. But right now we don't even | 531 | * connections from the same IP first. But right now we don't even |
527 | * record the client IP in svc_sock. | 532 | * record the client IP in svc_sock. |
533 | * | ||
534 | * Single-threaded services that expect a lot of clients will probably | ||
535 | * need to set sv_maxconn to override the default value, which is based | ||
536 | * on the number of threads. | ||
528 | */ | 537 | */ |
529 | static void svc_check_conn_limits(struct svc_serv *serv) | 538 | static void svc_check_conn_limits(struct svc_serv *serv) |
530 | { | 539 | { |
531 | if (serv->sv_tmpcnt > (serv->sv_nrthreads+3)*20) { | 540 | unsigned int limit = serv->sv_maxconn ? serv->sv_maxconn : |
541 | (serv->sv_nrthreads+3) * 20; | ||
542 | |||
543 | if (serv->sv_tmpcnt > limit) { | ||
532 | struct svc_xprt *xprt = NULL; | 544 | struct svc_xprt *xprt = NULL; |
533 | spin_lock_bh(&serv->sv_lock); | 545 | spin_lock_bh(&serv->sv_lock); |
534 | if (!list_empty(&serv->sv_tempsocks)) { | 546 | if (!list_empty(&serv->sv_tempsocks)) { |
535 | if (net_ratelimit()) { | 547 | if (net_ratelimit()) { |
536 | /* Try to help the admin */ | 548 | /* Try to help the admin */ |
537 | printk(KERN_NOTICE "%s: too many open " | 549 | printk(KERN_NOTICE "%s: too many open " |
538 | "connections, consider increasing the " | 550 | "connections, consider increasing %s\n", |
539 | "number of nfsd threads\n", | 551 | serv->sv_name, serv->sv_maxconn ? |
540 | serv->sv_name); | 552 | "the max number of connections." : |
553 | "the number of threads."); | ||
541 | } | 554 | } |
542 | /* | 555 | /* |
543 | * Always select the oldest connection. It's not fair, | 556 | * Always select the oldest connection. It's not fair, |
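The new sv_maxconn field lets a service override the default (sv_nrthreads + 3) * 20 cap computed above. A single-threaded service expecting many clients would raise it right after creating the service; a hedged sketch, assuming the svc_create(prog, bufsize, family, shutdown) signature shown in the svc.c hunk below and the new sv_maxconn member of struct svc_serv that this change reads (the buffer size, address family and limit are placeholder values):

	#include <linux/socket.h>
	#include <linux/sunrpc/svc.h>

	static struct svc_serv *example_create_serv(struct svc_program *prog)
	{
		struct svc_serv *serv;

		serv = svc_create(prog, 4096, AF_INET, NULL);
		if (serv == NULL)
			return NULL;
		/* One thread would otherwise allow only (1 + 3) * 20 = 80
		 * temporary connections; lift the cap explicitly. */
		serv->sv_maxconn = 1024;
		return serv;
	}
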
@@ -730,7 +743,7 @@ int svc_recv(struct svc_rqst *rqstp, long timeout) | |||
730 | serv->sv_stats->netcnt++; | 743 | serv->sv_stats->netcnt++; |
731 | return len; | 744 | return len; |
732 | } | 745 | } |
733 | EXPORT_SYMBOL(svc_recv); | 746 | EXPORT_SYMBOL_GPL(svc_recv); |
734 | 747 | ||
735 | /* | 748 | /* |
736 | * Drop request | 749 | * Drop request |
@@ -740,7 +753,7 @@ void svc_drop(struct svc_rqst *rqstp) | |||
740 | dprintk("svc: xprt %p dropped request\n", rqstp->rq_xprt); | 753 | dprintk("svc: xprt %p dropped request\n", rqstp->rq_xprt); |
741 | svc_xprt_release(rqstp); | 754 | svc_xprt_release(rqstp); |
742 | } | 755 | } |
743 | EXPORT_SYMBOL(svc_drop); | 756 | EXPORT_SYMBOL_GPL(svc_drop); |
744 | 757 | ||
745 | /* | 758 | /* |
746 | * Return reply to client. | 759 | * Return reply to client. |
@@ -837,6 +850,11 @@ static void svc_age_temp_xprts(unsigned long closure) | |||
837 | void svc_delete_xprt(struct svc_xprt *xprt) | 850 | void svc_delete_xprt(struct svc_xprt *xprt) |
838 | { | 851 | { |
839 | struct svc_serv *serv = xprt->xpt_server; | 852 | struct svc_serv *serv = xprt->xpt_server; |
853 | struct svc_deferred_req *dr; | ||
854 | |||
855 | /* Only do this once */ | ||
856 | if (test_and_set_bit(XPT_DEAD, &xprt->xpt_flags)) | ||
857 | return; | ||
840 | 858 | ||
841 | dprintk("svc: svc_delete_xprt(%p)\n", xprt); | 859 | dprintk("svc: svc_delete_xprt(%p)\n", xprt); |
842 | xprt->xpt_ops->xpo_detach(xprt); | 860 | xprt->xpt_ops->xpo_detach(xprt); |
@@ -851,12 +869,16 @@ void svc_delete_xprt(struct svc_xprt *xprt) | |||
851 | * while still attached to a queue, the queue itself | 869 | * while still attached to a queue, the queue itself |
852 | * is about to be destroyed (in svc_destroy). | 870 | * is about to be destroyed (in svc_destroy). |
853 | */ | 871 | */ |
854 | if (!test_and_set_bit(XPT_DEAD, &xprt->xpt_flags)) { | 872 | if (test_bit(XPT_TEMP, &xprt->xpt_flags)) |
855 | BUG_ON(atomic_read(&xprt->xpt_ref.refcount) < 2); | 873 | serv->sv_tmpcnt--; |
856 | if (test_bit(XPT_TEMP, &xprt->xpt_flags)) | 874 | |
857 | serv->sv_tmpcnt--; | 875 | for (dr = svc_deferred_dequeue(xprt); dr; |
876 | dr = svc_deferred_dequeue(xprt)) { | ||
858 | svc_xprt_put(xprt); | 877 | svc_xprt_put(xprt); |
878 | kfree(dr); | ||
859 | } | 879 | } |
880 | |||
881 | svc_xprt_put(xprt); | ||
860 | spin_unlock_bh(&serv->sv_lock); | 882 | spin_unlock_bh(&serv->sv_lock); |
861 | } | 883 | } |
862 | 884 | ||
@@ -902,17 +924,19 @@ static void svc_revisit(struct cache_deferred_req *dreq, int too_many) | |||
902 | container_of(dreq, struct svc_deferred_req, handle); | 924 | container_of(dreq, struct svc_deferred_req, handle); |
903 | struct svc_xprt *xprt = dr->xprt; | 925 | struct svc_xprt *xprt = dr->xprt; |
904 | 926 | ||
905 | if (too_many) { | 927 | spin_lock(&xprt->xpt_lock); |
928 | set_bit(XPT_DEFERRED, &xprt->xpt_flags); | ||
929 | if (too_many || test_bit(XPT_DEAD, &xprt->xpt_flags)) { | ||
930 | spin_unlock(&xprt->xpt_lock); | ||
931 | dprintk("revisit canceled\n"); | ||
906 | svc_xprt_put(xprt); | 932 | svc_xprt_put(xprt); |
907 | kfree(dr); | 933 | kfree(dr); |
908 | return; | 934 | return; |
909 | } | 935 | } |
910 | dprintk("revisit queued\n"); | 936 | dprintk("revisit queued\n"); |
911 | dr->xprt = NULL; | 937 | dr->xprt = NULL; |
912 | spin_lock(&xprt->xpt_lock); | ||
913 | list_add(&dr->handle.recent, &xprt->xpt_deferred); | 938 | list_add(&dr->handle.recent, &xprt->xpt_deferred); |
914 | spin_unlock(&xprt->xpt_lock); | 939 | spin_unlock(&xprt->xpt_lock); |
915 | set_bit(XPT_DEFERRED, &xprt->xpt_flags); | ||
916 | svc_xprt_enqueue(xprt); | 940 | svc_xprt_enqueue(xprt); |
917 | svc_xprt_put(xprt); | 941 | svc_xprt_put(xprt); |
918 | } | 942 | } |
diff --git a/net/sunrpc/svcauth.c b/net/sunrpc/svcauth.c index 8a73cbb16052..e64109b02aee 100644 --- a/net/sunrpc/svcauth.c +++ b/net/sunrpc/svcauth.c | |||
@@ -57,13 +57,13 @@ svc_authenticate(struct svc_rqst *rqstp, __be32 *authp) | |||
57 | rqstp->rq_authop = aops; | 57 | rqstp->rq_authop = aops; |
58 | return aops->accept(rqstp, authp); | 58 | return aops->accept(rqstp, authp); |
59 | } | 59 | } |
60 | EXPORT_SYMBOL(svc_authenticate); | 60 | EXPORT_SYMBOL_GPL(svc_authenticate); |
61 | 61 | ||
62 | int svc_set_client(struct svc_rqst *rqstp) | 62 | int svc_set_client(struct svc_rqst *rqstp) |
63 | { | 63 | { |
64 | return rqstp->rq_authop->set_client(rqstp); | 64 | return rqstp->rq_authop->set_client(rqstp); |
65 | } | 65 | } |
66 | EXPORT_SYMBOL(svc_set_client); | 66 | EXPORT_SYMBOL_GPL(svc_set_client); |
67 | 67 | ||
68 | /* A request, which was authenticated, has now executed. | 68 | /* A request, which was authenticated, has now executed. |
69 | * Time to finalise the credentials and verifier | 69 | * Time to finalise the credentials and verifier |
@@ -95,7 +95,7 @@ svc_auth_register(rpc_authflavor_t flavor, struct auth_ops *aops) | |||
95 | spin_unlock(&authtab_lock); | 95 | spin_unlock(&authtab_lock); |
96 | return rv; | 96 | return rv; |
97 | } | 97 | } |
98 | EXPORT_SYMBOL(svc_auth_register); | 98 | EXPORT_SYMBOL_GPL(svc_auth_register); |
99 | 99 | ||
100 | void | 100 | void |
101 | svc_auth_unregister(rpc_authflavor_t flavor) | 101 | svc_auth_unregister(rpc_authflavor_t flavor) |
@@ -105,7 +105,7 @@ svc_auth_unregister(rpc_authflavor_t flavor) | |||
105 | authtab[flavor] = NULL; | 105 | authtab[flavor] = NULL; |
106 | spin_unlock(&authtab_lock); | 106 | spin_unlock(&authtab_lock); |
107 | } | 107 | } |
108 | EXPORT_SYMBOL(svc_auth_unregister); | 108 | EXPORT_SYMBOL_GPL(svc_auth_unregister); |
109 | 109 | ||
110 | /************************************************** | 110 | /************************************************** |
111 | * 'auth_domains' are stored in a hash table indexed by name. | 111 | * 'auth_domains' are stored in a hash table indexed by name. |
@@ -132,7 +132,7 @@ void auth_domain_put(struct auth_domain *dom) | |||
132 | spin_unlock(&auth_domain_lock); | 132 | spin_unlock(&auth_domain_lock); |
133 | } | 133 | } |
134 | } | 134 | } |
135 | EXPORT_SYMBOL(auth_domain_put); | 135 | EXPORT_SYMBOL_GPL(auth_domain_put); |
136 | 136 | ||
137 | struct auth_domain * | 137 | struct auth_domain * |
138 | auth_domain_lookup(char *name, struct auth_domain *new) | 138 | auth_domain_lookup(char *name, struct auth_domain *new) |
@@ -157,10 +157,10 @@ auth_domain_lookup(char *name, struct auth_domain *new) | |||
157 | spin_unlock(&auth_domain_lock); | 157 | spin_unlock(&auth_domain_lock); |
158 | return new; | 158 | return new; |
159 | } | 159 | } |
160 | EXPORT_SYMBOL(auth_domain_lookup); | 160 | EXPORT_SYMBOL_GPL(auth_domain_lookup); |
161 | 161 | ||
162 | struct auth_domain *auth_domain_find(char *name) | 162 | struct auth_domain *auth_domain_find(char *name) |
163 | { | 163 | { |
164 | return auth_domain_lookup(name, NULL); | 164 | return auth_domain_lookup(name, NULL); |
165 | } | 165 | } |
166 | EXPORT_SYMBOL(auth_domain_find); | 166 | EXPORT_SYMBOL_GPL(auth_domain_find); |
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c index 82240e6127b2..5c865e2d299e 100644 --- a/net/sunrpc/svcauth_unix.c +++ b/net/sunrpc/svcauth_unix.c | |||
@@ -64,7 +64,7 @@ struct auth_domain *unix_domain_find(char *name) | |||
64 | rv = auth_domain_lookup(name, &new->h); | 64 | rv = auth_domain_lookup(name, &new->h); |
65 | } | 65 | } |
66 | } | 66 | } |
67 | EXPORT_SYMBOL(unix_domain_find); | 67 | EXPORT_SYMBOL_GPL(unix_domain_find); |
68 | 68 | ||
69 | static void svcauth_unix_domain_release(struct auth_domain *dom) | 69 | static void svcauth_unix_domain_release(struct auth_domain *dom) |
70 | { | 70 | { |
@@ -358,7 +358,7 @@ int auth_unix_add_addr(struct in6_addr *addr, struct auth_domain *dom) | |||
358 | else | 358 | else |
359 | return -ENOMEM; | 359 | return -ENOMEM; |
360 | } | 360 | } |
361 | EXPORT_SYMBOL(auth_unix_add_addr); | 361 | EXPORT_SYMBOL_GPL(auth_unix_add_addr); |
362 | 362 | ||
363 | int auth_unix_forget_old(struct auth_domain *dom) | 363 | int auth_unix_forget_old(struct auth_domain *dom) |
364 | { | 364 | { |
@@ -370,7 +370,7 @@ int auth_unix_forget_old(struct auth_domain *dom) | |||
370 | udom->addr_changes++; | 370 | udom->addr_changes++; |
371 | return 0; | 371 | return 0; |
372 | } | 372 | } |
373 | EXPORT_SYMBOL(auth_unix_forget_old); | 373 | EXPORT_SYMBOL_GPL(auth_unix_forget_old); |
374 | 374 | ||
375 | struct auth_domain *auth_unix_lookup(struct in6_addr *addr) | 375 | struct auth_domain *auth_unix_lookup(struct in6_addr *addr) |
376 | { | 376 | { |
@@ -395,13 +395,13 @@ struct auth_domain *auth_unix_lookup(struct in6_addr *addr) | |||
395 | cache_put(&ipm->h, &ip_map_cache); | 395 | cache_put(&ipm->h, &ip_map_cache); |
396 | return rv; | 396 | return rv; |
397 | } | 397 | } |
398 | EXPORT_SYMBOL(auth_unix_lookup); | 398 | EXPORT_SYMBOL_GPL(auth_unix_lookup); |
399 | 399 | ||
400 | void svcauth_unix_purge(void) | 400 | void svcauth_unix_purge(void) |
401 | { | 401 | { |
402 | cache_purge(&ip_map_cache); | 402 | cache_purge(&ip_map_cache); |
403 | } | 403 | } |
404 | EXPORT_SYMBOL(svcauth_unix_purge); | 404 | EXPORT_SYMBOL_GPL(svcauth_unix_purge); |
405 | 405 | ||
406 | static inline struct ip_map * | 406 | static inline struct ip_map * |
407 | ip_map_cached_get(struct svc_rqst *rqstp) | 407 | ip_map_cached_get(struct svc_rqst *rqstp) |
@@ -714,7 +714,7 @@ svcauth_unix_set_client(struct svc_rqst *rqstp) | |||
714 | return SVC_OK; | 714 | return SVC_OK; |
715 | } | 715 | } |
716 | 716 | ||
717 | EXPORT_SYMBOL(svcauth_unix_set_client); | 717 | EXPORT_SYMBOL_GPL(svcauth_unix_set_client); |
718 | 718 | ||
719 | static int | 719 | static int |
720 | svcauth_null_accept(struct svc_rqst *rqstp, __be32 *authp) | 720 | svcauth_null_accept(struct svc_rqst *rqstp, __be32 *authp) |
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index ef3238d665ee..5763e6460fea 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c | |||
@@ -59,6 +59,7 @@ static void svc_udp_data_ready(struct sock *, int); | |||
59 | static int svc_udp_recvfrom(struct svc_rqst *); | 59 | static int svc_udp_recvfrom(struct svc_rqst *); |
60 | static int svc_udp_sendto(struct svc_rqst *); | 60 | static int svc_udp_sendto(struct svc_rqst *); |
61 | static void svc_sock_detach(struct svc_xprt *); | 61 | static void svc_sock_detach(struct svc_xprt *); |
62 | static void svc_tcp_sock_detach(struct svc_xprt *); | ||
62 | static void svc_sock_free(struct svc_xprt *); | 63 | static void svc_sock_free(struct svc_xprt *); |
63 | 64 | ||
64 | static struct svc_xprt *svc_create_socket(struct svc_serv *, int, | 65 | static struct svc_xprt *svc_create_socket(struct svc_serv *, int, |
@@ -102,7 +103,6 @@ static void svc_reclassify_socket(struct socket *sock) | |||
102 | static void svc_release_skb(struct svc_rqst *rqstp) | 103 | static void svc_release_skb(struct svc_rqst *rqstp) |
103 | { | 104 | { |
104 | struct sk_buff *skb = rqstp->rq_xprt_ctxt; | 105 | struct sk_buff *skb = rqstp->rq_xprt_ctxt; |
105 | struct svc_deferred_req *dr = rqstp->rq_deferred; | ||
106 | 106 | ||
107 | if (skb) { | 107 | if (skb) { |
108 | struct svc_sock *svsk = | 108 | struct svc_sock *svsk = |
@@ -112,10 +112,6 @@ static void svc_release_skb(struct svc_rqst *rqstp) | |||
112 | dprintk("svc: service %p, releasing skb %p\n", rqstp, skb); | 112 | dprintk("svc: service %p, releasing skb %p\n", rqstp, skb); |
113 | skb_free_datagram(svsk->sk_sk, skb); | 113 | skb_free_datagram(svsk->sk_sk, skb); |
114 | } | 114 | } |
115 | if (dr) { | ||
116 | rqstp->rq_deferred = NULL; | ||
117 | kfree(dr); | ||
118 | } | ||
119 | } | 115 | } |
120 | 116 | ||
121 | union svc_pktinfo_u { | 117 | union svc_pktinfo_u { |
@@ -289,7 +285,7 @@ svc_sock_names(char *buf, struct svc_serv *serv, char *toclose) | |||
289 | return -ENOENT; | 285 | return -ENOENT; |
290 | return len; | 286 | return len; |
291 | } | 287 | } |
292 | EXPORT_SYMBOL(svc_sock_names); | 288 | EXPORT_SYMBOL_GPL(svc_sock_names); |
293 | 289 | ||
294 | /* | 290 | /* |
295 | * Check input queue length | 291 | * Check input queue length |
@@ -1017,7 +1013,7 @@ static struct svc_xprt_ops svc_tcp_ops = { | |||
1017 | .xpo_recvfrom = svc_tcp_recvfrom, | 1013 | .xpo_recvfrom = svc_tcp_recvfrom, |
1018 | .xpo_sendto = svc_tcp_sendto, | 1014 | .xpo_sendto = svc_tcp_sendto, |
1019 | .xpo_release_rqst = svc_release_skb, | 1015 | .xpo_release_rqst = svc_release_skb, |
1020 | .xpo_detach = svc_sock_detach, | 1016 | .xpo_detach = svc_tcp_sock_detach, |
1021 | .xpo_free = svc_sock_free, | 1017 | .xpo_free = svc_sock_free, |
1022 | .xpo_prep_reply_hdr = svc_tcp_prep_reply_hdr, | 1018 | .xpo_prep_reply_hdr = svc_tcp_prep_reply_hdr, |
1023 | .xpo_has_wspace = svc_tcp_has_wspace, | 1019 | .xpo_has_wspace = svc_tcp_has_wspace, |
@@ -1101,7 +1097,7 @@ void svc_sock_update_bufs(struct svc_serv *serv) | |||
1101 | } | 1097 | } |
1102 | spin_unlock_bh(&serv->sv_lock); | 1098 | spin_unlock_bh(&serv->sv_lock); |
1103 | } | 1099 | } |
1104 | EXPORT_SYMBOL(svc_sock_update_bufs); | 1100 | EXPORT_SYMBOL_GPL(svc_sock_update_bufs); |
1105 | 1101 | ||
1106 | /* | 1102 | /* |
1107 | * Initialize socket for RPC use and create svc_sock struct | 1103 | * Initialize socket for RPC use and create svc_sock struct |
@@ -1287,6 +1283,24 @@ static void svc_sock_detach(struct svc_xprt *xprt) | |||
1287 | sk->sk_state_change = svsk->sk_ostate; | 1283 | sk->sk_state_change = svsk->sk_ostate; |
1288 | sk->sk_data_ready = svsk->sk_odata; | 1284 | sk->sk_data_ready = svsk->sk_odata; |
1289 | sk->sk_write_space = svsk->sk_owspace; | 1285 | sk->sk_write_space = svsk->sk_owspace; |
1286 | |||
1287 | if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) | ||
1288 | wake_up_interruptible(sk->sk_sleep); | ||
1289 | } | ||
1290 | |||
1291 | /* | ||
1292 | * Disconnect the socket, and reset the callbacks | ||
1293 | */ | ||
1294 | static void svc_tcp_sock_detach(struct svc_xprt *xprt) | ||
1295 | { | ||
1296 | struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt); | ||
1297 | |||
1298 | dprintk("svc: svc_tcp_sock_detach(%p)\n", svsk); | ||
1299 | |||
1300 | svc_sock_detach(xprt); | ||
1301 | |||
1302 | if (!test_bit(XPT_LISTENER, &xprt->xpt_flags)) | ||
1303 | kernel_sock_shutdown(svsk->sk_sock, SHUT_RDWR); | ||
1290 | } | 1304 | } |
1291 | 1305 | ||
1292 | /* | 1306 | /* |
diff --git a/net/wimax/Kconfig b/net/wimax/Kconfig new file mode 100644 index 000000000000..18495cdcd10d --- /dev/null +++ b/net/wimax/Kconfig | |||
@@ -0,0 +1,52 @@ | |||
1 | # | ||
2 | # WiMAX LAN device configuration | ||
3 | # | ||
4 | # Note the ugly 'depends on' on WIMAX: it disallows RFKILL from being a | ||
5 | # module if WIMAX is to be linked in. The WiMAX code is done in such a | ||
6 | # way that it doesn't require an explicit dependency on RFKILL in | ||
7 | # case an embedded system wants to rip it out. | ||
8 | # | ||
9 | # As well, enablement of the RFKILL code means we need the INPUT layer | ||
10 | # support to inject events coming from hw rfkill switches. That | ||
11 | # dependency could be killed if input.h provided appropriate means to | ||
12 | # work when input is disabled. | ||
13 | |||
14 | comment "WiMAX Wireless Broadband support requires CONFIG_INPUT enabled" | ||
15 | depends on INPUT = n && RFKILL != n | ||
16 | |||
17 | menuconfig WIMAX | ||
18 | tristate "WiMAX Wireless Broadband support" | ||
19 | depends on (y && RFKILL != m) || m | ||
20 | depends on (INPUT && RFKILL != n) || RFKILL = n | ||
21 | help | ||
22 | |||
23 | Select to configure support for devices that provide | ||
24 | wireless broadband connectivity using the WiMAX protocol | ||
25 | (IEEE 802.16). | ||
26 | |||
27 | Please note that most of these devices require signing up | ||
28 | for a service plan with a provider. | ||
29 | |||
30 | The different WiMAX drivers can be enabled in the menu entry | ||
31 | |||
32 | Device Drivers > Network device support > WiMAX Wireless | ||
33 | Broadband devices | ||
34 | |||
35 | If unsure, it is safe to select M (module). | ||
36 | |||
37 | config WIMAX_DEBUG_LEVEL | ||
38 | int "WiMAX debug level" | ||
39 | depends on WIMAX | ||
40 | default 8 | ||
41 | help | ||
42 | |||
43 | Select the maximum debug verbosity level to be compiled into | ||
44 | the WiMAX stack code. | ||
45 | |||
46 | By default, debug messages are disabled at runtime and can | ||
47 | be selectively enabled for different parts of the code using | ||
48 | the sysfs debug-levels file. | ||
49 | |||
50 | If set at zero, this will compile out all the debug code. | ||
51 | |||
52 | It is recommended that it is left at 8. | ||
diff --git a/net/wimax/Makefile b/net/wimax/Makefile new file mode 100644 index 000000000000..5b80b941c2c9 --- /dev/null +++ b/net/wimax/Makefile | |||
@@ -0,0 +1,13 @@ | |||
1 | |||
2 | obj-$(CONFIG_WIMAX) += wimax.o | ||
3 | |||
4 | wimax-y := \ | ||
5 | id-table.o \ | ||
6 | op-msg.o \ | ||
7 | op-reset.o \ | ||
8 | op-rfkill.o \ | ||
9 | stack.o | ||
10 | |||
11 | wimax-$(CONFIG_DEBUG_FS) += debugfs.o | ||
12 | |||
13 | |||
diff --git a/net/wimax/debug-levels.h b/net/wimax/debug-levels.h new file mode 100644 index 000000000000..1c29123a3aa9 --- /dev/null +++ b/net/wimax/debug-levels.h | |||
@@ -0,0 +1,42 @@ | |||
1 | /* | ||
2 | * Linux WiMAX Stack | ||
3 | * Debug levels control file for the wimax module | ||
4 | * | ||
5 | * | ||
6 | * Copyright (C) 2007-2008 Intel Corporation <linux-wimax@intel.com> | ||
7 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License version | ||
11 | * 2 as published by the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
21 | * 02110-1301, USA. | ||
22 | */ | ||
23 | #ifndef __debug_levels__h__ | ||
24 | #define __debug_levels__h__ | ||
25 | |||
26 | /* Maximum compile and run time debug level for all submodules */ | ||
27 | #define D_MODULENAME wimax | ||
28 | #define D_MASTER CONFIG_WIMAX_DEBUG_LEVEL | ||
29 | |||
30 | #include <linux/wimax/debug.h> | ||
31 | |||
32 | /* List of all the enabled modules */ | ||
33 | enum d_module { | ||
34 | D_SUBMODULE_DECLARE(debugfs), | ||
35 | D_SUBMODULE_DECLARE(id_table), | ||
36 | D_SUBMODULE_DECLARE(op_msg), | ||
37 | D_SUBMODULE_DECLARE(op_reset), | ||
38 | D_SUBMODULE_DECLARE(op_rfkill), | ||
39 | D_SUBMODULE_DECLARE(stack), | ||
40 | }; | ||
41 | |||
42 | #endif /* #ifndef __debug_levels__h__ */ | ||
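Every wimax source file hooks into this debug framework the same way: it defines D_SUBMODULE before including debug-levels.h, appears once in the enum above and once in the D_LEVEL[] array in debugfs.c, and then logs through d_printf()/d_fnstart()/d_fnend(). A sketch for a hypothetical extra submodule, mirroring the usage in id-table.c further down; the submodule name "example" is invented:

	/* debug-levels.h: add D_SUBMODULE_DECLARE(example) to enum d_module;
	 * debugfs.c:      add D_SUBMODULE_DEFINE(example) to D_LEVEL[].      */

	/* example.c */
	#include <linux/wimax.h>
	#include "wimax-internal.h"

	#define D_SUBMODULE example
	#include "debug-levels.h"

	static void example_init(struct wimax_dev *wimax_dev)
	{
		d_fnstart(3, NULL, "(wimax_dev %p)\n", wimax_dev);
		d_printf(1, NULL, "example submodule ready\n");
		d_fnend(3, NULL, "(wimax_dev %p) = void\n", wimax_dev);
	}
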
diff --git a/net/wimax/debugfs.c b/net/wimax/debugfs.c new file mode 100644 index 000000000000..87cf4430079c --- /dev/null +++ b/net/wimax/debugfs.c | |||
@@ -0,0 +1,90 @@ | |||
1 | /* | ||
2 | * Linux WiMAX | ||
3 | * Debugfs support | ||
4 | * | ||
5 | * | ||
6 | * Copyright (C) 2005-2006 Intel Corporation <linux-wimax@intel.com> | ||
7 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License version | ||
11 | * 2 as published by the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
21 | * 02110-1301, USA. | ||
22 | */ | ||
23 | #include <linux/debugfs.h> | ||
24 | #include <linux/wimax.h> | ||
25 | #include "wimax-internal.h" | ||
26 | |||
27 | #define D_SUBMODULE debugfs | ||
28 | #include "debug-levels.h" | ||
29 | |||
30 | |||
31 | /* Debug framework control of debug levels */ | ||
32 | struct d_level D_LEVEL[] = { | ||
33 | D_SUBMODULE_DEFINE(debugfs), | ||
34 | D_SUBMODULE_DEFINE(id_table), | ||
35 | D_SUBMODULE_DEFINE(op_msg), | ||
36 | D_SUBMODULE_DEFINE(op_reset), | ||
37 | D_SUBMODULE_DEFINE(op_rfkill), | ||
38 | D_SUBMODULE_DEFINE(stack), | ||
39 | }; | ||
40 | size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL); | ||
41 | |||
42 | #define __debugfs_register(prefix, name, parent) \ | ||
43 | do { \ | ||
44 | result = d_level_register_debugfs(prefix, name, parent); \ | ||
45 | if (result < 0) \ | ||
46 | goto error; \ | ||
47 | } while (0) | ||
48 | |||
49 | |||
50 | int wimax_debugfs_add(struct wimax_dev *wimax_dev) | ||
51 | { | ||
52 | int result; | ||
53 | struct net_device *net_dev = wimax_dev->net_dev; | ||
54 | struct device *dev = net_dev->dev.parent; | ||
55 | struct dentry *dentry; | ||
56 | char buf[128]; | ||
57 | |||
58 | snprintf(buf, sizeof(buf), "wimax:%s", net_dev->name); | ||
59 | dentry = debugfs_create_dir(buf, NULL); | ||
60 | result = PTR_ERR(dentry); | ||
61 | if (IS_ERR(dentry)) { | ||
62 | if (result == -ENODEV) | ||
63 | result = 0; /* No debugfs support */ | ||
64 | else | ||
65 | dev_err(dev, "Can't create debugfs dentry: %d\n", | ||
66 | result); | ||
67 | goto out; | ||
68 | } | ||
69 | wimax_dev->debugfs_dentry = dentry; | ||
70 | __debugfs_register("wimax_dl_", debugfs, dentry); | ||
71 | __debugfs_register("wimax_dl_", id_table, dentry); | ||
72 | __debugfs_register("wimax_dl_", op_msg, dentry); | ||
73 | __debugfs_register("wimax_dl_", op_reset, dentry); | ||
74 | __debugfs_register("wimax_dl_", op_rfkill, dentry); | ||
75 | __debugfs_register("wimax_dl_", stack, dentry); | ||
76 | result = 0; | ||
77 | out: | ||
78 | return result; | ||
79 | |||
80 | error: | ||
81 | debugfs_remove_recursive(wimax_dev->debugfs_dentry); | ||
82 | return result; | ||
83 | } | ||
84 | |||
85 | void wimax_debugfs_rm(struct wimax_dev *wimax_dev) | ||
86 | { | ||
87 | debugfs_remove_recursive(wimax_dev->debugfs_dentry); | ||
88 | } | ||
89 | |||
90 | |||
diff --git a/net/wimax/id-table.c b/net/wimax/id-table.c new file mode 100644 index 000000000000..5e685f7eda90 --- /dev/null +++ b/net/wimax/id-table.c | |||
@@ -0,0 +1,144 @@ | |||
1 | /* | ||
2 | * Linux WiMAX | ||
3 | * Mapping of generic netlink family IDs to net devices | ||
4 | * | ||
5 | * | ||
6 | * Copyright (C) 2005-2006 Intel Corporation <linux-wimax@intel.com> | ||
7 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License version | ||
11 | * 2 as published by the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
21 | * 02110-1301, USA. | ||
22 | * | ||
23 | * | ||
24 | * We assign a single generic netlink family ID to each device (to | ||
25 | * simplify lookup). | ||
26 | * | ||
27 | * We need a way to map family ID to a wimax_dev pointer. | ||
28 | * | ||
29 | * The idea is to use a very simple lookup. Using a netlink attribute | ||
30 | * with (for example) the interface name implies a heavier search over | ||
31 | * all the network devices; that seemed like a waste given that we know | ||
32 | * we are looking for a WiMAX device and that most systems will have | ||
33 | * just a single WiMAX adapter. | ||
34 | * | ||
35 | * We put all the WiMAX devices in the system in a linked list and | ||
36 | * match the generic netlink family ID against the list. | ||
37 | * | ||
38 | * By using a linked list, the case of a single adapter in the system | ||
39 | * becomes (almost) no overhead, while still working for many more. If | ||
40 | * it ever goes beyond two, I'll be surprised. | ||
41 | */ | ||
42 | #include <linux/device.h> | ||
43 | #include <net/genetlink.h> | ||
44 | #include <linux/netdevice.h> | ||
45 | #include <linux/list.h> | ||
46 | #include <linux/wimax.h> | ||
47 | #include "wimax-internal.h" | ||
48 | |||
49 | |||
50 | #define D_SUBMODULE id_table | ||
51 | #include "debug-levels.h" | ||
52 | |||
53 | |||
54 | static DEFINE_SPINLOCK(wimax_id_table_lock); | ||
55 | static struct list_head wimax_id_table = LIST_HEAD_INIT(wimax_id_table); | ||
56 | |||
57 | |||
58 | /* | ||
59 | * wimax_id_table_add - add a gennetlink family ID / wimax_dev mapping | ||
60 | * | ||
61 | * @wimax_dev: WiMAX device descriptor to associate to the Generic | ||
62 | * Netlink family ID. | ||
63 | * | ||
64 | * Add the device to the mapping list so that it can later be looked | ||
65 | * up by the network interface index of its net_device. | ||
66 | */ | ||
67 | void wimax_id_table_add(struct wimax_dev *wimax_dev) | ||
68 | { | ||
69 | d_fnstart(3, NULL, "(wimax_dev %p)\n", wimax_dev); | ||
70 | spin_lock(&wimax_id_table_lock); | ||
71 | list_add(&wimax_dev->id_table_node, &wimax_id_table); | ||
72 | spin_unlock(&wimax_id_table_lock); | ||
73 | d_fnend(3, NULL, "(wimax_dev %p)\n", wimax_dev); | ||
74 | } | ||
75 | |||
76 | |||
77 | /* | ||
78 | * wimax_dev_get_by_genl_info - lookup a wimax_dev from the gennetlink info | ||
79 | * | ||
80 | * The generic netlink request carries the interface index of the | ||
81 | * target device; walk the mapping table looking for a wimax_dev whose | ||
82 | * net_device has that ifindex and take a reference on it. | ||
83 | * | ||
84 | * When done, the reference should be dropped with | ||
85 | * 'dev_put(wimax_dev->net_dev)'. | ||
86 | */ | ||
87 | struct wimax_dev *wimax_dev_get_by_genl_info( | ||
88 | struct genl_info *info, int ifindex) | ||
89 | { | ||
90 | struct wimax_dev *wimax_dev = NULL; | ||
91 | |||
92 | d_fnstart(3, NULL, "(info %p ifindex %d)\n", info, ifindex); | ||
93 | spin_lock(&wimax_id_table_lock); | ||
94 | list_for_each_entry(wimax_dev, &wimax_id_table, id_table_node) { | ||
95 | if (wimax_dev->net_dev->ifindex == ifindex) { | ||
96 | dev_hold(wimax_dev->net_dev); | ||
97 | break; | ||
98 | } | ||
99 | } | ||
100 | if (wimax_dev == NULL) | ||
101 | d_printf(1, NULL, "wimax: no devices found with ifindex %d\n", | ||
102 | ifindex); | ||
103 | spin_unlock(&wimax_id_table_lock); | ||
104 | d_fnend(3, NULL, "(info %p ifindex %d) = %p\n", | ||
105 | info, ifindex, wimax_dev); | ||
106 | return wimax_dev; | ||
107 | } | ||
108 | |||
109 | |||
110 | /* | ||
111 | * wimax_id_table_rm - Remove a gennetlink family ID / wimax_dev mapping | ||
112 | * | ||
113 | * @wimax_dev: WiMAX device descriptor whose mapping should be removed | ||
114 | */ | ||
115 | void wimax_id_table_rm(struct wimax_dev *wimax_dev) | ||
116 | { | ||
117 | spin_lock(&wimax_id_table_lock); | ||
118 | list_del_init(&wimax_dev->id_table_node); | ||
119 | spin_unlock(&wimax_id_table_lock); | ||
120 | } | ||
121 | |||
122 | |||
123 | /* | ||
124 | * Release the gennetlink family id / mapping table | ||
125 | * | ||
126 | * On debug, verify that the table is empty upon removal. We want the | ||
127 | * code always compiled, to ensure it doesn't bit rot. It will be | ||
128 | * compiled out if CONFIG_BUG is disabled. | ||
129 | */ | ||
130 | void wimax_id_table_release(void) | ||
131 | { | ||
132 | struct wimax_dev *wimax_dev; | ||
133 | |||
134 | #ifndef CONFIG_BUG | ||
135 | return; | ||
136 | #endif | ||
137 | spin_lock(&wimax_id_table_lock); | ||
138 | list_for_each_entry(wimax_dev, &wimax_id_table, id_table_node) { | ||
139 | printk(KERN_ERR "BUG: %s wimax_dev %p ifindex %d not cleared\n", | ||
140 | __func__, wimax_dev, wimax_dev->net_dev->ifindex); | ||
141 | WARN_ON(1); | ||
142 | } | ||
143 | spin_unlock(&wimax_id_table_lock); | ||
144 | } | ||
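As the comment at the top of id-table.c explains, the "table" is just a linked list matched on the network interface index. The expected call pattern from the rest of the stack, condensed from the comments in this file (the surrounding registration and request-handling code is only implied here, not shown in this diff):

	/* at device registration time */
	wimax_id_table_add(wimax_dev);

	/* while handling a generic netlink request that carries an ifindex */
	wimax_dev = wimax_dev_get_by_genl_info(info, ifindex);
	if (wimax_dev == NULL)
		return -ENODEV;
	/* ... operate on wimax_dev ... */
	dev_put(wimax_dev->net_dev);	/* drop the reference taken by the lookup */

	/* at device unregistration time */
	wimax_id_table_rm(wimax_dev);
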
diff --git a/net/wimax/op-msg.c b/net/wimax/op-msg.c new file mode 100644 index 000000000000..cb3b4ad53683 --- /dev/null +++ b/net/wimax/op-msg.c | |||
@@ -0,0 +1,421 @@ | |||
1 | /* | ||
2 | * Linux WiMAX | ||
3 | * Generic messaging interface between userspace and driver/device | ||
4 | * | ||
5 | * | ||
6 | * Copyright (C) 2007-2008 Intel Corporation <linux-wimax@intel.com> | ||
7 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License version | ||
11 | * 2 as published by the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
21 | * 02110-1301, USA. | ||
22 | * | ||
23 | * | ||
24 | * This implements a direct communication channel between user space and | ||
25 | * the driver/device, by which free form messages can be sent back and | ||
26 | * forth. | ||
27 | * | ||
28 | * This is intended for device-specific features, vendor quirks, etc. | ||
29 | * | ||
30 | * See include/net/wimax.h | ||
31 | * | ||
32 | * GENERIC NETLINK ENCODING AND CAPACITY | ||
33 | * | ||
34 | * A destination "pipe name" is added to each message; it is up to the | ||
35 | * drivers to assign or use those names (if using them at all). | ||
36 | * | ||
37 | * Messages are encoded as a binary netlink attribute using nla_put() | ||
38 | * using type NLA_UNSPEC (as some versions of libnl still in | ||
39 | * deployment don't yet understand NLA_BINARY). | ||
40 | * | ||
41 | * The maximum capacity of this transport is PAGESIZE per message (so | ||
42 | * the actual payload will be a bit smaller depending on the | ||
43 | * netlink/generic netlink attributes and headers). | ||
44 | * | ||
45 | * RECEPTION OF MESSAGES | ||
46 | * | ||
47 | * When a message is received from user space, it is passed verbatim | ||
48 | * to the driver calling wimax_dev->op_msg_from_user(). The return | ||
49 | * value from this function is passed back to user space as an ack | ||
50 | * over the generic netlink protocol. | ||
51 | * | ||
52 | * The stack doesn't do any processing or interpretation of these | ||
53 | * messages. | ||
54 | * | ||
55 | * SENDING MESSAGES | ||
56 | * | ||
57 | * Messages can be sent with wimax_msg(). | ||
58 | * | ||
59 | * If the message delivery needs to happen on a different context to | ||
60 | * that of its creation, wimax_msg_alloc() can be used to get a | ||
61 | * pointer to the message that can be delivered later on with | ||
62 | * wimax_msg_send(). | ||
63 | * | ||
64 | * ROADMAP | ||
65 | * | ||
66 | * wimax_gnl_doit_msg_from_user() Process a message from user space | ||
67 | * wimax_dev_get_by_genl_info() | ||
68 | * wimax_dev->op_msg_from_user() Delivery of message to the driver | ||
69 | * | ||
70 | * wimax_msg() Send a message to user space | ||
71 | * wimax_msg_alloc() | ||
72 | * wimax_msg_send() | ||
73 | */ | ||
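Editor's note: a minimal driver-side sketch (not part of this patch) of the one-shot send path described above. Only the wimax_msg() prototype defined later in this file is assumed; the helper name and report buffer are hypothetical.

    #include <net/wimax.h>

    /* Hypothetical driver helper: push a device report to user space on
     * the default (NULL) pipe; wimax_msg() copies the buffer and does the
     * generic netlink encoding described in the header comment above. */
    static int my_forward_report(struct wimax_dev *wimax_dev,
                                 const void *report, size_t report_len)
    {
            return wimax_msg(wimax_dev, NULL, report, report_len,
                             GFP_KERNEL);
    }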
74 | #include <linux/device.h> | ||
75 | #include <net/genetlink.h> | ||
76 | #include <linux/netdevice.h> | ||
77 | #include <linux/wimax.h> | ||
78 | #include <linux/security.h> | ||
79 | #include "wimax-internal.h" | ||
80 | |||
81 | |||
82 | #define D_SUBMODULE op_msg | ||
83 | #include "debug-levels.h" | ||
84 | |||
85 | |||
86 | /** | ||
87 | * wimax_msg_alloc - Create a new skb for sending a message to userspace | ||
88 | * | ||
89 | * @wimax_dev: WiMAX device descriptor | ||
90 | * @pipe_name: "named pipe" the message will be sent to | ||
91 | * @msg: pointer to the message data to send | ||
92 | * @size: size of the message to send (in bytes), including the header. | ||
93 | * @gfp_flags: flags for memory allocation. | ||
94 | * | ||
95 | * Returns: pointer to the new skb if ok, ERR_PTR() with a negative errno code on error | ||
96 | * | ||
97 | * Description: | ||
98 | * | ||
99 | * Allocates an skb that will contain the message to send to user | ||
100 | * space over the messaging pipe and initializes it, copying the | ||
101 | * payload. | ||
102 | * | ||
103 | * Once this call is done, you can deliver it with | ||
104 | * wimax_msg_send(). | ||
105 | * | ||
106 | * IMPORTANT: | ||
107 | * | ||
108 | * Don't use skb_push()/skb_pull()/skb_reserve() on the skb, as | ||
109 | * wimax_msg_send() depends on skb->data being placed at the | ||
110 | * beginning of the user message. | ||
111 | */ | ||
112 | struct sk_buff *wimax_msg_alloc(struct wimax_dev *wimax_dev, | ||
113 | const char *pipe_name, | ||
114 | const void *msg, size_t size, | ||
115 | gfp_t gfp_flags) | ||
116 | { | ||
117 | int result; | ||
118 | struct device *dev = wimax_dev->net_dev->dev.parent; | ||
119 | size_t msg_size; | ||
120 | void *genl_msg; | ||
121 | struct sk_buff *skb; | ||
122 | |||
123 | msg_size = nla_total_size(size) | ||
124 | + nla_total_size(sizeof(u32)) | ||
125 | + (pipe_name ? nla_total_size(strlen(pipe_name)) : 0); | ||
126 | result = -ENOMEM; | ||
127 | skb = genlmsg_new(msg_size, gfp_flags); | ||
128 | if (skb == NULL) | ||
129 | goto error_new; | ||
130 | genl_msg = genlmsg_put(skb, 0, 0, &wimax_gnl_family, | ||
131 | 0, WIMAX_GNL_OP_MSG_TO_USER); | ||
132 | if (genl_msg == NULL) { | ||
133 | dev_err(dev, "no memory to create generic netlink message\n"); | ||
134 | goto error_genlmsg_put; | ||
135 | } | ||
136 | result = nla_put_u32(skb, WIMAX_GNL_MSG_IFIDX, | ||
137 | wimax_dev->net_dev->ifindex); | ||
138 | if (result < 0) { | ||
139 | dev_err(dev, "no memory to add ifindex attribute\n"); | ||
140 | goto error_nla_put; | ||
141 | } | ||
142 | if (pipe_name) { | ||
143 | result = nla_put_string(skb, WIMAX_GNL_MSG_PIPE_NAME, | ||
144 | pipe_name); | ||
145 | if (result < 0) { | ||
146 | dev_err(dev, "no memory to add pipe_name attribute\n"); | ||
147 | goto error_nla_put; | ||
148 | } | ||
149 | } | ||
150 | result = nla_put(skb, WIMAX_GNL_MSG_DATA, size, msg); | ||
151 | if (result < 0) { | ||
152 | dev_err(dev, "no memory to add payload in attribute\n"); | ||
153 | goto error_nla_put; | ||
154 | } | ||
155 | genlmsg_end(skb, genl_msg); | ||
156 | return skb; | ||
157 | |||
158 | error_nla_put: | ||
159 | error_genlmsg_put: | ||
160 | error_new: | ||
161 | nlmsg_free(skb); | ||
162 | return ERR_PTR(result); | ||
163 | |||
164 | } | ||
165 | EXPORT_SYMBOL_GPL(wimax_msg_alloc); | ||
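Editor's note: a sketch of the deferred-delivery pattern mentioned in the file header (allocate in one context, send later), assuming only wimax_msg_alloc()/wimax_msg_send() from this file; the "diagnostics" pipe name and helper name are made up.

    #include <linux/err.h>
    #include <net/wimax.h>

    static int my_deferred_report(struct wimax_dev *wimax_dev,
                                  const void *report, size_t report_len)
    {
            struct sk_buff *skb;

            /* Build the message now... */
            skb = wimax_msg_alloc(wimax_dev, "diagnostics", report,
                                  report_len, GFP_KERNEL);
            if (IS_ERR(skb))
                    return PTR_ERR(skb);
            /* ...and deliver it later (wimax_msg_send() consumes skb) */
            return wimax_msg_send(wimax_dev, skb);
    }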
166 | |||
167 | |||
168 | /** | ||
169 | * wimax_msg_data_len - Return a pointer and size of a message's payload | ||
170 | * | ||
171 | * @msg: Pointer to a message created with wimax_msg_alloc() | ||
172 | * @size: Pointer to where to store the message's size | ||
173 | * | ||
174 | * Returns the pointer to the message data. | ||
175 | */ | ||
176 | const void *wimax_msg_data_len(struct sk_buff *msg, size_t *size) | ||
177 | { | ||
178 | struct nlmsghdr *nlh = (void *) msg->head; | ||
179 | struct nlattr *nla; | ||
180 | |||
181 | nla = nlmsg_find_attr(nlh, sizeof(struct genlmsghdr), | ||
182 | WIMAX_GNL_MSG_DATA); | ||
183 | if (nla == NULL) { | ||
184 | printk(KERN_ERR "Cannot find attribute WIMAX_GNL_MSG_DATA\n"); | ||
185 | return NULL; | ||
186 | } | ||
187 | *size = nla_len(nla); | ||
188 | return nla_data(nla); | ||
189 | } | ||
190 | EXPORT_SYMBOL_GPL(wimax_msg_data_len); | ||
191 | |||
192 | |||
193 | /** | ||
194 | * wimax_msg_data - Return a pointer to a message's payload | ||
195 | * | ||
196 | * @msg: Pointer to a message created with wimax_msg_alloc() | ||
197 | */ | ||
198 | const void *wimax_msg_data(struct sk_buff *msg) | ||
199 | { | ||
200 | struct nlmsghdr *nlh = (void *) msg->head; | ||
201 | struct nlattr *nla; | ||
202 | |||
203 | nla = nlmsg_find_attr(nlh, sizeof(struct genlmsghdr), | ||
204 | WIMAX_GNL_MSG_DATA); | ||
205 | if (nla == NULL) { | ||
206 | printk(KERN_ERR "Cannot find attribute WIMAX_GNL_MSG_DATA\n"); | ||
207 | return NULL; | ||
208 | } | ||
209 | return nla_data(nla); | ||
210 | } | ||
211 | EXPORT_SYMBOL_GPL(wimax_msg_data); | ||
212 | |||
213 | |||
214 | /** | ||
215 | * wimax_msg_len - Return a message's payload length | ||
216 | * | ||
217 | * @msg: Pointer to a message created with wimax_msg_alloc() | ||
218 | */ | ||
219 | ssize_t wimax_msg_len(struct sk_buff *msg) | ||
220 | { | ||
221 | struct nlmsghdr *nlh = (void *) msg->head; | ||
222 | struct nlattr *nla; | ||
223 | |||
224 | nla = nlmsg_find_attr(nlh, sizeof(struct genlmsghdr), | ||
225 | WIMAX_GNL_MSG_DATA); | ||
226 | if (nla == NULL) { | ||
227 | printk(KERN_ERR "Cannot find attribute WIMAX_GNL_MSG_DATA\n"); | ||
228 | return -EINVAL; | ||
229 | } | ||
230 | return nla_len(nla); | ||
231 | } | ||
232 | EXPORT_SYMBOL_GPL(wimax_msg_len); | ||
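Editor's note: an illustrative peek at a message built with wimax_msg_alloc(), using the accessors above; print_hex_dump_bytes() is the standard kernel dump helper, and the function name is hypothetical.

    #include <linux/kernel.h>
    #include <net/wimax.h>

    static void my_dump_msg(struct sk_buff *msg)
    {
            size_t size;
            const void *data = wimax_msg_data_len(msg, &size);

            if (data)       /* same as wimax_msg_data() + wimax_msg_len() */
                    print_hex_dump_bytes("wimax msg: ", DUMP_PREFIX_OFFSET,
                                         data, size);
    }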
233 | |||
234 | |||
235 | /** | ||
236 | * wimax_msg_send - Send a pre-allocated message to user space | ||
237 | * | ||
238 | * @wimax_dev: WiMAX device descriptor | ||
239 | * | ||
240 | * @skb: &struct sk_buff returned by wimax_msg_alloc(). Note the | ||
241 | * ownership of @skb is transferred to this function. | ||
242 | * | ||
243 | * Returns: 0 if ok, < 0 errno code on error | ||
244 | * | ||
245 | * Description: | ||
246 | * | ||
247 | * Sends a free-form message that was preallocated with | ||
248 | * wimax_msg_alloc() and filled up. | ||
249 | * | ||
250 | * Assumes that once you pass an skb to this function for sending, it | ||
251 | * owns it and will release it when done (on success). | ||
252 | * | ||
253 | * IMPORTANT: | ||
254 | * | ||
255 | * Don't use skb_push()/skb_pull()/skb_reserve() on the skb, as | ||
256 | * wimax_msg_send() depends on skb->data being placed at the | ||
257 | * beginning of the user message. | ||
258 | */ | ||
259 | int wimax_msg_send(struct wimax_dev *wimax_dev, struct sk_buff *skb) | ||
260 | { | ||
261 | int result; | ||
262 | struct device *dev = wimax_dev->net_dev->dev.parent; | ||
263 | void *msg = skb->data; | ||
264 | size_t size = skb->len; | ||
265 | might_sleep(); | ||
266 | |||
267 | d_printf(1, dev, "CTX: wimax msg, %zu bytes\n", size); | ||
268 | d_dump(2, dev, msg, size); | ||
269 | result = genlmsg_multicast(skb, 0, wimax_gnl_mcg.id, GFP_KERNEL); | ||
270 | d_printf(1, dev, "CTX: genl multicast result %d\n", result); | ||
271 | if (result == -ESRCH) /* Nobody connected, ignore it */ | ||
272 | result = 0; /* btw, the skb is freed already */ | ||
273 | return result; | ||
274 | } | ||
275 | EXPORT_SYMBOL_GPL(wimax_msg_send); | ||
276 | |||
277 | |||
278 | /** | ||
279 | * wimax_msg - Send a message to user space | ||
280 | * | ||
281 | * @wimax_dev: WiMAX device descriptor (properly referenced) | ||
282 | * @pipe_name: "named pipe" the message will be sent to | ||
283 | * @buf: pointer to the message to send. | ||
284 | * @size: size of the buffer pointed to by @buf (in bytes). | ||
285 | * @gfp_flags: flags for memory allocation. | ||
286 | * | ||
287 | * Returns: %0 if ok, negative errno code on error. | ||
288 | * | ||
289 | * Description: | ||
290 | * | ||
291 | * Sends a free-form message to user space on the device @wimax_dev. | ||
292 | * | ||
293 | * NOTES: | ||
294 | * | ||
295 | * The skb allocated internally to carry @buf is owned by this code, | ||
296 | * which will release it when done (unless an error is returned). | ||
297 | */ | ||
298 | int wimax_msg(struct wimax_dev *wimax_dev, const char *pipe_name, | ||
299 | const void *buf, size_t size, gfp_t gfp_flags) | ||
300 | { | ||
301 | int result = -ENOMEM; | ||
302 | struct sk_buff *skb; | ||
303 | |||
304 | skb = wimax_msg_alloc(wimax_dev, pipe_name, buf, size, gfp_flags); | ||
305 | if (skb == NULL) | ||
306 | goto error_msg_new; | ||
307 | result = wimax_msg_send(wimax_dev, skb); | ||
308 | error_msg_new: | ||
309 | return result; | ||
310 | } | ||
311 | EXPORT_SYMBOL_GPL(wimax_msg); | ||
312 | |||
313 | |||
314 | static const | ||
315 | struct nla_policy wimax_gnl_msg_policy[WIMAX_GNL_ATTR_MAX + 1] = { | ||
316 | [WIMAX_GNL_MSG_IFIDX] = { | ||
317 | .type = NLA_U32, | ||
318 | }, | ||
319 | [WIMAX_GNL_MSG_DATA] = { | ||
320 | .type = NLA_UNSPEC, /* libnl doesn't grok BINARY yet */ | ||
321 | }, | ||
322 | }; | ||
323 | |||
324 | |||
325 | /* | ||
326 | * Relays a message from user space to the driver | ||
327 | * | ||
328 | * The skb is passed to the driver-specific function with the netlink | ||
329 | * and generic netlink headers already stripped. | ||
330 | * | ||
331 | * This call will block while handling/relaying the message. | ||
332 | */ | ||
333 | static | ||
334 | int wimax_gnl_doit_msg_from_user(struct sk_buff *skb, struct genl_info *info) | ||
335 | { | ||
336 | int result, ifindex; | ||
337 | struct wimax_dev *wimax_dev; | ||
338 | struct device *dev; | ||
339 | struct nlmsghdr *nlh = info->nlhdr; | ||
340 | char *pipe_name; | ||
341 | void *msg_buf; | ||
342 | size_t msg_len; | ||
343 | |||
344 | might_sleep(); | ||
345 | d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info); | ||
346 | result = -ENODEV; | ||
347 | if (info->attrs[WIMAX_GNL_MSG_IFIDX] == NULL) { | ||
348 | printk(KERN_ERR "WIMAX_GNL_MSG_FROM_USER: can't find IFIDX " | ||
349 | "attribute\n"); | ||
350 | goto error_no_wimax_dev; | ||
351 | } | ||
352 | ifindex = nla_get_u32(info->attrs[WIMAX_GNL_MSG_IFIDX]); | ||
353 | wimax_dev = wimax_dev_get_by_genl_info(info, ifindex); | ||
354 | if (wimax_dev == NULL) | ||
355 | goto error_no_wimax_dev; | ||
356 | dev = wimax_dev_to_dev(wimax_dev); | ||
357 | |||
358 | /* Unpack arguments */ | ||
359 | result = -EINVAL; | ||
360 | if (info->attrs[WIMAX_GNL_MSG_DATA] == NULL) { | ||
361 | dev_err(dev, "WIMAX_GNL_MSG_FROM_USER: can't find MSG_DATA " | ||
362 | "attribute\n"); | ||
363 | goto error_no_data; | ||
364 | } | ||
365 | msg_buf = nla_data(info->attrs[WIMAX_GNL_MSG_DATA]); | ||
366 | msg_len = nla_len(info->attrs[WIMAX_GNL_MSG_DATA]); | ||
367 | |||
368 | if (info->attrs[WIMAX_GNL_MSG_PIPE_NAME] == NULL) | ||
369 | pipe_name = NULL; | ||
370 | else { | ||
371 | struct nlattr *attr = info->attrs[WIMAX_GNL_MSG_PIPE_NAME]; | ||
372 | size_t attr_len = nla_len(attr); | ||
373 | /* libnl-1.1 does not yet support NLA_NUL_STRING */ | ||
374 | result = -ENOMEM; | ||
375 | pipe_name = kstrndup(nla_data(attr), attr_len + 1, GFP_KERNEL); | ||
376 | if (pipe_name == NULL) | ||
377 | goto error_alloc; | ||
378 | pipe_name[attr_len] = 0; | ||
379 | } | ||
380 | mutex_lock(&wimax_dev->mutex); | ||
381 | result = wimax_dev_is_ready(wimax_dev); | ||
382 | if (result < 0) | ||
383 | goto error_not_ready; | ||
384 | result = -ENOSYS; | ||
385 | if (wimax_dev->op_msg_from_user == NULL) | ||
386 | goto error_noop; | ||
387 | |||
388 | d_printf(1, dev, | ||
389 | "CRX: nlmsghdr len %u type %u flags 0x%04x seq 0x%x pid %u\n", | ||
390 | nlh->nlmsg_len, nlh->nlmsg_type, nlh->nlmsg_flags, | ||
391 | nlh->nlmsg_seq, nlh->nlmsg_pid); | ||
392 | d_printf(1, dev, "CRX: wimax message %zu bytes\n", msg_len); | ||
393 | d_dump(2, dev, msg_buf, msg_len); | ||
394 | |||
395 | result = wimax_dev->op_msg_from_user(wimax_dev, pipe_name, | ||
396 | msg_buf, msg_len, info); | ||
397 | error_noop: | ||
398 | error_not_ready: | ||
399 | mutex_unlock(&wimax_dev->mutex); | ||
400 | error_alloc: | ||
401 | kfree(pipe_name); | ||
402 | error_no_data: | ||
403 | dev_put(wimax_dev->net_dev); | ||
404 | error_no_wimax_dev: | ||
405 | d_fnend(3, NULL, "(skb %p info %p) = %d\n", skb, info, result); | ||
406 | return result; | ||
407 | } | ||
408 | |||
409 | |||
410 | /* | ||
411 | * Generic Netlink glue | ||
412 | */ | ||
413 | |||
414 | struct genl_ops wimax_gnl_msg_from_user = { | ||
415 | .cmd = WIMAX_GNL_OP_MSG_FROM_USER, | ||
416 | .flags = GENL_ADMIN_PERM, | ||
417 | .policy = wimax_gnl_msg_policy, | ||
418 | .doit = wimax_gnl_doit_msg_from_user, | ||
419 | .dumpit = NULL, | ||
420 | }; | ||
421 | |||
diff --git a/net/wimax/op-reset.c b/net/wimax/op-reset.c new file mode 100644 index 000000000000..ca269178c4d4 --- /dev/null +++ b/net/wimax/op-reset.c | |||
@@ -0,0 +1,143 @@ | |||
1 | /* | ||
2 | * Linux WiMAX | ||
3 | * Implement and export a method for resetting a WiMAX device | ||
4 | * | ||
5 | * | ||
6 | * Copyright (C) 2008 Intel Corporation <linux-wimax@intel.com> | ||
7 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License version | ||
11 | * 2 as published by the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
21 | * 02110-1301, USA. | ||
22 | * | ||
23 | * | ||
24 | * This implements a simple synchronous call to reset a WiMAX device. | ||
25 | * | ||
26 | * Resets aim at being warm, keeping the device handles active; | ||
27 | * however, when that fails, it falls back to a cold reset (that will | ||
28 | * disconnect and reconnect the device). | ||
29 | */ | ||
30 | |||
31 | #include <net/wimax.h> | ||
32 | #include <net/genetlink.h> | ||
33 | #include <linux/wimax.h> | ||
34 | #include <linux/security.h> | ||
35 | #include "wimax-internal.h" | ||
36 | |||
37 | #define D_SUBMODULE op_reset | ||
38 | #include "debug-levels.h" | ||
39 | |||
40 | |||
41 | /** | ||
42 | * wimax_reset - Reset a WiMAX device | ||
43 | * | ||
44 | * @wimax_dev: WiMAX device descriptor | ||
45 | * | ||
46 | * Returns: | ||
47 | * | ||
48 | * %0 if ok and a warm reset was done (the device still exists in | ||
49 | * the system). | ||
50 | * | ||
51 | * -%ENODEV if a cold/bus reset had to be done (device has | ||
52 | * disconnected and reconnected, so current handle is not valid | ||
53 | * any more). | ||
54 | * | ||
55 | * -%EINVAL if the device is not even registered. | ||
56 | * | ||
57 | * Any other negative error code shall be considered as | ||
58 | * non-recoverable. | ||
59 | * | ||
60 | * Description: | ||
61 | * | ||
62 | * Called when wanting to reset the device for any reason. Device is | ||
63 | * taken back to power on status. | ||
64 | * | ||
65 | * This call blocks; on successful return, the device has completed the | ||
66 | * reset process and is ready to operate. | ||
67 | */ | ||
68 | int wimax_reset(struct wimax_dev *wimax_dev) | ||
69 | { | ||
70 | int result = -EINVAL; | ||
71 | struct device *dev = wimax_dev_to_dev(wimax_dev); | ||
72 | enum wimax_st state; | ||
73 | |||
74 | might_sleep(); | ||
75 | d_fnstart(3, dev, "(wimax_dev %p)\n", wimax_dev); | ||
76 | mutex_lock(&wimax_dev->mutex); | ||
77 | dev_hold(wimax_dev->net_dev); | ||
78 | state = wimax_dev->state; | ||
79 | mutex_unlock(&wimax_dev->mutex); | ||
80 | |||
81 | if (state >= WIMAX_ST_DOWN) { | ||
82 | mutex_lock(&wimax_dev->mutex_reset); | ||
83 | result = wimax_dev->op_reset(wimax_dev); | ||
84 | mutex_unlock(&wimax_dev->mutex_reset); | ||
85 | } | ||
86 | dev_put(wimax_dev->net_dev); | ||
87 | |||
88 | d_fnend(3, dev, "(wimax_dev %p) = %d\n", wimax_dev, result); | ||
89 | return result; | ||
90 | } | ||
91 | EXPORT_SYMBOL(wimax_reset); | ||
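Editor's note: a sketch of how a driver might consume the return codes documented above; the helper name is hypothetical.

    #include <linux/errno.h>
    #include <net/wimax.h>

    static int my_recover_device(struct wimax_dev *wimax_dev)
    {
            int result = wimax_reset(wimax_dev);

            if (result == -ENODEV)  /* cold reset: device disconnected,  */
                    return 0;       /* it will reconnect and re-probe    */
            return result;          /* 0: warm reset done; < 0: fatal    */
    }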
92 | |||
93 | |||
94 | static const | ||
95 | struct nla_policy wimax_gnl_reset_policy[WIMAX_GNL_ATTR_MAX + 1] = { | ||
96 | [WIMAX_GNL_RESET_IFIDX] = { | ||
97 | .type = NLA_U32, | ||
98 | }, | ||
99 | }; | ||
100 | |||
101 | |||
102 | /* | ||
103 | * Exporting to user space over generic netlink | ||
104 | * | ||
105 | * Parse the reset command from user space, return error code. | ||
106 | * | ||
107 | * Only one attribute: the interface index of the device to reset. | ||
108 | */ | ||
109 | static | ||
110 | int wimax_gnl_doit_reset(struct sk_buff *skb, struct genl_info *info) | ||
111 | { | ||
112 | int result, ifindex; | ||
113 | struct wimax_dev *wimax_dev; | ||
114 | struct device *dev; | ||
115 | |||
116 | d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info); | ||
117 | result = -ENODEV; | ||
118 | if (info->attrs[WIMAX_GNL_RESET_IFIDX] == NULL) { | ||
119 | printk(KERN_ERR "WIMAX_GNL_OP_RESET: can't find IFIDX " | ||
120 | "attribute\n"); | ||
121 | goto error_no_wimax_dev; | ||
122 | } | ||
123 | ifindex = nla_get_u32(info->attrs[WIMAX_GNL_RESET_IFIDX]); | ||
124 | wimax_dev = wimax_dev_get_by_genl_info(info, ifindex); | ||
125 | if (wimax_dev == NULL) | ||
126 | goto error_no_wimax_dev; | ||
127 | dev = wimax_dev_to_dev(wimax_dev); | ||
128 | /* Execute the operation and send the result back to user space */ | ||
129 | result = wimax_reset(wimax_dev); | ||
130 | dev_put(wimax_dev->net_dev); | ||
131 | error_no_wimax_dev: | ||
132 | d_fnend(3, NULL, "(skb %p info %p) = %d\n", skb, info, result); | ||
133 | return result; | ||
134 | } | ||
135 | |||
136 | |||
137 | struct genl_ops wimax_gnl_reset = { | ||
138 | .cmd = WIMAX_GNL_OP_RESET, | ||
139 | .flags = GENL_ADMIN_PERM, | ||
140 | .policy = wimax_gnl_reset_policy, | ||
141 | .doit = wimax_gnl_doit_reset, | ||
142 | .dumpit = NULL, | ||
143 | }; | ||
diff --git a/net/wimax/op-rfkill.c b/net/wimax/op-rfkill.c new file mode 100644 index 000000000000..2b75aee04217 --- /dev/null +++ b/net/wimax/op-rfkill.c | |||
@@ -0,0 +1,532 @@ | |||
1 | /* | ||
2 | * Linux WiMAX | ||
3 | * RF-kill framework integration | ||
4 | * | ||
5 | * | ||
6 | * Copyright (C) 2008 Intel Corporation <linux-wimax@intel.com> | ||
7 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License version | ||
11 | * 2 as published by the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
21 | * 02110-1301, USA. | ||
22 | * | ||
23 | * | ||
24 | * This integrates into the Linux Kernel rfkill subsystem so that the | ||
25 | * drivers just have to do the bare minimal work, which is providing a | ||
26 | * method to set the software RF-Kill switch and to report changes in | ||
27 | * the software and hardware switch status. | ||
28 | * | ||
29 | * A non-polled generic rfkill device is embedded into the WiMAX | ||
30 | * subsystem's representation of a device. | ||
31 | * | ||
32 | * FIXME: Need polled support? use a timer or add the implementation | ||
33 | * to the stack. | ||
34 | * | ||
35 | * All device drivers have to do is after wimax_dev_init(), call | ||
36 | * wimax_report_rfkill_hw() and wimax_report_rfkill_sw() to update | ||
37 | * initial state and then every time it changes. See wimax.h:struct | ||
38 | * wimax_dev for more information. | ||
39 | * | ||
40 | * ROADMAP | ||
41 | * | ||
42 | * wimax_gnl_doit_rfkill() User space calling wimax_rfkill() | ||
43 | * wimax_rfkill() Kernel calling wimax_rfkill() | ||
44 | * __wimax_rf_toggle_radio() | ||
45 | * | ||
46 | * wimax_rfkill_toggle_radio() RF-Kill subsystem calling | ||
47 | * __wimax_rf_toggle_radio() | ||
48 | * | ||
49 | * __wimax_rf_toggle_radio() | ||
50 | * wimax_dev->op_rfkill_sw_toggle() Driver backend | ||
51 | * __wimax_state_change() | ||
52 | * | ||
53 | * wimax_report_rfkill_sw() Driver reports state change | ||
54 | * __wimax_state_change() | ||
55 | * | ||
56 | * wimax_report_rfkill_hw() Driver reports state change | ||
57 | * __wimax_state_change() | ||
58 | * | ||
59 | * wimax_rfkill_add() Initialize/shutdown rfkill support | ||
60 | * wimax_rfkill_rm() [called by wimax_dev_add/rm()] | ||
61 | */ | ||
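Editor's note: a driver-side sketch of the initial reporting step the header describes ("update initial state and then every time it changes"); only the two report calls come from this file, the helper name and blocked flags are made up.

    #include <linux/types.h>
    #include <net/wimax.h>

    static void my_report_initial_rf(struct wimax_dev *wimax_dev,
                                     bool hw_blocked, bool sw_blocked)
    {
            /* Feed the current switch positions to the stack; call the
             * same functions again (e.g. from the ISR) on every change. */
            wimax_report_rfkill_hw(wimax_dev,
                                   hw_blocked ? WIMAX_RF_OFF : WIMAX_RF_ON);
            wimax_report_rfkill_sw(wimax_dev,
                                   sw_blocked ? WIMAX_RF_OFF : WIMAX_RF_ON);
    }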
62 | |||
63 | #include <net/wimax.h> | ||
64 | #include <net/genetlink.h> | ||
65 | #include <linux/wimax.h> | ||
66 | #include <linux/security.h> | ||
67 | #include <linux/rfkill.h> | ||
68 | #include <linux/input.h> | ||
69 | #include "wimax-internal.h" | ||
70 | |||
71 | #define D_SUBMODULE op_rfkill | ||
72 | #include "debug-levels.h" | ||
73 | |||
74 | #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE) | ||
75 | |||
76 | |||
77 | /** | ||
78 | * wimax_report_rfkill_hw - Reports changes in the hardware RF switch | ||
79 | * | ||
80 | * @wimax_dev: WiMAX device descriptor | ||
81 | * | ||
82 | * @state: New state of the RF Kill switch. %WIMAX_RF_ON radio on, | ||
83 | * %WIMAX_RF_OFF radio off. | ||
84 | * | ||
85 | * When the device detects a change in the state of the hardware RF | ||
86 | * switch, it must call this function to let the WiMAX kernel stack | ||
87 | * know that the state has changed so it can be properly propagated. | ||
88 | * | ||
89 | * The WiMAX stack caches the state (so the driver doesn't need to). | ||
90 | * Also, as the change is propagated it will come back as a request to | ||
91 | * change the software state to mirror the hardware state. | ||
92 | * | ||
93 | * If the device doesn't have a hardware kill switch, just report | ||
94 | * it on initialization as always on (%WIMAX_RF_ON, radio on). | ||
95 | */ | ||
96 | void wimax_report_rfkill_hw(struct wimax_dev *wimax_dev, | ||
97 | enum wimax_rf_state state) | ||
98 | { | ||
99 | int result; | ||
100 | struct device *dev = wimax_dev_to_dev(wimax_dev); | ||
101 | enum wimax_st wimax_state; | ||
102 | enum rfkill_state rfkill_state; | ||
103 | |||
104 | d_fnstart(3, dev, "(wimax_dev %p state %u)\n", wimax_dev, state); | ||
105 | BUG_ON(state == WIMAX_RF_QUERY); | ||
106 | BUG_ON(state != WIMAX_RF_ON && state != WIMAX_RF_OFF); | ||
107 | |||
108 | mutex_lock(&wimax_dev->mutex); | ||
109 | result = wimax_dev_is_ready(wimax_dev); | ||
110 | if (result < 0) | ||
111 | goto error_not_ready; | ||
112 | |||
113 | if (state != wimax_dev->rf_hw) { | ||
114 | wimax_dev->rf_hw = state; | ||
115 | rfkill_state = state == WIMAX_RF_ON ? | ||
116 | RFKILL_STATE_OFF : RFKILL_STATE_ON; | ||
117 | if (wimax_dev->rf_hw == WIMAX_RF_ON | ||
118 | && wimax_dev->rf_sw == WIMAX_RF_ON) | ||
119 | wimax_state = WIMAX_ST_READY; | ||
120 | else | ||
121 | wimax_state = WIMAX_ST_RADIO_OFF; | ||
122 | __wimax_state_change(wimax_dev, wimax_state); | ||
123 | input_report_key(wimax_dev->rfkill_input, KEY_WIMAX, | ||
124 | rfkill_state); | ||
125 | } | ||
126 | error_not_ready: | ||
127 | mutex_unlock(&wimax_dev->mutex); | ||
128 | d_fnend(3, dev, "(wimax_dev %p state %u) = void [%d]\n", | ||
129 | wimax_dev, state, result); | ||
130 | } | ||
131 | EXPORT_SYMBOL_GPL(wimax_report_rfkill_hw); | ||
132 | |||
133 | |||
134 | /** | ||
135 | * wimax_report_rfkill_sw - Reports changes in the software RF switch | ||
136 | * | ||
137 | * @wimax_dev: WiMAX device descriptor | ||
138 | * | ||
139 | * @state: New state of the RF kill switch. %WIMAX_RF_ON radio on, | ||
140 | * %WIMAX_RF_OFF radio off. | ||
141 | * | ||
142 | * Reports changes in the software RF switch state to the WiMAX | ||
143 | * stack. | ||
144 | * | ||
145 | * The main use is during initialization, so the driver can query the | ||
146 | * device for its current software radio kill switch state and feed it | ||
147 | * to the system. | ||
148 | * | ||
149 | * In principle, the device does not change the software state by | ||
150 | * itself; in practice it can happen, as the device might decide to | ||
151 | * switch the radio off (in software) for different reasons. | ||
152 | */ | ||
153 | void wimax_report_rfkill_sw(struct wimax_dev *wimax_dev, | ||
154 | enum wimax_rf_state state) | ||
155 | { | ||
156 | int result; | ||
157 | struct device *dev = wimax_dev_to_dev(wimax_dev); | ||
158 | enum wimax_st wimax_state; | ||
159 | |||
160 | d_fnstart(3, dev, "(wimax_dev %p state %u)\n", wimax_dev, state); | ||
161 | BUG_ON(state == WIMAX_RF_QUERY); | ||
162 | BUG_ON(state != WIMAX_RF_ON && state != WIMAX_RF_OFF); | ||
163 | |||
164 | mutex_lock(&wimax_dev->mutex); | ||
165 | result = wimax_dev_is_ready(wimax_dev); | ||
166 | if (result < 0) | ||
167 | goto error_not_ready; | ||
168 | |||
169 | if (state != wimax_dev->rf_sw) { | ||
170 | wimax_dev->rf_sw = state; | ||
171 | if (wimax_dev->rf_hw == WIMAX_RF_ON | ||
172 | && wimax_dev->rf_sw == WIMAX_RF_ON) | ||
173 | wimax_state = WIMAX_ST_READY; | ||
174 | else | ||
175 | wimax_state = WIMAX_ST_RADIO_OFF; | ||
176 | __wimax_state_change(wimax_dev, wimax_state); | ||
177 | } | ||
178 | error_not_ready: | ||
179 | mutex_unlock(&wimax_dev->mutex); | ||
180 | d_fnend(3, dev, "(wimax_dev %p state %u) = void [%d]\n", | ||
181 | wimax_dev, state, result); | ||
182 | } | ||
183 | EXPORT_SYMBOL_GPL(wimax_report_rfkill_sw); | ||
184 | |||
185 | |||
186 | /* | ||
187 | * Callback for the RF Kill toggle operation | ||
188 | * | ||
189 | * This function is called by: | ||
190 | * | ||
191 | * - The rfkill subsystem when the RF-Kill key is pressed in the | ||
192 | * hardware and the driver notifies through | ||
193 | * wimax_report_rfkill_hw(). The rfkill subsystem ends up calling back | ||
194 | * here so the software RF Kill switch state is changed to reflect | ||
195 | * the hardware switch state. | ||
196 | * | ||
197 | * - When the user sets the state through sysfs' rfkill/state file | ||
198 | * | ||
199 | * - When the user calls wimax_rfkill(). | ||
200 | * | ||
201 | * This call blocks! | ||
202 | * | ||
203 | * WARNING! When we call rfkill_unregister(), this will be called with | ||
204 | * state 0! | ||
205 | * | ||
206 | * WARNING: wimax_dev must be locked | ||
207 | */ | ||
208 | static | ||
209 | int __wimax_rf_toggle_radio(struct wimax_dev *wimax_dev, | ||
210 | enum wimax_rf_state state) | ||
211 | { | ||
212 | int result = 0; | ||
213 | struct device *dev = wimax_dev_to_dev(wimax_dev); | ||
214 | enum wimax_st wimax_state; | ||
215 | |||
216 | might_sleep(); | ||
217 | d_fnstart(3, dev, "(wimax_dev %p state %u)\n", wimax_dev, state); | ||
218 | if (wimax_dev->rf_sw == state) | ||
219 | goto out_no_change; | ||
220 | if (wimax_dev->op_rfkill_sw_toggle != NULL) | ||
221 | result = wimax_dev->op_rfkill_sw_toggle(wimax_dev, state); | ||
222 | else if (state == WIMAX_RF_OFF) /* No op? can't turn off */ | ||
223 | result = -ENXIO; | ||
224 | else /* No op? can turn on */ | ||
225 | result = 0; /* should never happen tho */ | ||
226 | if (result >= 0) { | ||
227 | result = 0; | ||
228 | wimax_dev->rf_sw = state; | ||
229 | wimax_state = state == WIMAX_RF_ON ? | ||
230 | WIMAX_ST_READY : WIMAX_ST_RADIO_OFF; | ||
231 | __wimax_state_change(wimax_dev, wimax_state); | ||
232 | } | ||
233 | out_no_change: | ||
234 | d_fnend(3, dev, "(wimax_dev %p state %u) = %d\n", | ||
235 | wimax_dev, state, result); | ||
236 | return result; | ||
237 | } | ||
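Editor's note: a sketch of the driver hook invoked above, matching the call __wimax_rf_toggle_radio() makes and its interpretation of the return value; the hook body is a placeholder for whatever command the hardware needs. A driver would set wimax_dev->op_rfkill_sw_toggle to this hook before registering the device.

    #include <net/wimax.h>

    static int my_op_rfkill_sw_toggle(struct wimax_dev *wimax_dev,
                                      enum wimax_rf_state state)
    {
            /* Program the device radio here (bus command, firmware call,
             * etc.).  A return value >= 0 tells __wimax_rf_toggle_radio()
             * the switch took effect; a negative errno aborts the change. */
            return 0;
    }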
238 | |||
239 | |||
240 | /* | ||
241 | * Translate from rfkill state to wimax state | ||
242 | * | ||
243 | * NOTE: Special state handling rules here | ||
244 | * | ||
245 | * Just pretend the call didn't happen if we are in a state where | ||
246 | * we know for sure it cannot be handled (WIMAX_ST_DOWN or | ||
247 | * __WIMAX_ST_QUIESCING). The rfkill subsystem needs this, as it runs | ||
248 | * this path when registering and unregistering. | ||
249 | * | ||
250 | * NOTE: This call will block until the operation is completed. | ||
251 | */ | ||
252 | static | ||
253 | int wimax_rfkill_toggle_radio(void *data, enum rfkill_state state) | ||
254 | { | ||
255 | int result; | ||
256 | struct wimax_dev *wimax_dev = data; | ||
257 | struct device *dev = wimax_dev_to_dev(wimax_dev); | ||
258 | enum wimax_rf_state rf_state; | ||
259 | |||
260 | d_fnstart(3, dev, "(wimax_dev %p state %u)\n", wimax_dev, state); | ||
261 | switch (state) { | ||
262 | case RFKILL_STATE_ON: | ||
263 | rf_state = WIMAX_RF_OFF; | ||
264 | break; | ||
265 | case RFKILL_STATE_OFF: | ||
266 | rf_state = WIMAX_RF_ON; | ||
267 | break; | ||
268 | default: | ||
269 | BUG(); | ||
270 | } | ||
271 | mutex_lock(&wimax_dev->mutex); | ||
272 | if (wimax_dev->state <= __WIMAX_ST_QUIESCING) | ||
273 | result = 0; /* just pretend it didn't happen */ | ||
274 | else | ||
275 | result = __wimax_rf_toggle_radio(wimax_dev, rf_state); | ||
276 | mutex_unlock(&wimax_dev->mutex); | ||
277 | d_fnend(3, dev, "(wimax_dev %p state %u) = %d\n", | ||
278 | wimax_dev, state, result); | ||
279 | return result; | ||
280 | } | ||
281 | |||
282 | |||
283 | /** | ||
284 | * wimax_rfkill - Set the software RF switch state for a WiMAX device | ||
285 | * | ||
286 | * @wimax_dev: WiMAX device descriptor | ||
287 | * | ||
288 | * @state: New RF state. | ||
289 | * | ||
290 | * Returns: | ||
291 | * | ||
292 | * >= 0 toggle state if ok, < 0 errno code on error. The toggle state | ||
293 | * is returned as a bitmap, bit 0 being the hardware RF state, bit 1 | ||
294 | * the software RF state. | ||
295 | * | ||
296 | * A bit value of 0 means that switch is disabled (%WIMAX_RF_ON, radio | ||
297 | * on); 1 means it is enabled (%WIMAX_RF_OFF, radio off). | ||
298 | * | ||
299 | * Description: | ||
300 | * | ||
301 | * Called when the user wants to request the WiMAX radio to be | ||
302 | * switched on (%WIMAX_RF_ON) or off (%WIMAX_RF_OFF). With | ||
303 | * %WIMAX_RF_QUERY, just the current state is returned. | ||
304 | * | ||
305 | * NOTE: | ||
306 | * | ||
307 | * This call will block until the operation is complete. | ||
308 | */ | ||
309 | int wimax_rfkill(struct wimax_dev *wimax_dev, enum wimax_rf_state state) | ||
310 | { | ||
311 | int result; | ||
312 | struct device *dev = wimax_dev_to_dev(wimax_dev); | ||
313 | |||
314 | d_fnstart(3, dev, "(wimax_dev %p state %u)\n", wimax_dev, state); | ||
315 | mutex_lock(&wimax_dev->mutex); | ||
316 | result = wimax_dev_is_ready(wimax_dev); | ||
317 | if (result < 0) | ||
318 | goto error_not_ready; | ||
319 | switch (state) { | ||
320 | case WIMAX_RF_ON: | ||
321 | case WIMAX_RF_OFF: | ||
322 | result = __wimax_rf_toggle_radio(wimax_dev, state); | ||
323 | if (result < 0) | ||
324 | goto error; | ||
325 | break; | ||
326 | case WIMAX_RF_QUERY: | ||
327 | break; | ||
328 | default: | ||
329 | result = -EINVAL; | ||
330 | goto error; | ||
331 | } | ||
332 | result = wimax_dev->rf_sw << 1 | wimax_dev->rf_hw; | ||
333 | error: | ||
334 | error_not_ready: | ||
335 | mutex_unlock(&wimax_dev->mutex); | ||
336 | d_fnend(3, dev, "(wimax_dev %p state %u) = %d\n", | ||
337 | wimax_dev, state, result); | ||
338 | return result; | ||
339 | } | ||
340 | EXPORT_SYMBOL(wimax_rfkill); | ||
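Editor's note: a sketch decoding the toggle-state bitmap documented above via a query call; the helper name is hypothetical.

    #include <linux/kernel.h>
    #include <net/wimax.h>

    static void my_log_rf_state(struct wimax_dev *wimax_dev)
    {
            int bm = wimax_rfkill(wimax_dev, WIMAX_RF_QUERY);

            if (bm < 0)
                    return;
            pr_info("hw kill %s, sw kill %s\n",
                    bm & 0x1 ? "enabled (radio off)" : "disabled (radio on)",
                    bm & 0x2 ? "enabled (radio off)" : "disabled (radio on)");
    }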
341 | |||
342 | |||
343 | /* | ||
344 | * Register a new WiMAX device's RF Kill support | ||
345 | * | ||
346 | * WARNING: wimax_dev->mutex must be unlocked | ||
347 | */ | ||
348 | int wimax_rfkill_add(struct wimax_dev *wimax_dev) | ||
349 | { | ||
350 | int result; | ||
351 | struct rfkill *rfkill; | ||
352 | struct input_dev *input_dev; | ||
353 | struct device *dev = wimax_dev_to_dev(wimax_dev); | ||
354 | |||
355 | d_fnstart(3, dev, "(wimax_dev %p)\n", wimax_dev); | ||
356 | /* Initialize RF Kill */ | ||
357 | result = -ENOMEM; | ||
358 | rfkill = rfkill_allocate(dev, RFKILL_TYPE_WIMAX); | ||
359 | if (rfkill == NULL) | ||
360 | goto error_rfkill_allocate; | ||
361 | wimax_dev->rfkill = rfkill; | ||
362 | |||
363 | rfkill->name = wimax_dev->name; | ||
364 | rfkill->state = RFKILL_STATE_OFF; | ||
365 | rfkill->data = wimax_dev; | ||
366 | rfkill->toggle_radio = wimax_rfkill_toggle_radio; | ||
367 | rfkill->user_claim_unsupported = 1; | ||
368 | |||
369 | /* Initialize the input device for the hw key */ | ||
370 | input_dev = input_allocate_device(); | ||
371 | if (input_dev == NULL) | ||
372 | goto error_input_allocate; | ||
373 | wimax_dev->rfkill_input = input_dev; | ||
374 | d_printf(1, dev, "rfkill %p input %p\n", rfkill, input_dev); | ||
375 | |||
376 | input_dev->name = wimax_dev->name; | ||
377 | /* FIXME: get a real device bus ID and stuff? do we care? */ | ||
378 | input_dev->id.bustype = BUS_HOST; | ||
379 | input_dev->id.vendor = 0xffff; | ||
380 | input_dev->evbit[0] = BIT(EV_KEY); | ||
381 | set_bit(KEY_WIMAX, input_dev->keybit); | ||
382 | |||
383 | /* Register both */ | ||
384 | result = input_register_device(wimax_dev->rfkill_input); | ||
385 | if (result < 0) | ||
386 | goto error_input_register; | ||
387 | result = rfkill_register(wimax_dev->rfkill); | ||
388 | if (result < 0) | ||
389 | goto error_rfkill_register; | ||
390 | |||
391 | /* If there is no SW toggle op, SW RFKill is always on */ | ||
392 | if (wimax_dev->op_rfkill_sw_toggle == NULL) | ||
393 | wimax_dev->rf_sw = WIMAX_RF_ON; | ||
394 | |||
395 | d_fnend(3, dev, "(wimax_dev %p) = 0\n", wimax_dev); | ||
396 | return 0; | ||
397 | |||
398 | /* if rfkill_register() succeeds, can't use rfkill_free() any | ||
399 | * more, only rfkill_unregister() [it owns the refcount]; with | ||
400 | * the input device we have the same issue--hence the if. */ | ||
401 | error_rfkill_register: | ||
402 | input_unregister_device(wimax_dev->rfkill_input); | ||
403 | wimax_dev->rfkill_input = NULL; | ||
404 | error_input_register: | ||
405 | if (wimax_dev->rfkill_input) | ||
406 | input_free_device(wimax_dev->rfkill_input); | ||
407 | error_input_allocate: | ||
408 | rfkill_free(wimax_dev->rfkill); | ||
409 | error_rfkill_allocate: | ||
410 | d_fnend(3, dev, "(wimax_dev %p) = %d\n", wimax_dev, result); | ||
411 | return result; | ||
412 | } | ||
413 | |||
414 | |||
415 | /* | ||
416 | * Deregister a WiMAX device's RF Kill support | ||
417 | * | ||
418 | * Ick, we can't call rfkill_free() after rfkill_unregister()...oh | ||
419 | * well. | ||
420 | * | ||
421 | * WARNING: wimax_dev->mutex must be unlocked | ||
422 | */ | ||
423 | void wimax_rfkill_rm(struct wimax_dev *wimax_dev) | ||
424 | { | ||
425 | struct device *dev = wimax_dev_to_dev(wimax_dev); | ||
426 | d_fnstart(3, dev, "(wimax_dev %p)\n", wimax_dev); | ||
427 | rfkill_unregister(wimax_dev->rfkill); /* frees */ | ||
428 | input_unregister_device(wimax_dev->rfkill_input); | ||
429 | d_fnend(3, dev, "(wimax_dev %p)\n", wimax_dev); | ||
430 | } | ||
431 | |||
432 | |||
433 | #else /* #ifdef CONFIG_RFKILL */ | ||
434 | |||
435 | void wimax_report_rfkill_hw(struct wimax_dev *wimax_dev, | ||
436 | enum wimax_rf_state state) | ||
437 | { | ||
438 | } | ||
439 | EXPORT_SYMBOL_GPL(wimax_report_rfkill_hw); | ||
440 | |||
441 | void wimax_report_rfkill_sw(struct wimax_dev *wimax_dev, | ||
442 | enum wimax_rf_state state) | ||
443 | { | ||
444 | } | ||
445 | EXPORT_SYMBOL_GPL(wimax_report_rfkill_sw); | ||
446 | |||
447 | int wimax_rfkill(struct wimax_dev *wimax_dev, | ||
448 | enum wimax_rf_state state) | ||
449 | { | ||
450 | return WIMAX_RF_ON << 1 | WIMAX_RF_ON; | ||
451 | } | ||
452 | EXPORT_SYMBOL_GPL(wimax_rfkill); | ||
453 | |||
454 | int wimax_rfkill_add(struct wimax_dev *wimax_dev) | ||
455 | { | ||
456 | return 0; | ||
457 | } | ||
458 | |||
459 | void wimax_rfkill_rm(struct wimax_dev *wimax_dev) | ||
460 | { | ||
461 | } | ||
462 | |||
463 | #endif /* #ifdef CONFIG_RFKILL */ | ||
464 | |||
465 | |||
466 | /* | ||
467 | * Exporting to user space over generic netlink | ||
468 | * | ||
469 | * Parse the rfkill command from user space, return a combination | ||
470 | * value that describe the states of the different toggles. | ||
471 | * | ||
472 | * Two attributes: the interface index of the device and the new state | ||
473 | * requested (on, off or no change, just query). | ||
474 | */ | ||
475 | |||
476 | static const | ||
477 | struct nla_policy wimax_gnl_rfkill_policy[WIMAX_GNL_ATTR_MAX + 1] = { | ||
478 | [WIMAX_GNL_RFKILL_IFIDX] = { | ||
479 | .type = NLA_U32, | ||
480 | }, | ||
481 | [WIMAX_GNL_RFKILL_STATE] = { | ||
482 | .type = NLA_U32 /* enum wimax_rf_state */ | ||
483 | }, | ||
484 | }; | ||
485 | |||
486 | |||
487 | static | ||
488 | int wimax_gnl_doit_rfkill(struct sk_buff *skb, struct genl_info *info) | ||
489 | { | ||
490 | int result, ifindex; | ||
491 | struct wimax_dev *wimax_dev; | ||
492 | struct device *dev; | ||
493 | enum wimax_rf_state new_state; | ||
494 | |||
495 | d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info); | ||
496 | result = -ENODEV; | ||
497 | if (info->attrs[WIMAX_GNL_RFKILL_IFIDX] == NULL) { | ||
498 | printk(KERN_ERR "WIMAX_GNL_OP_RFKILL: can't find IFIDX " | ||
499 | "attribute\n"); | ||
500 | goto error_no_wimax_dev; | ||
501 | } | ||
502 | ifindex = nla_get_u32(info->attrs[WIMAX_GNL_RFKILL_IFIDX]); | ||
503 | wimax_dev = wimax_dev_get_by_genl_info(info, ifindex); | ||
504 | if (wimax_dev == NULL) | ||
505 | goto error_no_wimax_dev; | ||
506 | dev = wimax_dev_to_dev(wimax_dev); | ||
507 | result = -EINVAL; | ||
508 | if (info->attrs[WIMAX_GNL_RFKILL_STATE] == NULL) { | ||
509 | dev_err(dev, "WIMAX_GNL_RFKILL: can't find RFKILL_STATE " | ||
510 | "attribute\n"); | ||
511 | goto error_no_pid; | ||
512 | } | ||
513 | new_state = nla_get_u32(info->attrs[WIMAX_GNL_RFKILL_STATE]); | ||
514 | |||
515 | /* Execute the operation and send the result back to user space */ | ||
516 | result = wimax_rfkill(wimax_dev, new_state); | ||
517 | error_no_pid: | ||
518 | dev_put(wimax_dev->net_dev); | ||
519 | error_no_wimax_dev: | ||
520 | d_fnend(3, NULL, "(skb %p info %p) = %d\n", skb, info, result); | ||
521 | return result; | ||
522 | } | ||
523 | |||
524 | |||
525 | struct genl_ops wimax_gnl_rfkill = { | ||
526 | .cmd = WIMAX_GNL_OP_RFKILL, | ||
527 | .flags = GENL_ADMIN_PERM, | ||
528 | .policy = wimax_gnl_rfkill_policy, | ||
529 | .doit = wimax_gnl_doit_rfkill, | ||
530 | .dumpit = NULL, | ||
531 | }; | ||
532 | |||
diff --git a/net/wimax/stack.c b/net/wimax/stack.c new file mode 100644 index 000000000000..d4da92f8981a --- /dev/null +++ b/net/wimax/stack.c | |||
@@ -0,0 +1,599 @@ | |||
1 | /* | ||
2 | * Linux WiMAX | ||
3 | * Initialization, addition and removal of wimax devices | ||
4 | * | ||
5 | * | ||
6 | * Copyright (C) 2005-2006 Intel Corporation <linux-wimax@intel.com> | ||
7 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License version | ||
11 | * 2 as published by the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
21 | * 02110-1301, USA. | ||
22 | * | ||
23 | * | ||
24 | * This implements: | ||
25 | * | ||
26 | * - basic life cycle of 'struct wimax_dev' [wimax_dev_*()]; on | ||
27 | * addition/registration initialize all subfields and allocate | ||
28 | * generic netlink resources for user space communication. On | ||
29 | * removal/unregistration, undo all that. | ||
30 | * | ||
31 | * - device state machine [wimax_state_change()] and support to send | ||
32 | * reports to user space when the state changes | ||
33 | * [wimax_gnl_re_state_change*()]. | ||
34 | * | ||
35 | * See include/net/wimax.h for rationales and design. | ||
36 | * | ||
37 | * ROADMAP | ||
38 | * | ||
39 | * [__]wimax_state_change() Called by drivers to update device's state | ||
40 | * wimax_gnl_re_state_change_alloc() | ||
41 | * wimax_gnl_re_state_change_send() | ||
42 | * | ||
43 | * wimax_dev_init() Init a device | ||
44 | * wimax_dev_add() Register | ||
45 | * wimax_rfkill_add() | ||
46 | * wimax_gnl_add() Register all the generic netlink resources. | ||
47 | * wimax_id_table_add() | ||
48 | * wimax_dev_rm() Unregister | ||
49 | * wimax_id_table_rm() | ||
50 | * wimax_gnl_rm() | ||
51 | * wimax_rfkill_rm() | ||
52 | */ | ||
53 | #include <linux/device.h> | ||
54 | #include <net/genetlink.h> | ||
55 | #include <linux/netdevice.h> | ||
56 | #include <linux/wimax.h> | ||
57 | #include "wimax-internal.h" | ||
58 | |||
59 | |||
60 | #define D_SUBMODULE stack | ||
61 | #include "debug-levels.h" | ||
62 | |||
63 | /* | ||
64 | * Authoritative source for the RE_STATE_CHANGE attribute policy | ||
65 | * | ||
66 | * We don't really use it here, but /me likes to keep the definition | ||
67 | * close to where the data is generated. | ||
68 | */ | ||
69 | /* | ||
70 | static const | ||
71 | struct nla_policy wimax_gnl_re_status_change[WIMAX_GNL_ATTR_MAX + 1] = { | ||
72 | [WIMAX_GNL_STCH_STATE_OLD] = { .type = NLA_U8 }, | ||
73 | [WIMAX_GNL_STCH_STATE_NEW] = { .type = NLA_U8 }, | ||
74 | }; | ||
75 | */ | ||
76 | |||
77 | |||
78 | /* | ||
79 | * Allocate a Report State Change message | ||
80 | * | ||
81 | * @header: save it, you need it for _send() | ||
82 | * | ||
83 | * Creates and fills a basic state change message; different code | ||
84 | * paths can then add more attributes to the message as needed. | ||
85 | * | ||
86 | * Use wimax_gnl_re_state_change_send() to send the returned skb. | ||
87 | * | ||
88 | * Returns: skb with the genl message if ok, IS_ERR() ptr on error | ||
89 | * with an errno code. | ||
90 | */ | ||
91 | static | ||
92 | struct sk_buff *wimax_gnl_re_state_change_alloc( | ||
93 | struct wimax_dev *wimax_dev, | ||
94 | enum wimax_st new_state, enum wimax_st old_state, | ||
95 | void **header) | ||
96 | { | ||
97 | int result; | ||
98 | struct device *dev = wimax_dev_to_dev(wimax_dev); | ||
99 | void *data; | ||
100 | struct sk_buff *report_skb; | ||
101 | |||
102 | d_fnstart(3, dev, "(wimax_dev %p new_state %u old_state %u)\n", | ||
103 | wimax_dev, new_state, old_state); | ||
104 | result = -ENOMEM; | ||
105 | report_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); | ||
106 | if (report_skb == NULL) { | ||
107 | dev_err(dev, "RE_STCH: can't create message\n"); | ||
108 | goto error_new; | ||
109 | } | ||
110 | data = genlmsg_put(report_skb, 0, wimax_gnl_mcg.id, &wimax_gnl_family, | ||
111 | 0, WIMAX_GNL_RE_STATE_CHANGE); | ||
112 | if (data == NULL) { | ||
113 | dev_err(dev, "RE_STCH: can't put data into message\n"); | ||
114 | goto error_put; | ||
115 | } | ||
116 | *header = data; | ||
117 | |||
118 | result = nla_put_u8(report_skb, WIMAX_GNL_STCH_STATE_OLD, old_state); | ||
119 | if (result < 0) { | ||
120 | dev_err(dev, "RE_STCH: Error adding OLD attr: %d\n", result); | ||
121 | goto error_put; | ||
122 | } | ||
123 | result = nla_put_u8(report_skb, WIMAX_GNL_STCH_STATE_NEW, new_state); | ||
124 | if (result < 0) { | ||
125 | dev_err(dev, "RE_STCH: Error adding NEW attr: %d\n", result); | ||
126 | goto error_put; | ||
127 | } | ||
128 | result = nla_put_u32(report_skb, WIMAX_GNL_STCH_IFIDX, | ||
129 | wimax_dev->net_dev->ifindex); | ||
130 | if (result < 0) { | ||
131 | dev_err(dev, "RE_STCH: Error adding IFINDEX attribute\n"); | ||
132 | goto error_put; | ||
133 | } | ||
134 | d_fnend(3, dev, "(wimax_dev %p new_state %u old_state %u) = %p\n", | ||
135 | wimax_dev, new_state, old_state, report_skb); | ||
136 | return report_skb; | ||
137 | |||
138 | error_put: | ||
139 | nlmsg_free(report_skb); | ||
140 | error_new: | ||
141 | d_fnend(3, dev, "(wimax_dev %p new_state %u old_state %u) = %d\n", | ||
142 | wimax_dev, new_state, old_state, result); | ||
143 | return ERR_PTR(result); | ||
144 | } | ||
145 | |||
146 | |||
147 | /* | ||
148 | * Send a Report State Change message (as created with _alloc). | ||
149 | * | ||
150 | * @report_skb: as returned by wimax_gnl_re_state_change_alloc() | ||
151 | * @header: as returned by wimax_gnl_re_state_change_alloc() | ||
152 | * | ||
153 | * Returns: 0 if ok, < 0 errno code on error. | ||
154 | * | ||
155 | * If the message is NULL, pretend it didn't happen. | ||
156 | */ | ||
157 | static | ||
158 | int wimax_gnl_re_state_change_send( | ||
159 | struct wimax_dev *wimax_dev, struct sk_buff *report_skb, | ||
160 | void *header) | ||
161 | { | ||
162 | int result = 0; | ||
163 | struct device *dev = wimax_dev_to_dev(wimax_dev); | ||
164 | d_fnstart(3, dev, "(wimax_dev %p report_skb %p)\n", | ||
165 | wimax_dev, report_skb); | ||
166 | if (report_skb == NULL) | ||
167 | goto out; | ||
168 | genlmsg_end(report_skb, header); | ||
169 | result = genlmsg_multicast(report_skb, 0, wimax_gnl_mcg.id, GFP_KERNEL); | ||
170 | if (result == -ESRCH) /* Nobody connected, ignore it */ | ||
171 | result = 0; /* btw, the skb is freed already */ | ||
172 | if (result < 0) { | ||
173 | dev_err(dev, "RE_STCH: Error sending: %d\n", result); | ||
174 | nlmsg_free(report_skb); | ||
175 | } | ||
176 | out: | ||
177 | d_fnend(3, dev, "(wimax_dev %p report_skb %p) = %d\n", | ||
178 | wimax_dev, report_skb, result); | ||
179 | return result; | ||
180 | } | ||
181 | |||
182 | |||
183 | static | ||
184 | void __check_new_state(enum wimax_st old_state, enum wimax_st new_state, | ||
185 | unsigned allowed_states_bm) | ||
186 | { | ||
187 | if (WARN_ON(((1 << new_state) & allowed_states_bm) == 0)) { | ||
188 | printk(KERN_ERR "SW BUG! Forbidden state change %u -> %u\n", | ||
189 | old_state, new_state); | ||
190 | } | ||
191 | } | ||
192 | |||
193 | |||
194 | /* | ||
195 | * Set the current state of a WiMAX device [unlocked version of | ||
196 | * wimax_state_change(); the caller must hold wimax_dev->mutex]. | ||
197 | */ | ||
198 | void __wimax_state_change(struct wimax_dev *wimax_dev, enum wimax_st new_state) | ||
199 | { | ||
200 | struct device *dev = wimax_dev_to_dev(wimax_dev); | ||
201 | enum wimax_st old_state = wimax_dev->state; | ||
202 | struct sk_buff *stch_skb; | ||
203 | void *header; | ||
204 | |||
205 | d_fnstart(3, dev, "(wimax_dev %p new_state %u [old %u])\n", | ||
206 | wimax_dev, new_state, old_state); | ||
207 | |||
208 | if (WARN_ON(new_state >= __WIMAX_ST_INVALID)) { | ||
209 | dev_err(dev, "SW BUG: requesting invalid state %u\n", | ||
210 | new_state); | ||
211 | goto out; | ||
212 | } | ||
213 | if (old_state == new_state) | ||
214 | goto out; | ||
215 | header = NULL; /* gcc complains? can't grok why */ | ||
216 | stch_skb = wimax_gnl_re_state_change_alloc( | ||
217 | wimax_dev, new_state, old_state, &header); | ||
218 | |||
219 | /* Verify the state transition and do exit-from-state actions */ | ||
220 | switch (old_state) { | ||
221 | case __WIMAX_ST_NULL: | ||
222 | __check_new_state(old_state, new_state, | ||
223 | 1 << WIMAX_ST_DOWN); | ||
224 | break; | ||
225 | case WIMAX_ST_DOWN: | ||
226 | __check_new_state(old_state, new_state, | ||
227 | 1 << __WIMAX_ST_QUIESCING | ||
228 | | 1 << WIMAX_ST_UNINITIALIZED | ||
229 | | 1 << WIMAX_ST_RADIO_OFF); | ||
230 | break; | ||
231 | case __WIMAX_ST_QUIESCING: | ||
232 | __check_new_state(old_state, new_state, 1 << WIMAX_ST_DOWN); | ||
233 | break; | ||
234 | case WIMAX_ST_UNINITIALIZED: | ||
235 | __check_new_state(old_state, new_state, | ||
236 | 1 << __WIMAX_ST_QUIESCING | ||
237 | | 1 << WIMAX_ST_RADIO_OFF); | ||
238 | break; | ||
239 | case WIMAX_ST_RADIO_OFF: | ||
240 | __check_new_state(old_state, new_state, | ||
241 | 1 << __WIMAX_ST_QUIESCING | ||
242 | | 1 << WIMAX_ST_READY); | ||
243 | break; | ||
244 | case WIMAX_ST_READY: | ||
245 | __check_new_state(old_state, new_state, | ||
246 | 1 << __WIMAX_ST_QUIESCING | ||
247 | | 1 << WIMAX_ST_RADIO_OFF | ||
248 | | 1 << WIMAX_ST_SCANNING | ||
249 | | 1 << WIMAX_ST_CONNECTING | ||
250 | | 1 << WIMAX_ST_CONNECTED); | ||
251 | break; | ||
252 | case WIMAX_ST_SCANNING: | ||
253 | __check_new_state(old_state, new_state, | ||
254 | 1 << __WIMAX_ST_QUIESCING | ||
255 | | 1 << WIMAX_ST_RADIO_OFF | ||
256 | | 1 << WIMAX_ST_READY | ||
257 | | 1 << WIMAX_ST_CONNECTING | ||
258 | | 1 << WIMAX_ST_CONNECTED); | ||
259 | break; | ||
260 | case WIMAX_ST_CONNECTING: | ||
261 | __check_new_state(old_state, new_state, | ||
262 | 1 << __WIMAX_ST_QUIESCING | ||
263 | | 1 << WIMAX_ST_RADIO_OFF | ||
264 | | 1 << WIMAX_ST_READY | ||
265 | | 1 << WIMAX_ST_SCANNING | ||
266 | | 1 << WIMAX_ST_CONNECTED); | ||
267 | break; | ||
268 | case WIMAX_ST_CONNECTED: | ||
269 | __check_new_state(old_state, new_state, | ||
270 | 1 << __WIMAX_ST_QUIESCING | ||
271 | | 1 << WIMAX_ST_RADIO_OFF | ||
272 | | 1 << WIMAX_ST_READY); | ||
273 | netif_tx_disable(wimax_dev->net_dev); | ||
274 | netif_carrier_off(wimax_dev->net_dev); | ||
275 | break; | ||
276 | case __WIMAX_ST_INVALID: | ||
277 | default: | ||
278 | dev_err(dev, "SW BUG: wimax_dev %p is in unknown state %u\n", | ||
279 | wimax_dev, wimax_dev->state); | ||
280 | WARN_ON(1); | ||
281 | goto out; | ||
282 | } | ||
283 | |||
284 | /* Execute the actions of entry to the new state */ | ||
285 | switch (new_state) { | ||
286 | case __WIMAX_ST_NULL: | ||
287 | dev_err(dev, "SW BUG: wimax_dev %p entering NULL state " | ||
288 | "from %u\n", wimax_dev, wimax_dev->state); | ||
289 | WARN_ON(1); /* Nobody can enter this state */ | ||
290 | break; | ||
291 | case WIMAX_ST_DOWN: | ||
292 | break; | ||
293 | case __WIMAX_ST_QUIESCING: | ||
294 | break; | ||
295 | case WIMAX_ST_UNINITIALIZED: | ||
296 | break; | ||
297 | case WIMAX_ST_RADIO_OFF: | ||
298 | break; | ||
299 | case WIMAX_ST_READY: | ||
300 | break; | ||
301 | case WIMAX_ST_SCANNING: | ||
302 | break; | ||
303 | case WIMAX_ST_CONNECTING: | ||
304 | break; | ||
305 | case WIMAX_ST_CONNECTED: | ||
306 | netif_carrier_on(wimax_dev->net_dev); | ||
307 | netif_wake_queue(wimax_dev->net_dev); | ||
308 | break; | ||
309 | case __WIMAX_ST_INVALID: | ||
310 | default: | ||
311 | BUG(); | ||
312 | } | ||
313 | __wimax_state_set(wimax_dev, new_state); | ||
314 | if (stch_skb) | ||
315 | wimax_gnl_re_state_change_send(wimax_dev, stch_skb, header); | ||
316 | out: | ||
317 | d_fnend(3, dev, "(wimax_dev %p new_state %u [old %u]) = void\n", | ||
318 | wimax_dev, new_state, old_state); | ||
319 | return; | ||
320 | } | ||
321 | |||
322 | |||
323 | /** | ||
324 | * wimax_state_change - Set the current state of a WiMAX device | ||
325 | * | ||
326 | * @wimax_dev: WiMAX device descriptor (properly referenced) | ||
327 | * @new_state: New state to switch to | ||
328 | * | ||
329 | * This implements the state changes for the wimax devices. It will | ||
330 | * | ||
331 | * - verify that the state transition is legal (for now it'll just | ||
332 | * print a warning if not) according to the table in | ||
333 | * linux/wimax.h's documentation for 'enum wimax_st'. | ||
334 | * | ||
335 | * - perform the actions needed for leaving the current state and | ||
336 | * whichever are needed for entering the new state. | ||
337 | * | ||
338 | * - issue a report to user space indicating the new state (and an | ||
339 | * optional payload with information about the new state). | ||
340 | * | ||
341 | * NOTE: This function locks @wimax_dev itself; use __wimax_state_change() if the mutex is already held | ||
342 | */ | ||
343 | void wimax_state_change(struct wimax_dev *wimax_dev, enum wimax_st new_state) | ||
344 | { | ||
345 | mutex_lock(&wimax_dev->mutex); | ||
346 | __wimax_state_change(wimax_dev, new_state); | ||
347 | mutex_unlock(&wimax_dev->mutex); | ||
348 | return; | ||
349 | } | ||
350 | EXPORT_SYMBOL_GPL(wimax_state_change); | ||
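Editor's note: typical driver-side transitions as a sketch; the legality of READY <-> CONNECTED follows from the checks in __wimax_state_change() above, and the helper names are made up.

    #include <net/wimax.h>

    static void my_on_link_up(struct wimax_dev *wimax_dev)
    {
            /* wimax_state_change() locks wimax_dev->mutex itself */
            wimax_state_change(wimax_dev, WIMAX_ST_CONNECTED);
    }

    static void my_on_link_down(struct wimax_dev *wimax_dev)
    {
            wimax_state_change(wimax_dev, WIMAX_ST_READY);
    }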
351 | |||
352 | |||
353 | /** | ||
354 | * wimax_state_get() - Return the current state of a WiMAX device | ||
355 | * | ||
356 | * @wimax_dev: WiMAX device descriptor | ||
357 | * | ||
358 | * Returns: Current state of the device according to its driver. | ||
359 | */ | ||
360 | enum wimax_st wimax_state_get(struct wimax_dev *wimax_dev) | ||
361 | { | ||
362 | enum wimax_st state; | ||
363 | mutex_lock(&wimax_dev->mutex); | ||
364 | state = wimax_dev->state; | ||
365 | mutex_unlock(&wimax_dev->mutex); | ||
366 | return state; | ||
367 | } | ||
368 | EXPORT_SYMBOL_GPL(wimax_state_get); | ||
369 | |||
370 | |||
371 | /** | ||
372 | * wimax_dev_init - initialize a newly allocated instance | ||
373 | * | ||
374 | * @wimax_dev: WiMAX device descriptor to initialize. | ||
375 | * | ||
376 | * Initializes fields of a freshly allocated @wimax_dev instance. This | ||
377 | * function assumes that after allocation, the memory occupied by | ||
378 | * @wimax_dev was zeroed. | ||
379 | */ | ||
380 | void wimax_dev_init(struct wimax_dev *wimax_dev) | ||
381 | { | ||
382 | INIT_LIST_HEAD(&wimax_dev->id_table_node); | ||
383 | __wimax_state_set(wimax_dev, WIMAX_ST_UNINITIALIZED); | ||
384 | mutex_init(&wimax_dev->mutex); | ||
385 | mutex_init(&wimax_dev->mutex_reset); | ||
386 | } | ||
387 | EXPORT_SYMBOL_GPL(wimax_dev_init); | ||
388 | |||
389 | /* | ||
390 | * These externs are declared here because it's easier to keep track -- | ||
391 | * this declaration and the ops array below list the same operations. | ||
392 | */ | ||
393 | extern struct genl_ops | ||
394 | wimax_gnl_msg_from_user, | ||
395 | wimax_gnl_reset, | ||
396 | wimax_gnl_rfkill; | ||
397 | |||
398 | static | ||
399 | struct genl_ops *wimax_gnl_ops[] = { | ||
400 | &wimax_gnl_msg_from_user, | ||
401 | &wimax_gnl_reset, | ||
402 | &wimax_gnl_rfkill, | ||
403 | }; | ||
404 | |||
405 | |||
406 | static | ||
407 | size_t wimax_addr_scnprint(char *addr_str, size_t addr_str_size, | ||
408 | unsigned char *addr, size_t addr_len) | ||
409 | { | ||
410 | unsigned cnt, total; | ||
411 | for (total = cnt = 0; cnt < addr_len; cnt++) | ||
412 | total += scnprintf(addr_str + total, addr_str_size - total, | ||
413 | "%02x%c", addr[cnt], | ||
414 | cnt == addr_len - 1 ? '\0' : ':'); | ||
415 | return total; | ||
416 | } | ||
417 | |||
418 | |||
419 | /** | ||
420 | * wimax_dev_add - Register a new WiMAX device | ||
421 | * | ||
422 | * @wimax_dev: WiMAX device descriptor (as embedded in your @net_dev's | ||
423 | * priv data). You must have called wimax_dev_init() on it before. | ||
424 | * | ||
425 | * @net_dev: net device the @wimax_dev is associated with. The | ||
426 | * function expects SET_NETDEV_DEV() and register_netdev() were | ||
427 | * already called on it. | ||
428 | * | ||
429 | * Registers the new WiMAX device, sets up the user-kernel control | ||
430 | * interface (generic netlink) and common WiMAX infrastructure. | ||
431 | * | ||
432 | * Note that the parts that will allow interaction with user space are | ||
433 | * set up at the very end, when the rest is in place, as once that | ||
434 | * happens, the driver might get user space control requests via | ||
435 | * netlink or from debugfs that might translate into calls into | ||
436 | * wimax_dev->op_*(). | ||
437 | */ | ||
438 | int wimax_dev_add(struct wimax_dev *wimax_dev, struct net_device *net_dev) | ||
439 | { | ||
440 | int result; | ||
441 | struct device *dev = net_dev->dev.parent; | ||
442 | char addr_str[32]; | ||
443 | |||
444 | d_fnstart(3, dev, "(wimax_dev %p net_dev %p)\n", wimax_dev, net_dev); | ||
445 | |||
446 | /* Do the RFKILL setup before locking, as RFKILL will call | ||
447 | * into our functions. */ | ||
448 | wimax_dev->net_dev = net_dev; | ||
449 | result = wimax_rfkill_add(wimax_dev); | ||
450 | if (result < 0) | ||
451 | goto error_rfkill_add; | ||
452 | |||
453 | /* Set up user-space interaction */ | ||
454 | mutex_lock(&wimax_dev->mutex); | ||
455 | wimax_id_table_add(wimax_dev); | ||
456 | result = wimax_debugfs_add(wimax_dev); | ||
457 | if (result < 0) { | ||
458 | dev_err(dev, "cannot initialize debugfs: %d\n", | ||
459 | result); | ||
460 | goto error_debugfs_add; | ||
461 | } | ||
462 | |||
463 | __wimax_state_set(wimax_dev, WIMAX_ST_DOWN); | ||
464 | mutex_unlock(&wimax_dev->mutex); | ||
465 | |||
466 | wimax_addr_scnprint(addr_str, sizeof(addr_str), | ||
467 | net_dev->dev_addr, net_dev->addr_len); | ||
468 | dev_info(dev, "WiMAX interface %s (%s) ready\n", | ||
469 | net_dev->name, addr_str); | ||
470 | d_fnend(3, dev, "(wimax_dev %p net_dev %p) = 0\n", wimax_dev, net_dev); | ||
471 | return 0; | ||
472 | |||
473 | error_debugfs_add: | ||
474 | wimax_id_table_rm(wimax_dev); | ||
475 | mutex_unlock(&wimax_dev->mutex); | ||
476 | wimax_rfkill_rm(wimax_dev); | ||
477 | error_rfkill_add: | ||
478 | d_fnend(3, dev, "(wimax_dev %p net_dev %p) = %d\n", | ||
479 | wimax_dev, net_dev, result); | ||
480 | return result; | ||
481 | } | ||
482 | EXPORT_SYMBOL_GPL(wimax_dev_add); | ||
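Usage sketch (editorial, not part of the patch) of the registration order the comment above requires; my_register() is a hypothetical probe-path helper.

#include <linux/device.h>
#include <linux/netdevice.h>
#include <net/wimax.h>

/* Hypothetical probe tail: the net_device must already be set up and
 * registered before wimax_dev_add() is called. */
static int my_register(struct wimax_dev *wimax_dev,
		       struct net_device *net_dev, struct device *parent)
{
	int result;

	SET_NETDEV_DEV(net_dev, parent);
	result = register_netdev(net_dev);
	if (result < 0)
		return result;
	result = wimax_dev_add(wimax_dev, net_dev);
	if (result < 0)
		unregister_netdev(net_dev);
	return result;
}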
483 | |||
484 | |||
485 | /** | ||
486 | * wimax_dev_rm - Unregister an existing WiMAX device | ||
487 | * | ||
488 | * @wimax_dev: WiMAX device descriptor | ||
489 | * | ||
490 | * Unregisters a WiMAX device previously registered for use with | ||
491 | * wimax_dev_add(). | ||
492 | * | ||
493 | * IMPORTANT! Must call before calling unregister_netdev(). | ||
494 | * | ||
495 | * After this function returns, no more user space control requests | ||
496 | * (via netlink or debugfs) will reach the driver's wimax_dev->op_*(). | ||
497 | * | ||
498 | * Reentrancy control is ensured by setting the state to | ||
499 | * %__WIMAX_ST_QUIESCING. rfkill operations coming through | ||
500 | * wimax_*rfkill*() will be stopped by the quiescing state; ops coming | ||
501 | * from the rfkill subsystem will be stopped by the support being | ||
502 | * removed by wimax_rfkill_rm(). | ||
503 | */ | ||
504 | void wimax_dev_rm(struct wimax_dev *wimax_dev) | ||
505 | { | ||
506 | d_fnstart(3, NULL, "(wimax_dev %p)\n", wimax_dev); | ||
507 | |||
508 | mutex_lock(&wimax_dev->mutex); | ||
509 | __wimax_state_change(wimax_dev, __WIMAX_ST_QUIESCING); | ||
510 | wimax_debugfs_rm(wimax_dev); | ||
511 | wimax_id_table_rm(wimax_dev); | ||
512 | __wimax_state_change(wimax_dev, WIMAX_ST_DOWN); | ||
513 | mutex_unlock(&wimax_dev->mutex); | ||
514 | wimax_rfkill_rm(wimax_dev); | ||
515 | d_fnend(3, NULL, "(wimax_dev %p) = void\n", wimax_dev); | ||
516 | } | ||
517 | EXPORT_SYMBOL_GPL(wimax_dev_rm); | ||
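Usage sketch (editorial, not part of the patch) of the teardown order: wimax_dev_rm() before unregister_netdev(); my_unregister() is a hypothetical helper.

#include <linux/netdevice.h>
#include <net/wimax.h>

static void my_unregister(struct wimax_dev *wimax_dev,
			  struct net_device *net_dev)
{
	wimax_dev_rm(wimax_dev);	/* stops netlink/debugfs requests */
	unregister_netdev(net_dev);
	free_netdev(net_dev);
}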
518 | |||
519 | struct genl_family wimax_gnl_family = { | ||
520 | .id = GENL_ID_GENERATE, | ||
521 | .name = "WiMAX", | ||
522 | .version = WIMAX_GNL_VERSION, | ||
523 | .hdrsize = 0, | ||
524 | .maxattr = WIMAX_GNL_ATTR_MAX, | ||
525 | }; | ||
526 | |||
527 | struct genl_multicast_group wimax_gnl_mcg = { | ||
528 | .name = "msg", | ||
529 | }; | ||
530 | |||
531 | |||
532 | |||
533 | /* Initialize the wimax stack */ | ||
534 | static | ||
535 | int __init wimax_subsys_init(void) | ||
536 | { | ||
537 | int result, cnt; | ||
538 | |||
539 | d_fnstart(4, NULL, "()\n"); | ||
540 | snprintf(wimax_gnl_family.name, sizeof(wimax_gnl_family.name), | ||
541 | "WiMAX"); | ||
542 | result = genl_register_family(&wimax_gnl_family); | ||
543 | if (unlikely(result < 0)) { | ||
544 | printk(KERN_ERR "cannot register generic netlink family: %d\n", | ||
545 | result); | ||
546 | goto error_register_family; | ||
547 | } | ||
548 | |||
549 | for (cnt = 0; cnt < ARRAY_SIZE(wimax_gnl_ops); cnt++) { | ||
550 | result = genl_register_ops(&wimax_gnl_family, | ||
551 | wimax_gnl_ops[cnt]); | ||
552 | d_printf(4, NULL, "registering generic netlink op code " | ||
553 | "%u: %d\n", wimax_gnl_ops[cnt]->cmd, result); | ||
554 | if (unlikely(result < 0)) { | ||
555 | printk(KERN_ERR "cannot register generic netlink op " | ||
556 | "code %u: %d\n", | ||
557 | wimax_gnl_ops[cnt]->cmd, result); | ||
558 | goto error_register_ops; | ||
559 | } | ||
560 | } | ||
561 | |||
562 | result = genl_register_mc_group(&wimax_gnl_family, &wimax_gnl_mcg); | ||
563 | if (result < 0) | ||
564 | goto error_mc_group; | ||
565 | d_fnend(4, NULL, "() = 0\n"); | ||
566 | return 0; | ||
567 | |||
568 | error_mc_group: | ||
569 | error_register_ops: | ||
570 | for (cnt--; cnt >= 0; cnt--) | ||
571 | genl_unregister_ops(&wimax_gnl_family, | ||
572 | wimax_gnl_ops[cnt]); | ||
573 | genl_unregister_family(&wimax_gnl_family); | ||
574 | error_register_family: | ||
575 | d_fnend(4, NULL, "() = %d\n", result); | ||
576 | return result; | ||
577 | |||
578 | } | ||
579 | module_init(wimax_subsys_init); | ||
580 | |||
581 | |||
582 | /* Shutdown the wimax stack */ | ||
583 | static | ||
584 | void __exit wimax_subsys_exit(void) | ||
585 | { | ||
586 | int cnt; | ||
587 | wimax_id_table_release(); | ||
588 | genl_unregister_mc_group(&wimax_gnl_family, &wimax_gnl_mcg); | ||
589 | for (cnt = ARRAY_SIZE(wimax_gnl_ops) - 1; cnt >= 0; cnt--) | ||
590 | genl_unregister_ops(&wimax_gnl_family, | ||
591 | wimax_gnl_ops[cnt]); | ||
592 | genl_unregister_family(&wimax_gnl_family); | ||
593 | } | ||
594 | module_exit(wimax_subsys_exit); | ||
595 | |||
596 | MODULE_AUTHOR("Intel Corporation <linux-wimax@intel.com>"); | ||
597 | MODULE_DESCRIPTION("Linux WiMAX stack"); | ||
598 | MODULE_LICENSE("GPL"); | ||
599 | |||
diff --git a/net/wimax/wimax-internal.h b/net/wimax/wimax-internal.h new file mode 100644 index 000000000000..1e743d214856 --- /dev/null +++ b/net/wimax/wimax-internal.h | |||
@@ -0,0 +1,91 @@ | |||
1 | /* | ||
2 | * Linux WiMAX | ||
3 | * Internal API for kernel space WiMAX stack | ||
4 | * | ||
5 | * | ||
6 | * Copyright (C) 2007 Intel Corporation <linux-wimax@intel.com> | ||
7 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License version | ||
11 | * 2 as published by the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
21 | * 02110-1301, USA. | ||
22 | * | ||
23 | * | ||
24 | * This header file is for declarations and definitions internal to | ||
25 | * the WiMAX stack. For public APIs and documentation, see | ||
26 | * include/net/wimax.h and include/linux/wimax.h. | ||
27 | */ | ||
28 | |||
29 | #ifndef __WIMAX_INTERNAL_H__ | ||
30 | #define __WIMAX_INTERNAL_H__ | ||
31 | #ifdef __KERNEL__ | ||
32 | |||
33 | #include <linux/device.h> | ||
34 | #include <net/wimax.h> | ||
35 | |||
36 | |||
37 | /* | ||
38 | * Decide if a (locked) device is ready for use | ||
39 | * | ||
40 | * Before using the device structure, it must be locked | ||
41 | * (wimax_dev->mutex). In addition, most operations need to call this | ||
42 | * function to check that the device state allows the operation. | ||
43 | * | ||
44 | * If the state does not allow it, an error value is returned; in | ||
45 | * that case, the caller should not attempt to use the device and | ||
46 | * should just unlock it. | ||
47 | */ | ||
48 | static inline __must_check | ||
49 | int wimax_dev_is_ready(struct wimax_dev *wimax_dev) | ||
50 | { | ||
51 | if (wimax_dev->state == __WIMAX_ST_NULL) | ||
52 | return -EINVAL; /* Device is not even registered! */ | ||
53 | if (wimax_dev->state == WIMAX_ST_DOWN) | ||
54 | return -ENOMEDIUM; | ||
55 | if (wimax_dev->state == __WIMAX_ST_QUIESCING) | ||
56 | return -ESHUTDOWN; | ||
57 | return 0; | ||
58 | } | ||
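Usage sketch (editorial, not part of the patch) of the lock/check pattern described above, as it would appear in a stack-internal op; my_op_example() is a hypothetical function.

#include <linux/mutex.h>
#include <net/wimax.h>
#include "wimax-internal.h"

static int my_op_example(struct wimax_dev *wimax_dev)
{
	int result;

	mutex_lock(&wimax_dev->mutex);
	result = wimax_dev_is_ready(wimax_dev);
	if (result < 0)
		goto out_unlock;
	/* ... operate on the device ... */
	result = 0;
out_unlock:
	mutex_unlock(&wimax_dev->mutex);
	return result;
}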
59 | |||
60 | |||
61 | static inline | ||
62 | void __wimax_state_set(struct wimax_dev *wimax_dev, enum wimax_st state) | ||
63 | { | ||
64 | wimax_dev->state = state; | ||
65 | } | ||
66 | extern void __wimax_state_change(struct wimax_dev *, enum wimax_st); | ||
67 | |||
68 | #ifdef CONFIG_DEBUG_FS | ||
69 | extern int wimax_debugfs_add(struct wimax_dev *); | ||
70 | extern void wimax_debugfs_rm(struct wimax_dev *); | ||
71 | #else | ||
72 | static inline int wimax_debugfs_add(struct wimax_dev *wimax_dev) | ||
73 | { | ||
74 | return 0; | ||
75 | } | ||
76 | static inline void wimax_debugfs_rm(struct wimax_dev *wimax_dev) {} | ||
77 | #endif | ||
78 | |||
79 | extern void wimax_id_table_add(struct wimax_dev *); | ||
80 | extern struct wimax_dev *wimax_dev_get_by_genl_info(struct genl_info *, int); | ||
81 | extern void wimax_id_table_rm(struct wimax_dev *); | ||
82 | extern void wimax_id_table_release(void); | ||
83 | |||
84 | extern int wimax_rfkill_add(struct wimax_dev *); | ||
85 | extern void wimax_rfkill_rm(struct wimax_dev *); | ||
86 | |||
87 | extern struct genl_family wimax_gnl_family; | ||
88 | extern struct genl_multicast_group wimax_gnl_mcg; | ||
89 | |||
90 | #endif /* #ifdef __KERNEL__ */ | ||
91 | #endif /* #ifndef __WIMAX_INTERNAL_H__ */ | ||
diff --git a/net/wireless/wext.c b/net/wireless/wext.c index e49a2d1ef1e4..cb6a5bb85d80 100644 --- a/net/wireless/wext.c +++ b/net/wireless/wext.c | |||
@@ -1055,8 +1055,8 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr, | |||
1055 | return private(dev, iwr, cmd, info, handler); | 1055 | return private(dev, iwr, cmd, info, handler); |
1056 | } | 1056 | } |
1057 | /* Old driver API : call driver ioctl handler */ | 1057 | /* Old driver API : call driver ioctl handler */ |
1058 | if (dev->do_ioctl) | 1058 | if (dev->netdev_ops->ndo_do_ioctl) |
1059 | return dev->do_ioctl(dev, ifr, cmd); | 1059 | return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd); |
1060 | return -EOPNOTSUPP; | 1060 | return -EOPNOTSUPP; |
1061 | } | 1061 | } |
1062 | 1062 | ||