Diffstat (limited to 'net')
232 files changed, 9453 insertions, 5697 deletions
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index a29c5ab5815c..1483243edf14 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -41,7 +41,7 @@ | |||
41 | 41 | ||
42 | /* Global VLAN variables */ | 42 | /* Global VLAN variables */ |
43 | 43 | ||
44 | int vlan_net_id; | 44 | int vlan_net_id __read_mostly; |
45 | 45 | ||
46 | /* Our listing of VLAN group(s) */ | 46 | /* Our listing of VLAN group(s) */ |
47 | static struct hlist_head vlan_group_hash[VLAN_GRP_HASH_SIZE]; | 47 | static struct hlist_head vlan_group_hash[VLAN_GRP_HASH_SIZE]; |
@@ -140,7 +140,7 @@ static void vlan_rcu_free(struct rcu_head *rcu) | |||
140 | vlan_group_free(container_of(rcu, struct vlan_group, rcu)); | 140 | vlan_group_free(container_of(rcu, struct vlan_group, rcu)); |
141 | } | 141 | } |
142 | 142 | ||
143 | void unregister_vlan_dev(struct net_device *dev) | 143 | void unregister_vlan_dev(struct net_device *dev, struct list_head *head) |
144 | { | 144 | { |
145 | struct vlan_dev_info *vlan = vlan_dev_info(dev); | 145 | struct vlan_dev_info *vlan = vlan_dev_info(dev); |
146 | struct net_device *real_dev = vlan->real_dev; | 146 | struct net_device *real_dev = vlan->real_dev; |
@@ -159,12 +159,13 @@ void unregister_vlan_dev(struct net_device *dev) | |||
159 | if (real_dev->features & NETIF_F_HW_VLAN_FILTER) | 159 | if (real_dev->features & NETIF_F_HW_VLAN_FILTER) |
160 | ops->ndo_vlan_rx_kill_vid(real_dev, vlan_id); | 160 | ops->ndo_vlan_rx_kill_vid(real_dev, vlan_id); |
161 | 161 | ||
162 | vlan_group_set_device(grp, vlan_id, NULL); | ||
163 | grp->nr_vlans--; | 162 | grp->nr_vlans--; |
164 | 163 | ||
165 | synchronize_net(); | 164 | vlan_group_set_device(grp, vlan_id, NULL); |
165 | if (!grp->killall) | ||
166 | synchronize_net(); | ||
166 | 167 | ||
167 | unregister_netdevice(dev); | 168 | unregister_netdevice_queue(dev, head); |
168 | 169 | ||
169 | /* If the group is now empty, kill off the group. */ | 170 | /* If the group is now empty, kill off the group. */ |
170 | if (grp->nr_vlans == 0) { | 171 | if (grp->nr_vlans == 0) { |
@@ -430,6 +431,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event, | |||
430 | struct vlan_group *grp; | 431 | struct vlan_group *grp; |
431 | int i, flgs; | 432 | int i, flgs; |
432 | struct net_device *vlandev; | 433 | struct net_device *vlandev; |
434 | LIST_HEAD(list); | ||
433 | 435 | ||
434 | if (is_vlan_dev(dev)) | 436 | if (is_vlan_dev(dev)) |
435 | __vlan_device_event(dev, event); | 437 | __vlan_device_event(dev, event); |
@@ -528,6 +530,8 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event, | |||
528 | 530 | ||
529 | case NETDEV_UNREGISTER: | 531 | case NETDEV_UNREGISTER: |
530 | /* Delete all VLANs for this dev. */ | 532 | /* Delete all VLANs for this dev. */ |
533 | grp->killall = 1; | ||
534 | |||
531 | for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { | 535 | for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { |
532 | vlandev = vlan_group_get_device(grp, i); | 536 | vlandev = vlan_group_get_device(grp, i); |
533 | if (!vlandev) | 537 | if (!vlandev) |
@@ -538,8 +542,9 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event, | |||
538 | if (grp->nr_vlans == 1) | 542 | if (grp->nr_vlans == 1) |
539 | i = VLAN_GROUP_ARRAY_LEN; | 543 | i = VLAN_GROUP_ARRAY_LEN; |
540 | 544 | ||
541 | unregister_vlan_dev(vlandev); | 545 | unregister_vlan_dev(vlandev, &list); |
542 | } | 546 | } |
547 | unregister_netdevice_many(&list); | ||
543 | break; | 548 | break; |
544 | } | 549 | } |
545 | 550 | ||
@@ -645,7 +650,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg) | |||
645 | err = -EPERM; | 650 | err = -EPERM; |
646 | if (!capable(CAP_NET_ADMIN)) | 651 | if (!capable(CAP_NET_ADMIN)) |
647 | break; | 652 | break; |
648 | unregister_vlan_dev(dev); | 653 | unregister_vlan_dev(dev, NULL); |
649 | err = 0; | 654 | err = 0; |
650 | break; | 655 | break; |
651 | 656 | ||
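The vlan.c hunks above are part of the batched-unregister conversion: rather than calling unregister_netdevice() and paying for a synchronize_net() per VLAN, each device is queued on a caller-provided list with unregister_netdevice_queue() and the whole group is torn down at once with unregister_netdevice_many(), while grp->killall suppresses the per-device synchronize_net(). Passing a NULL list, as the ioctl path does, keeps the old one-at-a-time behaviour. A minimal sketch of the calling pattern, assuming the caller already holds the RTNL (the function name and device array are hypothetical):

        static void my_ports_unregister(struct net_device **devs, int n)
        {
                LIST_HEAD(list);        /* collects devices pending unregistration */
                int i;

                /* queue every device; nothing is torn down yet */
                for (i = 0; i < n; i++)
                        unregister_netdevice_queue(devs[i], &list);

                /*
                 * Unregister the whole batch in one pass so the expensive
                 * RCU and notifier work is shared by all devices.
                 */
                unregister_netdevice_many(&list);
        }
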
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index 82570bc2a180..5685296017e9 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -16,6 +16,21 @@ struct vlan_priority_tci_mapping { | |||
16 | struct vlan_priority_tci_mapping *next; | 16 | struct vlan_priority_tci_mapping *next; |
17 | }; | 17 | }; |
18 | 18 | ||
19 | |||
20 | /** | ||
21 | * struct vlan_rx_stats - VLAN percpu rx stats | ||
22 | * @rx_packets: number of received packets | ||
23 | * @rx_bytes: number of received bytes | ||
24 | * @multicast: number of received multicast packets | ||
25 | * @rx_errors: number of errors | ||
26 | */ | ||
27 | struct vlan_rx_stats { | ||
28 | unsigned long rx_packets; | ||
29 | unsigned long rx_bytes; | ||
30 | unsigned long multicast; | ||
31 | unsigned long rx_errors; | ||
32 | }; | ||
33 | |||
19 | /** | 34 | /** |
20 | * struct vlan_dev_info - VLAN private device data | 35 | * struct vlan_dev_info - VLAN private device data |
21 | * @nr_ingress_mappings: number of ingress priority mappings | 36 | * @nr_ingress_mappings: number of ingress priority mappings |
@@ -29,6 +44,7 @@ struct vlan_priority_tci_mapping { | |||
29 | * @dent: proc dir entry | 44 | * @dent: proc dir entry |
30 | * @cnt_inc_headroom_on_tx: statistic - number of skb expansions on TX | 45 | * @cnt_inc_headroom_on_tx: statistic - number of skb expansions on TX |
31 | * @cnt_encap_on_xmit: statistic - number of skb encapsulations on TX | 46 | * @cnt_encap_on_xmit: statistic - number of skb encapsulations on TX |
47 | * @vlan_rx_stats: ptr to percpu rx stats | ||
32 | */ | 48 | */ |
33 | struct vlan_dev_info { | 49 | struct vlan_dev_info { |
34 | unsigned int nr_ingress_mappings; | 50 | unsigned int nr_ingress_mappings; |
@@ -45,6 +61,7 @@ struct vlan_dev_info { | |||
45 | struct proc_dir_entry *dent; | 61 | struct proc_dir_entry *dent; |
46 | unsigned long cnt_inc_headroom_on_tx; | 62 | unsigned long cnt_inc_headroom_on_tx; |
47 | unsigned long cnt_encap_on_xmit; | 63 | unsigned long cnt_encap_on_xmit; |
64 | struct vlan_rx_stats *vlan_rx_stats; | ||
48 | }; | 65 | }; |
49 | 66 | ||
50 | static inline struct vlan_dev_info *vlan_dev_info(const struct net_device *dev) | 67 | static inline struct vlan_dev_info *vlan_dev_info(const struct net_device *dev) |
@@ -82,14 +99,14 @@ void vlan_dev_get_realdev_name(const struct net_device *dev, char *result); | |||
82 | int vlan_check_real_dev(struct net_device *real_dev, u16 vlan_id); | 99 | int vlan_check_real_dev(struct net_device *real_dev, u16 vlan_id); |
83 | void vlan_setup(struct net_device *dev); | 100 | void vlan_setup(struct net_device *dev); |
84 | int register_vlan_dev(struct net_device *dev); | 101 | int register_vlan_dev(struct net_device *dev); |
85 | void unregister_vlan_dev(struct net_device *dev); | 102 | void unregister_vlan_dev(struct net_device *dev, struct list_head *head); |
86 | 103 | ||
87 | static inline u32 vlan_get_ingress_priority(struct net_device *dev, | 104 | static inline u32 vlan_get_ingress_priority(struct net_device *dev, |
88 | u16 vlan_tci) | 105 | u16 vlan_tci) |
89 | { | 106 | { |
90 | struct vlan_dev_info *vip = vlan_dev_info(dev); | 107 | struct vlan_dev_info *vip = vlan_dev_info(dev); |
91 | 108 | ||
92 | return vip->ingress_priority_map[(vlan_tci >> 13) & 0x7]; | 109 | return vip->ingress_priority_map[(vlan_tci >> VLAN_PRIO_SHIFT) & 0x7]; |
93 | } | 110 | } |
94 | 111 | ||
95 | #ifdef CONFIG_VLAN_8021Q_GVRP | 112 | #ifdef CONFIG_VLAN_8021Q_GVRP |
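The new struct vlan_rx_stats moves the receive counters out of the shared dev->stats and into per-CPU memory, so the hot RX path only writes the local CPU's cache line; the totals are folded together on demand when statistics are read (see vlan_dev_get_stats in the vlan_dev.c hunks below). A condensed sketch of that lifecycle using the same per-CPU primitives as the patch; the type and function names here are illustrative, not taken from the patch:

        #include <linux/percpu.h>
        #include <linux/netdevice.h>

        struct pcpu_rx_stats {                  /* stand-in for struct vlan_rx_stats */
                unsigned long rx_packets;
                unsigned long rx_bytes;
        };

        static struct pcpu_rx_stats *rx_stats;

        static int pcpu_stats_init(void)
        {
                /* one counter block per possible CPU */
                rx_stats = alloc_percpu(struct pcpu_rx_stats);
                return rx_stats ? 0 : -ENOMEM;
        }

        static void pcpu_stats_rx(unsigned int len)
        {
                /* hot path (softirq): only the local CPU's counters are touched */
                struct pcpu_rx_stats *s = per_cpu_ptr(rx_stats, smp_processor_id());

                s->rx_packets++;
                s->rx_bytes += len;
        }

        static void pcpu_stats_fold(struct net_device_stats *stats)
        {
                int i;

                /* slow path: sum every CPU's counters when stats are queried */
                for_each_possible_cpu(i) {
                        const struct pcpu_rx_stats *s = per_cpu_ptr(rx_stats, i);

                        stats->rx_packets += s->rx_packets;
                        stats->rx_bytes += s->rx_bytes;
                }
        }

        static void pcpu_stats_exit(void)
        {
                free_percpu(rx_stats);
        }
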
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 7f7de1a04de6..e75a2f3b10af 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -14,7 +14,7 @@ int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp, | |||
14 | if (skb_bond_should_drop(skb)) | 14 | if (skb_bond_should_drop(skb)) |
15 | goto drop; | 15 | goto drop; |
16 | 16 | ||
17 | skb->vlan_tci = vlan_tci; | 17 | __vlan_hwaccel_put_tag(skb, vlan_tci); |
18 | skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK); | 18 | skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK); |
19 | 19 | ||
20 | if (!skb->dev) | 20 | if (!skb->dev) |
@@ -31,7 +31,7 @@ EXPORT_SYMBOL(__vlan_hwaccel_rx); | |||
31 | int vlan_hwaccel_do_receive(struct sk_buff *skb) | 31 | int vlan_hwaccel_do_receive(struct sk_buff *skb) |
32 | { | 32 | { |
33 | struct net_device *dev = skb->dev; | 33 | struct net_device *dev = skb->dev; |
34 | struct net_device_stats *stats; | 34 | struct vlan_rx_stats *rx_stats; |
35 | 35 | ||
36 | skb->dev = vlan_dev_info(dev)->real_dev; | 36 | skb->dev = vlan_dev_info(dev)->real_dev; |
37 | netif_nit_deliver(skb); | 37 | netif_nit_deliver(skb); |
@@ -40,15 +40,17 @@ int vlan_hwaccel_do_receive(struct sk_buff *skb) | |||
40 | skb->priority = vlan_get_ingress_priority(dev, skb->vlan_tci); | 40 | skb->priority = vlan_get_ingress_priority(dev, skb->vlan_tci); |
41 | skb->vlan_tci = 0; | 41 | skb->vlan_tci = 0; |
42 | 42 | ||
43 | stats = &dev->stats; | 43 | rx_stats = per_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats, |
44 | stats->rx_packets++; | 44 | smp_processor_id()); |
45 | stats->rx_bytes += skb->len; | 45 | |
46 | rx_stats->rx_packets++; | ||
47 | rx_stats->rx_bytes += skb->len; | ||
46 | 48 | ||
47 | switch (skb->pkt_type) { | 49 | switch (skb->pkt_type) { |
48 | case PACKET_BROADCAST: | 50 | case PACKET_BROADCAST: |
49 | break; | 51 | break; |
50 | case PACKET_MULTICAST: | 52 | case PACKET_MULTICAST: |
51 | stats->multicast++; | 53 | rx_stats->multicast++; |
52 | break; | 54 | break; |
53 | case PACKET_OTHERHOST: | 55 | case PACKET_OTHERHOST: |
54 | /* Our lower layer thinks this is not local, let's make sure. | 56 | /* Our lower layer thinks this is not local, let's make sure. |
@@ -74,15 +76,16 @@ u16 vlan_dev_vlan_id(const struct net_device *dev) | |||
74 | } | 76 | } |
75 | EXPORT_SYMBOL(vlan_dev_vlan_id); | 77 | EXPORT_SYMBOL(vlan_dev_vlan_id); |
76 | 78 | ||
77 | static int vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp, | 79 | static gro_result_t |
78 | unsigned int vlan_tci, struct sk_buff *skb) | 80 | vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp, |
81 | unsigned int vlan_tci, struct sk_buff *skb) | ||
79 | { | 82 | { |
80 | struct sk_buff *p; | 83 | struct sk_buff *p; |
81 | 84 | ||
82 | if (skb_bond_should_drop(skb)) | 85 | if (skb_bond_should_drop(skb)) |
83 | goto drop; | 86 | goto drop; |
84 | 87 | ||
85 | skb->vlan_tci = vlan_tci; | 88 | __vlan_hwaccel_put_tag(skb, vlan_tci); |
86 | skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK); | 89 | skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK); |
87 | 90 | ||
88 | if (!skb->dev) | 91 | if (!skb->dev) |
@@ -101,11 +104,12 @@ drop: | |||
101 | return GRO_DROP; | 104 | return GRO_DROP; |
102 | } | 105 | } |
103 | 106 | ||
104 | int vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp, | 107 | gro_result_t vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp, |
105 | unsigned int vlan_tci, struct sk_buff *skb) | 108 | unsigned int vlan_tci, struct sk_buff *skb) |
106 | { | 109 | { |
107 | if (netpoll_rx_on(skb)) | 110 | if (netpoll_rx_on(skb)) |
108 | return vlan_hwaccel_receive_skb(skb, grp, vlan_tci); | 111 | return vlan_hwaccel_receive_skb(skb, grp, vlan_tci) |
112 | ? GRO_DROP : GRO_NORMAL; | ||
109 | 113 | ||
110 | skb_gro_reset_offset(skb); | 114 | skb_gro_reset_offset(skb); |
111 | 115 | ||
@@ -113,17 +117,18 @@ int vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp, | |||
113 | } | 117 | } |
114 | EXPORT_SYMBOL(vlan_gro_receive); | 118 | EXPORT_SYMBOL(vlan_gro_receive); |
115 | 119 | ||
116 | int vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp, | 120 | gro_result_t vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp, |
117 | unsigned int vlan_tci) | 121 | unsigned int vlan_tci) |
118 | { | 122 | { |
119 | struct sk_buff *skb = napi_frags_skb(napi); | 123 | struct sk_buff *skb = napi_frags_skb(napi); |
120 | 124 | ||
121 | if (!skb) | 125 | if (!skb) |
122 | return NET_RX_DROP; | 126 | return GRO_DROP; |
123 | 127 | ||
124 | if (netpoll_rx_on(skb)) { | 128 | if (netpoll_rx_on(skb)) { |
125 | skb->protocol = eth_type_trans(skb, skb->dev); | 129 | skb->protocol = eth_type_trans(skb, skb->dev); |
126 | return vlan_hwaccel_receive_skb(skb, grp, vlan_tci); | 130 | return vlan_hwaccel_receive_skb(skb, grp, vlan_tci) |
131 | ? GRO_DROP : GRO_NORMAL; | ||
127 | } | 132 | } |
128 | 133 | ||
129 | return napi_frags_finish(napi, skb, | 134 | return napi_frags_finish(napi, skb, |
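vlan_gro_receive() and vlan_gro_frags() now return gro_result_t instead of the NET_RX_* integers, which is why the netpoll fallbacks above map the int result of vlan_hwaccel_receive_skb() onto GRO_NORMAL or GRO_DROP explicitly. On the caller side a driver can act on the richer result; a small hypothetical sketch (struct my_adapter and its fields are invented for illustration):

        static void my_rx_vlan(struct my_adapter *adapter, struct sk_buff *skb,
                               u16 vlan_tci)
        {
                gro_result_t res;

                res = vlan_gro_receive(&adapter->napi, adapter->vlgrp,
                                       vlan_tci, skb);
                if (res == GRO_DROP)
                        adapter->rx_dropped++;  /* hypothetical driver counter */
        }
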
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 4198ec5c8abc..de0dc6bacbe8 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -140,7 +140,7 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev, | |||
140 | struct packet_type *ptype, struct net_device *orig_dev) | 140 | struct packet_type *ptype, struct net_device *orig_dev) |
141 | { | 141 | { |
142 | struct vlan_hdr *vhdr; | 142 | struct vlan_hdr *vhdr; |
143 | struct net_device_stats *stats; | 143 | struct vlan_rx_stats *rx_stats; |
144 | u16 vlan_id; | 144 | u16 vlan_id; |
145 | u16 vlan_tci; | 145 | u16 vlan_tci; |
146 | 146 | ||
@@ -163,9 +163,10 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev, | |||
163 | goto err_unlock; | 163 | goto err_unlock; |
164 | } | 164 | } |
165 | 165 | ||
166 | stats = &skb->dev->stats; | 166 | rx_stats = per_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats, |
167 | stats->rx_packets++; | 167 | smp_processor_id()); |
168 | stats->rx_bytes += skb->len; | 168 | rx_stats->rx_packets++; |
169 | rx_stats->rx_bytes += skb->len; | ||
169 | 170 | ||
170 | skb_pull_rcsum(skb, VLAN_HLEN); | 171 | skb_pull_rcsum(skb, VLAN_HLEN); |
171 | 172 | ||
@@ -180,7 +181,7 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev, | |||
180 | break; | 181 | break; |
181 | 182 | ||
182 | case PACKET_MULTICAST: | 183 | case PACKET_MULTICAST: |
183 | stats->multicast++; | 184 | rx_stats->multicast++; |
184 | break; | 185 | break; |
185 | 186 | ||
186 | case PACKET_OTHERHOST: | 187 | case PACKET_OTHERHOST: |
@@ -200,7 +201,7 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev, | |||
200 | 201 | ||
201 | skb = vlan_check_reorder_header(skb); | 202 | skb = vlan_check_reorder_header(skb); |
202 | if (!skb) { | 203 | if (!skb) { |
203 | stats->rx_errors++; | 204 | rx_stats->rx_errors++; |
204 | goto err_unlock; | 205 | goto err_unlock; |
205 | } | 206 | } |
206 | 207 | ||
@@ -332,7 +333,7 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb, | |||
332 | } else | 333 | } else |
333 | txq->tx_dropped++; | 334 | txq->tx_dropped++; |
334 | 335 | ||
335 | return NETDEV_TX_OK; | 336 | return ret; |
336 | } | 337 | } |
337 | 338 | ||
338 | static netdev_tx_t vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb, | 339 | static netdev_tx_t vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb, |
@@ -358,7 +359,7 @@ static netdev_tx_t vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb, | |||
358 | } else | 359 | } else |
359 | txq->tx_dropped++; | 360 | txq->tx_dropped++; |
360 | 361 | ||
361 | return NETDEV_TX_OK; | 362 | return ret; |
362 | } | 363 | } |
363 | 364 | ||
364 | static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu) | 365 | static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu) |
@@ -393,7 +394,7 @@ int vlan_dev_set_egress_priority(const struct net_device *dev, | |||
393 | struct vlan_dev_info *vlan = vlan_dev_info(dev); | 394 | struct vlan_dev_info *vlan = vlan_dev_info(dev); |
394 | struct vlan_priority_tci_mapping *mp = NULL; | 395 | struct vlan_priority_tci_mapping *mp = NULL; |
395 | struct vlan_priority_tci_mapping *np; | 396 | struct vlan_priority_tci_mapping *np; |
396 | u32 vlan_qos = (vlan_prio << 13) & 0xE000; | 397 | u32 vlan_qos = (vlan_prio << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK; |
397 | 398 | ||
398 | /* See if a priority mapping exists.. */ | 399 | /* See if a priority mapping exists.. */ |
399 | mp = vlan->egress_priority_map[skb_prio & 0xF]; | 400 | mp = vlan->egress_priority_map[skb_prio & 0xF]; |
@@ -626,6 +627,17 @@ static int vlan_dev_fcoe_disable(struct net_device *dev) | |||
626 | rc = ops->ndo_fcoe_disable(real_dev); | 627 | rc = ops->ndo_fcoe_disable(real_dev); |
627 | return rc; | 628 | return rc; |
628 | } | 629 | } |
630 | |||
631 | static int vlan_dev_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type) | ||
632 | { | ||
633 | struct net_device *real_dev = vlan_dev_info(dev)->real_dev; | ||
634 | const struct net_device_ops *ops = real_dev->netdev_ops; | ||
635 | int rc = -EINVAL; | ||
636 | |||
637 | if (ops->ndo_fcoe_get_wwn) | ||
638 | rc = ops->ndo_fcoe_get_wwn(real_dev, wwn, type); | ||
639 | return rc; | ||
640 | } | ||
629 | #endif | 641 | #endif |
630 | 642 | ||
631 | static void vlan_dev_change_rx_flags(struct net_device *dev, int change) | 643 | static void vlan_dev_change_rx_flags(struct net_device *dev, int change) |
@@ -720,6 +732,11 @@ static int vlan_dev_init(struct net_device *dev) | |||
720 | subclass = 1; | 732 | subclass = 1; |
721 | 733 | ||
722 | vlan_dev_set_lockdep_class(dev, subclass); | 734 | vlan_dev_set_lockdep_class(dev, subclass); |
735 | |||
736 | vlan_dev_info(dev)->vlan_rx_stats = alloc_percpu(struct vlan_rx_stats); | ||
737 | if (!vlan_dev_info(dev)->vlan_rx_stats) | ||
738 | return -ENOMEM; | ||
739 | |||
723 | return 0; | 740 | return 0; |
724 | } | 741 | } |
725 | 742 | ||
@@ -729,6 +746,8 @@ static void vlan_dev_uninit(struct net_device *dev) | |||
729 | struct vlan_dev_info *vlan = vlan_dev_info(dev); | 746 | struct vlan_dev_info *vlan = vlan_dev_info(dev); |
730 | int i; | 747 | int i; |
731 | 748 | ||
749 | free_percpu(vlan->vlan_rx_stats); | ||
750 | vlan->vlan_rx_stats = NULL; | ||
732 | for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) { | 751 | for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) { |
733 | while ((pm = vlan->egress_priority_map[i]) != NULL) { | 752 | while ((pm = vlan->egress_priority_map[i]) != NULL) { |
734 | vlan->egress_priority_map[i] = pm->next; | 753 | vlan->egress_priority_map[i] = pm->next; |
@@ -764,6 +783,31 @@ static u32 vlan_ethtool_get_flags(struct net_device *dev) | |||
764 | return dev_ethtool_get_flags(vlan->real_dev); | 783 | return dev_ethtool_get_flags(vlan->real_dev); |
765 | } | 784 | } |
766 | 785 | ||
786 | static struct net_device_stats *vlan_dev_get_stats(struct net_device *dev) | ||
787 | { | ||
788 | struct net_device_stats *stats = &dev->stats; | ||
789 | |||
790 | dev_txq_stats_fold(dev, stats); | ||
791 | |||
792 | if (vlan_dev_info(dev)->vlan_rx_stats) { | ||
793 | struct vlan_rx_stats *p, rx = {0}; | ||
794 | int i; | ||
795 | |||
796 | for_each_possible_cpu(i) { | ||
797 | p = per_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats, i); | ||
798 | rx.rx_packets += p->rx_packets; | ||
799 | rx.rx_bytes += p->rx_bytes; | ||
800 | rx.rx_errors += p->rx_errors; | ||
801 | rx.multicast += p->multicast; | ||
802 | } | ||
803 | stats->rx_packets = rx.rx_packets; | ||
804 | stats->rx_bytes = rx.rx_bytes; | ||
805 | stats->rx_errors = rx.rx_errors; | ||
806 | stats->multicast = rx.multicast; | ||
807 | } | ||
808 | return stats; | ||
809 | } | ||
810 | |||
767 | static const struct ethtool_ops vlan_ethtool_ops = { | 811 | static const struct ethtool_ops vlan_ethtool_ops = { |
768 | .get_settings = vlan_ethtool_get_settings, | 812 | .get_settings = vlan_ethtool_get_settings, |
769 | .get_drvinfo = vlan_ethtool_get_drvinfo, | 813 | .get_drvinfo = vlan_ethtool_get_drvinfo, |
@@ -786,11 +830,13 @@ static const struct net_device_ops vlan_netdev_ops = { | |||
786 | .ndo_change_rx_flags = vlan_dev_change_rx_flags, | 830 | .ndo_change_rx_flags = vlan_dev_change_rx_flags, |
787 | .ndo_do_ioctl = vlan_dev_ioctl, | 831 | .ndo_do_ioctl = vlan_dev_ioctl, |
788 | .ndo_neigh_setup = vlan_dev_neigh_setup, | 832 | .ndo_neigh_setup = vlan_dev_neigh_setup, |
833 | .ndo_get_stats = vlan_dev_get_stats, | ||
789 | #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) | 834 | #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) |
790 | .ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup, | 835 | .ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup, |
791 | .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done, | 836 | .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done, |
792 | .ndo_fcoe_enable = vlan_dev_fcoe_enable, | 837 | .ndo_fcoe_enable = vlan_dev_fcoe_enable, |
793 | .ndo_fcoe_disable = vlan_dev_fcoe_disable, | 838 | .ndo_fcoe_disable = vlan_dev_fcoe_disable, |
839 | .ndo_fcoe_get_wwn = vlan_dev_fcoe_get_wwn, | ||
794 | #endif | 840 | #endif |
795 | }; | 841 | }; |
796 | 842 | ||
@@ -808,11 +854,13 @@ static const struct net_device_ops vlan_netdev_accel_ops = { | |||
808 | .ndo_change_rx_flags = vlan_dev_change_rx_flags, | 854 | .ndo_change_rx_flags = vlan_dev_change_rx_flags, |
809 | .ndo_do_ioctl = vlan_dev_ioctl, | 855 | .ndo_do_ioctl = vlan_dev_ioctl, |
810 | .ndo_neigh_setup = vlan_dev_neigh_setup, | 856 | .ndo_neigh_setup = vlan_dev_neigh_setup, |
857 | .ndo_get_stats = vlan_dev_get_stats, | ||
811 | #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) | 858 | #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) |
812 | .ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup, | 859 | .ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup, |
813 | .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done, | 860 | .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done, |
814 | .ndo_fcoe_enable = vlan_dev_fcoe_enable, | 861 | .ndo_fcoe_enable = vlan_dev_fcoe_enable, |
815 | .ndo_fcoe_disable = vlan_dev_fcoe_disable, | 862 | .ndo_fcoe_disable = vlan_dev_fcoe_disable, |
863 | .ndo_fcoe_get_wwn = vlan_dev_fcoe_get_wwn, | ||
816 | #endif | 864 | #endif |
817 | }; | 865 | }; |
818 | 866 | ||
diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
index a91504850195..3c9cf6a8e7fb 100644
--- a/net/8021q/vlan_netlink.c
+++ b/net/8021q/vlan_netlink.c
@@ -119,7 +119,7 @@ static int vlan_get_tx_queues(struct net *net, | |||
119 | return 0; | 119 | return 0; |
120 | } | 120 | } |
121 | 121 | ||
122 | static int vlan_newlink(struct net_device *dev, | 122 | static int vlan_newlink(struct net *src_net, struct net_device *dev, |
123 | struct nlattr *tb[], struct nlattr *data[]) | 123 | struct nlattr *tb[], struct nlattr *data[]) |
124 | { | 124 | { |
125 | struct vlan_dev_info *vlan = vlan_dev_info(dev); | 125 | struct vlan_dev_info *vlan = vlan_dev_info(dev); |
@@ -131,7 +131,7 @@ static int vlan_newlink(struct net_device *dev, | |||
131 | 131 | ||
132 | if (!tb[IFLA_LINK]) | 132 | if (!tb[IFLA_LINK]) |
133 | return -EINVAL; | 133 | return -EINVAL; |
134 | real_dev = __dev_get_by_index(dev_net(dev), nla_get_u32(tb[IFLA_LINK])); | 134 | real_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK])); |
135 | if (!real_dev) | 135 | if (!real_dev) |
136 | return -ENODEV; | 136 | return -ENODEV; |
137 | 137 | ||
diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c
index 6262c335f3c2..9ec1f057c03a 100644
--- a/net/8021q/vlanproc.c
+++ b/net/8021q/vlanproc.c
@@ -201,18 +201,17 @@ int vlan_proc_rem_dev(struct net_device *vlandev) | |||
201 | 201 | ||
202 | /* start read of /proc/net/vlan/config */ | 202 | /* start read of /proc/net/vlan/config */ |
203 | static void *vlan_seq_start(struct seq_file *seq, loff_t *pos) | 203 | static void *vlan_seq_start(struct seq_file *seq, loff_t *pos) |
204 | __acquires(dev_base_lock) | 204 | __acquires(rcu) |
205 | { | 205 | { |
206 | struct net_device *dev; | 206 | struct net_device *dev; |
207 | struct net *net = seq_file_net(seq); | 207 | struct net *net = seq_file_net(seq); |
208 | loff_t i = 1; | 208 | loff_t i = 1; |
209 | 209 | ||
210 | read_lock(&dev_base_lock); | 210 | rcu_read_lock(); |
211 | |||
212 | if (*pos == 0) | 211 | if (*pos == 0) |
213 | return SEQ_START_TOKEN; | 212 | return SEQ_START_TOKEN; |
214 | 213 | ||
215 | for_each_netdev(net, dev) { | 214 | for_each_netdev_rcu(net, dev) { |
216 | if (!is_vlan_dev(dev)) | 215 | if (!is_vlan_dev(dev)) |
217 | continue; | 216 | continue; |
218 | 217 | ||
@@ -234,7 +233,7 @@ static void *vlan_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
234 | if (v == SEQ_START_TOKEN) | 233 | if (v == SEQ_START_TOKEN) |
235 | dev = net_device_entry(&net->dev_base_head); | 234 | dev = net_device_entry(&net->dev_base_head); |
236 | 235 | ||
237 | for_each_netdev_continue(net, dev) { | 236 | for_each_netdev_continue_rcu(net, dev) { |
238 | if (!is_vlan_dev(dev)) | 237 | if (!is_vlan_dev(dev)) |
239 | continue; | 238 | continue; |
240 | 239 | ||
@@ -245,9 +244,9 @@ static void *vlan_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
245 | } | 244 | } |
246 | 245 | ||
247 | static void vlan_seq_stop(struct seq_file *seq, void *v) | 246 | static void vlan_seq_stop(struct seq_file *seq, void *v) |
248 | __releases(dev_base_lock) | 247 | __releases(rcu) |
249 | { | 248 | { |
250 | read_unlock(&dev_base_lock); | 249 | rcu_read_unlock(); |
251 | } | 250 | } |
252 | 251 | ||
253 | static int vlan_seq_show(struct seq_file *seq, void *v) | 252 | static int vlan_seq_show(struct seq_file *seq, void *v) |
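The vlanproc.c hunks convert the /proc/net/vlan/config seq_file from read_lock(&dev_base_lock) to RCU: the device list is walked under rcu_read_lock() using the _rcu iterator variants. The core of the pattern, reduced to a standalone walker (the pr_debug() output is only illustrative):

        static void show_vlan_devices(struct net *net)
        {
                struct net_device *dev;

                rcu_read_lock();
                for_each_netdev_rcu(net, dev) {
                        if (!is_vlan_dev(dev))
                                continue;
                        /* dev stays valid while the RCU read lock is held,
                         * but this section must not sleep */
                        pr_debug("vlan interface: %s\n", dev->name);
                }
                rcu_read_unlock();
        }
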
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index b1a4290996b5..73ca4d524928 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -56,6 +56,7 @@ | |||
56 | #include <linux/if_arp.h> | 56 | #include <linux/if_arp.h> |
57 | #include <linux/smp_lock.h> | 57 | #include <linux/smp_lock.h> |
58 | #include <linux/termios.h> /* For TIOCOUTQ/INQ */ | 58 | #include <linux/termios.h> /* For TIOCOUTQ/INQ */ |
59 | #include <linux/compat.h> | ||
59 | #include <net/datalink.h> | 60 | #include <net/datalink.h> |
60 | #include <net/psnap.h> | 61 | #include <net/psnap.h> |
61 | #include <net/sock.h> | 62 | #include <net/sock.h> |
@@ -922,13 +923,8 @@ static unsigned long atalk_sum_partial(const unsigned char *data, | |||
922 | { | 923 | { |
923 | /* This ought to be unwrapped neatly. I'll trust gcc for now */ | 924 | /* This ought to be unwrapped neatly. I'll trust gcc for now */ |
924 | while (len--) { | 925 | while (len--) { |
925 | sum += *data; | 926 | sum += *data++; |
926 | sum <<= 1; | 927 | sum = rol16(sum, 1); |
927 | if (sum & 0x10000) { | ||
928 | sum++; | ||
929 | sum &= 0xffff; | ||
930 | } | ||
931 | data++; | ||
932 | } | 928 | } |
933 | return sum; | 929 | return sum; |
934 | } | 930 | } |
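The atalk_sum_partial() rewrite replaces the open-coded shift-test-wrap sequence with rol16(), the generic 16-bit rotate-left helper from include/linux/bitops.h, which looks roughly like this (shown only for reference, it is not part of the patch):

        static inline __u16 rol16(__u16 word, unsigned int shift)
        {
                /* bits shifted out at the top re-enter at the bottom */
                return (word << shift) | (word >> (16 - shift));
        }
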
@@ -1021,7 +1017,8 @@ static struct proto ddp_proto = { | |||
1021 | * Create a socket. Initialise the socket, blank the addresses | 1017 | * Create a socket. Initialise the socket, blank the addresses |
1022 | * set the state. | 1018 | * set the state. |
1023 | */ | 1019 | */ |
1024 | static int atalk_create(struct net *net, struct socket *sock, int protocol) | 1020 | static int atalk_create(struct net *net, struct socket *sock, int protocol, |
1021 | int kern) | ||
1025 | { | 1022 | { |
1026 | struct sock *sk; | 1023 | struct sock *sk; |
1027 | int rc = -ESOCKTNOSUPPORT; | 1024 | int rc = -ESOCKTNOSUPPORT; |
@@ -1054,11 +1051,13 @@ static int atalk_release(struct socket *sock) | |||
1054 | { | 1051 | { |
1055 | struct sock *sk = sock->sk; | 1052 | struct sock *sk = sock->sk; |
1056 | 1053 | ||
1054 | lock_kernel(); | ||
1057 | if (sk) { | 1055 | if (sk) { |
1058 | sock_orphan(sk); | 1056 | sock_orphan(sk); |
1059 | sock->sk = NULL; | 1057 | sock->sk = NULL; |
1060 | atalk_destroy_socket(sk); | 1058 | atalk_destroy_socket(sk); |
1061 | } | 1059 | } |
1060 | unlock_kernel(); | ||
1062 | return 0; | 1061 | return 0; |
1063 | } | 1062 | } |
1064 | 1063 | ||
@@ -1134,6 +1133,7 @@ static int atalk_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |||
1134 | struct sockaddr_at *addr = (struct sockaddr_at *)uaddr; | 1133 | struct sockaddr_at *addr = (struct sockaddr_at *)uaddr; |
1135 | struct sock *sk = sock->sk; | 1134 | struct sock *sk = sock->sk; |
1136 | struct atalk_sock *at = at_sk(sk); | 1135 | struct atalk_sock *at = at_sk(sk); |
1136 | int err; | ||
1137 | 1137 | ||
1138 | if (!sock_flag(sk, SOCK_ZAPPED) || | 1138 | if (!sock_flag(sk, SOCK_ZAPPED) || |
1139 | addr_len != sizeof(struct sockaddr_at)) | 1139 | addr_len != sizeof(struct sockaddr_at)) |
@@ -1142,37 +1142,44 @@ static int atalk_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |||
1142 | if (addr->sat_family != AF_APPLETALK) | 1142 | if (addr->sat_family != AF_APPLETALK) |
1143 | return -EAFNOSUPPORT; | 1143 | return -EAFNOSUPPORT; |
1144 | 1144 | ||
1145 | lock_kernel(); | ||
1145 | if (addr->sat_addr.s_net == htons(ATADDR_ANYNET)) { | 1146 | if (addr->sat_addr.s_net == htons(ATADDR_ANYNET)) { |
1146 | struct atalk_addr *ap = atalk_find_primary(); | 1147 | struct atalk_addr *ap = atalk_find_primary(); |
1147 | 1148 | ||
1149 | err = -EADDRNOTAVAIL; | ||
1148 | if (!ap) | 1150 | if (!ap) |
1149 | return -EADDRNOTAVAIL; | 1151 | goto out; |
1150 | 1152 | ||
1151 | at->src_net = addr->sat_addr.s_net = ap->s_net; | 1153 | at->src_net = addr->sat_addr.s_net = ap->s_net; |
1152 | at->src_node = addr->sat_addr.s_node= ap->s_node; | 1154 | at->src_node = addr->sat_addr.s_node= ap->s_node; |
1153 | } else { | 1155 | } else { |
1156 | err = -EADDRNOTAVAIL; | ||
1154 | if (!atalk_find_interface(addr->sat_addr.s_net, | 1157 | if (!atalk_find_interface(addr->sat_addr.s_net, |
1155 | addr->sat_addr.s_node)) | 1158 | addr->sat_addr.s_node)) |
1156 | return -EADDRNOTAVAIL; | 1159 | goto out; |
1157 | 1160 | ||
1158 | at->src_net = addr->sat_addr.s_net; | 1161 | at->src_net = addr->sat_addr.s_net; |
1159 | at->src_node = addr->sat_addr.s_node; | 1162 | at->src_node = addr->sat_addr.s_node; |
1160 | } | 1163 | } |
1161 | 1164 | ||
1162 | if (addr->sat_port == ATADDR_ANYPORT) { | 1165 | if (addr->sat_port == ATADDR_ANYPORT) { |
1163 | int n = atalk_pick_and_bind_port(sk, addr); | 1166 | err = atalk_pick_and_bind_port(sk, addr); |
1164 | 1167 | ||
1165 | if (n < 0) | 1168 | if (err < 0) |
1166 | return n; | 1169 | goto out; |
1167 | } else { | 1170 | } else { |
1168 | at->src_port = addr->sat_port; | 1171 | at->src_port = addr->sat_port; |
1169 | 1172 | ||
1173 | err = -EADDRINUSE; | ||
1170 | if (atalk_find_or_insert_socket(sk, addr)) | 1174 | if (atalk_find_or_insert_socket(sk, addr)) |
1171 | return -EADDRINUSE; | 1175 | goto out; |
1172 | } | 1176 | } |
1173 | 1177 | ||
1174 | sock_reset_flag(sk, SOCK_ZAPPED); | 1178 | sock_reset_flag(sk, SOCK_ZAPPED); |
1175 | return 0; | 1179 | err = 0; |
1180 | out: | ||
1181 | unlock_kernel(); | ||
1182 | return err; | ||
1176 | } | 1183 | } |
1177 | 1184 | ||
1178 | /* Set the address we talk to */ | 1185 | /* Set the address we talk to */ |
@@ -1182,6 +1189,7 @@ static int atalk_connect(struct socket *sock, struct sockaddr *uaddr, | |||
1182 | struct sock *sk = sock->sk; | 1189 | struct sock *sk = sock->sk; |
1183 | struct atalk_sock *at = at_sk(sk); | 1190 | struct atalk_sock *at = at_sk(sk); |
1184 | struct sockaddr_at *addr; | 1191 | struct sockaddr_at *addr; |
1192 | int err; | ||
1185 | 1193 | ||
1186 | sk->sk_state = TCP_CLOSE; | 1194 | sk->sk_state = TCP_CLOSE; |
1187 | sock->state = SS_UNCONNECTED; | 1195 | sock->state = SS_UNCONNECTED; |
@@ -1206,12 +1214,15 @@ static int atalk_connect(struct socket *sock, struct sockaddr *uaddr, | |||
1206 | #endif | 1214 | #endif |
1207 | } | 1215 | } |
1208 | 1216 | ||
1217 | lock_kernel(); | ||
1218 | err = -EBUSY; | ||
1209 | if (sock_flag(sk, SOCK_ZAPPED)) | 1219 | if (sock_flag(sk, SOCK_ZAPPED)) |
1210 | if (atalk_autobind(sk) < 0) | 1220 | if (atalk_autobind(sk) < 0) |
1211 | return -EBUSY; | 1221 | goto out; |
1212 | 1222 | ||
1223 | err = -ENETUNREACH; | ||
1213 | if (!atrtr_get_dev(&addr->sat_addr)) | 1224 | if (!atrtr_get_dev(&addr->sat_addr)) |
1214 | return -ENETUNREACH; | 1225 | goto out; |
1215 | 1226 | ||
1216 | at->dest_port = addr->sat_port; | 1227 | at->dest_port = addr->sat_port; |
1217 | at->dest_net = addr->sat_addr.s_net; | 1228 | at->dest_net = addr->sat_addr.s_net; |
@@ -1219,7 +1230,10 @@ static int atalk_connect(struct socket *sock, struct sockaddr *uaddr, | |||
1219 | 1230 | ||
1220 | sock->state = SS_CONNECTED; | 1231 | sock->state = SS_CONNECTED; |
1221 | sk->sk_state = TCP_ESTABLISHED; | 1232 | sk->sk_state = TCP_ESTABLISHED; |
1222 | return 0; | 1233 | err = 0; |
1234 | out: | ||
1235 | unlock_kernel(); | ||
1236 | return err; | ||
1223 | } | 1237 | } |
1224 | 1238 | ||
1225 | /* | 1239 | /* |
@@ -1232,17 +1246,21 @@ static int atalk_getname(struct socket *sock, struct sockaddr *uaddr, | |||
1232 | struct sockaddr_at sat; | 1246 | struct sockaddr_at sat; |
1233 | struct sock *sk = sock->sk; | 1247 | struct sock *sk = sock->sk; |
1234 | struct atalk_sock *at = at_sk(sk); | 1248 | struct atalk_sock *at = at_sk(sk); |
1249 | int err; | ||
1235 | 1250 | ||
1251 | lock_kernel(); | ||
1252 | err = -ENOBUFS; | ||
1236 | if (sock_flag(sk, SOCK_ZAPPED)) | 1253 | if (sock_flag(sk, SOCK_ZAPPED)) |
1237 | if (atalk_autobind(sk) < 0) | 1254 | if (atalk_autobind(sk) < 0) |
1238 | return -ENOBUFS; | 1255 | goto out; |
1239 | 1256 | ||
1240 | *uaddr_len = sizeof(struct sockaddr_at); | 1257 | *uaddr_len = sizeof(struct sockaddr_at); |
1241 | memset(&sat.sat_zero, 0, sizeof(sat.sat_zero)); | 1258 | memset(&sat.sat_zero, 0, sizeof(sat.sat_zero)); |
1242 | 1259 | ||
1243 | if (peer) { | 1260 | if (peer) { |
1261 | err = -ENOTCONN; | ||
1244 | if (sk->sk_state != TCP_ESTABLISHED) | 1262 | if (sk->sk_state != TCP_ESTABLISHED) |
1245 | return -ENOTCONN; | 1263 | goto out; |
1246 | 1264 | ||
1247 | sat.sat_addr.s_net = at->dest_net; | 1265 | sat.sat_addr.s_net = at->dest_net; |
1248 | sat.sat_addr.s_node = at->dest_node; | 1266 | sat.sat_addr.s_node = at->dest_node; |
@@ -1253,9 +1271,23 @@ static int atalk_getname(struct socket *sock, struct sockaddr *uaddr, | |||
1253 | sat.sat_port = at->src_port; | 1271 | sat.sat_port = at->src_port; |
1254 | } | 1272 | } |
1255 | 1273 | ||
1274 | err = 0; | ||
1256 | sat.sat_family = AF_APPLETALK; | 1275 | sat.sat_family = AF_APPLETALK; |
1257 | memcpy(uaddr, &sat, sizeof(sat)); | 1276 | memcpy(uaddr, &sat, sizeof(sat)); |
1258 | return 0; | 1277 | |
1278 | out: | ||
1279 | unlock_kernel(); | ||
1280 | return err; | ||
1281 | } | ||
1282 | |||
1283 | static unsigned int atalk_poll(struct file *file, struct socket *sock, | ||
1284 | poll_table *wait) | ||
1285 | { | ||
1286 | int err; | ||
1287 | lock_kernel(); | ||
1288 | err = datagram_poll(file, sock, wait); | ||
1289 | unlock_kernel(); | ||
1290 | return err; | ||
1259 | } | 1291 | } |
1260 | 1292 | ||
1261 | #if defined(CONFIG_IPDDP) || defined(CONFIG_IPDDP_MODULE) | 1293 | #if defined(CONFIG_IPDDP) || defined(CONFIG_IPDDP_MODULE) |
@@ -1563,23 +1595,28 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr | |||
1563 | if (len > DDP_MAXSZ) | 1595 | if (len > DDP_MAXSZ) |
1564 | return -EMSGSIZE; | 1596 | return -EMSGSIZE; |
1565 | 1597 | ||
1598 | lock_kernel(); | ||
1566 | if (usat) { | 1599 | if (usat) { |
1600 | err = -EBUSY; | ||
1567 | if (sock_flag(sk, SOCK_ZAPPED)) | 1601 | if (sock_flag(sk, SOCK_ZAPPED)) |
1568 | if (atalk_autobind(sk) < 0) | 1602 | if (atalk_autobind(sk) < 0) |
1569 | return -EBUSY; | 1603 | goto out; |
1570 | 1604 | ||
1605 | err = -EINVAL; | ||
1571 | if (msg->msg_namelen < sizeof(*usat) || | 1606 | if (msg->msg_namelen < sizeof(*usat) || |
1572 | usat->sat_family != AF_APPLETALK) | 1607 | usat->sat_family != AF_APPLETALK) |
1573 | return -EINVAL; | 1608 | goto out; |
1574 | 1609 | ||
1610 | err = -EPERM; | ||
1575 | /* netatalk didn't implement this check */ | 1611 | /* netatalk didn't implement this check */ |
1576 | if (usat->sat_addr.s_node == ATADDR_BCAST && | 1612 | if (usat->sat_addr.s_node == ATADDR_BCAST && |
1577 | !sock_flag(sk, SOCK_BROADCAST)) { | 1613 | !sock_flag(sk, SOCK_BROADCAST)) { |
1578 | return -EPERM; | 1614 | goto out; |
1579 | } | 1615 | } |
1580 | } else { | 1616 | } else { |
1617 | err = -ENOTCONN; | ||
1581 | if (sk->sk_state != TCP_ESTABLISHED) | 1618 | if (sk->sk_state != TCP_ESTABLISHED) |
1582 | return -ENOTCONN; | 1619 | goto out; |
1583 | usat = &local_satalk; | 1620 | usat = &local_satalk; |
1584 | usat->sat_family = AF_APPLETALK; | 1621 | usat->sat_family = AF_APPLETALK; |
1585 | usat->sat_port = at->dest_port; | 1622 | usat->sat_port = at->dest_port; |
@@ -1603,8 +1640,9 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr | |||
1603 | 1640 | ||
1604 | rt = atrtr_find(&at_hint); | 1641 | rt = atrtr_find(&at_hint); |
1605 | } | 1642 | } |
1643 | err = ENETUNREACH; | ||
1606 | if (!rt) | 1644 | if (!rt) |
1607 | return -ENETUNREACH; | 1645 | goto out; |
1608 | 1646 | ||
1609 | dev = rt->dev; | 1647 | dev = rt->dev; |
1610 | 1648 | ||
@@ -1614,7 +1652,7 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr | |||
1614 | size += dev->hard_header_len; | 1652 | size += dev->hard_header_len; |
1615 | skb = sock_alloc_send_skb(sk, size, (flags & MSG_DONTWAIT), &err); | 1653 | skb = sock_alloc_send_skb(sk, size, (flags & MSG_DONTWAIT), &err); |
1616 | if (!skb) | 1654 | if (!skb) |
1617 | return err; | 1655 | goto out; |
1618 | 1656 | ||
1619 | skb->sk = sk; | 1657 | skb->sk = sk; |
1620 | skb_reserve(skb, ddp_dl->header_length); | 1658 | skb_reserve(skb, ddp_dl->header_length); |
@@ -1637,7 +1675,8 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr | |||
1637 | err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len); | 1675 | err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len); |
1638 | if (err) { | 1676 | if (err) { |
1639 | kfree_skb(skb); | 1677 | kfree_skb(skb); |
1640 | return -EFAULT; | 1678 | err = -EFAULT; |
1679 | goto out; | ||
1641 | } | 1680 | } |
1642 | 1681 | ||
1643 | if (sk->sk_no_check == 1) | 1682 | if (sk->sk_no_check == 1) |
@@ -1676,7 +1715,8 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr | |||
1676 | rt = atrtr_find(&at_lo); | 1715 | rt = atrtr_find(&at_lo); |
1677 | if (!rt) { | 1716 | if (!rt) { |
1678 | kfree_skb(skb); | 1717 | kfree_skb(skb); |
1679 | return -ENETUNREACH; | 1718 | err = -ENETUNREACH; |
1719 | goto out; | ||
1680 | } | 1720 | } |
1681 | dev = rt->dev; | 1721 | dev = rt->dev; |
1682 | skb->dev = dev; | 1722 | skb->dev = dev; |
@@ -1696,7 +1736,9 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr | |||
1696 | } | 1736 | } |
1697 | SOCK_DEBUG(sk, "SK %p: Done write (%Zd).\n", sk, len); | 1737 | SOCK_DEBUG(sk, "SK %p: Done write (%Zd).\n", sk, len); |
1698 | 1738 | ||
1699 | return len; | 1739 | out: |
1740 | unlock_kernel(); | ||
1741 | return err ? : len; | ||
1700 | } | 1742 | } |
1701 | 1743 | ||
1702 | static int atalk_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, | 1744 | static int atalk_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, |
@@ -1708,10 +1750,13 @@ static int atalk_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr | |||
1708 | int copied = 0; | 1750 | int copied = 0; |
1709 | int offset = 0; | 1751 | int offset = 0; |
1710 | int err = 0; | 1752 | int err = 0; |
1711 | struct sk_buff *skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, | 1753 | struct sk_buff *skb; |
1754 | |||
1755 | lock_kernel(); | ||
1756 | skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, | ||
1712 | flags & MSG_DONTWAIT, &err); | 1757 | flags & MSG_DONTWAIT, &err); |
1713 | if (!skb) | 1758 | if (!skb) |
1714 | return err; | 1759 | goto out; |
1715 | 1760 | ||
1716 | /* FIXME: use skb->cb to be able to use shared skbs */ | 1761 | /* FIXME: use skb->cb to be able to use shared skbs */ |
1717 | ddp = ddp_hdr(skb); | 1762 | ddp = ddp_hdr(skb); |
@@ -1739,6 +1784,9 @@ static int atalk_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr | |||
1739 | } | 1784 | } |
1740 | 1785 | ||
1741 | skb_free_datagram(sk, skb); /* Free the datagram. */ | 1786 | skb_free_datagram(sk, skb); /* Free the datagram. */ |
1787 | |||
1788 | out: | ||
1789 | unlock_kernel(); | ||
1742 | return err ? : copied; | 1790 | return err ? : copied; |
1743 | } | 1791 | } |
1744 | 1792 | ||
@@ -1810,24 +1858,26 @@ static int atalk_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) | |||
1810 | static int atalk_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) | 1858 | static int atalk_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) |
1811 | { | 1859 | { |
1812 | /* | 1860 | /* |
1813 | * All Appletalk ioctls except SIOCATALKDIFADDR are standard. And | 1861 | * SIOCATALKDIFADDR is a SIOCPROTOPRIVATE ioctl number, so we |
1814 | * SIOCATALKDIFADDR is handled by upper layer as well, so there is | 1862 | * cannot handle it in common code. The data we access if ifreq |
1815 | * nothing to do. Eventually SIOCATALKDIFADDR should be moved | 1863 | * here is compatible, so we can simply call the native |
1816 | * here so there is no generic SIOCPROTOPRIVATE translation in the | 1864 | * handler. |
1817 | * system. | ||
1818 | */ | 1865 | */ |
1866 | if (cmd == SIOCATALKDIFADDR) | ||
1867 | return atalk_ioctl(sock, cmd, (unsigned long)compat_ptr(arg)); | ||
1868 | |||
1819 | return -ENOIOCTLCMD; | 1869 | return -ENOIOCTLCMD; |
1820 | } | 1870 | } |
1821 | #endif | 1871 | #endif |
1822 | 1872 | ||
1823 | 1873 | ||
1824 | static struct net_proto_family atalk_family_ops = { | 1874 | static const struct net_proto_family atalk_family_ops = { |
1825 | .family = PF_APPLETALK, | 1875 | .family = PF_APPLETALK, |
1826 | .create = atalk_create, | 1876 | .create = atalk_create, |
1827 | .owner = THIS_MODULE, | 1877 | .owner = THIS_MODULE, |
1828 | }; | 1878 | }; |
1829 | 1879 | ||
1830 | static const struct proto_ops SOCKOPS_WRAPPED(atalk_dgram_ops) = { | 1880 | static const struct proto_ops atalk_dgram_ops = { |
1831 | .family = PF_APPLETALK, | 1881 | .family = PF_APPLETALK, |
1832 | .owner = THIS_MODULE, | 1882 | .owner = THIS_MODULE, |
1833 | .release = atalk_release, | 1883 | .release = atalk_release, |
@@ -1836,7 +1886,7 @@ static const struct proto_ops SOCKOPS_WRAPPED(atalk_dgram_ops) = { | |||
1836 | .socketpair = sock_no_socketpair, | 1886 | .socketpair = sock_no_socketpair, |
1837 | .accept = sock_no_accept, | 1887 | .accept = sock_no_accept, |
1838 | .getname = atalk_getname, | 1888 | .getname = atalk_getname, |
1839 | .poll = datagram_poll, | 1889 | .poll = atalk_poll, |
1840 | .ioctl = atalk_ioctl, | 1890 | .ioctl = atalk_ioctl, |
1841 | #ifdef CONFIG_COMPAT | 1891 | #ifdef CONFIG_COMPAT |
1842 | .compat_ioctl = atalk_compat_ioctl, | 1892 | .compat_ioctl = atalk_compat_ioctl, |
@@ -1851,8 +1901,6 @@ static const struct proto_ops SOCKOPS_WRAPPED(atalk_dgram_ops) = { | |||
1851 | .sendpage = sock_no_sendpage, | 1901 | .sendpage = sock_no_sendpage, |
1852 | }; | 1902 | }; |
1853 | 1903 | ||
1854 | SOCKOPS_WRAP(atalk_dgram, PF_APPLETALK); | ||
1855 | |||
1856 | static struct notifier_block ddp_notifier = { | 1904 | static struct notifier_block ddp_notifier = { |
1857 | .notifier_call = ddp_device_event, | 1905 | .notifier_call = ddp_device_event, |
1858 | }; | 1906 | }; |
diff --git a/net/atm/common.c b/net/atm/common.c
index 950bd16d2383..d61e051e0a3f 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -496,7 +496,7 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, | |||
496 | error = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); | 496 | error = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); |
497 | if (error) | 497 | if (error) |
498 | return error; | 498 | return error; |
499 | sock_recv_timestamp(msg, sk, skb); | 499 | sock_recv_ts_and_drops(msg, sk, skb); |
500 | pr_debug("RcvM %d -= %d\n", atomic_read(&sk->sk_rmem_alloc), skb->truesize); | 500 | pr_debug("RcvM %d -= %d\n", atomic_read(&sk->sk_rmem_alloc), skb->truesize); |
501 | atm_return(vcc, skb->truesize); | 501 | atm_return(vcc, skb->truesize); |
502 | skb_free_datagram(sk, skb); | 502 | skb_free_datagram(sk, skb); |
diff --git a/net/atm/ioctl.c b/net/atm/ioctl.c
index 4da8892ced5f..2ea40995dced 100644
--- a/net/atm/ioctl.c
+++ b/net/atm/ioctl.c
@@ -191,8 +191,181 @@ int vcc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) | |||
191 | } | 191 | } |
192 | 192 | ||
193 | #ifdef CONFIG_COMPAT | 193 | #ifdef CONFIG_COMPAT |
194 | int vcc_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) | 194 | /* |
195 | * FIXME: | ||
196 | * The compat_ioctl handling is duplicated, using both these conversion | ||
197 | * routines and the compat argument to the actual handlers. Both | ||
198 | * versions are somewhat incomplete and should be merged, e.g. by | ||
199 | * moving the ioctl number translation into the actual handlers and | ||
200 | * killing the conversion code. | ||
201 | * | ||
202 | * -arnd, November 2009 | ||
203 | */ | ||
204 | #define ATM_GETLINKRATE32 _IOW('a', ATMIOC_ITF+1, struct compat_atmif_sioc) | ||
205 | #define ATM_GETNAMES32 _IOW('a', ATMIOC_ITF+3, struct compat_atm_iobuf) | ||
206 | #define ATM_GETTYPE32 _IOW('a', ATMIOC_ITF+4, struct compat_atmif_sioc) | ||
207 | #define ATM_GETESI32 _IOW('a', ATMIOC_ITF+5, struct compat_atmif_sioc) | ||
208 | #define ATM_GETADDR32 _IOW('a', ATMIOC_ITF+6, struct compat_atmif_sioc) | ||
209 | #define ATM_RSTADDR32 _IOW('a', ATMIOC_ITF+7, struct compat_atmif_sioc) | ||
210 | #define ATM_ADDADDR32 _IOW('a', ATMIOC_ITF+8, struct compat_atmif_sioc) | ||
211 | #define ATM_DELADDR32 _IOW('a', ATMIOC_ITF+9, struct compat_atmif_sioc) | ||
212 | #define ATM_GETCIRANGE32 _IOW('a', ATMIOC_ITF+10, struct compat_atmif_sioc) | ||
213 | #define ATM_SETCIRANGE32 _IOW('a', ATMIOC_ITF+11, struct compat_atmif_sioc) | ||
214 | #define ATM_SETESI32 _IOW('a', ATMIOC_ITF+12, struct compat_atmif_sioc) | ||
215 | #define ATM_SETESIF32 _IOW('a', ATMIOC_ITF+13, struct compat_atmif_sioc) | ||
216 | #define ATM_GETSTAT32 _IOW('a', ATMIOC_SARCOM+0, struct compat_atmif_sioc) | ||
217 | #define ATM_GETSTATZ32 _IOW('a', ATMIOC_SARCOM+1, struct compat_atmif_sioc) | ||
218 | #define ATM_GETLOOP32 _IOW('a', ATMIOC_SARCOM+2, struct compat_atmif_sioc) | ||
219 | #define ATM_SETLOOP32 _IOW('a', ATMIOC_SARCOM+3, struct compat_atmif_sioc) | ||
220 | #define ATM_QUERYLOOP32 _IOW('a', ATMIOC_SARCOM+4, struct compat_atmif_sioc) | ||
221 | |||
222 | static struct { | ||
223 | unsigned int cmd32; | ||
224 | unsigned int cmd; | ||
225 | } atm_ioctl_map[] = { | ||
226 | { ATM_GETLINKRATE32, ATM_GETLINKRATE }, | ||
227 | { ATM_GETNAMES32, ATM_GETNAMES }, | ||
228 | { ATM_GETTYPE32, ATM_GETTYPE }, | ||
229 | { ATM_GETESI32, ATM_GETESI }, | ||
230 | { ATM_GETADDR32, ATM_GETADDR }, | ||
231 | { ATM_RSTADDR32, ATM_RSTADDR }, | ||
232 | { ATM_ADDADDR32, ATM_ADDADDR }, | ||
233 | { ATM_DELADDR32, ATM_DELADDR }, | ||
234 | { ATM_GETCIRANGE32, ATM_GETCIRANGE }, | ||
235 | { ATM_SETCIRANGE32, ATM_SETCIRANGE }, | ||
236 | { ATM_SETESI32, ATM_SETESI }, | ||
237 | { ATM_SETESIF32, ATM_SETESIF }, | ||
238 | { ATM_GETSTAT32, ATM_GETSTAT }, | ||
239 | { ATM_GETSTATZ32, ATM_GETSTATZ }, | ||
240 | { ATM_GETLOOP32, ATM_GETLOOP }, | ||
241 | { ATM_SETLOOP32, ATM_SETLOOP }, | ||
242 | { ATM_QUERYLOOP32, ATM_QUERYLOOP }, | ||
243 | }; | ||
244 | |||
245 | #define NR_ATM_IOCTL ARRAY_SIZE(atm_ioctl_map) | ||
246 | |||
247 | static int do_atm_iobuf(struct socket *sock, unsigned int cmd, | ||
248 | unsigned long arg) | ||
249 | { | ||
250 | struct atm_iobuf __user *iobuf; | ||
251 | struct compat_atm_iobuf __user *iobuf32; | ||
252 | u32 data; | ||
253 | void __user *datap; | ||
254 | int len, err; | ||
255 | |||
256 | iobuf = compat_alloc_user_space(sizeof(*iobuf)); | ||
257 | iobuf32 = compat_ptr(arg); | ||
258 | |||
259 | if (get_user(len, &iobuf32->length) || | ||
260 | get_user(data, &iobuf32->buffer)) | ||
261 | return -EFAULT; | ||
262 | datap = compat_ptr(data); | ||
263 | if (put_user(len, &iobuf->length) || | ||
264 | put_user(datap, &iobuf->buffer)) | ||
265 | return -EFAULT; | ||
266 | |||
267 | err = do_vcc_ioctl(sock, cmd, (unsigned long) iobuf, 0); | ||
268 | |||
269 | if (!err) { | ||
270 | if (copy_in_user(&iobuf32->length, &iobuf->length, | ||
271 | sizeof(int))) | ||
272 | err = -EFAULT; | ||
273 | } | ||
274 | |||
275 | return err; | ||
276 | } | ||
277 | |||
278 | static int do_atmif_sioc(struct socket *sock, unsigned int cmd, | ||
279 | unsigned long arg) | ||
280 | { | ||
281 | struct atmif_sioc __user *sioc; | ||
282 | struct compat_atmif_sioc __user *sioc32; | ||
283 | u32 data; | ||
284 | void __user *datap; | ||
285 | int err; | ||
286 | |||
287 | sioc = compat_alloc_user_space(sizeof(*sioc)); | ||
288 | sioc32 = compat_ptr(arg); | ||
289 | |||
290 | if (copy_in_user(&sioc->number, &sioc32->number, 2 * sizeof(int)) | ||
291 | || get_user(data, &sioc32->arg)) | ||
292 | return -EFAULT; | ||
293 | datap = compat_ptr(data); | ||
294 | if (put_user(datap, &sioc->arg)) | ||
295 | return -EFAULT; | ||
296 | |||
297 | err = do_vcc_ioctl(sock, cmd, (unsigned long) sioc, 0); | ||
298 | |||
299 | if (!err) { | ||
300 | if (copy_in_user(&sioc32->length, &sioc->length, | ||
301 | sizeof(int))) | ||
302 | err = -EFAULT; | ||
303 | } | ||
304 | return err; | ||
305 | } | ||
306 | |||
307 | static int do_atm_ioctl(struct socket *sock, unsigned int cmd32, | ||
308 | unsigned long arg) | ||
309 | { | ||
310 | int i; | ||
311 | unsigned int cmd = 0; | ||
312 | |||
313 | switch (cmd32) { | ||
314 | case SONET_GETSTAT: | ||
315 | case SONET_GETSTATZ: | ||
316 | case SONET_GETDIAG: | ||
317 | case SONET_SETDIAG: | ||
318 | case SONET_CLRDIAG: | ||
319 | case SONET_SETFRAMING: | ||
320 | case SONET_GETFRAMING: | ||
321 | case SONET_GETFRSENSE: | ||
322 | return do_atmif_sioc(sock, cmd32, arg); | ||
323 | } | ||
324 | |||
325 | for (i = 0; i < NR_ATM_IOCTL; i++) { | ||
326 | if (cmd32 == atm_ioctl_map[i].cmd32) { | ||
327 | cmd = atm_ioctl_map[i].cmd; | ||
328 | break; | ||
329 | } | ||
330 | } | ||
331 | if (i == NR_ATM_IOCTL) | ||
332 | return -EINVAL; | ||
333 | |||
334 | switch (cmd) { | ||
335 | case ATM_GETNAMES: | ||
336 | return do_atm_iobuf(sock, cmd, arg); | ||
337 | |||
338 | case ATM_GETLINKRATE: | ||
339 | case ATM_GETTYPE: | ||
340 | case ATM_GETESI: | ||
341 | case ATM_GETADDR: | ||
342 | case ATM_RSTADDR: | ||
343 | case ATM_ADDADDR: | ||
344 | case ATM_DELADDR: | ||
345 | case ATM_GETCIRANGE: | ||
346 | case ATM_SETCIRANGE: | ||
347 | case ATM_SETESI: | ||
348 | case ATM_SETESIF: | ||
349 | case ATM_GETSTAT: | ||
350 | case ATM_GETSTATZ: | ||
351 | case ATM_GETLOOP: | ||
352 | case ATM_SETLOOP: | ||
353 | case ATM_QUERYLOOP: | ||
354 | return do_atmif_sioc(sock, cmd, arg); | ||
355 | } | ||
356 | |||
357 | return -EINVAL; | ||
358 | } | ||
359 | |||
360 | int vcc_compat_ioctl(struct socket *sock, unsigned int cmd, | ||
361 | unsigned long arg) | ||
195 | { | 362 | { |
196 | return do_vcc_ioctl(sock, cmd, arg, 1); | 363 | int ret; |
364 | |||
365 | ret = do_vcc_ioctl(sock, cmd, arg, 1); | ||
366 | if (ret != -ENOIOCTLCMD) | ||
367 | return ret; | ||
368 | |||
369 | return do_atm_ioctl(sock, cmd, arg); | ||
197 | } | 370 | } |
198 | #endif | 371 | #endif |
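do_atm_iobuf() and do_atmif_sioc() above follow the usual compat-ioctl recipe: rebuild a native-layout struct in space obtained from compat_alloc_user_space(), widen any 32-bit pointer members with compat_ptr(), call the native handler, then copy result fields back to the 32-bit layout. Stripped to a skeleton for a made-up request struct; my_req, compat_my_req and my_native_ioctl() are hypothetical:

        #include <linux/compat.h>
        #include <linux/uaccess.h>

        struct my_req {                         /* native layout: 64-bit pointer */
                int             length;
                void __user     *buffer;
        };

        struct compat_my_req {                  /* layout a 32-bit task passes in */
                s32             length;
                compat_uptr_t   buffer;
        };

        static int my_compat_ioctl(struct socket *sock, unsigned int cmd,
                                   unsigned long arg)
        {
                struct compat_my_req __user *req32 = compat_ptr(arg);
                struct my_req __user *req = compat_alloc_user_space(sizeof(*req));
                compat_uptr_t ptr;
                int len, err;

                /* read the 32-bit layout and rewrite it in native form */
                if (get_user(len, &req32->length) || get_user(ptr, &req32->buffer))
                        return -EFAULT;
                if (put_user(len, &req->length) ||
                    put_user(compat_ptr(ptr), &req->buffer))
                        return -EFAULT;

                err = my_native_ioctl(sock, cmd, (unsigned long)req);   /* hypothetical */
                if (!err &&
                    copy_in_user(&req32->length, &req->length, sizeof(int)))
                        err = -EFAULT;
                return err;
        }
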
diff --git a/net/atm/pvc.c b/net/atm/pvc.c
index d4c024504f99..8d74e62b0d79 100644
--- a/net/atm/pvc.c
+++ b/net/atm/pvc.c
@@ -127,7 +127,8 @@ static const struct proto_ops pvc_proto_ops = { | |||
127 | }; | 127 | }; |
128 | 128 | ||
129 | 129 | ||
130 | static int pvc_create(struct net *net, struct socket *sock,int protocol) | 130 | static int pvc_create(struct net *net, struct socket *sock, int protocol, |
131 | int kern) | ||
131 | { | 132 | { |
132 | if (net != &init_net) | 133 | if (net != &init_net) |
133 | return -EAFNOSUPPORT; | 134 | return -EAFNOSUPPORT; |
@@ -137,7 +138,7 @@ static int pvc_create(struct net *net, struct socket *sock,int protocol) | |||
137 | } | 138 | } |
138 | 139 | ||
139 | 140 | ||
140 | static struct net_proto_family pvc_family_ops = { | 141 | static const struct net_proto_family pvc_family_ops = { |
141 | .family = PF_ATMPVC, | 142 | .family = PF_ATMPVC, |
142 | .create = pvc_create, | 143 | .create = pvc_create, |
143 | .owner = THIS_MODULE, | 144 | .owner = THIS_MODULE, |
diff --git a/net/atm/svc.c b/net/atm/svc.c
index f90d143c4b25..c7395070ee78 100644
--- a/net/atm/svc.c
+++ b/net/atm/svc.c
@@ -25,7 +25,7 @@ | |||
25 | #include "signaling.h" | 25 | #include "signaling.h" |
26 | #include "addr.h" | 26 | #include "addr.h" |
27 | 27 | ||
28 | static int svc_create(struct net *net, struct socket *sock,int protocol); | 28 | static int svc_create(struct net *net, struct socket *sock, int protocol, int kern); |
29 | 29 | ||
30 | /* | 30 | /* |
31 | * Note: since all this is still nicely synchronized with the signaling demon, | 31 | * Note: since all this is still nicely synchronized with the signaling demon, |
@@ -330,7 +330,7 @@ static int svc_accept(struct socket *sock,struct socket *newsock,int flags) | |||
330 | 330 | ||
331 | lock_sock(sk); | 331 | lock_sock(sk); |
332 | 332 | ||
333 | error = svc_create(sock_net(sk), newsock,0); | 333 | error = svc_create(sock_net(sk), newsock, 0, 0); |
334 | if (error) | 334 | if (error) |
335 | goto out; | 335 | goto out; |
336 | 336 | ||
@@ -650,7 +650,8 @@ static const struct proto_ops svc_proto_ops = { | |||
650 | }; | 650 | }; |
651 | 651 | ||
652 | 652 | ||
653 | static int svc_create(struct net *net, struct socket *sock,int protocol) | 653 | static int svc_create(struct net *net, struct socket *sock, int protocol, |
654 | int kern) | ||
654 | { | 655 | { |
655 | int error; | 656 | int error; |
656 | 657 | ||
@@ -666,7 +667,7 @@ static int svc_create(struct net *net, struct socket *sock,int protocol) | |||
666 | } | 667 | } |
667 | 668 | ||
668 | 669 | ||
669 | static struct net_proto_family svc_family_ops = { | 670 | static const struct net_proto_family svc_family_ops = { |
670 | .family = PF_ATMSVC, | 671 | .family = PF_ATMSVC, |
671 | .create = svc_create, | 672 | .create = svc_create, |
672 | .owner = THIS_MODULE, | 673 | .owner = THIS_MODULE, |
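The extra int kern argument that shows up in pvc_create() and svc_create() here, and in the ax25 and Bluetooth hunks below, comes from the socket core: every net_proto_family .create handler is now told whether the socket is being created from inside the kernel (via sock_create_kern()) or by a user syscall, so callers such as security modules can treat the two cases differently. A minimal handler with the new prototype; PF_MYPROTO and my_dgram_ops are placeholders:

        static int my_create(struct net *net, struct socket *sock, int protocol,
                             int kern)
        {
                /*
                 * kern != 0 means the socket comes from sock_create_kern(),
                 * i.e. from the kernel itself rather than a socket(2) call.
                 */
                if (protocol)
                        return -EPROTONOSUPPORT;

                sock->ops = &my_dgram_ops;      /* placeholder proto_ops */
                return 0;
        }

        static const struct net_proto_family my_family_ops = {
                .family = PF_MYPROTO,           /* placeholder family number */
                .create = my_create,
                .owner  = THIS_MODULE,
        };
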
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index f45460730371..d6ddfa4c4471 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -369,6 +369,9 @@ static int ax25_ctl_ioctl(const unsigned int cmd, void __user *arg) | |||
369 | if (ax25_ctl.digi_count > AX25_MAX_DIGIS) | 369 | if (ax25_ctl.digi_count > AX25_MAX_DIGIS) |
370 | return -EINVAL; | 370 | return -EINVAL; |
371 | 371 | ||
372 | if (ax25_ctl.arg > ULONG_MAX / HZ && ax25_ctl.cmd != AX25_KILL) | ||
373 | return -EINVAL; | ||
374 | |||
372 | digi.ndigi = ax25_ctl.digi_count; | 375 | digi.ndigi = ax25_ctl.digi_count; |
373 | for (k = 0; k < digi.ndigi; k++) | 376 | for (k = 0; k < digi.ndigi; k++) |
374 | digi.calls[k] = ax25_ctl.digi_addr[k]; | 377 | digi.calls[k] = ax25_ctl.digi_addr[k]; |
@@ -418,14 +421,10 @@ static int ax25_ctl_ioctl(const unsigned int cmd, void __user *arg) | |||
418 | break; | 421 | break; |
419 | 422 | ||
420 | case AX25_T3: | 423 | case AX25_T3: |
421 | if (ax25_ctl.arg < 0) | ||
422 | goto einval_put; | ||
423 | ax25->t3 = ax25_ctl.arg * HZ; | 424 | ax25->t3 = ax25_ctl.arg * HZ; |
424 | break; | 425 | break; |
425 | 426 | ||
426 | case AX25_IDLE: | 427 | case AX25_IDLE: |
427 | if (ax25_ctl.arg < 0) | ||
428 | goto einval_put; | ||
429 | ax25->idle = ax25_ctl.arg * 60 * HZ; | 428 | ax25->idle = ax25_ctl.arg * 60 * HZ; |
430 | break; | 429 | break; |
431 | 430 | ||
@@ -800,7 +799,8 @@ static struct proto ax25_proto = { | |||
800 | .obj_size = sizeof(struct sock), | 799 | .obj_size = sizeof(struct sock), |
801 | }; | 800 | }; |
802 | 801 | ||
803 | static int ax25_create(struct net *net, struct socket *sock, int protocol) | 802 | static int ax25_create(struct net *net, struct socket *sock, int protocol, |
803 | int kern) | ||
804 | { | 804 | { |
805 | struct sock *sk; | 805 | struct sock *sk; |
806 | ax25_cb *ax25; | 806 | ax25_cb *ax25; |
@@ -1961,7 +1961,7 @@ static const struct file_operations ax25_info_fops = { | |||
1961 | 1961 | ||
1962 | #endif | 1962 | #endif |
1963 | 1963 | ||
1964 | static struct net_proto_family ax25_family_ops = { | 1964 | static const struct net_proto_family ax25_family_ops = { |
1965 | .family = PF_AX25, | 1965 | .family = PF_AX25, |
1966 | .create = ax25_create, | 1966 | .create = ax25_create, |
1967 | .owner = THIS_MODULE, | 1967 | .owner = THIS_MODULE, |
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 8cfb5a849841..087cc51f5927 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -45,7 +45,7 @@ | |||
45 | 45 | ||
46 | /* Bluetooth sockets */ | 46 | /* Bluetooth sockets */ |
47 | #define BT_MAX_PROTO 8 | 47 | #define BT_MAX_PROTO 8 |
48 | static struct net_proto_family *bt_proto[BT_MAX_PROTO]; | 48 | static const struct net_proto_family *bt_proto[BT_MAX_PROTO]; |
49 | static DEFINE_RWLOCK(bt_proto_lock); | 49 | static DEFINE_RWLOCK(bt_proto_lock); |
50 | 50 | ||
51 | static struct lock_class_key bt_lock_key[BT_MAX_PROTO]; | 51 | static struct lock_class_key bt_lock_key[BT_MAX_PROTO]; |
@@ -86,7 +86,7 @@ static inline void bt_sock_reclassify_lock(struct socket *sock, int proto) | |||
86 | bt_key_strings[proto], &bt_lock_key[proto]); | 86 | bt_key_strings[proto], &bt_lock_key[proto]); |
87 | } | 87 | } |
88 | 88 | ||
89 | int bt_sock_register(int proto, struct net_proto_family *ops) | 89 | int bt_sock_register(int proto, const struct net_proto_family *ops) |
90 | { | 90 | { |
91 | int err = 0; | 91 | int err = 0; |
92 | 92 | ||
@@ -126,7 +126,8 @@ int bt_sock_unregister(int proto) | |||
126 | } | 126 | } |
127 | EXPORT_SYMBOL(bt_sock_unregister); | 127 | EXPORT_SYMBOL(bt_sock_unregister); |
128 | 128 | ||
129 | static int bt_sock_create(struct net *net, struct socket *sock, int proto) | 129 | static int bt_sock_create(struct net *net, struct socket *sock, int proto, |
130 | int kern) | ||
130 | { | 131 | { |
131 | int err; | 132 | int err; |
132 | 133 | ||
@@ -144,7 +145,7 @@ static int bt_sock_create(struct net *net, struct socket *sock, int proto) | |||
144 | read_lock(&bt_proto_lock); | 145 | read_lock(&bt_proto_lock); |
145 | 146 | ||
146 | if (bt_proto[proto] && try_module_get(bt_proto[proto]->owner)) { | 147 | if (bt_proto[proto] && try_module_get(bt_proto[proto]->owner)) { |
147 | err = bt_proto[proto]->create(net, sock, proto); | 148 | err = bt_proto[proto]->create(net, sock, proto, kern); |
148 | bt_sock_reclassify_lock(sock, proto); | 149 | bt_sock_reclassify_lock(sock, proto); |
149 | module_put(bt_proto[proto]->owner); | 150 | module_put(bt_proto[proto]->owner); |
150 | } | 151 | } |
@@ -257,7 +258,7 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
257 | skb_reset_transport_header(skb); | 258 | skb_reset_transport_header(skb); |
258 | err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); | 259 | err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); |
259 | if (err == 0) | 260 | if (err == 0) |
260 | sock_recv_timestamp(msg, sk, skb); | 261 | sock_recv_ts_and_drops(msg, sk, skb); |
261 | 262 | ||
262 | skb_free_datagram(sk, skb); | 263 | skb_free_datagram(sk, skb); |
263 | 264 | ||
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c index e857628b0b27..2ff6ac7b2ed4 100644 --- a/net/bluetooth/bnep/sock.c +++ b/net/bluetooth/bnep/sock.c | |||
@@ -195,7 +195,8 @@ static struct proto bnep_proto = { | |||
195 | .obj_size = sizeof(struct bt_sock) | 195 | .obj_size = sizeof(struct bt_sock) |
196 | }; | 196 | }; |
197 | 197 | ||
198 | static int bnep_sock_create(struct net *net, struct socket *sock, int protocol) | 198 | static int bnep_sock_create(struct net *net, struct socket *sock, int protocol, |
199 | int kern) | ||
199 | { | 200 | { |
200 | struct sock *sk; | 201 | struct sock *sk; |
201 | 202 | ||
@@ -222,7 +223,7 @@ static int bnep_sock_create(struct net *net, struct socket *sock, int protocol) | |||
222 | return 0; | 223 | return 0; |
223 | } | 224 | } |
224 | 225 | ||
225 | static struct net_proto_family bnep_sock_family_ops = { | 226 | static const struct net_proto_family bnep_sock_family_ops = { |
226 | .family = PF_BLUETOOTH, | 227 | .family = PF_BLUETOOTH, |
227 | .owner = THIS_MODULE, | 228 | .owner = THIS_MODULE, |
228 | .create = bnep_sock_create | 229 | .create = bnep_sock_create |
diff --git a/net/bluetooth/cmtp/sock.c b/net/bluetooth/cmtp/sock.c index 16b0fad74f6e..978cc3a718ad 100644 --- a/net/bluetooth/cmtp/sock.c +++ b/net/bluetooth/cmtp/sock.c | |||
@@ -190,7 +190,8 @@ static struct proto cmtp_proto = { | |||
190 | .obj_size = sizeof(struct bt_sock) | 190 | .obj_size = sizeof(struct bt_sock) |
191 | }; | 191 | }; |
192 | 192 | ||
193 | static int cmtp_sock_create(struct net *net, struct socket *sock, int protocol) | 193 | static int cmtp_sock_create(struct net *net, struct socket *sock, int protocol, |
194 | int kern) | ||
194 | { | 195 | { |
195 | struct sock *sk; | 196 | struct sock *sk; |
196 | 197 | ||
@@ -217,7 +218,7 @@ static int cmtp_sock_create(struct net *net, struct socket *sock, int protocol) | |||
217 | return 0; | 218 | return 0; |
218 | } | 219 | } |
219 | 220 | ||
220 | static struct net_proto_family cmtp_sock_family_ops = { | 221 | static const struct net_proto_family cmtp_sock_family_ops = { |
221 | .family = PF_BLUETOOTH, | 222 | .family = PF_BLUETOOTH, |
222 | .owner = THIS_MODULE, | 223 | .owner = THIS_MODULE, |
223 | .create = cmtp_sock_create | 224 | .create = cmtp_sock_create |
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c index 75302a986067..1ca5c7ca9bd4 100644 --- a/net/bluetooth/hci_sock.c +++ b/net/bluetooth/hci_sock.c | |||
@@ -621,7 +621,8 @@ static struct proto hci_sk_proto = { | |||
621 | .obj_size = sizeof(struct hci_pinfo) | 621 | .obj_size = sizeof(struct hci_pinfo) |
622 | }; | 622 | }; |
623 | 623 | ||
624 | static int hci_sock_create(struct net *net, struct socket *sock, int protocol) | 624 | static int hci_sock_create(struct net *net, struct socket *sock, int protocol, |
625 | int kern) | ||
625 | { | 626 | { |
626 | struct sock *sk; | 627 | struct sock *sk; |
627 | 628 | ||
@@ -687,7 +688,7 @@ static int hci_sock_dev_event(struct notifier_block *this, unsigned long event, | |||
687 | return NOTIFY_DONE; | 688 | return NOTIFY_DONE; |
688 | } | 689 | } |
689 | 690 | ||
690 | static struct net_proto_family hci_sock_family_ops = { | 691 | static const struct net_proto_family hci_sock_family_ops = { |
691 | .family = PF_BLUETOOTH, | 692 | .family = PF_BLUETOOTH, |
692 | .owner = THIS_MODULE, | 693 | .owner = THIS_MODULE, |
693 | .create = hci_sock_create, | 694 | .create = hci_sock_create, |
diff --git a/net/bluetooth/hidp/sock.c b/net/bluetooth/hidp/sock.c index 37c9d7d2e688..9cfef68b9fec 100644 --- a/net/bluetooth/hidp/sock.c +++ b/net/bluetooth/hidp/sock.c | |||
@@ -241,7 +241,8 @@ static struct proto hidp_proto = { | |||
241 | .obj_size = sizeof(struct bt_sock) | 241 | .obj_size = sizeof(struct bt_sock) |
242 | }; | 242 | }; |
243 | 243 | ||
244 | static int hidp_sock_create(struct net *net, struct socket *sock, int protocol) | 244 | static int hidp_sock_create(struct net *net, struct socket *sock, int protocol, |
245 | int kern) | ||
245 | { | 246 | { |
246 | struct sock *sk; | 247 | struct sock *sk; |
247 | 248 | ||
@@ -268,7 +269,7 @@ static int hidp_sock_create(struct net *net, struct socket *sock, int protocol) | |||
268 | return 0; | 269 | return 0; |
269 | } | 270 | } |
270 | 271 | ||
271 | static struct net_proto_family hidp_sock_family_ops = { | 272 | static const struct net_proto_family hidp_sock_family_ops = { |
272 | .family = PF_BLUETOOTH, | 273 | .family = PF_BLUETOOTH, |
273 | .owner = THIS_MODULE, | 274 | .owner = THIS_MODULE, |
274 | .create = hidp_sock_create | 275 | .create = hidp_sock_create |
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c index 947f8bbb4bb3..80d929842f04 100644 --- a/net/bluetooth/l2cap.c +++ b/net/bluetooth/l2cap.c | |||
@@ -819,7 +819,8 @@ static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int p | |||
819 | return sk; | 819 | return sk; |
820 | } | 820 | } |
821 | 821 | ||
822 | static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol) | 822 | static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol, |
823 | int kern) | ||
823 | { | 824 | { |
824 | struct sock *sk; | 825 | struct sock *sk; |
825 | 826 | ||
@@ -831,7 +832,7 @@ static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol) | |||
831 | sock->type != SOCK_DGRAM && sock->type != SOCK_RAW) | 832 | sock->type != SOCK_DGRAM && sock->type != SOCK_RAW) |
832 | return -ESOCKTNOSUPPORT; | 833 | return -ESOCKTNOSUPPORT; |
833 | 834 | ||
834 | if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW)) | 835 | if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW)) |
835 | return -EPERM; | 836 | return -EPERM; |
836 | 837 | ||
837 | sock->ops = &l2cap_sock_ops; | 838 | sock->ops = &l2cap_sock_ops; |
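The extra `kern` argument lets in-kernel callers create raw L2CAP sockets without CAP_NET_RAW, while userspace creation still hits the capability check. A hedged caller-side sketch, assuming the sock_create_kern() signature of this kernel generation (family, type, protocol, result):

    #include <linux/net.h>
    #include <net/bluetooth/bluetooth.h>

    /* Hypothetical in-kernel user: sock_create_kern() reaches
     * l2cap_sock_create() with kern=1, so the
     * !kern && !capable(CAP_NET_RAW) test above is skipped. */
    static int open_raw_l2cap(struct socket **res)
    {
            return sock_create_kern(PF_BLUETOOTH, SOCK_RAW, BTPROTO_L2CAP, res);
    }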
@@ -3924,7 +3925,7 @@ static const struct proto_ops l2cap_sock_ops = { | |||
3924 | .getsockopt = l2cap_sock_getsockopt | 3925 | .getsockopt = l2cap_sock_getsockopt |
3925 | }; | 3926 | }; |
3926 | 3927 | ||
3927 | static struct net_proto_family l2cap_sock_family_ops = { | 3928 | static const struct net_proto_family l2cap_sock_family_ops = { |
3928 | .family = PF_BLUETOOTH, | 3929 | .family = PF_BLUETOOTH, |
3929 | .owner = THIS_MODULE, | 3930 | .owner = THIS_MODULE, |
3930 | .create = l2cap_sock_create, | 3931 | .create = l2cap_sock_create, |
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index 8a20aaf1f231..4b5968dda673 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c | |||
@@ -323,7 +323,8 @@ static struct sock *rfcomm_sock_alloc(struct net *net, struct socket *sock, int | |||
323 | return sk; | 323 | return sk; |
324 | } | 324 | } |
325 | 325 | ||
326 | static int rfcomm_sock_create(struct net *net, struct socket *sock, int protocol) | 326 | static int rfcomm_sock_create(struct net *net, struct socket *sock, |
327 | int protocol, int kern) | ||
327 | { | 328 | { |
328 | struct sock *sk; | 329 | struct sock *sk; |
329 | 330 | ||
@@ -703,7 +704,7 @@ static int rfcomm_sock_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
703 | copied += chunk; | 704 | copied += chunk; |
704 | size -= chunk; | 705 | size -= chunk; |
705 | 706 | ||
706 | sock_recv_timestamp(msg, sk, skb); | 707 | sock_recv_ts_and_drops(msg, sk, skb); |
707 | 708 | ||
708 | if (!(flags & MSG_PEEK)) { | 709 | if (!(flags & MSG_PEEK)) { |
709 | atomic_sub(chunk, &sk->sk_rmem_alloc); | 710 | atomic_sub(chunk, &sk->sk_rmem_alloc); |
@@ -1101,7 +1102,7 @@ static const struct proto_ops rfcomm_sock_ops = { | |||
1101 | .mmap = sock_no_mmap | 1102 | .mmap = sock_no_mmap |
1102 | }; | 1103 | }; |
1103 | 1104 | ||
1104 | static struct net_proto_family rfcomm_sock_family_ops = { | 1105 | static const struct net_proto_family rfcomm_sock_family_ops = { |
1105 | .family = PF_BLUETOOTH, | 1106 | .family = PF_BLUETOOTH, |
1106 | .owner = THIS_MODULE, | 1107 | .owner = THIS_MODULE, |
1107 | .create = rfcomm_sock_create | 1108 | .create = rfcomm_sock_create |
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index 77f4153bdb5e..dd8f6ec57dce 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c | |||
@@ -430,7 +430,8 @@ static struct sock *sco_sock_alloc(struct net *net, struct socket *sock, int pro | |||
430 | return sk; | 430 | return sk; |
431 | } | 431 | } |
432 | 432 | ||
433 | static int sco_sock_create(struct net *net, struct socket *sock, int protocol) | 433 | static int sco_sock_create(struct net *net, struct socket *sock, int protocol, |
434 | int kern) | ||
434 | { | 435 | { |
435 | struct sock *sk; | 436 | struct sock *sk; |
436 | 437 | ||
@@ -993,7 +994,7 @@ static const struct proto_ops sco_sock_ops = { | |||
993 | .getsockopt = sco_sock_getsockopt | 994 | .getsockopt = sco_sock_getsockopt |
994 | }; | 995 | }; |
995 | 996 | ||
996 | static struct net_proto_family sco_sock_family_ops = { | 997 | static const struct net_proto_family sco_sock_family_ops = { |
997 | .family = PF_BLUETOOTH, | 998 | .family = PF_BLUETOOTH, |
998 | .owner = THIS_MODULE, | 999 | .owner = THIS_MODULE, |
999 | .create = sco_sock_create, | 1000 | .create = sco_sock_create, |
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index 07a07770c8b6..1a99c4e04e85 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c | |||
@@ -157,6 +157,7 @@ static const struct ethtool_ops br_ethtool_ops = { | |||
157 | .get_tso = ethtool_op_get_tso, | 157 | .get_tso = ethtool_op_get_tso, |
158 | .set_tso = br_set_tso, | 158 | .set_tso = br_set_tso, |
159 | .get_ufo = ethtool_op_get_ufo, | 159 | .get_ufo = ethtool_op_get_ufo, |
160 | .set_ufo = ethtool_op_set_ufo, | ||
160 | .get_flags = ethtool_op_get_flags, | 161 | .get_flags = ethtool_op_get_flags, |
161 | }; | 162 | }; |
162 | 163 | ||
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index 4a9f52732655..a6f74b2b9571 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c | |||
@@ -154,7 +154,7 @@ static void del_nbp(struct net_bridge_port *p) | |||
154 | } | 154 | } |
155 | 155 | ||
156 | /* called with RTNL */ | 156 | /* called with RTNL */ |
157 | static void del_br(struct net_bridge *br) | 157 | static void del_br(struct net_bridge *br, struct list_head *head) |
158 | { | 158 | { |
159 | struct net_bridge_port *p, *n; | 159 | struct net_bridge_port *p, *n; |
160 | 160 | ||
@@ -165,7 +165,7 @@ static void del_br(struct net_bridge *br) | |||
165 | del_timer_sync(&br->gc_timer); | 165 | del_timer_sync(&br->gc_timer); |
166 | 166 | ||
167 | br_sysfs_delbr(br->dev); | 167 | br_sysfs_delbr(br->dev); |
168 | unregister_netdevice(br->dev); | 168 | unregister_netdevice_queue(br->dev, head); |
169 | } | 169 | } |
170 | 170 | ||
171 | static struct net_device *new_bridge_dev(struct net *net, const char *name) | 171 | static struct net_device *new_bridge_dev(struct net *net, const char *name) |
@@ -323,7 +323,7 @@ int br_del_bridge(struct net *net, const char *name) | |||
323 | } | 323 | } |
324 | 324 | ||
325 | else | 325 | else |
326 | del_br(netdev_priv(dev)); | 326 | del_br(netdev_priv(dev), NULL); |
327 | 327 | ||
328 | rtnl_unlock(); | 328 | rtnl_unlock(); |
329 | return ret; | 329 | return ret; |
@@ -466,15 +466,14 @@ int br_del_if(struct net_bridge *br, struct net_device *dev) | |||
466 | void br_net_exit(struct net *net) | 466 | void br_net_exit(struct net *net) |
467 | { | 467 | { |
468 | struct net_device *dev; | 468 | struct net_device *dev; |
469 | LIST_HEAD(list); | ||
469 | 470 | ||
470 | rtnl_lock(); | 471 | rtnl_lock(); |
471 | restart: | 472 | for_each_netdev(net, dev) |
472 | for_each_netdev(net, dev) { | 473 | if (dev->priv_flags & IFF_EBRIDGE) |
473 | if (dev->priv_flags & IFF_EBRIDGE) { | 474 | del_br(netdev_priv(dev), &list); |
474 | del_br(netdev_priv(dev)); | 475 | |
475 | goto restart; | 476 | unregister_netdevice_many(&list); |
476 | } | ||
477 | } | ||
478 | rtnl_unlock(); | 477 | rtnl_unlock(); |
479 | 478 | ||
480 | } | 479 | } |
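br_net_exit() now queues every bridge on a local list and unregisters them in one pass, so the costly synchronization in the unregister path is paid per batch rather than per device, and the old restart loop goes away. A minimal sketch of the same pattern for a hypothetical driver tearing down several netdevs; is_my_netdev() is an assumed predicate:

    LIST_HEAD(unreg_list);
    struct net_device *dev;

    rtnl_lock();
    for_each_netdev(net, dev)
            if (is_my_netdev(dev))
                    unregister_netdevice_queue(dev, &unreg_list);
    /* one batched teardown instead of one synchronize_net() per device */
    unregister_netdevice_many(&unreg_list);
    rtnl_unlock();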
diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c index 6a6433daaf27..2af6e4a90262 100644 --- a/net/bridge/br_ioctl.c +++ b/net/bridge/br_ioctl.c | |||
@@ -81,6 +81,7 @@ static int get_fdb_entries(struct net_bridge *br, void __user *userbuf, | |||
81 | return num; | 81 | return num; |
82 | } | 82 | } |
83 | 83 | ||
84 | /* called with RTNL */ | ||
84 | static int add_del_if(struct net_bridge *br, int ifindex, int isadd) | 85 | static int add_del_if(struct net_bridge *br, int ifindex, int isadd) |
85 | { | 86 | { |
86 | struct net_device *dev; | 87 | struct net_device *dev; |
@@ -89,7 +90,7 @@ static int add_del_if(struct net_bridge *br, int ifindex, int isadd) | |||
89 | if (!capable(CAP_NET_ADMIN)) | 90 | if (!capable(CAP_NET_ADMIN)) |
90 | return -EPERM; | 91 | return -EPERM; |
91 | 92 | ||
92 | dev = dev_get_by_index(dev_net(br->dev), ifindex); | 93 | dev = __dev_get_by_index(dev_net(br->dev), ifindex); |
93 | if (dev == NULL) | 94 | if (dev == NULL) |
94 | return -EINVAL; | 95 | return -EINVAL; |
95 | 96 | ||
@@ -98,7 +99,6 @@ static int add_del_if(struct net_bridge *br, int ifindex, int isadd) | |||
98 | else | 99 | else |
99 | ret = br_del_if(br, dev); | 100 | ret = br_del_if(br, dev); |
100 | 101 | ||
101 | dev_put(dev); | ||
102 | return ret; | 102 | return ret; |
103 | } | 103 | } |
104 | 104 | ||
diff --git a/net/can/af_can.c b/net/can/af_can.c index 606832115674..833bd838edc6 100644 --- a/net/can/af_can.c +++ b/net/can/af_can.c | |||
@@ -114,7 +114,8 @@ static void can_sock_destruct(struct sock *sk) | |||
114 | skb_queue_purge(&sk->sk_receive_queue); | 114 | skb_queue_purge(&sk->sk_receive_queue); |
115 | } | 115 | } |
116 | 116 | ||
117 | static int can_create(struct net *net, struct socket *sock, int protocol) | 117 | static int can_create(struct net *net, struct socket *sock, int protocol, |
118 | int kern) | ||
118 | { | 119 | { |
119 | struct sock *sk; | 120 | struct sock *sk; |
120 | struct can_proto *cp; | 121 | struct can_proto *cp; |
@@ -160,11 +161,6 @@ static int can_create(struct net *net, struct socket *sock, int protocol) | |||
160 | goto errout; | 161 | goto errout; |
161 | } | 162 | } |
162 | 163 | ||
163 | if (cp->capability >= 0 && !capable(cp->capability)) { | ||
164 | err = -EPERM; | ||
165 | goto errout; | ||
166 | } | ||
167 | |||
168 | sock->ops = cp->ops; | 164 | sock->ops = cp->ops; |
169 | 165 | ||
170 | sk = sk_alloc(net, PF_CAN, GFP_KERNEL, cp->prot); | 166 | sk = sk_alloc(net, PF_CAN, GFP_KERNEL, cp->prot); |
@@ -842,7 +838,7 @@ static struct packet_type can_packet __read_mostly = { | |||
842 | .func = can_rcv, | 838 | .func = can_rcv, |
843 | }; | 839 | }; |
844 | 840 | ||
845 | static struct net_proto_family can_family_ops __read_mostly = { | 841 | static const struct net_proto_family can_family_ops = { |
846 | .family = PF_CAN, | 842 | .family = PF_CAN, |
847 | .create = can_create, | 843 | .create = can_create, |
848 | .owner = THIS_MODULE, | 844 | .owner = THIS_MODULE, |
diff --git a/net/can/bcm.c b/net/can/bcm.c index e8d58f33fe09..e32af52238a2 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c | |||
@@ -139,13 +139,13 @@ static char *bcm_proc_getifname(char *result, int ifindex) | |||
139 | if (!ifindex) | 139 | if (!ifindex) |
140 | return "any"; | 140 | return "any"; |
141 | 141 | ||
142 | read_lock(&dev_base_lock); | 142 | rcu_read_lock(); |
143 | dev = __dev_get_by_index(&init_net, ifindex); | 143 | dev = dev_get_by_index_rcu(&init_net, ifindex); |
144 | if (dev) | 144 | if (dev) |
145 | strcpy(result, dev->name); | 145 | strcpy(result, dev->name); |
146 | else | 146 | else |
147 | strcpy(result, "???"); | 147 | strcpy(result, "???"); |
148 | read_unlock(&dev_base_lock); | 148 | rcu_read_unlock(); |
149 | 149 | ||
150 | return result; | 150 | return result; |
151 | } | 151 | } |
@@ -1539,7 +1539,7 @@ static int bcm_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1539 | return err; | 1539 | return err; |
1540 | } | 1540 | } |
1541 | 1541 | ||
1542 | sock_recv_timestamp(msg, sk, skb); | 1542 | sock_recv_ts_and_drops(msg, sk, skb); |
1543 | 1543 | ||
1544 | if (msg->msg_name) { | 1544 | if (msg->msg_name) { |
1545 | msg->msg_namelen = sizeof(struct sockaddr_can); | 1545 | msg->msg_namelen = sizeof(struct sockaddr_can); |
@@ -1581,7 +1581,6 @@ static struct proto bcm_proto __read_mostly = { | |||
1581 | static struct can_proto bcm_can_proto __read_mostly = { | 1581 | static struct can_proto bcm_can_proto __read_mostly = { |
1582 | .type = SOCK_DGRAM, | 1582 | .type = SOCK_DGRAM, |
1583 | .protocol = CAN_BCM, | 1583 | .protocol = CAN_BCM, |
1584 | .capability = -1, | ||
1585 | .ops = &bcm_ops, | 1584 | .ops = &bcm_ops, |
1586 | .prot = &bcm_proto, | 1585 | .prot = &bcm_proto, |
1587 | }; | 1586 | }; |
diff --git a/net/can/raw.c b/net/can/raw.c index b5e897922d32..abca920440b5 100644 --- a/net/can/raw.c +++ b/net/can/raw.c | |||
@@ -424,8 +424,6 @@ static int raw_setsockopt(struct socket *sock, int level, int optname, | |||
424 | 424 | ||
425 | if (level != SOL_CAN_RAW) | 425 | if (level != SOL_CAN_RAW) |
426 | return -EINVAL; | 426 | return -EINVAL; |
427 | if (optlen < 0) | ||
428 | return -EINVAL; | ||
429 | 427 | ||
430 | switch (optname) { | 428 | switch (optname) { |
431 | 429 | ||
@@ -702,7 +700,7 @@ static int raw_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
702 | return err; | 700 | return err; |
703 | } | 701 | } |
704 | 702 | ||
705 | sock_recv_timestamp(msg, sk, skb); | 703 | sock_recv_ts_and_drops(msg, sk, skb); |
706 | 704 | ||
707 | if (msg->msg_name) { | 705 | if (msg->msg_name) { |
708 | msg->msg_namelen = sizeof(struct sockaddr_can); | 706 | msg->msg_namelen = sizeof(struct sockaddr_can); |
@@ -744,7 +742,6 @@ static struct proto raw_proto __read_mostly = { | |||
744 | static struct can_proto raw_can_proto __read_mostly = { | 742 | static struct can_proto raw_can_proto __read_mostly = { |
745 | .type = SOCK_RAW, | 743 | .type = SOCK_RAW, |
746 | .protocol = CAN_RAW, | 744 | .protocol = CAN_RAW, |
747 | .capability = -1, | ||
748 | .ops = &raw_ops, | 745 | .ops = &raw_ops, |
749 | .prot = &raw_proto, | 746 | .prot = &raw_proto, |
750 | }; | 747 | }; |
diff --git a/net/compat.c b/net/compat.c index a407c3addbae..6a2f75fb3f45 100644 --- a/net/compat.c +++ b/net/compat.c | |||
@@ -390,9 +390,6 @@ asmlinkage long compat_sys_setsockopt(int fd, int level, int optname, | |||
390 | int err; | 390 | int err; |
391 | struct socket *sock; | 391 | struct socket *sock; |
392 | 392 | ||
393 | if (optlen < 0) | ||
394 | return -EINVAL; | ||
395 | |||
396 | if ((sock = sockfd_lookup(fd, &err))!=NULL) | 393 | if ((sock = sockfd_lookup(fd, &err))!=NULL) |
397 | { | 394 | { |
398 | err = security_socket_setsockopt(sock,level,optname); | 395 | err = security_socket_setsockopt(sock,level,optname); |
@@ -727,10 +724,10 @@ EXPORT_SYMBOL(compat_mc_getsockopt); | |||
727 | 724 | ||
728 | /* Argument list sizes for compat_sys_socketcall */ | 725 | /* Argument list sizes for compat_sys_socketcall */ |
729 | #define AL(x) ((x) * sizeof(u32)) | 726 | #define AL(x) ((x) * sizeof(u32)) |
730 | static unsigned char nas[19]={AL(0),AL(3),AL(3),AL(3),AL(2),AL(3), | 727 | static unsigned char nas[20]={AL(0),AL(3),AL(3),AL(3),AL(2),AL(3), |
731 | AL(3),AL(3),AL(4),AL(4),AL(4),AL(6), | 728 | AL(3),AL(3),AL(4),AL(4),AL(4),AL(6), |
732 | AL(6),AL(2),AL(5),AL(5),AL(3),AL(3), | 729 | AL(6),AL(2),AL(5),AL(5),AL(3),AL(3), |
733 | AL(4)}; | 730 | AL(4),AL(5)}; |
734 | #undef AL | 731 | #undef AL |
735 | 732 | ||
736 | asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, unsigned flags) | 733 | asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, unsigned flags) |
@@ -755,13 +752,36 @@ asmlinkage long compat_sys_recvfrom(int fd, void __user *buf, size_t len, | |||
755 | return sys_recvfrom(fd, buf, len, flags | MSG_CMSG_COMPAT, addr, addrlen); | 752 | return sys_recvfrom(fd, buf, len, flags | MSG_CMSG_COMPAT, addr, addrlen); |
756 | } | 753 | } |
757 | 754 | ||
755 | asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg, | ||
756 | unsigned vlen, unsigned int flags, | ||
757 | struct timespec __user *timeout) | ||
758 | { | ||
759 | int datagrams; | ||
760 | struct timespec ktspec; | ||
761 | struct compat_timespec __user *utspec = | ||
762 | (struct compat_timespec __user *)timeout; | ||
763 | |||
764 | if (get_user(ktspec.tv_sec, &utspec->tv_sec) || | ||
765 | get_user(ktspec.tv_nsec, &utspec->tv_nsec)) | ||
766 | return -EFAULT; | ||
767 | |||
768 | datagrams = __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen, | ||
769 | flags | MSG_CMSG_COMPAT, &ktspec); | ||
770 | if (datagrams > 0 && | ||
771 | (put_user(ktspec.tv_sec, &utspec->tv_sec) || | ||
772 | put_user(ktspec.tv_nsec, &utspec->tv_nsec))) | ||
773 | datagrams = -EFAULT; | ||
774 | |||
775 | return datagrams; | ||
776 | } | ||
777 | |||
758 | asmlinkage long compat_sys_socketcall(int call, u32 __user *args) | 778 | asmlinkage long compat_sys_socketcall(int call, u32 __user *args) |
759 | { | 779 | { |
760 | int ret; | 780 | int ret; |
761 | u32 a[6]; | 781 | u32 a[6]; |
762 | u32 a0, a1; | 782 | u32 a0, a1; |
763 | 783 | ||
764 | if (call < SYS_SOCKET || call > SYS_ACCEPT4) | 784 | if (call < SYS_SOCKET || call > SYS_RECVMMSG) |
765 | return -EINVAL; | 785 | return -EINVAL; |
766 | if (copy_from_user(a, args, nas[call])) | 786 | if (copy_from_user(a, args, nas[call])) |
767 | return -EFAULT; | 787 | return -EFAULT; |
@@ -823,6 +843,10 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args) | |||
823 | case SYS_RECVMSG: | 843 | case SYS_RECVMSG: |
824 | ret = compat_sys_recvmsg(a0, compat_ptr(a1), a[2]); | 844 | ret = compat_sys_recvmsg(a0, compat_ptr(a1), a[2]); |
825 | break; | 845 | break; |
846 | case SYS_RECVMMSG: | ||
847 | ret = compat_sys_recvmmsg(a0, compat_ptr(a1), a[2], a[3], | ||
848 | compat_ptr(a[4])); | ||
849 | break; | ||
826 | case SYS_ACCEPT4: | 850 | case SYS_ACCEPT4: |
827 | ret = sys_accept4(a0, compat_ptr(a1), compat_ptr(a[2]), a[3]); | 851 | ret = sys_accept4(a0, compat_ptr(a1), compat_ptr(a[2]), a[3]); |
828 | break; | 852 | break; |
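SYS_RECVMMSG becomes socketcall number 19 and carries five 32-bit arguments (fd, vector, vlen, flags, timeout), hence nas[] growing to 20 entries with the trailing AL(5). The compat handler converts the 32-bit timespec and writes the remaining timeout back when datagrams were received. From userspace this is the ordinary recvmmsg(); a minimal sketch, assuming a libc that exposes the wrapper:

    #define _GNU_SOURCE
    #include <string.h>
    #include <sys/socket.h>
    #include <time.h>

    /* Drain up to 8 datagrams from 'fd' with a single system call.
     * Buffers and headers are caller-provided; error handling is omitted. */
    static int drain(int fd, struct mmsghdr msgs[8], struct iovec iov[8],
                     char bufs[8][2048])
    {
            struct timespec timeout = { .tv_sec = 1, .tv_nsec = 0 };
            int i;

            memset(msgs, 0, 8 * sizeof(*msgs));
            for (i = 0; i < 8; i++) {
                    iov[i].iov_base            = bufs[i];
                    iov[i].iov_len             = sizeof(bufs[i]);
                    msgs[i].msg_hdr.msg_iov    = &iov[i];
                    msgs[i].msg_hdr.msg_iovlen = 1;
            }
            return recvmmsg(fd, msgs, 8, 0, &timeout);
    }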
diff --git a/net/core/datagram.c b/net/core/datagram.c index 4ade3011bb3c..95c2e0840d0d 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c | |||
@@ -271,6 +271,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags) | |||
271 | } | 271 | } |
272 | 272 | ||
273 | kfree_skb(skb); | 273 | kfree_skb(skb); |
274 | atomic_inc(&sk->sk_drops); | ||
274 | sk_mem_reclaim_partial(sk); | 275 | sk_mem_reclaim_partial(sk); |
275 | 276 | ||
276 | return err; | 277 | return err; |
diff --git a/net/core/dev.c b/net/core/dev.c index fe10551d3671..9977288583b8 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -79,6 +79,7 @@ | |||
79 | #include <linux/cpu.h> | 79 | #include <linux/cpu.h> |
80 | #include <linux/types.h> | 80 | #include <linux/types.h> |
81 | #include <linux/kernel.h> | 81 | #include <linux/kernel.h> |
82 | #include <linux/hash.h> | ||
82 | #include <linux/sched.h> | 83 | #include <linux/sched.h> |
83 | #include <linux/mutex.h> | 84 | #include <linux/mutex.h> |
84 | #include <linux/string.h> | 85 | #include <linux/string.h> |
@@ -175,7 +176,7 @@ static struct list_head ptype_all __read_mostly; /* Taps */ | |||
175 | * The @dev_base_head list is protected by @dev_base_lock and the rtnl | 176 | * The @dev_base_head list is protected by @dev_base_lock and the rtnl |
176 | * semaphore. | 177 | * semaphore. |
177 | * | 178 | * |
178 | * Pure readers hold dev_base_lock for reading. | 179 | * Pure readers hold dev_base_lock for reading, or rcu_read_lock() |
179 | * | 180 | * |
180 | * Writers must hold the rtnl semaphore while they loop through the | 181 | * Writers must hold the rtnl semaphore while they loop through the |
181 | * dev_base_head list, and hold dev_base_lock for writing when they do the | 182 | * dev_base_head list, and hold dev_base_lock for writing when they do the |
@@ -193,18 +194,15 @@ static struct list_head ptype_all __read_mostly; /* Taps */ | |||
193 | DEFINE_RWLOCK(dev_base_lock); | 194 | DEFINE_RWLOCK(dev_base_lock); |
194 | EXPORT_SYMBOL(dev_base_lock); | 195 | EXPORT_SYMBOL(dev_base_lock); |
195 | 196 | ||
196 | #define NETDEV_HASHBITS 8 | ||
197 | #define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS) | ||
198 | |||
199 | static inline struct hlist_head *dev_name_hash(struct net *net, const char *name) | 197 | static inline struct hlist_head *dev_name_hash(struct net *net, const char *name) |
200 | { | 198 | { |
201 | unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ)); | 199 | unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ)); |
202 | return &net->dev_name_head[hash & ((1 << NETDEV_HASHBITS) - 1)]; | 200 | return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)]; |
203 | } | 201 | } |
204 | 202 | ||
205 | static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex) | 203 | static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex) |
206 | { | 204 | { |
207 | return &net->dev_index_head[ifindex & ((1 << NETDEV_HASHBITS) - 1)]; | 205 | return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)]; |
208 | } | 206 | } |
209 | 207 | ||
210 | /* Device list insertion */ | 208 | /* Device list insertion */ |
@@ -215,23 +213,26 @@ static int list_netdevice(struct net_device *dev) | |||
215 | ASSERT_RTNL(); | 213 | ASSERT_RTNL(); |
216 | 214 | ||
217 | write_lock_bh(&dev_base_lock); | 215 | write_lock_bh(&dev_base_lock); |
218 | list_add_tail(&dev->dev_list, &net->dev_base_head); | 216 | list_add_tail_rcu(&dev->dev_list, &net->dev_base_head); |
219 | hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name)); | 217 | hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name)); |
220 | hlist_add_head(&dev->index_hlist, dev_index_hash(net, dev->ifindex)); | 218 | hlist_add_head_rcu(&dev->index_hlist, |
219 | dev_index_hash(net, dev->ifindex)); | ||
221 | write_unlock_bh(&dev_base_lock); | 220 | write_unlock_bh(&dev_base_lock); |
222 | return 0; | 221 | return 0; |
223 | } | 222 | } |
224 | 223 | ||
225 | /* Device list removal */ | 224 | /* Device list removal |
225 | * caller must respect a RCU grace period before freeing/reusing dev | ||
226 | */ | ||
226 | static void unlist_netdevice(struct net_device *dev) | 227 | static void unlist_netdevice(struct net_device *dev) |
227 | { | 228 | { |
228 | ASSERT_RTNL(); | 229 | ASSERT_RTNL(); |
229 | 230 | ||
230 | /* Unlink dev from the device chain */ | 231 | /* Unlink dev from the device chain */ |
231 | write_lock_bh(&dev_base_lock); | 232 | write_lock_bh(&dev_base_lock); |
232 | list_del(&dev->dev_list); | 233 | list_del_rcu(&dev->dev_list); |
233 | hlist_del(&dev->name_hlist); | 234 | hlist_del_rcu(&dev->name_hlist); |
234 | hlist_del(&dev->index_hlist); | 235 | hlist_del_rcu(&dev->index_hlist); |
235 | write_unlock_bh(&dev_base_lock); | 236 | write_unlock_bh(&dev_base_lock); |
236 | } | 237 | } |
237 | 238 | ||
@@ -587,18 +588,44 @@ __setup("netdev=", netdev_boot_setup); | |||
587 | struct net_device *__dev_get_by_name(struct net *net, const char *name) | 588 | struct net_device *__dev_get_by_name(struct net *net, const char *name) |
588 | { | 589 | { |
589 | struct hlist_node *p; | 590 | struct hlist_node *p; |
591 | struct net_device *dev; | ||
592 | struct hlist_head *head = dev_name_hash(net, name); | ||
590 | 593 | ||
591 | hlist_for_each(p, dev_name_hash(net, name)) { | 594 | hlist_for_each_entry(dev, p, head, name_hlist) |
592 | struct net_device *dev | ||
593 | = hlist_entry(p, struct net_device, name_hlist); | ||
594 | if (!strncmp(dev->name, name, IFNAMSIZ)) | 595 | if (!strncmp(dev->name, name, IFNAMSIZ)) |
595 | return dev; | 596 | return dev; |
596 | } | 597 | |
597 | return NULL; | 598 | return NULL; |
598 | } | 599 | } |
599 | EXPORT_SYMBOL(__dev_get_by_name); | 600 | EXPORT_SYMBOL(__dev_get_by_name); |
600 | 601 | ||
601 | /** | 602 | /** |
603 | * dev_get_by_name_rcu - find a device by its name | ||
604 | * @net: the applicable net namespace | ||
605 | * @name: name to find | ||
606 | * | ||
607 | * Find an interface by name. | ||
608 | * If the name is found a pointer to the device is returned. | ||
609 | * If the name is not found then %NULL is returned. | ||
610 | * The reference counters are not incremented so the caller must be | ||
611 | * careful with locks. The caller must hold RCU lock. | ||
612 | */ | ||
613 | |||
614 | struct net_device *dev_get_by_name_rcu(struct net *net, const char *name) | ||
615 | { | ||
616 | struct hlist_node *p; | ||
617 | struct net_device *dev; | ||
618 | struct hlist_head *head = dev_name_hash(net, name); | ||
619 | |||
620 | hlist_for_each_entry_rcu(dev, p, head, name_hlist) | ||
621 | if (!strncmp(dev->name, name, IFNAMSIZ)) | ||
622 | return dev; | ||
623 | |||
624 | return NULL; | ||
625 | } | ||
626 | EXPORT_SYMBOL(dev_get_by_name_rcu); | ||
627 | |||
628 | /** | ||
602 | * dev_get_by_name - find a device by its name | 629 | * dev_get_by_name - find a device by its name |
603 | * @net: the applicable net namespace | 630 | * @net: the applicable net namespace |
604 | * @name: name to find | 631 | * @name: name to find |
@@ -614,11 +641,11 @@ struct net_device *dev_get_by_name(struct net *net, const char *name) | |||
614 | { | 641 | { |
615 | struct net_device *dev; | 642 | struct net_device *dev; |
616 | 643 | ||
617 | read_lock(&dev_base_lock); | 644 | rcu_read_lock(); |
618 | dev = __dev_get_by_name(net, name); | 645 | dev = dev_get_by_name_rcu(net, name); |
619 | if (dev) | 646 | if (dev) |
620 | dev_hold(dev); | 647 | dev_hold(dev); |
621 | read_unlock(&dev_base_lock); | 648 | rcu_read_unlock(); |
622 | return dev; | 649 | return dev; |
623 | } | 650 | } |
624 | EXPORT_SYMBOL(dev_get_by_name); | 651 | EXPORT_SYMBOL(dev_get_by_name); |
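The _rcu lookups return the device without bumping its refcount, so the pointer is only guaranteed valid inside the RCU read-side section; dev_get_by_name() above is now just this pattern plus dev_hold(). A minimal usage sketch:

    struct net_device *dev;

    rcu_read_lock();
    dev = dev_get_by_name_rcu(net, "eth0");
    if (dev)
            dev_hold(dev);  /* only needed if dev is used after unlock */
    rcu_read_unlock();

    if (dev) {
            /* ... use dev outside the critical section ... */
            dev_put(dev);
    }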
@@ -638,17 +665,42 @@ EXPORT_SYMBOL(dev_get_by_name); | |||
638 | struct net_device *__dev_get_by_index(struct net *net, int ifindex) | 665 | struct net_device *__dev_get_by_index(struct net *net, int ifindex) |
639 | { | 666 | { |
640 | struct hlist_node *p; | 667 | struct hlist_node *p; |
668 | struct net_device *dev; | ||
669 | struct hlist_head *head = dev_index_hash(net, ifindex); | ||
641 | 670 | ||
642 | hlist_for_each(p, dev_index_hash(net, ifindex)) { | 671 | hlist_for_each_entry(dev, p, head, index_hlist) |
643 | struct net_device *dev | ||
644 | = hlist_entry(p, struct net_device, index_hlist); | ||
645 | if (dev->ifindex == ifindex) | 672 | if (dev->ifindex == ifindex) |
646 | return dev; | 673 | return dev; |
647 | } | 674 | |
648 | return NULL; | 675 | return NULL; |
649 | } | 676 | } |
650 | EXPORT_SYMBOL(__dev_get_by_index); | 677 | EXPORT_SYMBOL(__dev_get_by_index); |
651 | 678 | ||
679 | /** | ||
680 | * dev_get_by_index_rcu - find a device by its ifindex | ||
681 | * @net: the applicable net namespace | ||
682 | * @ifindex: index of device | ||
683 | * | ||
684 | * Search for an interface by index. Returns %NULL if the device | ||
685 | * is not found or a pointer to the device. The device has not | ||
686 | * had its reference counter increased so the caller must be careful | ||
687 | * about locking. The caller must hold RCU lock. | ||
688 | */ | ||
689 | |||
690 | struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex) | ||
691 | { | ||
692 | struct hlist_node *p; | ||
693 | struct net_device *dev; | ||
694 | struct hlist_head *head = dev_index_hash(net, ifindex); | ||
695 | |||
696 | hlist_for_each_entry_rcu(dev, p, head, index_hlist) | ||
697 | if (dev->ifindex == ifindex) | ||
698 | return dev; | ||
699 | |||
700 | return NULL; | ||
701 | } | ||
702 | EXPORT_SYMBOL(dev_get_by_index_rcu); | ||
703 | |||
652 | 704 | ||
653 | /** | 705 | /** |
654 | * dev_get_by_index - find a device by its ifindex | 706 | * dev_get_by_index - find a device by its ifindex |
@@ -665,11 +717,11 @@ struct net_device *dev_get_by_index(struct net *net, int ifindex) | |||
665 | { | 717 | { |
666 | struct net_device *dev; | 718 | struct net_device *dev; |
667 | 719 | ||
668 | read_lock(&dev_base_lock); | 720 | rcu_read_lock(); |
669 | dev = __dev_get_by_index(net, ifindex); | 721 | dev = dev_get_by_index_rcu(net, ifindex); |
670 | if (dev) | 722 | if (dev) |
671 | dev_hold(dev); | 723 | dev_hold(dev); |
672 | read_unlock(&dev_base_lock); | 724 | rcu_read_unlock(); |
673 | return dev; | 725 | return dev; |
674 | } | 726 | } |
675 | EXPORT_SYMBOL(dev_get_by_index); | 727 | EXPORT_SYMBOL(dev_get_by_index); |
@@ -748,15 +800,15 @@ struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags, | |||
748 | struct net_device *dev, *ret; | 800 | struct net_device *dev, *ret; |
749 | 801 | ||
750 | ret = NULL; | 802 | ret = NULL; |
751 | read_lock(&dev_base_lock); | 803 | rcu_read_lock(); |
752 | for_each_netdev(net, dev) { | 804 | for_each_netdev_rcu(net, dev) { |
753 | if (((dev->flags ^ if_flags) & mask) == 0) { | 805 | if (((dev->flags ^ if_flags) & mask) == 0) { |
754 | dev_hold(dev); | 806 | dev_hold(dev); |
755 | ret = dev; | 807 | ret = dev; |
756 | break; | 808 | break; |
757 | } | 809 | } |
758 | } | 810 | } |
759 | read_unlock(&dev_base_lock); | 811 | rcu_read_unlock(); |
760 | return ret; | 812 | return ret; |
761 | } | 813 | } |
762 | EXPORT_SYMBOL(dev_get_by_flags); | 814 | EXPORT_SYMBOL(dev_get_by_flags); |
@@ -841,7 +893,8 @@ static int __dev_alloc_name(struct net *net, const char *name, char *buf) | |||
841 | free_page((unsigned long) inuse); | 893 | free_page((unsigned long) inuse); |
842 | } | 894 | } |
843 | 895 | ||
844 | snprintf(buf, IFNAMSIZ, name, i); | 896 | if (buf != name) |
897 | snprintf(buf, IFNAMSIZ, name, i); | ||
845 | if (!__dev_get_by_name(net, buf)) | 898 | if (!__dev_get_by_name(net, buf)) |
846 | return i; | 899 | return i; |
847 | 900 | ||
@@ -881,6 +934,21 @@ int dev_alloc_name(struct net_device *dev, const char *name) | |||
881 | } | 934 | } |
882 | EXPORT_SYMBOL(dev_alloc_name); | 935 | EXPORT_SYMBOL(dev_alloc_name); |
883 | 936 | ||
937 | static int dev_get_valid_name(struct net *net, const char *name, char *buf, | ||
938 | bool fmt) | ||
939 | { | ||
940 | if (!dev_valid_name(name)) | ||
941 | return -EINVAL; | ||
942 | |||
943 | if (fmt && strchr(name, '%')) | ||
944 | return __dev_alloc_name(net, name, buf); | ||
945 | else if (__dev_get_by_name(net, name)) | ||
946 | return -EEXIST; | ||
947 | else if (buf != name) | ||
948 | strlcpy(buf, name, IFNAMSIZ); | ||
949 | |||
950 | return 0; | ||
951 | } | ||
884 | 952 | ||
885 | /** | 953 | /** |
886 | * dev_change_name - change name of a device | 954 | * dev_change_name - change name of a device |
@@ -904,22 +972,14 @@ int dev_change_name(struct net_device *dev, const char *newname) | |||
904 | if (dev->flags & IFF_UP) | 972 | if (dev->flags & IFF_UP) |
905 | return -EBUSY; | 973 | return -EBUSY; |
906 | 974 | ||
907 | if (!dev_valid_name(newname)) | ||
908 | return -EINVAL; | ||
909 | |||
910 | if (strncmp(newname, dev->name, IFNAMSIZ) == 0) | 975 | if (strncmp(newname, dev->name, IFNAMSIZ) == 0) |
911 | return 0; | 976 | return 0; |
912 | 977 | ||
913 | memcpy(oldname, dev->name, IFNAMSIZ); | 978 | memcpy(oldname, dev->name, IFNAMSIZ); |
914 | 979 | ||
915 | if (strchr(newname, '%')) { | 980 | err = dev_get_valid_name(net, newname, dev->name, 1); |
916 | err = dev_alloc_name(dev, newname); | 981 | if (err < 0) |
917 | if (err < 0) | 982 | return err; |
918 | return err; | ||
919 | } else if (__dev_get_by_name(net, newname)) | ||
920 | return -EEXIST; | ||
921 | else | ||
922 | strlcpy(dev->name, newname, IFNAMSIZ); | ||
923 | 983 | ||
924 | rollback: | 984 | rollback: |
925 | /* For now only devices in the initial network namespace | 985 | /* For now only devices in the initial network namespace |
@@ -935,7 +995,12 @@ rollback: | |||
935 | 995 | ||
936 | write_lock_bh(&dev_base_lock); | 996 | write_lock_bh(&dev_base_lock); |
937 | hlist_del(&dev->name_hlist); | 997 | hlist_del(&dev->name_hlist); |
938 | hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name)); | 998 | write_unlock_bh(&dev_base_lock); |
999 | |||
1000 | synchronize_rcu(); | ||
1001 | |||
1002 | write_lock_bh(&dev_base_lock); | ||
1003 | hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name)); | ||
939 | write_unlock_bh(&dev_base_lock); | 1004 | write_unlock_bh(&dev_base_lock); |
940 | 1005 | ||
941 | ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev); | 1006 | ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev); |
@@ -1038,9 +1103,9 @@ void dev_load(struct net *net, const char *name) | |||
1038 | { | 1103 | { |
1039 | struct net_device *dev; | 1104 | struct net_device *dev; |
1040 | 1105 | ||
1041 | read_lock(&dev_base_lock); | 1106 | rcu_read_lock(); |
1042 | dev = __dev_get_by_name(net, name); | 1107 | dev = dev_get_by_name_rcu(net, name); |
1043 | read_unlock(&dev_base_lock); | 1108 | rcu_read_unlock(); |
1044 | 1109 | ||
1045 | if (!dev && capable(CAP_NET_ADMIN)) | 1110 | if (!dev && capable(CAP_NET_ADMIN)) |
1046 | request_module("%s", name); | 1111 | request_module("%s", name); |
@@ -1287,6 +1352,7 @@ rollback: | |||
1287 | nb->notifier_call(nb, NETDEV_DOWN, dev); | 1352 | nb->notifier_call(nb, NETDEV_DOWN, dev); |
1288 | } | 1353 | } |
1289 | nb->notifier_call(nb, NETDEV_UNREGISTER, dev); | 1354 | nb->notifier_call(nb, NETDEV_UNREGISTER, dev); |
1355 | nb->notifier_call(nb, NETDEV_UNREGISTER_PERNET, dev); | ||
1290 | } | 1356 | } |
1291 | } | 1357 | } |
1292 | 1358 | ||
@@ -1701,7 +1767,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, | |||
1701 | struct netdev_queue *txq) | 1767 | struct netdev_queue *txq) |
1702 | { | 1768 | { |
1703 | const struct net_device_ops *ops = dev->netdev_ops; | 1769 | const struct net_device_ops *ops = dev->netdev_ops; |
1704 | int rc; | 1770 | int rc = NETDEV_TX_OK; |
1705 | 1771 | ||
1706 | if (likely(!skb->next)) { | 1772 | if (likely(!skb->next)) { |
1707 | if (!list_empty(&ptype_all)) | 1773 | if (!list_empty(&ptype_all)) |
@@ -1749,6 +1815,8 @@ gso: | |||
1749 | nskb->next = NULL; | 1815 | nskb->next = NULL; |
1750 | rc = ops->ndo_start_xmit(nskb, dev); | 1816 | rc = ops->ndo_start_xmit(nskb, dev); |
1751 | if (unlikely(rc != NETDEV_TX_OK)) { | 1817 | if (unlikely(rc != NETDEV_TX_OK)) { |
1818 | if (rc & ~NETDEV_TX_MASK) | ||
1819 | goto out_kfree_gso_skb; | ||
1752 | nskb->next = skb->next; | 1820 | nskb->next = skb->next; |
1753 | skb->next = nskb; | 1821 | skb->next = nskb; |
1754 | return rc; | 1822 | return rc; |
@@ -1758,11 +1826,12 @@ gso: | |||
1758 | return NETDEV_TX_BUSY; | 1826 | return NETDEV_TX_BUSY; |
1759 | } while (skb->next); | 1827 | } while (skb->next); |
1760 | 1828 | ||
1761 | skb->destructor = DEV_GSO_CB(skb)->destructor; | 1829 | out_kfree_gso_skb: |
1762 | 1830 | if (likely(skb->next == NULL)) | |
1831 | skb->destructor = DEV_GSO_CB(skb)->destructor; | ||
1763 | out_kfree_skb: | 1832 | out_kfree_skb: |
1764 | kfree_skb(skb); | 1833 | kfree_skb(skb); |
1765 | return NETDEV_TX_OK; | 1834 | return rc; |
1766 | } | 1835 | } |
1767 | 1836 | ||
1768 | static u32 skb_tx_hashrnd; | 1837 | static u32 skb_tx_hashrnd; |
@@ -1789,16 +1858,43 @@ u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb) | |||
1789 | } | 1858 | } |
1790 | EXPORT_SYMBOL(skb_tx_hash); | 1859 | EXPORT_SYMBOL(skb_tx_hash); |
1791 | 1860 | ||
1861 | static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index) | ||
1862 | { | ||
1863 | if (unlikely(queue_index >= dev->real_num_tx_queues)) { | ||
1864 | if (net_ratelimit()) { | ||
1865 | WARN(1, "%s selects TX queue %d, but " | ||
1866 | "real number of TX queues is %d\n", | ||
1867 | dev->name, queue_index, | ||
1868 | dev->real_num_tx_queues); | ||
1869 | } | ||
1870 | return 0; | ||
1871 | } | ||
1872 | return queue_index; | ||
1873 | } | ||
1874 | |||
1792 | static struct netdev_queue *dev_pick_tx(struct net_device *dev, | 1875 | static struct netdev_queue *dev_pick_tx(struct net_device *dev, |
1793 | struct sk_buff *skb) | 1876 | struct sk_buff *skb) |
1794 | { | 1877 | { |
1795 | const struct net_device_ops *ops = dev->netdev_ops; | 1878 | u16 queue_index; |
1796 | u16 queue_index = 0; | 1879 | struct sock *sk = skb->sk; |
1797 | 1880 | ||
1798 | if (ops->ndo_select_queue) | 1881 | if (sk_tx_queue_recorded(sk)) { |
1799 | queue_index = ops->ndo_select_queue(dev, skb); | 1882 | queue_index = sk_tx_queue_get(sk); |
1800 | else if (dev->real_num_tx_queues > 1) | 1883 | } else { |
1801 | queue_index = skb_tx_hash(dev, skb); | 1884 | const struct net_device_ops *ops = dev->netdev_ops; |
1885 | |||
1886 | if (ops->ndo_select_queue) { | ||
1887 | queue_index = ops->ndo_select_queue(dev, skb); | ||
1888 | queue_index = dev_cap_txqueue(dev, queue_index); | ||
1889 | } else { | ||
1890 | queue_index = 0; | ||
1891 | if (dev->real_num_tx_queues > 1) | ||
1892 | queue_index = skb_tx_hash(dev, skb); | ||
1893 | |||
1894 | if (sk && sk->sk_dst_cache) | ||
1895 | sk_tx_queue_set(sk, queue_index); | ||
1896 | } | ||
1897 | } | ||
1802 | 1898 | ||
1803 | skb_set_queue_mapping(skb, queue_index); | 1899 | skb_set_queue_mapping(skb, queue_index); |
1804 | return netdev_get_tx_queue(dev, queue_index); | 1900 | return netdev_get_tx_queue(dev, queue_index); |
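dev_pick_tx() now reuses a queue index already recorded on the socket, otherwise asks the driver's ndo_select_queue() (range-checked by dev_cap_txqueue()) or falls back to skb_tx_hash(), and records the hashed result when the socket has a cached route. A simplified sketch of the selection order, not the exact code:

    static u16 pick_tx_queue_sketch(struct net_device *dev, struct sk_buff *skb)
    {
            const struct net_device_ops *ops = dev->netdev_ops;
            struct sock *sk = skb->sk;

            if (sk_tx_queue_recorded(sk))           /* 1. cached on the socket */
                    return sk_tx_queue_get(sk);

            if (ops->ndo_select_queue)              /* 2. driver hook, clamped */
                    return dev_cap_txqueue(dev, ops->ndo_select_queue(dev, skb));

            if (dev->real_num_tx_queues > 1)        /* 3. flow hash fallback */
                    return skb_tx_hash(dev, skb);

            return 0;
    }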
@@ -1935,8 +2031,8 @@ gso: | |||
1935 | HARD_TX_LOCK(dev, txq, cpu); | 2031 | HARD_TX_LOCK(dev, txq, cpu); |
1936 | 2032 | ||
1937 | if (!netif_tx_queue_stopped(txq)) { | 2033 | if (!netif_tx_queue_stopped(txq)) { |
1938 | rc = NET_XMIT_SUCCESS; | 2034 | rc = dev_hard_start_xmit(skb, dev, txq); |
1939 | if (!dev_hard_start_xmit(skb, dev, txq)) { | 2035 | if (dev_xmit_complete(rc)) { |
1940 | HARD_TX_UNLOCK(dev, txq); | 2036 | HARD_TX_UNLOCK(dev, txq); |
1941 | goto out; | 2037 | goto out; |
1942 | } | 2038 | } |
@@ -2292,7 +2388,7 @@ int netif_receive_skb(struct sk_buff *skb) | |||
2292 | if (!skb->tstamp.tv64) | 2388 | if (!skb->tstamp.tv64) |
2293 | net_timestamp(skb); | 2389 | net_timestamp(skb); |
2294 | 2390 | ||
2295 | if (skb->vlan_tci && vlan_hwaccel_do_receive(skb)) | 2391 | if (vlan_tx_tag_present(skb) && vlan_hwaccel_do_receive(skb)) |
2296 | return NET_RX_SUCCESS; | 2392 | return NET_RX_SUCCESS; |
2297 | 2393 | ||
2298 | /* if we've gotten here through NAPI, check netpoll */ | 2394 | /* if we've gotten here through NAPI, check netpoll */ |
@@ -2440,7 +2536,7 @@ void napi_gro_flush(struct napi_struct *napi) | |||
2440 | } | 2536 | } |
2441 | EXPORT_SYMBOL(napi_gro_flush); | 2537 | EXPORT_SYMBOL(napi_gro_flush); |
2442 | 2538 | ||
2443 | int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) | 2539 | enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) |
2444 | { | 2540 | { |
2445 | struct sk_buff **pp = NULL; | 2541 | struct sk_buff **pp = NULL; |
2446 | struct packet_type *ptype; | 2542 | struct packet_type *ptype; |
@@ -2448,7 +2544,7 @@ int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) | |||
2448 | struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK]; | 2544 | struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK]; |
2449 | int same_flow; | 2545 | int same_flow; |
2450 | int mac_len; | 2546 | int mac_len; |
2451 | int ret; | 2547 | enum gro_result ret; |
2452 | 2548 | ||
2453 | if (!(skb->dev->features & NETIF_F_GRO)) | 2549 | if (!(skb->dev->features & NETIF_F_GRO)) |
2454 | goto normal; | 2550 | goto normal; |
@@ -2532,7 +2628,8 @@ normal: | |||
2532 | } | 2628 | } |
2533 | EXPORT_SYMBOL(dev_gro_receive); | 2629 | EXPORT_SYMBOL(dev_gro_receive); |
2534 | 2630 | ||
2535 | static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) | 2631 | static gro_result_t |
2632 | __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) | ||
2536 | { | 2633 | { |
2537 | struct sk_buff *p; | 2634 | struct sk_buff *p; |
2538 | 2635 | ||
@@ -2549,24 +2646,25 @@ static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) | |||
2549 | return dev_gro_receive(napi, skb); | 2646 | return dev_gro_receive(napi, skb); |
2550 | } | 2647 | } |
2551 | 2648 | ||
2552 | int napi_skb_finish(int ret, struct sk_buff *skb) | 2649 | gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb) |
2553 | { | 2650 | { |
2554 | int err = NET_RX_SUCCESS; | ||
2555 | |||
2556 | switch (ret) { | 2651 | switch (ret) { |
2557 | case GRO_NORMAL: | 2652 | case GRO_NORMAL: |
2558 | return netif_receive_skb(skb); | 2653 | if (netif_receive_skb(skb)) |
2654 | ret = GRO_DROP; | ||
2655 | break; | ||
2559 | 2656 | ||
2560 | case GRO_DROP: | 2657 | case GRO_DROP: |
2561 | err = NET_RX_DROP; | ||
2562 | /* fall through */ | ||
2563 | |||
2564 | case GRO_MERGED_FREE: | 2658 | case GRO_MERGED_FREE: |
2565 | kfree_skb(skb); | 2659 | kfree_skb(skb); |
2566 | break; | 2660 | break; |
2661 | |||
2662 | case GRO_HELD: | ||
2663 | case GRO_MERGED: | ||
2664 | break; | ||
2567 | } | 2665 | } |
2568 | 2666 | ||
2569 | return err; | 2667 | return ret; |
2570 | } | 2668 | } |
2571 | EXPORT_SYMBOL(napi_skb_finish); | 2669 | EXPORT_SYMBOL(napi_skb_finish); |
2572 | 2670 | ||
@@ -2586,7 +2684,7 @@ void skb_gro_reset_offset(struct sk_buff *skb) | |||
2586 | } | 2684 | } |
2587 | EXPORT_SYMBOL(skb_gro_reset_offset); | 2685 | EXPORT_SYMBOL(skb_gro_reset_offset); |
2588 | 2686 | ||
2589 | int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) | 2687 | gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) |
2590 | { | 2688 | { |
2591 | skb_gro_reset_offset(skb); | 2689 | skb_gro_reset_offset(skb); |
2592 | 2690 | ||
@@ -2605,49 +2703,41 @@ EXPORT_SYMBOL(napi_reuse_skb); | |||
2605 | 2703 | ||
2606 | struct sk_buff *napi_get_frags(struct napi_struct *napi) | 2704 | struct sk_buff *napi_get_frags(struct napi_struct *napi) |
2607 | { | 2705 | { |
2608 | struct net_device *dev = napi->dev; | ||
2609 | struct sk_buff *skb = napi->skb; | 2706 | struct sk_buff *skb = napi->skb; |
2610 | 2707 | ||
2611 | if (!skb) { | 2708 | if (!skb) { |
2612 | skb = netdev_alloc_skb(dev, GRO_MAX_HEAD + NET_IP_ALIGN); | 2709 | skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD); |
2613 | if (!skb) | 2710 | if (skb) |
2614 | goto out; | 2711 | napi->skb = skb; |
2615 | |||
2616 | skb_reserve(skb, NET_IP_ALIGN); | ||
2617 | |||
2618 | napi->skb = skb; | ||
2619 | } | 2712 | } |
2620 | |||
2621 | out: | ||
2622 | return skb; | 2713 | return skb; |
2623 | } | 2714 | } |
2624 | EXPORT_SYMBOL(napi_get_frags); | 2715 | EXPORT_SYMBOL(napi_get_frags); |
2625 | 2716 | ||
2626 | int napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb, int ret) | 2717 | gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb, |
2718 | gro_result_t ret) | ||
2627 | { | 2719 | { |
2628 | int err = NET_RX_SUCCESS; | ||
2629 | |||
2630 | switch (ret) { | 2720 | switch (ret) { |
2631 | case GRO_NORMAL: | 2721 | case GRO_NORMAL: |
2632 | case GRO_HELD: | 2722 | case GRO_HELD: |
2633 | skb->protocol = eth_type_trans(skb, napi->dev); | 2723 | skb->protocol = eth_type_trans(skb, napi->dev); |
2634 | 2724 | ||
2635 | if (ret == GRO_NORMAL) | 2725 | if (ret == GRO_HELD) |
2636 | return netif_receive_skb(skb); | 2726 | skb_gro_pull(skb, -ETH_HLEN); |
2637 | 2727 | else if (netif_receive_skb(skb)) | |
2638 | skb_gro_pull(skb, -ETH_HLEN); | 2728 | ret = GRO_DROP; |
2639 | break; | 2729 | break; |
2640 | 2730 | ||
2641 | case GRO_DROP: | 2731 | case GRO_DROP: |
2642 | err = NET_RX_DROP; | ||
2643 | /* fall through */ | ||
2644 | |||
2645 | case GRO_MERGED_FREE: | 2732 | case GRO_MERGED_FREE: |
2646 | napi_reuse_skb(napi, skb); | 2733 | napi_reuse_skb(napi, skb); |
2647 | break; | 2734 | break; |
2735 | |||
2736 | case GRO_MERGED: | ||
2737 | break; | ||
2648 | } | 2738 | } |
2649 | 2739 | ||
2650 | return err; | 2740 | return ret; |
2651 | } | 2741 | } |
2652 | EXPORT_SYMBOL(napi_frags_finish); | 2742 | EXPORT_SYMBOL(napi_frags_finish); |
2653 | 2743 | ||
@@ -2688,12 +2778,12 @@ out: | |||
2688 | } | 2778 | } |
2689 | EXPORT_SYMBOL(napi_frags_skb); | 2779 | EXPORT_SYMBOL(napi_frags_skb); |
2690 | 2780 | ||
2691 | int napi_gro_frags(struct napi_struct *napi) | 2781 | gro_result_t napi_gro_frags(struct napi_struct *napi) |
2692 | { | 2782 | { |
2693 | struct sk_buff *skb = napi_frags_skb(napi); | 2783 | struct sk_buff *skb = napi_frags_skb(napi); |
2694 | 2784 | ||
2695 | if (!skb) | 2785 | if (!skb) |
2696 | return NET_RX_DROP; | 2786 | return GRO_DROP; |
2697 | 2787 | ||
2698 | return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb)); | 2788 | return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb)); |
2699 | } | 2789 | } |
@@ -2938,15 +3028,15 @@ static int dev_ifname(struct net *net, struct ifreq __user *arg) | |||
2938 | if (copy_from_user(&ifr, arg, sizeof(struct ifreq))) | 3028 | if (copy_from_user(&ifr, arg, sizeof(struct ifreq))) |
2939 | return -EFAULT; | 3029 | return -EFAULT; |
2940 | 3030 | ||
2941 | read_lock(&dev_base_lock); | 3031 | rcu_read_lock(); |
2942 | dev = __dev_get_by_index(net, ifr.ifr_ifindex); | 3032 | dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex); |
2943 | if (!dev) { | 3033 | if (!dev) { |
2944 | read_unlock(&dev_base_lock); | 3034 | rcu_read_unlock(); |
2945 | return -ENODEV; | 3035 | return -ENODEV; |
2946 | } | 3036 | } |
2947 | 3037 | ||
2948 | strcpy(ifr.ifr_name, dev->name); | 3038 | strcpy(ifr.ifr_name, dev->name); |
2949 | read_unlock(&dev_base_lock); | 3039 | rcu_read_unlock(); |
2950 | 3040 | ||
2951 | if (copy_to_user(arg, &ifr, sizeof(struct ifreq))) | 3041 | if (copy_to_user(arg, &ifr, sizeof(struct ifreq))) |
2952 | return -EFAULT; | 3042 | return -EFAULT; |
@@ -3016,18 +3106,18 @@ static int dev_ifconf(struct net *net, char __user *arg) | |||
3016 | * in detail. | 3106 | * in detail. |
3017 | */ | 3107 | */ |
3018 | void *dev_seq_start(struct seq_file *seq, loff_t *pos) | 3108 | void *dev_seq_start(struct seq_file *seq, loff_t *pos) |
3019 | __acquires(dev_base_lock) | 3109 | __acquires(RCU) |
3020 | { | 3110 | { |
3021 | struct net *net = seq_file_net(seq); | 3111 | struct net *net = seq_file_net(seq); |
3022 | loff_t off; | 3112 | loff_t off; |
3023 | struct net_device *dev; | 3113 | struct net_device *dev; |
3024 | 3114 | ||
3025 | read_lock(&dev_base_lock); | 3115 | rcu_read_lock(); |
3026 | if (!*pos) | 3116 | if (!*pos) |
3027 | return SEQ_START_TOKEN; | 3117 | return SEQ_START_TOKEN; |
3028 | 3118 | ||
3029 | off = 1; | 3119 | off = 1; |
3030 | for_each_netdev(net, dev) | 3120 | for_each_netdev_rcu(net, dev) |
3031 | if (off++ == *pos) | 3121 | if (off++ == *pos) |
3032 | return dev; | 3122 | return dev; |
3033 | 3123 | ||
@@ -3036,16 +3126,18 @@ void *dev_seq_start(struct seq_file *seq, loff_t *pos) | |||
3036 | 3126 | ||
3037 | void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos) | 3127 | void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos) |
3038 | { | 3128 | { |
3039 | struct net *net = seq_file_net(seq); | 3129 | struct net_device *dev = (v == SEQ_START_TOKEN) ? |
3130 | first_net_device(seq_file_net(seq)) : | ||
3131 | next_net_device((struct net_device *)v); | ||
3132 | |||
3040 | ++*pos; | 3133 | ++*pos; |
3041 | return v == SEQ_START_TOKEN ? | 3134 | return rcu_dereference(dev); |
3042 | first_net_device(net) : next_net_device((struct net_device *)v); | ||
3043 | } | 3135 | } |
3044 | 3136 | ||
3045 | void dev_seq_stop(struct seq_file *seq, void *v) | 3137 | void dev_seq_stop(struct seq_file *seq, void *v) |
3046 | __releases(dev_base_lock) | 3138 | __releases(RCU) |
3047 | { | 3139 | { |
3048 | read_unlock(&dev_base_lock); | 3140 | rcu_read_unlock(); |
3049 | } | 3141 | } |
3050 | 3142 | ||
3051 | static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev) | 3143 | static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev) |
@@ -4254,12 +4346,12 @@ int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa) | |||
4254 | EXPORT_SYMBOL(dev_set_mac_address); | 4346 | EXPORT_SYMBOL(dev_set_mac_address); |
4255 | 4347 | ||
4256 | /* | 4348 | /* |
4257 | * Perform the SIOCxIFxxx calls, inside read_lock(dev_base_lock) | 4349 | * Perform the SIOCxIFxxx calls, inside rcu_read_lock() |
4258 | */ | 4350 | */ |
4259 | static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd) | 4351 | static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd) |
4260 | { | 4352 | { |
4261 | int err; | 4353 | int err; |
4262 | struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name); | 4354 | struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name); |
4263 | 4355 | ||
4264 | if (!dev) | 4356 | if (!dev) |
4265 | return -ENODEV; | 4357 | return -ENODEV; |
@@ -4491,9 +4583,9 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg) | |||
4491 | case SIOCGIFINDEX: | 4583 | case SIOCGIFINDEX: |
4492 | case SIOCGIFTXQLEN: | 4584 | case SIOCGIFTXQLEN: |
4493 | dev_load(net, ifr.ifr_name); | 4585 | dev_load(net, ifr.ifr_name); |
4494 | read_lock(&dev_base_lock); | 4586 | rcu_read_lock(); |
4495 | ret = dev_ifsioc_locked(net, &ifr, cmd); | 4587 | ret = dev_ifsioc_locked(net, &ifr, cmd); |
4496 | read_unlock(&dev_base_lock); | 4588 | rcu_read_unlock(); |
4497 | if (!ret) { | 4589 | if (!ret) { |
4498 | if (colon) | 4590 | if (colon) |
4499 | *colon = ':'; | 4591 | *colon = ':'; |
@@ -4636,59 +4728,93 @@ static void net_set_todo(struct net_device *dev) | |||
4636 | list_add_tail(&dev->todo_list, &net_todo_list); | 4728 | list_add_tail(&dev->todo_list, &net_todo_list); |
4637 | } | 4729 | } |
4638 | 4730 | ||
4639 | static void rollback_registered(struct net_device *dev) | 4731 | static void rollback_registered_many(struct list_head *head) |
4640 | { | 4732 | { |
4733 | struct net_device *dev, *aux, *fdev; | ||
4734 | LIST_HEAD(pernet_list); | ||
4735 | |||
4641 | BUG_ON(dev_boot_phase); | 4736 | BUG_ON(dev_boot_phase); |
4642 | ASSERT_RTNL(); | 4737 | ASSERT_RTNL(); |
4643 | 4738 | ||
4644 | /* Some devices call without registering for initialization unwind. */ | 4739 | list_for_each_entry(dev, head, unreg_list) { |
4645 | if (dev->reg_state == NETREG_UNINITIALIZED) { | 4740 | /* Some devices call without registering |
4646 | printk(KERN_DEBUG "unregister_netdevice: device %s/%p never " | 4741 | * for initialization unwind. |
4647 | "was registered\n", dev->name, dev); | 4742 | */ |
4743 | if (dev->reg_state == NETREG_UNINITIALIZED) { | ||
4744 | pr_debug("unregister_netdevice: device %s/%p never " | ||
4745 | "was registered\n", dev->name, dev); | ||
4648 | 4746 | ||
4649 | WARN_ON(1); | 4747 | WARN_ON(1); |
4650 | return; | 4748 | return; |
4651 | } | 4749 | } |
4652 | 4750 | ||
4653 | BUG_ON(dev->reg_state != NETREG_REGISTERED); | 4751 | BUG_ON(dev->reg_state != NETREG_REGISTERED); |
4654 | 4752 | ||
4655 | /* If device is running, close it first. */ | 4753 | /* If device is running, close it first. */ |
4656 | dev_close(dev); | 4754 | dev_close(dev); |
4657 | 4755 | ||
4658 | /* And unlink it from device chain. */ | 4756 | /* And unlink it from device chain. */ |
4659 | unlist_netdevice(dev); | 4757 | unlist_netdevice(dev); |
4660 | 4758 | ||
4661 | dev->reg_state = NETREG_UNREGISTERING; | 4759 | dev->reg_state = NETREG_UNREGISTERING; |
4760 | } | ||
4662 | 4761 | ||
4663 | synchronize_net(); | 4762 | synchronize_net(); |
4664 | 4763 | ||
4665 | /* Shutdown queueing discipline. */ | 4764 | list_for_each_entry(dev, head, unreg_list) { |
4666 | dev_shutdown(dev); | 4765 | /* Shutdown queueing discipline. */ |
4766 | dev_shutdown(dev); | ||
4667 | 4767 | ||
4668 | 4768 | ||
4669 | /* Notify protocols, that we are about to destroy | 4769 | /* Notify protocols, that we are about to destroy |
4670 | this device. They should clean all the things. | 4770 | this device. They should clean all the things. |
4671 | */ | 4771 | */ |
4672 | call_netdevice_notifiers(NETDEV_UNREGISTER, dev); | 4772 | call_netdevice_notifiers(NETDEV_UNREGISTER, dev); |
4673 | 4773 | ||
4674 | /* | 4774 | /* |
4675 | * Flush the unicast and multicast chains | 4775 | * Flush the unicast and multicast chains |
4676 | */ | 4776 | */ |
4677 | dev_unicast_flush(dev); | 4777 | dev_unicast_flush(dev); |
4678 | dev_addr_discard(dev); | 4778 | dev_addr_discard(dev); |
4679 | 4779 | ||
4680 | if (dev->netdev_ops->ndo_uninit) | 4780 | if (dev->netdev_ops->ndo_uninit) |
4681 | dev->netdev_ops->ndo_uninit(dev); | 4781 | dev->netdev_ops->ndo_uninit(dev); |
4682 | 4782 | ||
4683 | /* Notifier chain MUST detach us from master device. */ | 4783 | /* Notifier chain MUST detach us from master device. */ |
4684 | WARN_ON(dev->master); | 4784 | WARN_ON(dev->master); |
4685 | 4785 | ||
4686 | /* Remove entries from kobject tree */ | 4786 | /* Remove entries from kobject tree */ |
4687 | netdev_unregister_kobject(dev); | 4787 | netdev_unregister_kobject(dev); |
4788 | } | ||
4688 | 4789 | ||
4689 | synchronize_net(); | 4790 | synchronize_net(); |
4690 | 4791 | ||
4691 | dev_put(dev); | 4792 | list_for_each_entry_safe(dev, aux, head, unreg_list) { |
4793 | int new_net = 1; | ||
4794 | list_for_each_entry(fdev, &pernet_list, unreg_list) { | ||
4795 | if (dev_net(dev) == dev_net(fdev)) { | ||
4796 | new_net = 0; | ||
4797 | dev_put(dev); | ||
4798 | break; | ||
4799 | } | ||
4800 | } | ||
4801 | if (new_net) | ||
4802 | list_move(&dev->unreg_list, &pernet_list); | ||
4803 | } | ||
4804 | |||
4805 | list_for_each_entry_safe(dev, aux, &pernet_list, unreg_list) { | ||
4806 | call_netdevice_notifiers(NETDEV_UNREGISTER_PERNET, dev); | ||
4807 | list_move(&dev->unreg_list, head); | ||
4808 | dev_put(dev); | ||
4809 | } | ||
4810 | } | ||
4811 | |||
4812 | static void rollback_registered(struct net_device *dev) | ||
4813 | { | ||
4814 | LIST_HEAD(single); | ||
4815 | |||
4816 | list_add(&dev->unreg_list, &single); | ||
4817 | rollback_registered_many(&single); | ||
4692 | } | 4818 | } |
4693 | 4819 | ||
4694 | static void __netdev_init_queue_locks_one(struct net_device *dev, | 4820 | static void __netdev_init_queue_locks_one(struct net_device *dev, |
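rollback_registered_many() above still sends NETDEV_UNREGISTER once per device, but only one NETDEV_UNREGISTER_PERNET per affected namespace. A hedged sketch of how a subsystem notifier might split its cleanup between the two events; both helper functions are hypothetical:

	static int example_netdev_event(struct notifier_block *nb,
					unsigned long event, void *ptr)
	{
		struct net_device *dev = ptr;

		switch (event) {
		case NETDEV_UNREGISTER:
			example_release_dev_state(dev);		/* hypothetical per-device cleanup */
			break;
		case NETDEV_UNREGISTER_PERNET:
			example_flush_net(dev_net(dev));	/* hypothetical per-namespace flush */
			break;
		}
		return NOTIFY_DONE;
	}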
@@ -4765,8 +4891,6 @@ EXPORT_SYMBOL(netdev_fix_features); | |||
4765 | 4891 | ||
4766 | int register_netdevice(struct net_device *dev) | 4892 | int register_netdevice(struct net_device *dev) |
4767 | { | 4893 | { |
4768 | struct hlist_head *head; | ||
4769 | struct hlist_node *p; | ||
4770 | int ret; | 4894 | int ret; |
4771 | struct net *net = dev_net(dev); | 4895 | struct net *net = dev_net(dev); |
4772 | 4896 | ||
@@ -4795,26 +4919,14 @@ int register_netdevice(struct net_device *dev) | |||
4795 | } | 4919 | } |
4796 | } | 4920 | } |
4797 | 4921 | ||
4798 | if (!dev_valid_name(dev->name)) { | 4922 | ret = dev_get_valid_name(net, dev->name, dev->name, 0); |
4799 | ret = -EINVAL; | 4923 | if (ret) |
4800 | goto err_uninit; | 4924 | goto err_uninit; |
4801 | } | ||
4802 | 4925 | ||
4803 | dev->ifindex = dev_new_index(net); | 4926 | dev->ifindex = dev_new_index(net); |
4804 | if (dev->iflink == -1) | 4927 | if (dev->iflink == -1) |
4805 | dev->iflink = dev->ifindex; | 4928 | dev->iflink = dev->ifindex; |
4806 | 4929 | ||
4807 | /* Check for existence of name */ | ||
4808 | head = dev_name_hash(net, dev->name); | ||
4809 | hlist_for_each(p, head) { | ||
4810 | struct net_device *d | ||
4811 | = hlist_entry(p, struct net_device, name_hlist); | ||
4812 | if (!strncmp(d->name, dev->name, IFNAMSIZ)) { | ||
4813 | ret = -EEXIST; | ||
4814 | goto err_uninit; | ||
4815 | } | ||
4816 | } | ||
4817 | |||
4818 | /* Fix illegal checksum combinations */ | 4930 | /* Fix illegal checksum combinations */ |
4819 | if ((dev->features & NETIF_F_HW_CSUM) && | 4931 | if ((dev->features & NETIF_F_HW_CSUM) && |
4820 | (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { | 4932 | (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { |
@@ -4837,6 +4949,12 @@ int register_netdevice(struct net_device *dev) | |||
4837 | dev->features |= NETIF_F_GSO; | 4949 | dev->features |= NETIF_F_GSO; |
4838 | 4950 | ||
4839 | netdev_initialize_kobject(dev); | 4951 | netdev_initialize_kobject(dev); |
4952 | |||
4953 | ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev); | ||
4954 | ret = notifier_to_errno(ret); | ||
4955 | if (ret) | ||
4956 | goto err_uninit; | ||
4957 | |||
4840 | ret = netdev_register_kobject(dev); | 4958 | ret = netdev_register_kobject(dev); |
4841 | if (ret) | 4959 | if (ret) |
4842 | goto err_uninit; | 4960 | goto err_uninit; |
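register_netdevice() now fires NETDEV_POST_INIT between kobject initialization and registration, so a notifier can veto a device before it becomes visible. A sketch, not taken from the patch, of a notifier rejecting registration; the policy check is hypothetical:

	static int example_post_init_event(struct notifier_block *nb,
					   unsigned long event, void *ptr)
	{
		struct net_device *dev = ptr;

		if (event == NETDEV_POST_INIT && !example_policy_allows(dev))	/* hypothetical check */
			return notifier_from_errno(-EPERM);
		return NOTIFY_DONE;
	}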
@@ -4961,6 +5079,8 @@ static void netdev_wait_allrefs(struct net_device *dev) | |||
4961 | { | 5079 | { |
4962 | unsigned long rebroadcast_time, warning_time; | 5080 | unsigned long rebroadcast_time, warning_time; |
4963 | 5081 | ||
5082 | linkwatch_forget_dev(dev); | ||
5083 | |||
4964 | rebroadcast_time = warning_time = jiffies; | 5084 | rebroadcast_time = warning_time = jiffies; |
4965 | while (atomic_read(&dev->refcnt) != 0) { | 5085 | while (atomic_read(&dev->refcnt) != 0) { |
4966 | if (time_after(jiffies, rebroadcast_time + 1 * HZ)) { | 5086 | if (time_after(jiffies, rebroadcast_time + 1 * HZ)) { |
@@ -4968,6 +5088,8 @@ static void netdev_wait_allrefs(struct net_device *dev) | |||
4968 | 5088 | ||
4969 | /* Rebroadcast unregister notification */ | 5089 | /* Rebroadcast unregister notification */ |
4970 | call_netdevice_notifiers(NETDEV_UNREGISTER, dev); | 5090 | call_netdevice_notifiers(NETDEV_UNREGISTER, dev); |
5091 | /* don't resend NETDEV_UNREGISTER_PERNET, _PERNET users | ||
5092 | * should have already handled it the first time */ | ||
4971 | 5093 | ||
4972 | if (test_bit(__LINK_STATE_LINKWATCH_PENDING, | 5094 | if (test_bit(__LINK_STATE_LINKWATCH_PENDING, |
4973 | &dev->state)) { | 5095 | &dev->state)) { |
@@ -5063,6 +5185,32 @@ void netdev_run_todo(void) | |||
5063 | } | 5185 | } |
5064 | 5186 | ||
5065 | /** | 5187 | /** |
5188 | * dev_txq_stats_fold - fold tx_queues stats | ||
5189 | * @dev: device to get statistics from | ||
5190 | * @stats: struct net_device_stats to hold results | ||
5191 | */ | ||
5192 | void dev_txq_stats_fold(const struct net_device *dev, | ||
5193 | struct net_device_stats *stats) | ||
5194 | { | ||
5195 | unsigned long tx_bytes = 0, tx_packets = 0, tx_dropped = 0; | ||
5196 | unsigned int i; | ||
5197 | struct netdev_queue *txq; | ||
5198 | |||
5199 | for (i = 0; i < dev->num_tx_queues; i++) { | ||
5200 | txq = netdev_get_tx_queue(dev, i); | ||
5201 | tx_bytes += txq->tx_bytes; | ||
5202 | tx_packets += txq->tx_packets; | ||
5203 | tx_dropped += txq->tx_dropped; | ||
5204 | } | ||
5205 | if (tx_bytes || tx_packets || tx_dropped) { | ||
5206 | stats->tx_bytes = tx_bytes; | ||
5207 | stats->tx_packets = tx_packets; | ||
5208 | stats->tx_dropped = tx_dropped; | ||
5209 | } | ||
5210 | } | ||
5211 | EXPORT_SYMBOL(dev_txq_stats_fold); | ||
5212 | |||
5213 | /** | ||
5066 | * dev_get_stats - get network device statistics | 5214 | * dev_get_stats - get network device statistics |
5067 | * @dev: device to get statistics from | 5215 | * @dev: device to get statistics from |
5068 | * | 5216 | * |
@@ -5076,25 +5224,9 @@ const struct net_device_stats *dev_get_stats(struct net_device *dev) | |||
5076 | 5224 | ||
5077 | if (ops->ndo_get_stats) | 5225 | if (ops->ndo_get_stats) |
5078 | return ops->ndo_get_stats(dev); | 5226 | return ops->ndo_get_stats(dev); |
5079 | else { | 5227 | |
5080 | unsigned long tx_bytes = 0, tx_packets = 0, tx_dropped = 0; | 5228 | dev_txq_stats_fold(dev, &dev->stats); |
5081 | struct net_device_stats *stats = &dev->stats; | 5229 | return &dev->stats; |
5082 | unsigned int i; | ||
5083 | struct netdev_queue *txq; | ||
5084 | |||
5085 | for (i = 0; i < dev->num_tx_queues; i++) { | ||
5086 | txq = netdev_get_tx_queue(dev, i); | ||
5087 | tx_bytes += txq->tx_bytes; | ||
5088 | tx_packets += txq->tx_packets; | ||
5089 | tx_dropped += txq->tx_dropped; | ||
5090 | } | ||
5091 | if (tx_bytes || tx_packets || tx_dropped) { | ||
5092 | stats->tx_bytes = tx_bytes; | ||
5093 | stats->tx_packets = tx_packets; | ||
5094 | stats->tx_dropped = tx_dropped; | ||
5095 | } | ||
5096 | return stats; | ||
5097 | } | ||
5098 | } | 5230 | } |
5099 | EXPORT_SYMBOL(dev_get_stats); | 5231 | EXPORT_SYMBOL(dev_get_stats); |
5100 | 5232 | ||
@@ -5174,6 +5306,8 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, | |||
5174 | netdev_init_queues(dev); | 5306 | netdev_init_queues(dev); |
5175 | 5307 | ||
5176 | INIT_LIST_HEAD(&dev->napi_list); | 5308 | INIT_LIST_HEAD(&dev->napi_list); |
5309 | INIT_LIST_HEAD(&dev->unreg_list); | ||
5310 | INIT_LIST_HEAD(&dev->link_watch_list); | ||
5177 | dev->priv_flags = IFF_XMIT_DST_RELEASE; | 5311 | dev->priv_flags = IFF_XMIT_DST_RELEASE; |
5178 | setup(dev); | 5312 | setup(dev); |
5179 | strcpy(dev->name, name); | 5313 | strcpy(dev->name, name); |
@@ -5238,25 +5372,52 @@ void synchronize_net(void) | |||
5238 | EXPORT_SYMBOL(synchronize_net); | 5372 | EXPORT_SYMBOL(synchronize_net); |
5239 | 5373 | ||
5240 | /** | 5374 | /** |
5241 | * unregister_netdevice - remove device from the kernel | 5375 | * unregister_netdevice_queue - remove device from the kernel |
5242 | * @dev: device | 5376 | * @dev: device |
5243 | * | 5377 | * @head: list on which to queue the device, or NULL |
5378 | |||
5244 | * This function shuts down a device interface and removes it | 5379 | * This function shuts down a device interface and removes it |
5245 | * from the kernel tables. | 5380 | * from the kernel tables. |
5381 | * If head is not NULL, the device is queued to be unregistered later. | ||
5246 | * | 5382 | * |
5247 | * Callers must hold the rtnl semaphore. You may want | 5383 | * Callers must hold the rtnl semaphore. You may want |
5248 | * unregister_netdev() instead of this. | 5384 | * unregister_netdev() instead of this. |
5249 | */ | 5385 | */ |
5250 | 5386 | ||
5251 | void unregister_netdevice(struct net_device *dev) | 5387 | void unregister_netdevice_queue(struct net_device *dev, struct list_head *head) |
5252 | { | 5388 | { |
5253 | ASSERT_RTNL(); | 5389 | ASSERT_RTNL(); |
5254 | 5390 | ||
5255 | rollback_registered(dev); | 5391 | if (head) { |
5256 | /* Finish processing unregister after unlock */ | 5392 | list_move_tail(&dev->unreg_list, head); |
5257 | net_set_todo(dev); | 5393 | } else { |
5394 | rollback_registered(dev); | ||
5395 | /* Finish processing unregister after unlock */ | ||
5396 | net_set_todo(dev); | ||
5397 | } | ||
5398 | } | ||
5399 | EXPORT_SYMBOL(unregister_netdevice_queue); | ||
5400 | |||
5401 | /** | ||
5402 | * unregister_netdevice_many - unregister many devices | ||
5403 | * @head: list of devices | ||
5404 | * | ||
5405 | * WARNING: Calling this modifies the given list | ||
5406 | * (in rollback_registered_many). It may change the order of the elements | ||
5407 | * in the list. However, you can assume it does not add or delete elements | ||
5408 | * to/from the list. | ||
5409 | */ | ||
5410 | void unregister_netdevice_many(struct list_head *head) | ||
5411 | { | ||
5412 | struct net_device *dev; | ||
5413 | |||
5414 | if (!list_empty(head)) { | ||
5415 | rollback_registered_many(head); | ||
5416 | list_for_each_entry(dev, head, unreg_list) | ||
5417 | net_set_todo(dev); | ||
5418 | } | ||
5258 | } | 5419 | } |
5259 | EXPORT_SYMBOL(unregister_netdevice); | 5420 | EXPORT_SYMBOL(unregister_netdevice_many); |
5260 | 5421 | ||
5261 | /** | 5422 | /** |
5262 | * unregister_netdev - remove device from the kernel | 5423 | * unregister_netdev - remove device from the kernel |
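Together these let a caller holding the RTNL queue any number of devices and pay for the rollback (and its synchronize_net() calls) only once. A sketch of the batching pattern, modelled on __rtnl_kill_links() later in this patch; example_link_ops is hypothetical:

	LIST_HEAD(kill_list);
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->rtnl_link_ops == &example_link_ops)	/* hypothetical ops */
			unregister_netdevice_queue(dev, &kill_list);
	unregister_netdevice_many(&kill_list);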
@@ -5293,8 +5454,6 @@ EXPORT_SYMBOL(unregister_netdev); | |||
5293 | 5454 | ||
5294 | int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat) | 5455 | int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat) |
5295 | { | 5456 | { |
5296 | char buf[IFNAMSIZ]; | ||
5297 | const char *destname; | ||
5298 | int err; | 5457 | int err; |
5299 | 5458 | ||
5300 | ASSERT_RTNL(); | 5459 | ASSERT_RTNL(); |
@@ -5327,20 +5486,11 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char | |||
5327 | * we can use it in the destination network namespace. | 5486 | * we can use it in the destination network namespace. |
5328 | */ | 5487 | */ |
5329 | err = -EEXIST; | 5488 | err = -EEXIST; |
5330 | destname = dev->name; | 5489 | if (__dev_get_by_name(net, dev->name)) { |
5331 | if (__dev_get_by_name(net, destname)) { | ||
5332 | /* We get here if we can't use the current device name */ | 5490 | /* We get here if we can't use the current device name */ |
5333 | if (!pat) | 5491 | if (!pat) |
5334 | goto out; | 5492 | goto out; |
5335 | if (!dev_valid_name(pat)) | 5493 | if (dev_get_valid_name(net, pat, dev->name, 1)) |
5336 | goto out; | ||
5337 | if (strchr(pat, '%')) { | ||
5338 | if (__dev_alloc_name(net, pat, buf) < 0) | ||
5339 | goto out; | ||
5340 | destname = buf; | ||
5341 | } else | ||
5342 | destname = pat; | ||
5343 | if (__dev_get_by_name(net, destname)) | ||
5344 | goto out; | 5494 | goto out; |
5345 | } | 5495 | } |
5346 | 5496 | ||
@@ -5364,6 +5514,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char | |||
5364 | this device. They should clean all the things. | 5514 | this device. They should clean all the things. |
5365 | */ | 5515 | */ |
5366 | call_netdevice_notifiers(NETDEV_UNREGISTER, dev); | 5516 | call_netdevice_notifiers(NETDEV_UNREGISTER, dev); |
5517 | call_netdevice_notifiers(NETDEV_UNREGISTER_PERNET, dev); | ||
5367 | 5518 | ||
5368 | /* | 5519 | /* |
5369 | * Flush the unicast and multicast chains | 5520 | * Flush the unicast and multicast chains |
@@ -5376,10 +5527,6 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char | |||
5376 | /* Actually switch the network namespace */ | 5527 | /* Actually switch the network namespace */ |
5377 | dev_net_set(dev, net); | 5528 | dev_net_set(dev, net); |
5378 | 5529 | ||
5379 | /* Assign the new device name */ | ||
5380 | if (destname != dev->name) | ||
5381 | strcpy(dev->name, destname); | ||
5382 | |||
5383 | /* If there is an ifindex conflict assign a new one */ | 5530 | /* If there is an ifindex conflict assign a new one */ |
5384 | if (__dev_get_by_index(net, dev->ifindex)) { | 5531 | if (__dev_get_by_index(net, dev->ifindex)) { |
5385 | int iflink = (dev->iflink == dev->ifindex); | 5532 | int iflink = (dev->iflink == dev->ifindex); |
@@ -5484,7 +5631,7 @@ unsigned long netdev_increment_features(unsigned long all, unsigned long one, | |||
5484 | one |= NETIF_F_ALL_CSUM; | 5631 | one |= NETIF_F_ALL_CSUM; |
5485 | 5632 | ||
5486 | one |= all & NETIF_F_ONE_FOR_ALL; | 5633 | one |= all & NETIF_F_ONE_FOR_ALL; |
5487 | all &= one | NETIF_F_LLTX | NETIF_F_GSO; | 5634 | all &= one | NETIF_F_LLTX | NETIF_F_GSO | NETIF_F_UFO; |
5488 | all |= one & mask & NETIF_F_ONE_FOR_ALL; | 5635 | all |= one & mask & NETIF_F_ONE_FOR_ALL; |
5489 | 5636 | ||
5490 | return all; | 5637 | return all; |
@@ -5583,7 +5730,7 @@ restart: | |||
5583 | 5730 | ||
5584 | /* Delete virtual devices */ | 5731 | /* Delete virtual devices */ |
5585 | if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink) { | 5732 | if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink) { |
5586 | dev->rtnl_link_ops->dellink(dev); | 5733 | dev->rtnl_link_ops->dellink(dev, NULL); |
5587 | goto restart; | 5734 | goto restart; |
5588 | } | 5735 | } |
5589 | 5736 | ||
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c index 0a113f26bc9f..b8e9d3a86887 100644 --- a/net/core/drop_monitor.c +++ b/net/core/drop_monitor.c | |||
@@ -41,7 +41,7 @@ static void send_dm_alert(struct work_struct *unused); | |||
41 | * netlink alerts | 41 | * netlink alerts |
42 | */ | 42 | */ |
43 | static int trace_state = TRACE_OFF; | 43 | static int trace_state = TRACE_OFF; |
44 | static spinlock_t trace_state_lock = SPIN_LOCK_UNLOCKED; | 44 | static DEFINE_SPINLOCK(trace_state_lock); |
45 | 45 | ||
46 | struct per_cpu_dm_data { | 46 | struct per_cpu_dm_data { |
47 | struct work_struct dm_alert_work; | 47 | struct work_struct dm_alert_work; |
diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 4c12ddb5f5ee..d8aee584e8d1 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c | |||
@@ -198,13 +198,6 @@ static int ethtool_get_drvinfo(struct net_device *dev, void __user *useraddr) | |||
198 | rc = ops->get_sset_count(dev, ETH_SS_PRIV_FLAGS); | 198 | rc = ops->get_sset_count(dev, ETH_SS_PRIV_FLAGS); |
199 | if (rc >= 0) | 199 | if (rc >= 0) |
200 | info.n_priv_flags = rc; | 200 | info.n_priv_flags = rc; |
201 | } else { | ||
202 | /* code path for obsolete hooks */ | ||
203 | |||
204 | if (ops->self_test_count) | ||
205 | info.testinfo_len = ops->self_test_count(dev); | ||
206 | if (ops->get_stats_count) | ||
207 | info.n_stats = ops->get_stats_count(dev); | ||
208 | } | 201 | } |
209 | if (ops->get_regs_len) | 202 | if (ops->get_regs_len) |
210 | info.regdump_len = ops->get_regs_len(dev); | 203 | info.regdump_len = ops->get_regs_len(dev); |
@@ -309,6 +302,26 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr) | |||
309 | return ret; | 302 | return ret; |
310 | } | 303 | } |
311 | 304 | ||
305 | static int ethtool_reset(struct net_device *dev, char __user *useraddr) | ||
306 | { | ||
307 | struct ethtool_value reset; | ||
308 | int ret; | ||
309 | |||
310 | if (!dev->ethtool_ops->reset) | ||
311 | return -EOPNOTSUPP; | ||
312 | |||
313 | if (copy_from_user(&reset, useraddr, sizeof(reset))) | ||
314 | return -EFAULT; | ||
315 | |||
316 | ret = dev->ethtool_ops->reset(dev, &reset.data); | ||
317 | if (ret) | ||
318 | return ret; | ||
319 | |||
320 | if (copy_to_user(useraddr, &reset, sizeof(reset))) | ||
321 | return -EFAULT; | ||
322 | return 0; | ||
323 | } | ||
324 | |||
312 | static int ethtool_get_wol(struct net_device *dev, char __user *useraddr) | 325 | static int ethtool_get_wol(struct net_device *dev, char __user *useraddr) |
313 | { | 326 | { |
314 | struct ethtool_wolinfo wol = { ETHTOOL_GWOL }; | 327 | struct ethtool_wolinfo wol = { ETHTOOL_GWOL }; |
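ethtool_reset() above passes struct ethtool_value straight through to the driver's new reset() hook and copies the (possibly modified) flag word back, so userspace can see which components were actually reset. A hedged userspace sketch, assuming the ETH_RESET_* flag values introduced alongside this hook; fd is any open socket (e.g. AF_INET/SOCK_DGRAM) and error reporting is trimmed:

	#include <string.h>
	#include <sys/ioctl.h>
	#include <net/if.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	static int request_reset(int fd, const char *ifname, __u32 flags)
	{
		struct ethtool_value ev = { .cmd = ETHTOOL_RESET, .data = flags };
		struct ifreq ifr;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&ev;

		if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
			return -1;
		return ev.data ? 1 : 0;	/* non-zero: some components were not reset */
	}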
@@ -684,16 +697,10 @@ static int ethtool_self_test(struct net_device *dev, char __user *useraddr) | |||
684 | u64 *data; | 697 | u64 *data; |
685 | int ret, test_len; | 698 | int ret, test_len; |
686 | 699 | ||
687 | if (!ops->self_test) | 700 | if (!ops->self_test || !ops->get_sset_count) |
688 | return -EOPNOTSUPP; | ||
689 | if (!ops->get_sset_count && !ops->self_test_count) | ||
690 | return -EOPNOTSUPP; | 701 | return -EOPNOTSUPP; |
691 | 702 | ||
692 | if (ops->get_sset_count) | 703 | test_len = ops->get_sset_count(dev, ETH_SS_TEST); |
693 | test_len = ops->get_sset_count(dev, ETH_SS_TEST); | ||
694 | else | ||
695 | /* code path for obsolete hook */ | ||
696 | test_len = ops->self_test_count(dev); | ||
697 | if (test_len < 0) | 704 | if (test_len < 0) |
698 | return test_len; | 705 | return test_len; |
699 | WARN_ON(test_len == 0); | 706 | WARN_ON(test_len == 0); |
@@ -728,36 +735,17 @@ static int ethtool_get_strings(struct net_device *dev, void __user *useraddr) | |||
728 | u8 *data; | 735 | u8 *data; |
729 | int ret; | 736 | int ret; |
730 | 737 | ||
731 | if (!ops->get_strings) | 738 | if (!ops->get_strings || !ops->get_sset_count) |
732 | return -EOPNOTSUPP; | 739 | return -EOPNOTSUPP; |
733 | 740 | ||
734 | if (copy_from_user(&gstrings, useraddr, sizeof(gstrings))) | 741 | if (copy_from_user(&gstrings, useraddr, sizeof(gstrings))) |
735 | return -EFAULT; | 742 | return -EFAULT; |
736 | 743 | ||
737 | if (ops->get_sset_count) { | 744 | ret = ops->get_sset_count(dev, gstrings.string_set); |
738 | ret = ops->get_sset_count(dev, gstrings.string_set); | 745 | if (ret < 0) |
739 | if (ret < 0) | 746 | return ret; |
740 | return ret; | 747 | |
741 | 748 | gstrings.len = ret; | |
742 | gstrings.len = ret; | ||
743 | } else { | ||
744 | /* code path for obsolete hooks */ | ||
745 | |||
746 | switch (gstrings.string_set) { | ||
747 | case ETH_SS_TEST: | ||
748 | if (!ops->self_test_count) | ||
749 | return -EOPNOTSUPP; | ||
750 | gstrings.len = ops->self_test_count(dev); | ||
751 | break; | ||
752 | case ETH_SS_STATS: | ||
753 | if (!ops->get_stats_count) | ||
754 | return -EOPNOTSUPP; | ||
755 | gstrings.len = ops->get_stats_count(dev); | ||
756 | break; | ||
757 | default: | ||
758 | return -EINVAL; | ||
759 | } | ||
760 | } | ||
761 | 749 | ||
762 | data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER); | 750 | data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER); |
763 | if (!data) | 751 | if (!data) |
@@ -798,16 +786,10 @@ static int ethtool_get_stats(struct net_device *dev, void __user *useraddr) | |||
798 | u64 *data; | 786 | u64 *data; |
799 | int ret, n_stats; | 787 | int ret, n_stats; |
800 | 788 | ||
801 | if (!ops->get_ethtool_stats) | 789 | if (!ops->get_ethtool_stats || !ops->get_sset_count) |
802 | return -EOPNOTSUPP; | ||
803 | if (!ops->get_sset_count && !ops->get_stats_count) | ||
804 | return -EOPNOTSUPP; | 790 | return -EOPNOTSUPP; |
805 | 791 | ||
806 | if (ops->get_sset_count) | 792 | n_stats = ops->get_sset_count(dev, ETH_SS_STATS); |
807 | n_stats = ops->get_sset_count(dev, ETH_SS_STATS); | ||
808 | else | ||
809 | /* code path for obsolete hook */ | ||
810 | n_stats = ops->get_stats_count(dev); | ||
811 | if (n_stats < 0) | 793 | if (n_stats < 0) |
812 | return n_stats; | 794 | return n_stats; |
813 | WARN_ON(n_stats == 0); | 795 | WARN_ON(n_stats == 0); |
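With the obsolete self_test_count/get_stats_count hooks gone, ethtool relies on get_sset_count() alone for every string set. A sketch of the driver-side shape it now expects; both counts are hypothetical:

	static int example_get_sset_count(struct net_device *dev, int sset)
	{
		switch (sset) {
		case ETH_SS_TEST:
			return 4;	/* hypothetical number of self-test results */
		case ETH_SS_STATS:
			return 16;	/* hypothetical number of statistics strings */
		default:
			return -EOPNOTSUPP;
		}
	}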
@@ -1127,6 +1109,9 @@ int dev_ethtool(struct net *net, struct ifreq *ifr) | |||
1127 | case ETHTOOL_FLASHDEV: | 1109 | case ETHTOOL_FLASHDEV: |
1128 | rc = ethtool_flash_device(dev, useraddr); | 1110 | rc = ethtool_flash_device(dev, useraddr); |
1129 | break; | 1111 | break; |
1112 | case ETHTOOL_RESET: | ||
1113 | rc = ethtool_reset(dev, useraddr); | ||
1114 | break; | ||
1130 | default: | 1115 | default: |
1131 | rc = -EOPNOTSUPP; | 1116 | rc = -EOPNOTSUPP; |
1132 | } | 1117 | } |
diff --git a/net/core/filter.c b/net/core/filter.c index d1d779ca096d..08db7b9143a3 100644 --- a/net/core/filter.c +++ b/net/core/filter.c | |||
@@ -303,6 +303,12 @@ load_b: | |||
303 | case SKF_AD_IFINDEX: | 303 | case SKF_AD_IFINDEX: |
304 | A = skb->dev->ifindex; | 304 | A = skb->dev->ifindex; |
305 | continue; | 305 | continue; |
306 | case SKF_AD_MARK: | ||
307 | A = skb->mark; | ||
308 | continue; | ||
309 | case SKF_AD_QUEUE: | ||
310 | A = skb->queue_mapping; | ||
311 | continue; | ||
306 | case SKF_AD_NLATTR: { | 312 | case SKF_AD_NLATTR: { |
307 | struct nlattr *nla; | 313 | struct nlattr *nla; |
308 | 314 | ||
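The two new ancillary loads expose skb->mark and skb->queue_mapping to classic BPF. A hedged userspace sketch of a socket filter that accepts only packets with mark 0x1 (the mark value is arbitrary), assuming the matching SKF_AD_MARK definition from the header side of this change:

	#include <linux/filter.h>

	static struct sock_filter code[] = {
		BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + SKF_AD_MARK),
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x1, 0, 1),
		BPF_STMT(BPF_RET | BPF_K, 0xffff),	/* accept */
		BPF_STMT(BPF_RET | BPF_K, 0),		/* drop */
	};

	static struct sock_fprog prog = {
		.len = sizeof(code) / sizeof(code[0]),
		.filter = code,
	};
	/* attach with setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog)) */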
diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c index 8569310268ab..393b1d8618e2 100644 --- a/net/core/gen_stats.c +++ b/net/core/gen_stats.c | |||
@@ -127,6 +127,7 @@ gnet_stats_copy_basic(struct gnet_dump *d, struct gnet_stats_basic_packed *b) | |||
127 | /** | 127 | /** |
128 | * gnet_stats_copy_rate_est - copy rate estimator statistics into statistics TLV | 128 | * gnet_stats_copy_rate_est - copy rate estimator statistics into statistics TLV |
129 | * @d: dumping handle | 129 | * @d: dumping handle |
130 | * @b: basic statistics | ||
130 | * @r: rate estimator statistics | 131 | * @r: rate estimator statistics |
131 | * | 132 | * |
132 | * Appends the rate estimator statistics to the top level TLV created by | 133 | * Appends the rate estimator statistics to the top level TLV created by |
@@ -136,8 +137,13 @@ gnet_stats_copy_basic(struct gnet_dump *d, struct gnet_stats_basic_packed *b) | |||
136 | * if the room in the socket buffer was not sufficient. | 137 | * if the room in the socket buffer was not sufficient. |
137 | */ | 138 | */ |
138 | int | 139 | int |
139 | gnet_stats_copy_rate_est(struct gnet_dump *d, struct gnet_stats_rate_est *r) | 140 | gnet_stats_copy_rate_est(struct gnet_dump *d, |
141 | const struct gnet_stats_basic_packed *b, | ||
142 | struct gnet_stats_rate_est *r) | ||
140 | { | 143 | { |
144 | if (b && !gen_estimator_active(b, r)) | ||
145 | return 0; | ||
146 | |||
141 | if (d->compat_tc_stats) { | 147 | if (d->compat_tc_stats) { |
142 | d->tc_stats.bps = r->bps; | 148 | d->tc_stats.bps = r->bps; |
143 | d->tc_stats.pps = r->pps; | 149 | d->tc_stats.pps = r->pps; |
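Passing the basic counters lets gnet_stats_copy_rate_est() skip the TLV entirely when no estimator is running on them. A sketch of an updated caller in a qdisc dump path; q and its bstats/rate_est/qstats members stand for a hypothetical qdisc private area:

	if (gnet_stats_copy_basic(d, &q->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &q->bstats, &q->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, &q->qstats) < 0)
		return -1;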
diff --git a/net/core/link_watch.c b/net/core/link_watch.c index bf8f7af699d7..5910b555a54a 100644 --- a/net/core/link_watch.c +++ b/net/core/link_watch.c | |||
@@ -35,7 +35,7 @@ static unsigned long linkwatch_nextevent; | |||
35 | static void linkwatch_event(struct work_struct *dummy); | 35 | static void linkwatch_event(struct work_struct *dummy); |
36 | static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event); | 36 | static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event); |
37 | 37 | ||
38 | static struct net_device *lweventlist; | 38 | static LIST_HEAD(lweventlist); |
39 | static DEFINE_SPINLOCK(lweventlist_lock); | 39 | static DEFINE_SPINLOCK(lweventlist_lock); |
40 | 40 | ||
41 | static unsigned char default_operstate(const struct net_device *dev) | 41 | static unsigned char default_operstate(const struct net_device *dev) |
@@ -89,8 +89,10 @@ static void linkwatch_add_event(struct net_device *dev) | |||
89 | unsigned long flags; | 89 | unsigned long flags; |
90 | 90 | ||
91 | spin_lock_irqsave(&lweventlist_lock, flags); | 91 | spin_lock_irqsave(&lweventlist_lock, flags); |
92 | dev->link_watch_next = lweventlist; | 92 | if (list_empty(&dev->link_watch_list)) { |
93 | lweventlist = dev; | 93 | list_add_tail(&dev->link_watch_list, &lweventlist); |
94 | dev_hold(dev); | ||
95 | } | ||
94 | spin_unlock_irqrestore(&lweventlist_lock, flags); | 96 | spin_unlock_irqrestore(&lweventlist_lock, flags); |
95 | } | 97 | } |
96 | 98 | ||
@@ -133,9 +135,35 @@ static void linkwatch_schedule_work(int urgent) | |||
133 | } | 135 | } |
134 | 136 | ||
135 | 137 | ||
138 | static void linkwatch_do_dev(struct net_device *dev) | ||
139 | { | ||
140 | /* | ||
141 | * Make sure the above read is complete since it can be | ||
142 | * rewritten as soon as we clear the bit below. | ||
143 | */ | ||
144 | smp_mb__before_clear_bit(); | ||
145 | |||
146 | /* We are about to handle this device, | ||
147 | * so new events can be accepted | ||
148 | */ | ||
149 | clear_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state); | ||
150 | |||
151 | rfc2863_policy(dev); | ||
152 | if (dev->flags & IFF_UP) { | ||
153 | if (netif_carrier_ok(dev)) | ||
154 | dev_activate(dev); | ||
155 | else | ||
156 | dev_deactivate(dev); | ||
157 | |||
158 | netdev_state_change(dev); | ||
159 | } | ||
160 | dev_put(dev); | ||
161 | } | ||
162 | |||
136 | static void __linkwatch_run_queue(int urgent_only) | 163 | static void __linkwatch_run_queue(int urgent_only) |
137 | { | 164 | { |
138 | struct net_device *next; | 165 | struct net_device *dev; |
166 | LIST_HEAD(wrk); | ||
139 | 167 | ||
140 | /* | 168 | /* |
141 | * Limit the number of linkwatch events to one | 169 | * Limit the number of linkwatch events to one |
@@ -153,46 +181,40 @@ static void __linkwatch_run_queue(int urgent_only) | |||
153 | clear_bit(LW_URGENT, &linkwatch_flags); | 181 | clear_bit(LW_URGENT, &linkwatch_flags); |
154 | 182 | ||
155 | spin_lock_irq(&lweventlist_lock); | 183 | spin_lock_irq(&lweventlist_lock); |
156 | next = lweventlist; | 184 | list_splice_init(&lweventlist, &wrk); |
157 | lweventlist = NULL; | ||
158 | spin_unlock_irq(&lweventlist_lock); | ||
159 | 185 | ||
160 | while (next) { | 186 | while (!list_empty(&wrk)) { |
161 | struct net_device *dev = next; | ||
162 | 187 | ||
163 | next = dev->link_watch_next; | 188 | dev = list_first_entry(&wrk, struct net_device, link_watch_list); |
189 | list_del_init(&dev->link_watch_list); | ||
164 | 190 | ||
165 | if (urgent_only && !linkwatch_urgent_event(dev)) { | 191 | if (urgent_only && !linkwatch_urgent_event(dev)) { |
166 | linkwatch_add_event(dev); | 192 | list_add_tail(&dev->link_watch_list, &lweventlist); |
167 | continue; | 193 | continue; |
168 | } | 194 | } |
169 | 195 | spin_unlock_irq(&lweventlist_lock); | |
170 | /* | 196 | linkwatch_do_dev(dev); |
171 | * Make sure the above read is complete since it can be | 197 | spin_lock_irq(&lweventlist_lock); |
172 | * rewritten as soon as we clear the bit below. | ||
173 | */ | ||
174 | smp_mb__before_clear_bit(); | ||
175 | |||
176 | /* We are about to handle this device, | ||
177 | * so new events can be accepted | ||
178 | */ | ||
179 | clear_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state); | ||
180 | |||
181 | rfc2863_policy(dev); | ||
182 | if (dev->flags & IFF_UP) { | ||
183 | if (netif_carrier_ok(dev)) | ||
184 | dev_activate(dev); | ||
185 | else | ||
186 | dev_deactivate(dev); | ||
187 | |||
188 | netdev_state_change(dev); | ||
189 | } | ||
190 | |||
191 | dev_put(dev); | ||
192 | } | 198 | } |
193 | 199 | ||
194 | if (lweventlist) | 200 | if (!list_empty(&lweventlist)) |
195 | linkwatch_schedule_work(0); | 201 | linkwatch_schedule_work(0); |
202 | spin_unlock_irq(&lweventlist_lock); | ||
203 | } | ||
204 | |||
205 | void linkwatch_forget_dev(struct net_device *dev) | ||
206 | { | ||
207 | unsigned long flags; | ||
208 | int clean = 0; | ||
209 | |||
210 | spin_lock_irqsave(&lweventlist_lock, flags); | ||
211 | if (!list_empty(&dev->link_watch_list)) { | ||
212 | list_del_init(&dev->link_watch_list); | ||
213 | clean = 1; | ||
214 | } | ||
215 | spin_unlock_irqrestore(&lweventlist_lock, flags); | ||
216 | if (clean) | ||
217 | linkwatch_do_dev(dev); | ||
196 | } | 218 | } |
197 | 219 | ||
198 | 220 | ||
@@ -216,8 +238,6 @@ void linkwatch_fire_event(struct net_device *dev) | |||
216 | bool urgent = linkwatch_urgent_event(dev); | 238 | bool urgent = linkwatch_urgent_event(dev); |
217 | 239 | ||
218 | if (!test_and_set_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state)) { | 240 | if (!test_and_set_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state)) { |
219 | dev_hold(dev); | ||
220 | |||
221 | linkwatch_add_event(dev); | 241 | linkwatch_add_event(dev); |
222 | } else if (!urgent) | 242 | } else if (!urgent) |
223 | return; | 243 | return; |
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 427ded841224..157645c0da73 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c | |||
@@ -130,6 +130,48 @@ static ssize_t show_carrier(struct device *dev, | |||
130 | return -EINVAL; | 130 | return -EINVAL; |
131 | } | 131 | } |
132 | 132 | ||
133 | static ssize_t show_speed(struct device *dev, | ||
134 | struct device_attribute *attr, char *buf) | ||
135 | { | ||
136 | struct net_device *netdev = to_net_dev(dev); | ||
137 | int ret = -EINVAL; | ||
138 | |||
139 | if (!rtnl_trylock()) | ||
140 | return restart_syscall(); | ||
141 | |||
142 | if (netif_running(netdev) && | ||
143 | netdev->ethtool_ops && | ||
144 | netdev->ethtool_ops->get_settings) { | ||
145 | struct ethtool_cmd cmd = { ETHTOOL_GSET }; | ||
146 | |||
147 | if (!netdev->ethtool_ops->get_settings(netdev, &cmd)) | ||
148 | ret = sprintf(buf, fmt_dec, ethtool_cmd_speed(&cmd)); | ||
149 | } | ||
150 | rtnl_unlock(); | ||
151 | return ret; | ||
152 | } | ||
153 | |||
154 | static ssize_t show_duplex(struct device *dev, | ||
155 | struct device_attribute *attr, char *buf) | ||
156 | { | ||
157 | struct net_device *netdev = to_net_dev(dev); | ||
158 | int ret = -EINVAL; | ||
159 | |||
160 | if (!rtnl_trylock()) | ||
161 | return restart_syscall(); | ||
162 | |||
163 | if (netif_running(netdev) && | ||
164 | netdev->ethtool_ops && | ||
165 | netdev->ethtool_ops->get_settings) { | ||
166 | struct ethtool_cmd cmd = { ETHTOOL_GSET }; | ||
167 | |||
168 | if (!netdev->ethtool_ops->get_settings(netdev, &cmd)) | ||
169 | ret = sprintf(buf, "%s\n", cmd.duplex ? "full" : "half"); | ||
170 | } | ||
171 | rtnl_unlock(); | ||
172 | return ret; | ||
173 | } | ||
174 | |||
133 | static ssize_t show_dormant(struct device *dev, | 175 | static ssize_t show_dormant(struct device *dev, |
134 | struct device_attribute *attr, char *buf) | 176 | struct device_attribute *attr, char *buf) |
135 | { | 177 | { |
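The new speed and duplex attributes expose the ETHTOOL_GSET values through /sys/class/net/<if>/ without an ioctl. A small userspace sketch reading the speed file; it returns -1 when the attribute reports no usable value (link down or driver without get_settings):

	#include <stdio.h>

	static int read_link_speed(const char *ifname)
	{
		char path[128];
		int speed = -1;
		FILE *f;

		snprintf(path, sizeof(path), "/sys/class/net/%s/speed", ifname);
		f = fopen(path, "r");
		if (!f)
			return -1;
		if (fscanf(f, "%d", &speed) != 1)
			speed = -1;
		fclose(f);
		return speed;	/* Mb/s */
	}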
@@ -259,6 +301,8 @@ static struct device_attribute net_class_attributes[] = { | |||
259 | __ATTR(address, S_IRUGO, show_address, NULL), | 301 | __ATTR(address, S_IRUGO, show_address, NULL), |
260 | __ATTR(broadcast, S_IRUGO, show_broadcast, NULL), | 302 | __ATTR(broadcast, S_IRUGO, show_broadcast, NULL), |
261 | __ATTR(carrier, S_IRUGO, show_carrier, NULL), | 303 | __ATTR(carrier, S_IRUGO, show_carrier, NULL), |
304 | __ATTR(speed, S_IRUGO, show_speed, NULL), | ||
305 | __ATTR(duplex, S_IRUGO, show_duplex, NULL), | ||
262 | __ATTR(dormant, S_IRUGO, show_dormant, NULL), | 306 | __ATTR(dormant, S_IRUGO, show_dormant, NULL), |
263 | __ATTR(operstate, S_IRUGO, show_operstate, NULL), | 307 | __ATTR(operstate, S_IRUGO, show_operstate, NULL), |
264 | __ATTR(mtu, S_IRUGO | S_IWUSR, show_mtu, store_mtu), | 308 | __ATTR(mtu, S_IRUGO | S_IWUSR, show_mtu, store_mtu), |
@@ -500,12 +544,19 @@ int netdev_register_kobject(struct net_device *net) | |||
500 | dev_set_name(dev, "%s", net->name); | 544 | dev_set_name(dev, "%s", net->name); |
501 | 545 | ||
502 | #ifdef CONFIG_SYSFS | 546 | #ifdef CONFIG_SYSFS |
503 | *groups++ = &netstat_group; | 547 | /* Allow for a device specific group */ |
548 | if (*groups) | ||
549 | groups++; | ||
504 | 550 | ||
551 | *groups++ = &netstat_group; | ||
505 | #ifdef CONFIG_WIRELESS_EXT_SYSFS | 552 | #ifdef CONFIG_WIRELESS_EXT_SYSFS |
506 | if (net->wireless_handlers || net->ieee80211_ptr) | 553 | if (net->ieee80211_ptr) |
554 | *groups++ = &wireless_group; | ||
555 | #ifdef CONFIG_WIRELESS_EXT | ||
556 | else if (net->wireless_handlers) | ||
507 | *groups++ = &wireless_group; | 557 | *groups++ = &wireless_group; |
508 | #endif | 558 | #endif |
559 | #endif | ||
509 | #endif /* CONFIG_SYSFS */ | 560 | #endif /* CONFIG_SYSFS */ |
510 | 561 | ||
511 | if (dev_net(net) != &init_net) | 562 | if (dev_net(net) != &init_net) |
diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 6eb8d47cbf3a..d38470a32792 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c | |||
@@ -340,6 +340,7 @@ struct pktgen_dev { | |||
340 | __u16 cur_udp_src; | 340 | __u16 cur_udp_src; |
341 | __u16 cur_queue_map; | 341 | __u16 cur_queue_map; |
342 | __u32 cur_pkt_size; | 342 | __u32 cur_pkt_size; |
343 | __u32 last_pkt_size; | ||
343 | 344 | ||
344 | __u8 hh[14]; | 345 | __u8 hh[14]; |
345 | /* = { | 346 | /* = { |
@@ -3434,7 +3435,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev) | |||
3434 | pkt_dev->clone_count--; /* back out increment, OOM */ | 3435 | pkt_dev->clone_count--; /* back out increment, OOM */ |
3435 | return; | 3436 | return; |
3436 | } | 3437 | } |
3437 | 3438 | pkt_dev->last_pkt_size = pkt_dev->skb->len; | |
3438 | pkt_dev->allocated_skbs++; | 3439 | pkt_dev->allocated_skbs++; |
3439 | pkt_dev->clone_count = 0; /* reset counter */ | 3440 | pkt_dev->clone_count = 0; /* reset counter */ |
3440 | } | 3441 | } |
@@ -3446,12 +3447,14 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev) | |||
3446 | txq = netdev_get_tx_queue(odev, queue_map); | 3447 | txq = netdev_get_tx_queue(odev, queue_map); |
3447 | 3448 | ||
3448 | __netif_tx_lock_bh(txq); | 3449 | __netif_tx_lock_bh(txq); |
3449 | atomic_inc(&(pkt_dev->skb->users)); | ||
3450 | 3450 | ||
3451 | if (unlikely(netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq))) | 3451 | if (unlikely(netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq))) { |
3452 | ret = NETDEV_TX_BUSY; | 3452 | ret = NETDEV_TX_BUSY; |
3453 | else | 3453 | pkt_dev->last_ok = 0; |
3454 | ret = (*xmit)(pkt_dev->skb, odev); | 3454 | goto unlock; |
3455 | } | ||
3456 | atomic_inc(&(pkt_dev->skb->users)); | ||
3457 | ret = (*xmit)(pkt_dev->skb, odev); | ||
3455 | 3458 | ||
3456 | switch (ret) { | 3459 | switch (ret) { |
3457 | case NETDEV_TX_OK: | 3460 | case NETDEV_TX_OK: |
@@ -3459,7 +3462,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev) | |||
3459 | pkt_dev->last_ok = 1; | 3462 | pkt_dev->last_ok = 1; |
3460 | pkt_dev->sofar++; | 3463 | pkt_dev->sofar++; |
3461 | pkt_dev->seq_num++; | 3464 | pkt_dev->seq_num++; |
3462 | pkt_dev->tx_bytes += pkt_dev->cur_pkt_size; | 3465 | pkt_dev->tx_bytes += pkt_dev->last_pkt_size; |
3463 | break; | 3466 | break; |
3464 | default: /* Drivers are not supposed to return other values! */ | 3467 | default: /* Drivers are not supposed to return other values! */ |
3465 | if (net_ratelimit()) | 3468 | if (net_ratelimit()) |
@@ -3473,6 +3476,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev) | |||
3473 | atomic_dec(&(pkt_dev->skb->users)); | 3476 | atomic_dec(&(pkt_dev->skb->users)); |
3474 | pkt_dev->last_ok = 0; | 3477 | pkt_dev->last_ok = 0; |
3475 | } | 3478 | } |
3479 | unlock: | ||
3476 | __netif_tx_unlock_bh(txq); | 3480 | __netif_tx_unlock_bh(txq); |
3477 | 3481 | ||
3478 | /* If pkt_dev->count is zero, then run forever */ | 3482 | /* If pkt_dev->count is zero, then run forever */ |
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index eb42873f2a3a..33148a568199 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
@@ -38,7 +38,6 @@ | |||
38 | 38 | ||
39 | #include <asm/uaccess.h> | 39 | #include <asm/uaccess.h> |
40 | #include <asm/system.h> | 40 | #include <asm/system.h> |
41 | #include <asm/string.h> | ||
42 | 41 | ||
43 | #include <linux/inet.h> | 42 | #include <linux/inet.h> |
44 | #include <linux/netdevice.h> | 43 | #include <linux/netdevice.h> |
@@ -53,8 +52,7 @@ | |||
53 | #include <net/rtnetlink.h> | 52 | #include <net/rtnetlink.h> |
54 | #include <net/net_namespace.h> | 53 | #include <net/net_namespace.h> |
55 | 54 | ||
56 | struct rtnl_link | 55 | struct rtnl_link { |
57 | { | ||
58 | rtnl_doit_func doit; | 56 | rtnl_doit_func doit; |
59 | rtnl_dumpit_func dumpit; | 57 | rtnl_dumpit_func dumpit; |
60 | }; | 58 | }; |
@@ -65,6 +63,7 @@ void rtnl_lock(void) | |||
65 | { | 63 | { |
66 | mutex_lock(&rtnl_mutex); | 64 | mutex_lock(&rtnl_mutex); |
67 | } | 65 | } |
66 | EXPORT_SYMBOL(rtnl_lock); | ||
68 | 67 | ||
69 | void __rtnl_unlock(void) | 68 | void __rtnl_unlock(void) |
70 | { | 69 | { |
@@ -76,16 +75,19 @@ void rtnl_unlock(void) | |||
76 | /* This fellow will unlock it for us. */ | 75 | /* This fellow will unlock it for us. */ |
77 | netdev_run_todo(); | 76 | netdev_run_todo(); |
78 | } | 77 | } |
78 | EXPORT_SYMBOL(rtnl_unlock); | ||
79 | 79 | ||
80 | int rtnl_trylock(void) | 80 | int rtnl_trylock(void) |
81 | { | 81 | { |
82 | return mutex_trylock(&rtnl_mutex); | 82 | return mutex_trylock(&rtnl_mutex); |
83 | } | 83 | } |
84 | EXPORT_SYMBOL(rtnl_trylock); | ||
84 | 85 | ||
85 | int rtnl_is_locked(void) | 86 | int rtnl_is_locked(void) |
86 | { | 87 | { |
87 | return mutex_is_locked(&rtnl_mutex); | 88 | return mutex_is_locked(&rtnl_mutex); |
88 | } | 89 | } |
90 | EXPORT_SYMBOL(rtnl_is_locked); | ||
89 | 91 | ||
90 | static struct rtnl_link *rtnl_msg_handlers[NPROTO]; | 92 | static struct rtnl_link *rtnl_msg_handlers[NPROTO]; |
91 | 93 | ||
@@ -168,7 +170,6 @@ int __rtnl_register(int protocol, int msgtype, | |||
168 | 170 | ||
169 | return 0; | 171 | return 0; |
170 | } | 172 | } |
171 | |||
172 | EXPORT_SYMBOL_GPL(__rtnl_register); | 173 | EXPORT_SYMBOL_GPL(__rtnl_register); |
173 | 174 | ||
174 | /** | 175 | /** |
@@ -188,7 +189,6 @@ void rtnl_register(int protocol, int msgtype, | |||
188 | "protocol = %d, message type = %d\n", | 189 | "protocol = %d, message type = %d\n", |
189 | protocol, msgtype); | 190 | protocol, msgtype); |
190 | } | 191 | } |
191 | |||
192 | EXPORT_SYMBOL_GPL(rtnl_register); | 192 | EXPORT_SYMBOL_GPL(rtnl_register); |
193 | 193 | ||
194 | /** | 194 | /** |
@@ -213,7 +213,6 @@ int rtnl_unregister(int protocol, int msgtype) | |||
213 | 213 | ||
214 | return 0; | 214 | return 0; |
215 | } | 215 | } |
216 | |||
217 | EXPORT_SYMBOL_GPL(rtnl_unregister); | 216 | EXPORT_SYMBOL_GPL(rtnl_unregister); |
218 | 217 | ||
219 | /** | 218 | /** |
@@ -230,7 +229,6 @@ void rtnl_unregister_all(int protocol) | |||
230 | kfree(rtnl_msg_handlers[protocol]); | 229 | kfree(rtnl_msg_handlers[protocol]); |
231 | rtnl_msg_handlers[protocol] = NULL; | 230 | rtnl_msg_handlers[protocol] = NULL; |
232 | } | 231 | } |
233 | |||
234 | EXPORT_SYMBOL_GPL(rtnl_unregister_all); | 232 | EXPORT_SYMBOL_GPL(rtnl_unregister_all); |
235 | 233 | ||
236 | static LIST_HEAD(link_ops); | 234 | static LIST_HEAD(link_ops); |
@@ -248,12 +246,11 @@ static LIST_HEAD(link_ops); | |||
248 | int __rtnl_link_register(struct rtnl_link_ops *ops) | 246 | int __rtnl_link_register(struct rtnl_link_ops *ops) |
249 | { | 247 | { |
250 | if (!ops->dellink) | 248 | if (!ops->dellink) |
251 | ops->dellink = unregister_netdevice; | 249 | ops->dellink = unregister_netdevice_queue; |
252 | 250 | ||
253 | list_add_tail(&ops->list, &link_ops); | 251 | list_add_tail(&ops->list, &link_ops); |
254 | return 0; | 252 | return 0; |
255 | } | 253 | } |
256 | |||
257 | EXPORT_SYMBOL_GPL(__rtnl_link_register); | 254 | EXPORT_SYMBOL_GPL(__rtnl_link_register); |
258 | 255 | ||
259 | /** | 256 | /** |
@@ -271,19 +268,18 @@ int rtnl_link_register(struct rtnl_link_ops *ops) | |||
271 | rtnl_unlock(); | 268 | rtnl_unlock(); |
272 | return err; | 269 | return err; |
273 | } | 270 | } |
274 | |||
275 | EXPORT_SYMBOL_GPL(rtnl_link_register); | 271 | EXPORT_SYMBOL_GPL(rtnl_link_register); |
276 | 272 | ||
277 | static void __rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops) | 273 | static void __rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops) |
278 | { | 274 | { |
279 | struct net_device *dev; | 275 | struct net_device *dev; |
280 | restart: | 276 | LIST_HEAD(list_kill); |
277 | |||
281 | for_each_netdev(net, dev) { | 278 | for_each_netdev(net, dev) { |
282 | if (dev->rtnl_link_ops == ops) { | 279 | if (dev->rtnl_link_ops == ops) |
283 | ops->dellink(dev); | 280 | ops->dellink(dev, &list_kill); |
284 | goto restart; | ||
285 | } | ||
286 | } | 281 | } |
282 | unregister_netdevice_many(&list_kill); | ||
287 | } | 283 | } |
288 | 284 | ||
289 | void rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops) | 285 | void rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops) |
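Since dellink() now takes a list head (defaulting to unregister_netdevice_queue), a virtual-device driver can queue its removal and let the caller batch the actual unregistration. A hedged sketch; the private structure and detach helper are hypothetical, modelled on stacked devices such as VLANs:

	static void example_dellink(struct net_device *dev, struct list_head *head)
	{
		struct example_priv *priv = netdev_priv(dev);	/* hypothetical private area */

		example_detach_from_lower(priv->lowerdev, dev);	/* hypothetical unlink from the real device */
		unregister_netdevice_queue(dev, head);		/* a NULL head still unregisters immediately */
	}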
@@ -309,7 +305,6 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops) | |||
309 | } | 305 | } |
310 | list_del(&ops->list); | 306 | list_del(&ops->list); |
311 | } | 307 | } |
312 | |||
313 | EXPORT_SYMBOL_GPL(__rtnl_link_unregister); | 308 | EXPORT_SYMBOL_GPL(__rtnl_link_unregister); |
314 | 309 | ||
315 | /** | 310 | /** |
@@ -322,7 +317,6 @@ void rtnl_link_unregister(struct rtnl_link_ops *ops) | |||
322 | __rtnl_link_unregister(ops); | 317 | __rtnl_link_unregister(ops); |
323 | rtnl_unlock(); | 318 | rtnl_unlock(); |
324 | } | 319 | } |
325 | |||
326 | EXPORT_SYMBOL_GPL(rtnl_link_unregister); | 320 | EXPORT_SYMBOL_GPL(rtnl_link_unregister); |
327 | 321 | ||
328 | static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind) | 322 | static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind) |
@@ -427,12 +421,13 @@ void __rta_fill(struct sk_buff *skb, int attrtype, int attrlen, const void *data | |||
427 | struct rtattr *rta; | 421 | struct rtattr *rta; |
428 | int size = RTA_LENGTH(attrlen); | 422 | int size = RTA_LENGTH(attrlen); |
429 | 423 | ||
430 | rta = (struct rtattr*)skb_put(skb, RTA_ALIGN(size)); | 424 | rta = (struct rtattr *)skb_put(skb, RTA_ALIGN(size)); |
431 | rta->rta_type = attrtype; | 425 | rta->rta_type = attrtype; |
432 | rta->rta_len = size; | 426 | rta->rta_len = size; |
433 | memcpy(RTA_DATA(rta), data, attrlen); | 427 | memcpy(RTA_DATA(rta), data, attrlen); |
434 | memset(RTA_DATA(rta) + attrlen, 0, RTA_ALIGN(size) - size); | 428 | memset(RTA_DATA(rta) + attrlen, 0, RTA_ALIGN(size) - size); |
435 | } | 429 | } |
430 | EXPORT_SYMBOL(__rta_fill); | ||
436 | 431 | ||
437 | int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned group, int echo) | 432 | int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned group, int echo) |
438 | { | 433 | { |
@@ -454,6 +449,7 @@ int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid) | |||
454 | 449 | ||
455 | return nlmsg_unicast(rtnl, skb, pid); | 450 | return nlmsg_unicast(rtnl, skb, pid); |
456 | } | 451 | } |
452 | EXPORT_SYMBOL(rtnl_unicast); | ||
457 | 453 | ||
458 | void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group, | 454 | void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group, |
459 | struct nlmsghdr *nlh, gfp_t flags) | 455 | struct nlmsghdr *nlh, gfp_t flags) |
@@ -466,6 +462,7 @@ void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group, | |||
466 | 462 | ||
467 | nlmsg_notify(rtnl, skb, pid, group, report, flags); | 463 | nlmsg_notify(rtnl, skb, pid, group, report, flags); |
468 | } | 464 | } |
465 | EXPORT_SYMBOL(rtnl_notify); | ||
469 | 466 | ||
470 | void rtnl_set_sk_err(struct net *net, u32 group, int error) | 467 | void rtnl_set_sk_err(struct net *net, u32 group, int error) |
471 | { | 468 | { |
@@ -473,6 +470,7 @@ void rtnl_set_sk_err(struct net *net, u32 group, int error) | |||
473 | 470 | ||
474 | netlink_set_err(rtnl, 0, group, error); | 471 | netlink_set_err(rtnl, 0, group, error); |
475 | } | 472 | } |
473 | EXPORT_SYMBOL(rtnl_set_sk_err); | ||
476 | 474 | ||
477 | int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics) | 475 | int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics) |
478 | { | 476 | { |
@@ -501,6 +499,7 @@ nla_put_failure: | |||
501 | nla_nest_cancel(skb, mx); | 499 | nla_nest_cancel(skb, mx); |
502 | return -EMSGSIZE; | 500 | return -EMSGSIZE; |
503 | } | 501 | } |
502 | EXPORT_SYMBOL(rtnetlink_put_metrics); | ||
504 | 503 | ||
505 | int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id, | 504 | int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id, |
506 | u32 ts, u32 tsage, long expires, u32 error) | 505 | u32 ts, u32 tsage, long expires, u32 error) |
@@ -520,14 +519,13 @@ int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id, | |||
520 | 519 | ||
521 | return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci); | 520 | return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci); |
522 | } | 521 | } |
523 | |||
524 | EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo); | 522 | EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo); |
525 | 523 | ||
526 | static void set_operstate(struct net_device *dev, unsigned char transition) | 524 | static void set_operstate(struct net_device *dev, unsigned char transition) |
527 | { | 525 | { |
528 | unsigned char operstate = dev->operstate; | 526 | unsigned char operstate = dev->operstate; |
529 | 527 | ||
530 | switch(transition) { | 528 | switch (transition) { |
531 | case IF_OPER_UP: | 529 | case IF_OPER_UP: |
532 | if ((operstate == IF_OPER_DORMANT || | 530 | if ((operstate == IF_OPER_DORMANT || |
533 | operstate == IF_OPER_UNKNOWN) && | 531 | operstate == IF_OPER_UNKNOWN) && |
@@ -682,22 +680,33 @@ nla_put_failure: | |||
682 | static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) | 680 | static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) |
683 | { | 681 | { |
684 | struct net *net = sock_net(skb->sk); | 682 | struct net *net = sock_net(skb->sk); |
685 | int idx; | 683 | int h, s_h; |
686 | int s_idx = cb->args[0]; | 684 | int idx = 0, s_idx; |
687 | struct net_device *dev; | 685 | struct net_device *dev; |
688 | 686 | struct hlist_head *head; | |
689 | idx = 0; | 687 | struct hlist_node *node; |
690 | for_each_netdev(net, dev) { | 688 | |
691 | if (idx < s_idx) | 689 | s_h = cb->args[0]; |
692 | goto cont; | 690 | s_idx = cb->args[1]; |
693 | if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK, | 691 | |
694 | NETLINK_CB(cb->skb).pid, | 692 | for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { |
695 | cb->nlh->nlmsg_seq, 0, NLM_F_MULTI) <= 0) | 693 | idx = 0; |
696 | break; | 694 | head = &net->dev_index_head[h]; |
695 | hlist_for_each_entry(dev, node, head, index_hlist) { | ||
696 | if (idx < s_idx) | ||
697 | goto cont; | ||
698 | if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK, | ||
699 | NETLINK_CB(cb->skb).pid, | ||
700 | cb->nlh->nlmsg_seq, 0, | ||
701 | NLM_F_MULTI) <= 0) | ||
702 | goto out; | ||
697 | cont: | 703 | cont: |
698 | idx++; | 704 | idx++; |
705 | } | ||
699 | } | 706 | } |
700 | cb->args[0] = idx; | 707 | out: |
708 | cb->args[1] = idx; | ||
709 | cb->args[0] = h; | ||
701 | 710 | ||
702 | return skb->len; | 711 | return skb->len; |
703 | } | 712 | } |
@@ -717,12 +726,27 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = { | |||
717 | [IFLA_NET_NS_PID] = { .type = NLA_U32 }, | 726 | [IFLA_NET_NS_PID] = { .type = NLA_U32 }, |
718 | [IFLA_IFALIAS] = { .type = NLA_STRING, .len = IFALIASZ-1 }, | 727 | [IFLA_IFALIAS] = { .type = NLA_STRING, .len = IFALIASZ-1 }, |
719 | }; | 728 | }; |
729 | EXPORT_SYMBOL(ifla_policy); | ||
720 | 730 | ||
721 | static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = { | 731 | static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = { |
722 | [IFLA_INFO_KIND] = { .type = NLA_STRING }, | 732 | [IFLA_INFO_KIND] = { .type = NLA_STRING }, |
723 | [IFLA_INFO_DATA] = { .type = NLA_NESTED }, | 733 | [IFLA_INFO_DATA] = { .type = NLA_NESTED }, |
724 | }; | 734 | }; |
725 | 735 | ||
736 | struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]) | ||
737 | { | ||
738 | struct net *net; | ||
739 | /* Examine the link attributes and figure out which | ||
740 | * network namespace we are talking about. | ||
741 | */ | ||
742 | if (tb[IFLA_NET_NS_PID]) | ||
743 | net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID])); | ||
744 | else | ||
745 | net = get_net(src_net); | ||
746 | return net; | ||
747 | } | ||
748 | EXPORT_SYMBOL(rtnl_link_get_net); | ||
749 | |||
726 | static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[]) | 750 | static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[]) |
727 | { | 751 | { |
728 | if (dev) { | 752 | if (dev) { |
@@ -746,8 +770,7 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm, | |||
746 | int err; | 770 | int err; |
747 | 771 | ||
748 | if (tb[IFLA_NET_NS_PID]) { | 772 | if (tb[IFLA_NET_NS_PID]) { |
749 | struct net *net; | 773 | struct net *net = rtnl_link_get_net(dev_net(dev), tb); |
750 | net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID])); | ||
751 | if (IS_ERR(net)) { | 774 | if (IS_ERR(net)) { |
752 | err = PTR_ERR(net); | 775 | err = PTR_ERR(net); |
753 | goto errout; | 776 | goto errout; |
@@ -910,9 +933,9 @@ static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) | |||
910 | err = -EINVAL; | 933 | err = -EINVAL; |
911 | ifm = nlmsg_data(nlh); | 934 | ifm = nlmsg_data(nlh); |
912 | if (ifm->ifi_index > 0) | 935 | if (ifm->ifi_index > 0) |
913 | dev = dev_get_by_index(net, ifm->ifi_index); | 936 | dev = __dev_get_by_index(net, ifm->ifi_index); |
914 | else if (tb[IFLA_IFNAME]) | 937 | else if (tb[IFLA_IFNAME]) |
915 | dev = dev_get_by_name(net, ifname); | 938 | dev = __dev_get_by_name(net, ifname); |
916 | else | 939 | else |
917 | goto errout; | 940 | goto errout; |
918 | 941 | ||
@@ -921,12 +944,11 @@ static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) | |||
921 | goto errout; | 944 | goto errout; |
922 | } | 945 | } |
923 | 946 | ||
924 | if ((err = validate_linkmsg(dev, tb)) < 0) | 947 | err = validate_linkmsg(dev, tb); |
925 | goto errout_dev; | 948 | if (err < 0) |
949 | goto errout; | ||
926 | 950 | ||
927 | err = do_setlink(dev, ifm, tb, ifname, 0); | 951 | err = do_setlink(dev, ifm, tb, ifname, 0); |
928 | errout_dev: | ||
929 | dev_put(dev); | ||
930 | errout: | 952 | errout: |
931 | return err; | 953 | return err; |
932 | } | 954 | } |
@@ -963,12 +985,12 @@ static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) | |||
963 | if (!ops) | 985 | if (!ops) |
964 | return -EOPNOTSUPP; | 986 | return -EOPNOTSUPP; |
965 | 987 | ||
966 | ops->dellink(dev); | 988 | ops->dellink(dev, NULL); |
967 | return 0; | 989 | return 0; |
968 | } | 990 | } |
969 | 991 | ||
970 | struct net_device *rtnl_create_link(struct net *net, char *ifname, | 992 | struct net_device *rtnl_create_link(struct net *src_net, struct net *net, |
971 | const struct rtnl_link_ops *ops, struct nlattr *tb[]) | 993 | char *ifname, const struct rtnl_link_ops *ops, struct nlattr *tb[]) |
972 | { | 994 | { |
973 | int err; | 995 | int err; |
974 | struct net_device *dev; | 996 | struct net_device *dev; |
@@ -976,7 +998,8 @@ struct net_device *rtnl_create_link(struct net *net, char *ifname, | |||
976 | unsigned int real_num_queues = 1; | 998 | unsigned int real_num_queues = 1; |
977 | 999 | ||
978 | if (ops->get_tx_queues) { | 1000 | if (ops->get_tx_queues) { |
979 | err = ops->get_tx_queues(net, tb, &num_queues, &real_num_queues); | 1001 | err = ops->get_tx_queues(src_net, tb, &num_queues, |
1002 | &real_num_queues); | ||
980 | if (err) | 1003 | if (err) |
981 | goto err; | 1004 | goto err; |
982 | } | 1005 | } |
@@ -985,16 +1008,16 @@ struct net_device *rtnl_create_link(struct net *net, char *ifname, | |||
985 | if (!dev) | 1008 | if (!dev) |
986 | goto err; | 1009 | goto err; |
987 | 1010 | ||
1011 | dev_net_set(dev, net); | ||
1012 | dev->rtnl_link_ops = ops; | ||
988 | dev->real_num_tx_queues = real_num_queues; | 1013 | dev->real_num_tx_queues = real_num_queues; |
1014 | |||
989 | if (strchr(dev->name, '%')) { | 1015 | if (strchr(dev->name, '%')) { |
990 | err = dev_alloc_name(dev, dev->name); | 1016 | err = dev_alloc_name(dev, dev->name); |
991 | if (err < 0) | 1017 | if (err < 0) |
992 | goto err_free; | 1018 | goto err_free; |
993 | } | 1019 | } |
994 | 1020 | ||
995 | dev_net_set(dev, net); | ||
996 | dev->rtnl_link_ops = ops; | ||
997 | |||
998 | if (tb[IFLA_MTU]) | 1021 | if (tb[IFLA_MTU]) |
999 | dev->mtu = nla_get_u32(tb[IFLA_MTU]); | 1022 | dev->mtu = nla_get_u32(tb[IFLA_MTU]); |
1000 | if (tb[IFLA_ADDRESS]) | 1023 | if (tb[IFLA_ADDRESS]) |
@@ -1017,6 +1040,7 @@ err_free: | |||
1017 | err: | 1040 | err: |
1018 | return ERR_PTR(err); | 1041 | return ERR_PTR(err); |
1019 | } | 1042 | } |
1043 | EXPORT_SYMBOL(rtnl_create_link); | ||
1020 | 1044 | ||
1021 | static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) | 1045 | static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) |
1022 | { | 1046 | { |
@@ -1050,7 +1074,8 @@ replay: | |||
1050 | else | 1074 | else |
1051 | dev = NULL; | 1075 | dev = NULL; |
1052 | 1076 | ||
1053 | if ((err = validate_linkmsg(dev, tb)) < 0) | 1077 | err = validate_linkmsg(dev, tb); |
1078 | if (err < 0) | ||
1054 | return err; | 1079 | return err; |
1055 | 1080 | ||
1056 | if (tb[IFLA_LINKINFO]) { | 1081 | if (tb[IFLA_LINKINFO]) { |
@@ -1071,6 +1096,7 @@ replay: | |||
1071 | 1096 | ||
1072 | if (1) { | 1097 | if (1) { |
1073 | struct nlattr *attr[ops ? ops->maxtype + 1 : 0], **data = NULL; | 1098 | struct nlattr *attr[ops ? ops->maxtype + 1 : 0], **data = NULL; |
1099 | struct net *dest_net; | ||
1074 | 1100 | ||
1075 | if (ops) { | 1101 | if (ops) { |
1076 | if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) { | 1102 | if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) { |
@@ -1135,17 +1161,19 @@ replay: | |||
1135 | if (!ifname[0]) | 1161 | if (!ifname[0]) |
1136 | snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind); | 1162 | snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind); |
1137 | 1163 | ||
1138 | dev = rtnl_create_link(net, ifname, ops, tb); | 1164 | dest_net = rtnl_link_get_net(net, tb); |
1165 | dev = rtnl_create_link(net, dest_net, ifname, ops, tb); | ||
1139 | 1166 | ||
1140 | if (IS_ERR(dev)) | 1167 | if (IS_ERR(dev)) |
1141 | err = PTR_ERR(dev); | 1168 | err = PTR_ERR(dev); |
1142 | else if (ops->newlink) | 1169 | else if (ops->newlink) |
1143 | err = ops->newlink(dev, tb, data); | 1170 | err = ops->newlink(net, dev, tb, data); |
1144 | else | 1171 | else |
1145 | err = register_netdevice(dev); | 1172 | err = register_netdevice(dev); |
1146 | |||
1147 | if (err < 0 && !IS_ERR(dev)) | 1173 | if (err < 0 && !IS_ERR(dev)) |
1148 | free_netdev(dev); | 1174 | free_netdev(dev); |
1175 | |||
1176 | put_net(dest_net); | ||
1149 | return err; | 1177 | return err; |
1150 | } | 1178 | } |
1151 | } | 1179 | } |
@@ -1154,6 +1182,7 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) | |||
1154 | { | 1182 | { |
1155 | struct net *net = sock_net(skb->sk); | 1183 | struct net *net = sock_net(skb->sk); |
1156 | struct ifinfomsg *ifm; | 1184 | struct ifinfomsg *ifm; |
1185 | char ifname[IFNAMSIZ]; | ||
1157 | struct nlattr *tb[IFLA_MAX+1]; | 1186 | struct nlattr *tb[IFLA_MAX+1]; |
1158 | struct net_device *dev = NULL; | 1187 | struct net_device *dev = NULL; |
1159 | struct sk_buff *nskb; | 1188 | struct sk_buff *nskb; |
@@ -1163,19 +1192,23 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) | |||
1163 | if (err < 0) | 1192 | if (err < 0) |
1164 | return err; | 1193 | return err; |
1165 | 1194 | ||
1195 | if (tb[IFLA_IFNAME]) | ||
1196 | nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ); | ||
1197 | |||
1166 | ifm = nlmsg_data(nlh); | 1198 | ifm = nlmsg_data(nlh); |
1167 | if (ifm->ifi_index > 0) { | 1199 | if (ifm->ifi_index > 0) |
1168 | dev = dev_get_by_index(net, ifm->ifi_index); | 1200 | dev = __dev_get_by_index(net, ifm->ifi_index); |
1169 | if (dev == NULL) | 1201 | else if (tb[IFLA_IFNAME]) |
1170 | return -ENODEV; | 1202 | dev = __dev_get_by_name(net, ifname); |
1171 | } else | 1203 | else |
1172 | return -EINVAL; | 1204 | return -EINVAL; |
1173 | 1205 | ||
1206 | if (dev == NULL) | ||
1207 | return -ENODEV; | ||
1208 | |||
1174 | nskb = nlmsg_new(if_nlmsg_size(dev), GFP_KERNEL); | 1209 | nskb = nlmsg_new(if_nlmsg_size(dev), GFP_KERNEL); |
1175 | if (nskb == NULL) { | 1210 | if (nskb == NULL) |
1176 | err = -ENOBUFS; | 1211 | return -ENOBUFS; |
1177 | goto errout; | ||
1178 | } | ||
1179 | 1212 | ||
1180 | err = rtnl_fill_ifinfo(nskb, dev, RTM_NEWLINK, NETLINK_CB(skb).pid, | 1213 | err = rtnl_fill_ifinfo(nskb, dev, RTM_NEWLINK, NETLINK_CB(skb).pid, |
1181 | nlh->nlmsg_seq, 0, 0); | 1214 | nlh->nlmsg_seq, 0, 0); |
@@ -1183,11 +1216,8 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) | |||
1183 | /* -EMSGSIZE implies BUG in if_nlmsg_size */ | 1216 | /* -EMSGSIZE implies BUG in if_nlmsg_size */ |
1184 | WARN_ON(err == -EMSGSIZE); | 1217 | WARN_ON(err == -EMSGSIZE); |
1185 | kfree_skb(nskb); | 1218 | kfree_skb(nskb); |
1186 | goto errout; | 1219 | } else |
1187 | } | 1220 | err = rtnl_unicast(nskb, net, NETLINK_CB(skb).pid); |
1188 | err = rtnl_unicast(nskb, net, NETLINK_CB(skb).pid); | ||
1189 | errout: | ||
1190 | dev_put(dev); | ||
1191 | 1221 | ||
1192 | return err; | 1222 | return err; |
1193 | } | 1223 | } |
@@ -1199,7 +1229,7 @@ static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb) | |||
1199 | 1229 | ||
1200 | if (s_idx == 0) | 1230 | if (s_idx == 0) |
1201 | s_idx = 1; | 1231 | s_idx = 1; |
1202 | for (idx=1; idx<NPROTO; idx++) { | 1232 | for (idx = 1; idx < NPROTO; idx++) { |
1203 | int type = cb->nlh->nlmsg_type-RTM_BASE; | 1233 | int type = cb->nlh->nlmsg_type-RTM_BASE; |
1204 | if (idx < s_idx || idx == PF_PACKET) | 1234 | if (idx < s_idx || idx == PF_PACKET) |
1205 | continue; | 1235 | continue; |
@@ -1266,7 +1296,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
1266 | if (nlh->nlmsg_len < NLMSG_LENGTH(sizeof(struct rtgenmsg))) | 1296 | if (nlh->nlmsg_len < NLMSG_LENGTH(sizeof(struct rtgenmsg))) |
1267 | return 0; | 1297 | return 0; |
1268 | 1298 | ||
1269 | family = ((struct rtgenmsg*)NLMSG_DATA(nlh))->rtgen_family; | 1299 | family = ((struct rtgenmsg *)NLMSG_DATA(nlh))->rtgen_family; |
1270 | if (family >= NPROTO) | 1300 | if (family >= NPROTO) |
1271 | return -EAFNOSUPPORT; | 1301 | return -EAFNOSUPPORT; |
1272 | 1302 | ||
@@ -1299,7 +1329,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
1299 | 1329 | ||
1300 | if (nlh->nlmsg_len > min_len) { | 1330 | if (nlh->nlmsg_len > min_len) { |
1301 | int attrlen = nlh->nlmsg_len - NLMSG_ALIGN(min_len); | 1331 | int attrlen = nlh->nlmsg_len - NLMSG_ALIGN(min_len); |
1302 | struct rtattr *attr = (void*)nlh + NLMSG_ALIGN(min_len); | 1332 | struct rtattr *attr = (void *)nlh + NLMSG_ALIGN(min_len); |
1303 | 1333 | ||
1304 | while (RTA_OK(attr, attrlen)) { | 1334 | while (RTA_OK(attr, attrlen)) { |
1305 | unsigned flavor = attr->rta_type; | 1335 | unsigned flavor = attr->rta_type; |
@@ -1405,14 +1435,3 @@ void __init rtnetlink_init(void) | |||
1405 | rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all); | 1435 | rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all); |
1406 | } | 1436 | } |
1407 | 1437 | ||
1408 | EXPORT_SYMBOL(__rta_fill); | ||
1409 | EXPORT_SYMBOL(rtnetlink_put_metrics); | ||
1410 | EXPORT_SYMBOL(rtnl_lock); | ||
1411 | EXPORT_SYMBOL(rtnl_trylock); | ||
1412 | EXPORT_SYMBOL(rtnl_unlock); | ||
1413 | EXPORT_SYMBOL(rtnl_is_locked); | ||
1414 | EXPORT_SYMBOL(rtnl_unicast); | ||
1415 | EXPORT_SYMBOL(rtnl_notify); | ||
1416 | EXPORT_SYMBOL(rtnl_set_sk_err); | ||
1417 | EXPORT_SYMBOL(rtnl_create_link); | ||
1418 | EXPORT_SYMBOL(ifla_policy); | ||
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index ec85681a7dd8..739b8f4dd327 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -493,6 +493,9 @@ int skb_recycle_check(struct sk_buff *skb, int skb_size) | |||
493 | { | 493 | { |
494 | struct skb_shared_info *shinfo; | 494 | struct skb_shared_info *shinfo; |
495 | 495 | ||
496 | if (irqs_disabled()) | ||
497 | return 0; | ||
498 | |||
496 | if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE) | 499 | if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE) |
497 | return 0; | 500 | return 0; |
498 | 501 | ||
diff --git a/net/core/sock.c b/net/core/sock.c index 7626b6aacd68..76ff58d43e26 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
@@ -274,25 +274,27 @@ static void sock_disable_timestamp(struct sock *sk, int flag) | |||
274 | 274 | ||
275 | int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) | 275 | int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) |
276 | { | 276 | { |
277 | int err = 0; | 277 | int err; |
278 | int skb_len; | 278 | int skb_len; |
279 | unsigned long flags; | ||
280 | struct sk_buff_head *list = &sk->sk_receive_queue; | ||
279 | 281 | ||
280 | /* Cast sk->rcvbuf to unsigned... It's pointless, but reduces | 282 | /* Cast sk->rcvbuf to unsigned... It's pointless, but reduces |
281 | number of warnings when compiling with -W --ANK | 283 | number of warnings when compiling with -W --ANK |
282 | */ | 284 | */ |
283 | if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= | 285 | if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= |
284 | (unsigned)sk->sk_rcvbuf) { | 286 | (unsigned)sk->sk_rcvbuf) { |
285 | err = -ENOMEM; | 287 | atomic_inc(&sk->sk_drops); |
286 | goto out; | 288 | return -ENOMEM; |
287 | } | 289 | } |
288 | 290 | ||
289 | err = sk_filter(sk, skb); | 291 | err = sk_filter(sk, skb); |
290 | if (err) | 292 | if (err) |
291 | goto out; | 293 | return err; |
292 | 294 | ||
293 | if (!sk_rmem_schedule(sk, skb->truesize)) { | 295 | if (!sk_rmem_schedule(sk, skb->truesize)) { |
294 | err = -ENOBUFS; | 296 | atomic_inc(&sk->sk_drops); |
295 | goto out; | 297 | return -ENOBUFS; |
296 | } | 298 | } |
297 | 299 | ||
298 | skb->dev = NULL; | 300 | skb->dev = NULL; |
@@ -305,12 +307,14 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) | |||
305 | */ | 307 | */ |
306 | skb_len = skb->len; | 308 | skb_len = skb->len; |
307 | 309 | ||
308 | skb_queue_tail(&sk->sk_receive_queue, skb); | 310 | spin_lock_irqsave(&list->lock, flags); |
311 | skb->dropcount = atomic_read(&sk->sk_drops); | ||
312 | __skb_queue_tail(list, skb); | ||
313 | spin_unlock_irqrestore(&list->lock, flags); | ||
309 | 314 | ||
310 | if (!sock_flag(sk, SOCK_DEAD)) | 315 | if (!sock_flag(sk, SOCK_DEAD)) |
311 | sk->sk_data_ready(sk, skb_len); | 316 | sk->sk_data_ready(sk, skb_len); |
312 | out: | 317 | return 0; |
313 | return err; | ||
314 | } | 318 | } |
315 | EXPORT_SYMBOL(sock_queue_rcv_skb); | 319 | EXPORT_SYMBOL(sock_queue_rcv_skb); |
316 | 320 | ||
@@ -348,11 +352,18 @@ discard_and_relse: | |||
348 | } | 352 | } |
349 | EXPORT_SYMBOL(sk_receive_skb); | 353 | EXPORT_SYMBOL(sk_receive_skb); |
350 | 354 | ||
355 | void sk_reset_txq(struct sock *sk) | ||
356 | { | ||
357 | sk_tx_queue_clear(sk); | ||
358 | } | ||
359 | EXPORT_SYMBOL(sk_reset_txq); | ||
360 | |||
351 | struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie) | 361 | struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie) |
352 | { | 362 | { |
353 | struct dst_entry *dst = sk->sk_dst_cache; | 363 | struct dst_entry *dst = sk->sk_dst_cache; |
354 | 364 | ||
355 | if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) { | 365 | if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) { |
366 | sk_tx_queue_clear(sk); | ||
356 | sk->sk_dst_cache = NULL; | 367 | sk->sk_dst_cache = NULL; |
357 | dst_release(dst); | 368 | dst_release(dst); |
358 | return NULL; | 369 | return NULL; |
@@ -406,17 +417,18 @@ static int sock_bindtodevice(struct sock *sk, char __user *optval, int optlen) | |||
406 | if (copy_from_user(devname, optval, optlen)) | 417 | if (copy_from_user(devname, optval, optlen)) |
407 | goto out; | 418 | goto out; |
408 | 419 | ||
409 | if (devname[0] == '\0') { | 420 | index = 0; |
410 | index = 0; | 421 | if (devname[0] != '\0') { |
411 | } else { | 422 | struct net_device *dev; |
412 | struct net_device *dev = dev_get_by_name(net, devname); | ||
413 | 423 | ||
424 | rcu_read_lock(); | ||
425 | dev = dev_get_by_name_rcu(net, devname); | ||
426 | if (dev) | ||
427 | index = dev->ifindex; | ||
428 | rcu_read_unlock(); | ||
414 | ret = -ENODEV; | 429 | ret = -ENODEV; |
415 | if (!dev) | 430 | if (!dev) |
416 | goto out; | 431 | goto out; |
417 | |||
418 | index = dev->ifindex; | ||
419 | dev_put(dev); | ||
420 | } | 432 | } |
421 | 433 | ||
422 | lock_sock(sk); | 434 | lock_sock(sk); |
@@ -702,6 +714,12 @@ set_rcvbuf: | |||
702 | 714 | ||
703 | /* We implement the SO_SNDLOWAT etc to | 715 | /* We implement the SO_SNDLOWAT etc to |
704 | not be settable (1003.1g 5.3) */ | 716 | not be settable (1003.1g 5.3) */ |
717 | case SO_RXQ_OVFL: | ||
718 | if (valbool) | ||
719 | sock_set_flag(sk, SOCK_RXQ_OVFL); | ||
720 | else | ||
721 | sock_reset_flag(sk, SOCK_RXQ_OVFL); | ||
722 | break; | ||
705 | default: | 723 | default: |
706 | ret = -ENOPROTOOPT; | 724 | ret = -ENOPROTOOPT; |
707 | break; | 725 | break; |
@@ -901,6 +919,10 @@ int sock_getsockopt(struct socket *sock, int level, int optname, | |||
901 | v.val = sk->sk_mark; | 919 | v.val = sk->sk_mark; |
902 | break; | 920 | break; |
903 | 921 | ||
922 | case SO_RXQ_OVFL: | ||
923 | v.val = !!sock_flag(sk, SOCK_RXQ_OVFL); | ||
924 | break; | ||
925 | |||
904 | default: | 926 | default: |
905 | return -ENOPROTOOPT; | 927 | return -ENOPROTOOPT; |
906 | } | 928 | } |
@@ -939,7 +961,8 @@ static void sock_copy(struct sock *nsk, const struct sock *osk) | |||
939 | void *sptr = nsk->sk_security; | 961 | void *sptr = nsk->sk_security; |
940 | #endif | 962 | #endif |
941 | BUILD_BUG_ON(offsetof(struct sock, sk_copy_start) != | 963 | BUILD_BUG_ON(offsetof(struct sock, sk_copy_start) != |
942 | sizeof(osk->sk_node) + sizeof(osk->sk_refcnt)); | 964 | sizeof(osk->sk_node) + sizeof(osk->sk_refcnt) + |
965 | sizeof(osk->sk_tx_queue_mapping)); | ||
943 | memcpy(&nsk->sk_copy_start, &osk->sk_copy_start, | 966 | memcpy(&nsk->sk_copy_start, &osk->sk_copy_start, |
944 | osk->sk_prot->obj_size - offsetof(struct sock, sk_copy_start)); | 967 | osk->sk_prot->obj_size - offsetof(struct sock, sk_copy_start)); |
945 | #ifdef CONFIG_SECURITY_NETWORK | 968 | #ifdef CONFIG_SECURITY_NETWORK |
@@ -983,6 +1006,7 @@ static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority, | |||
983 | 1006 | ||
984 | if (!try_module_get(prot->owner)) | 1007 | if (!try_module_get(prot->owner)) |
985 | goto out_free_sec; | 1008 | goto out_free_sec; |
1009 | sk_tx_queue_clear(sk); | ||
986 | } | 1010 | } |
987 | 1011 | ||
988 | return sk; | 1012 | return sk; |
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c index e8cf99e880b0..a47a8c918ee8 100644 --- a/net/dccp/ccids/ccid2.c +++ b/net/dccp/ccids/ccid2.c | |||
@@ -33,20 +33,20 @@ | |||
33 | static int ccid2_debug; | 33 | static int ccid2_debug; |
34 | #define ccid2_pr_debug(format, a...) DCCP_PR_DEBUG(ccid2_debug, format, ##a) | 34 | #define ccid2_pr_debug(format, a...) DCCP_PR_DEBUG(ccid2_debug, format, ##a) |
35 | 35 | ||
36 | static void ccid2_hc_tx_check_sanity(const struct ccid2_hc_tx_sock *hctx) | 36 | static void ccid2_hc_tx_check_sanity(const struct ccid2_hc_tx_sock *hc) |
37 | { | 37 | { |
38 | int len = 0; | 38 | int len = 0; |
39 | int pipe = 0; | 39 | int pipe = 0; |
40 | struct ccid2_seq *seqp = hctx->ccid2hctx_seqh; | 40 | struct ccid2_seq *seqp = hc->tx_seqh; |
41 | 41 | ||
42 | /* there is data in the chain */ | 42 | /* there is data in the chain */ |
43 | if (seqp != hctx->ccid2hctx_seqt) { | 43 | if (seqp != hc->tx_seqt) { |
44 | seqp = seqp->ccid2s_prev; | 44 | seqp = seqp->ccid2s_prev; |
45 | len++; | 45 | len++; |
46 | if (!seqp->ccid2s_acked) | 46 | if (!seqp->ccid2s_acked) |
47 | pipe++; | 47 | pipe++; |
48 | 48 | ||
49 | while (seqp != hctx->ccid2hctx_seqt) { | 49 | while (seqp != hc->tx_seqt) { |
50 | struct ccid2_seq *prev = seqp->ccid2s_prev; | 50 | struct ccid2_seq *prev = seqp->ccid2s_prev; |
51 | 51 | ||
52 | len++; | 52 | len++; |
@@ -63,30 +63,30 @@ static void ccid2_hc_tx_check_sanity(const struct ccid2_hc_tx_sock *hctx) | |||
63 | } | 63 | } |
64 | } | 64 | } |
65 | 65 | ||
66 | BUG_ON(pipe != hctx->ccid2hctx_pipe); | 66 | BUG_ON(pipe != hc->tx_pipe); |
67 | ccid2_pr_debug("len of chain=%d\n", len); | 67 | ccid2_pr_debug("len of chain=%d\n", len); |
68 | 68 | ||
69 | do { | 69 | do { |
70 | seqp = seqp->ccid2s_prev; | 70 | seqp = seqp->ccid2s_prev; |
71 | len++; | 71 | len++; |
72 | } while (seqp != hctx->ccid2hctx_seqh); | 72 | } while (seqp != hc->tx_seqh); |
73 | 73 | ||
74 | ccid2_pr_debug("total len=%d\n", len); | 74 | ccid2_pr_debug("total len=%d\n", len); |
75 | BUG_ON(len != hctx->ccid2hctx_seqbufc * CCID2_SEQBUF_LEN); | 75 | BUG_ON(len != hc->tx_seqbufc * CCID2_SEQBUF_LEN); |
76 | } | 76 | } |
77 | #else | 77 | #else |
78 | #define ccid2_pr_debug(format, a...) | 78 | #define ccid2_pr_debug(format, a...) |
79 | #define ccid2_hc_tx_check_sanity(hctx) | 79 | #define ccid2_hc_tx_check_sanity(hc) |
80 | #endif | 80 | #endif |
81 | 81 | ||
82 | static int ccid2_hc_tx_alloc_seq(struct ccid2_hc_tx_sock *hctx) | 82 | static int ccid2_hc_tx_alloc_seq(struct ccid2_hc_tx_sock *hc) |
83 | { | 83 | { |
84 | struct ccid2_seq *seqp; | 84 | struct ccid2_seq *seqp; |
85 | int i; | 85 | int i; |
86 | 86 | ||
87 | /* check if we have space to preserve the pointer to the buffer */ | 87 | /* check if we have space to preserve the pointer to the buffer */ |
88 | if (hctx->ccid2hctx_seqbufc >= (sizeof(hctx->ccid2hctx_seqbuf) / | 88 | if (hc->tx_seqbufc >= (sizeof(hc->tx_seqbuf) / |
89 | sizeof(struct ccid2_seq*))) | 89 | sizeof(struct ccid2_seq *))) |
90 | return -ENOMEM; | 90 | return -ENOMEM; |
91 | 91 | ||
92 | /* allocate buffer and initialize linked list */ | 92 | /* allocate buffer and initialize linked list */ |
@@ -102,29 +102,29 @@ static int ccid2_hc_tx_alloc_seq(struct ccid2_hc_tx_sock *hctx) | |||
102 | seqp->ccid2s_prev = &seqp[CCID2_SEQBUF_LEN - 1]; | 102 | seqp->ccid2s_prev = &seqp[CCID2_SEQBUF_LEN - 1]; |
103 | 103 | ||
104 | /* This is the first allocation. Initiate the head and tail. */ | 104 | /* This is the first allocation. Initiate the head and tail. */ |
105 | if (hctx->ccid2hctx_seqbufc == 0) | 105 | if (hc->tx_seqbufc == 0) |
106 | hctx->ccid2hctx_seqh = hctx->ccid2hctx_seqt = seqp; | 106 | hc->tx_seqh = hc->tx_seqt = seqp; |
107 | else { | 107 | else { |
108 | /* link the existing list with the one we just created */ | 108 | /* link the existing list with the one we just created */ |
109 | hctx->ccid2hctx_seqh->ccid2s_next = seqp; | 109 | hc->tx_seqh->ccid2s_next = seqp; |
110 | seqp->ccid2s_prev = hctx->ccid2hctx_seqh; | 110 | seqp->ccid2s_prev = hc->tx_seqh; |
111 | 111 | ||
112 | hctx->ccid2hctx_seqt->ccid2s_prev = &seqp[CCID2_SEQBUF_LEN - 1]; | 112 | hc->tx_seqt->ccid2s_prev = &seqp[CCID2_SEQBUF_LEN - 1]; |
113 | seqp[CCID2_SEQBUF_LEN - 1].ccid2s_next = hctx->ccid2hctx_seqt; | 113 | seqp[CCID2_SEQBUF_LEN - 1].ccid2s_next = hc->tx_seqt; |
114 | } | 114 | } |
115 | 115 | ||
116 | /* store the original pointer to the buffer so we can free it */ | 116 | /* store the original pointer to the buffer so we can free it */ |
117 | hctx->ccid2hctx_seqbuf[hctx->ccid2hctx_seqbufc] = seqp; | 117 | hc->tx_seqbuf[hc->tx_seqbufc] = seqp; |
118 | hctx->ccid2hctx_seqbufc++; | 118 | hc->tx_seqbufc++; |
119 | 119 | ||
120 | return 0; | 120 | return 0; |
121 | } | 121 | } |
122 | 122 | ||
123 | static int ccid2_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb) | 123 | static int ccid2_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb) |
124 | { | 124 | { |
125 | struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk); | 125 | struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); |
126 | 126 | ||
127 | if (hctx->ccid2hctx_pipe < hctx->ccid2hctx_cwnd) | 127 | if (hc->tx_pipe < hc->tx_cwnd) |
128 | return 0; | 128 | return 0; |
129 | 129 | ||
130 | return 1; /* XXX CCID should dequeue when ready instead of polling */ | 130 | return 1; /* XXX CCID should dequeue when ready instead of polling */ |
@@ -133,7 +133,7 @@ static int ccid2_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb) | |||
133 | static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val) | 133 | static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val) |
134 | { | 134 | { |
135 | struct dccp_sock *dp = dccp_sk(sk); | 135 | struct dccp_sock *dp = dccp_sk(sk); |
136 | u32 max_ratio = DIV_ROUND_UP(ccid2_hc_tx_sk(sk)->ccid2hctx_cwnd, 2); | 136 | u32 max_ratio = DIV_ROUND_UP(ccid2_hc_tx_sk(sk)->tx_cwnd, 2); |
137 | 137 | ||
138 | /* | 138 | /* |
139 | * Ensure that Ack Ratio does not exceed ceil(cwnd/2), which is (2) from | 139 | * Ensure that Ack Ratio does not exceed ceil(cwnd/2), which is (2) from |
@@ -155,10 +155,10 @@ static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val) | |||
155 | dp->dccps_l_ack_ratio = val; | 155 | dp->dccps_l_ack_ratio = val; |
156 | } | 156 | } |
157 | 157 | ||
158 | static void ccid2_change_srtt(struct ccid2_hc_tx_sock *hctx, long val) | 158 | static void ccid2_change_srtt(struct ccid2_hc_tx_sock *hc, long val) |
159 | { | 159 | { |
160 | ccid2_pr_debug("change SRTT to %ld\n", val); | 160 | ccid2_pr_debug("change SRTT to %ld\n", val); |
161 | hctx->ccid2hctx_srtt = val; | 161 | hc->tx_srtt = val; |
162 | } | 162 | } |
163 | 163 | ||
164 | static void ccid2_start_rto_timer(struct sock *sk); | 164 | static void ccid2_start_rto_timer(struct sock *sk); |
@@ -166,45 +166,44 @@ static void ccid2_start_rto_timer(struct sock *sk); | |||
166 | static void ccid2_hc_tx_rto_expire(unsigned long data) | 166 | static void ccid2_hc_tx_rto_expire(unsigned long data) |
167 | { | 167 | { |
168 | struct sock *sk = (struct sock *)data; | 168 | struct sock *sk = (struct sock *)data; |
169 | struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk); | 169 | struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); |
170 | long s; | 170 | long s; |
171 | 171 | ||
172 | bh_lock_sock(sk); | 172 | bh_lock_sock(sk); |
173 | if (sock_owned_by_user(sk)) { | 173 | if (sock_owned_by_user(sk)) { |
174 | sk_reset_timer(sk, &hctx->ccid2hctx_rtotimer, | 174 | sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + HZ / 5); |
175 | jiffies + HZ / 5); | ||
176 | goto out; | 175 | goto out; |
177 | } | 176 | } |
178 | 177 | ||
179 | ccid2_pr_debug("RTO_EXPIRE\n"); | 178 | ccid2_pr_debug("RTO_EXPIRE\n"); |
180 | 179 | ||
181 | ccid2_hc_tx_check_sanity(hctx); | 180 | ccid2_hc_tx_check_sanity(hc); |
182 | 181 | ||
183 | /* back-off timer */ | 182 | /* back-off timer */ |
184 | hctx->ccid2hctx_rto <<= 1; | 183 | hc->tx_rto <<= 1; |
185 | 184 | ||
186 | s = hctx->ccid2hctx_rto / HZ; | 185 | s = hc->tx_rto / HZ; |
187 | if (s > 60) | 186 | if (s > 60) |
188 | hctx->ccid2hctx_rto = 60 * HZ; | 187 | hc->tx_rto = 60 * HZ; |
189 | 188 | ||
190 | ccid2_start_rto_timer(sk); | 189 | ccid2_start_rto_timer(sk); |
191 | 190 | ||
192 | /* adjust pipe, cwnd etc */ | 191 | /* adjust pipe, cwnd etc */ |
193 | hctx->ccid2hctx_ssthresh = hctx->ccid2hctx_cwnd / 2; | 192 | hc->tx_ssthresh = hc->tx_cwnd / 2; |
194 | if (hctx->ccid2hctx_ssthresh < 2) | 193 | if (hc->tx_ssthresh < 2) |
195 | hctx->ccid2hctx_ssthresh = 2; | 194 | hc->tx_ssthresh = 2; |
196 | hctx->ccid2hctx_cwnd = 1; | 195 | hc->tx_cwnd = 1; |
197 | hctx->ccid2hctx_pipe = 0; | 196 | hc->tx_pipe = 0; |
198 | 197 | ||
199 | /* clear state about stuff we sent */ | 198 | /* clear state about stuff we sent */ |
200 | hctx->ccid2hctx_seqt = hctx->ccid2hctx_seqh; | 199 | hc->tx_seqt = hc->tx_seqh; |
201 | hctx->ccid2hctx_packets_acked = 0; | 200 | hc->tx_packets_acked = 0; |
202 | 201 | ||
203 | /* clear ack ratio state. */ | 202 | /* clear ack ratio state. */ |
204 | hctx->ccid2hctx_rpseq = 0; | 203 | hc->tx_rpseq = 0; |
205 | hctx->ccid2hctx_rpdupack = -1; | 204 | hc->tx_rpdupack = -1; |
206 | ccid2_change_l_ack_ratio(sk, 1); | 205 | ccid2_change_l_ack_ratio(sk, 1); |
207 | ccid2_hc_tx_check_sanity(hctx); | 206 | ccid2_hc_tx_check_sanity(hc); |
208 | out: | 207 | out: |
209 | bh_unlock_sock(sk); | 208 | bh_unlock_sock(sk); |
210 | sock_put(sk); | 209 | sock_put(sk); |
@@ -212,42 +211,40 @@ out: | |||
212 | 211 | ||
213 | static void ccid2_start_rto_timer(struct sock *sk) | 212 | static void ccid2_start_rto_timer(struct sock *sk) |
214 | { | 213 | { |
215 | struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk); | 214 | struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); |
216 | 215 | ||
217 | ccid2_pr_debug("setting RTO timeout=%ld\n", hctx->ccid2hctx_rto); | 216 | ccid2_pr_debug("setting RTO timeout=%ld\n", hc->tx_rto); |
218 | 217 | ||
219 | BUG_ON(timer_pending(&hctx->ccid2hctx_rtotimer)); | 218 | BUG_ON(timer_pending(&hc->tx_rtotimer)); |
220 | sk_reset_timer(sk, &hctx->ccid2hctx_rtotimer, | 219 | sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto); |
221 | jiffies + hctx->ccid2hctx_rto); | ||
222 | } | 220 | } |
223 | 221 | ||
224 | static void ccid2_hc_tx_packet_sent(struct sock *sk, int more, unsigned int len) | 222 | static void ccid2_hc_tx_packet_sent(struct sock *sk, int more, unsigned int len) |
225 | { | 223 | { |
226 | struct dccp_sock *dp = dccp_sk(sk); | 224 | struct dccp_sock *dp = dccp_sk(sk); |
227 | struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk); | 225 | struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); |
228 | struct ccid2_seq *next; | 226 | struct ccid2_seq *next; |
229 | 227 | ||
230 | hctx->ccid2hctx_pipe++; | 228 | hc->tx_pipe++; |
231 | 229 | ||
232 | hctx->ccid2hctx_seqh->ccid2s_seq = dp->dccps_gss; | 230 | hc->tx_seqh->ccid2s_seq = dp->dccps_gss; |
233 | hctx->ccid2hctx_seqh->ccid2s_acked = 0; | 231 | hc->tx_seqh->ccid2s_acked = 0; |
234 | hctx->ccid2hctx_seqh->ccid2s_sent = jiffies; | 232 | hc->tx_seqh->ccid2s_sent = jiffies; |
235 | 233 | ||
236 | next = hctx->ccid2hctx_seqh->ccid2s_next; | 234 | next = hc->tx_seqh->ccid2s_next; |
237 | /* check if we need to alloc more space */ | 235 | /* check if we need to alloc more space */ |
238 | if (next == hctx->ccid2hctx_seqt) { | 236 | if (next == hc->tx_seqt) { |
239 | if (ccid2_hc_tx_alloc_seq(hctx)) { | 237 | if (ccid2_hc_tx_alloc_seq(hc)) { |
240 | DCCP_CRIT("packet history - out of memory!"); | 238 | DCCP_CRIT("packet history - out of memory!"); |
241 | /* FIXME: find a more graceful way to bail out */ | 239 | /* FIXME: find a more graceful way to bail out */ |
242 | return; | 240 | return; |
243 | } | 241 | } |
244 | next = hctx->ccid2hctx_seqh->ccid2s_next; | 242 | next = hc->tx_seqh->ccid2s_next; |
245 | BUG_ON(next == hctx->ccid2hctx_seqt); | 243 | BUG_ON(next == hc->tx_seqt); |
246 | } | 244 | } |
247 | hctx->ccid2hctx_seqh = next; | 245 | hc->tx_seqh = next; |
248 | 246 | ||
249 | ccid2_pr_debug("cwnd=%d pipe=%d\n", hctx->ccid2hctx_cwnd, | 247 | ccid2_pr_debug("cwnd=%d pipe=%d\n", hc->tx_cwnd, hc->tx_pipe); |
250 | hctx->ccid2hctx_pipe); | ||
251 | 248 | ||
252 | /* | 249 | /* |
253 | * FIXME: The code below is broken and the variables have been removed | 250 | * FIXME: The code below is broken and the variables have been removed |
@@ -270,12 +267,12 @@ static void ccid2_hc_tx_packet_sent(struct sock *sk, int more, unsigned int len) | |||
270 | */ | 267 | */ |
271 | #if 0 | 268 | #if 0 |
272 | /* Ack Ratio. Need to maintain a concept of how many windows we sent */ | 269 | /* Ack Ratio. Need to maintain a concept of how many windows we sent */ |
273 | hctx->ccid2hctx_arsent++; | 270 | hc->tx_arsent++; |
274 | /* We had an ack loss in this window... */ | 271 | /* We had an ack loss in this window... */ |
275 | if (hctx->ccid2hctx_ackloss) { | 272 | if (hc->tx_ackloss) { |
276 | if (hctx->ccid2hctx_arsent >= hctx->ccid2hctx_cwnd) { | 273 | if (hc->tx_arsent >= hc->tx_cwnd) { |
277 | hctx->ccid2hctx_arsent = 0; | 274 | hc->tx_arsent = 0; |
278 | hctx->ccid2hctx_ackloss = 0; | 275 | hc->tx_ackloss = 0; |
279 | } | 276 | } |
280 | } else { | 277 | } else { |
281 | /* No acks lost up to now... */ | 278 | /* No acks lost up to now... */ |
@@ -285,28 +282,28 @@ static void ccid2_hc_tx_packet_sent(struct sock *sk, int more, unsigned int len) | |||
285 | int denom = dp->dccps_l_ack_ratio * dp->dccps_l_ack_ratio - | 282 | int denom = dp->dccps_l_ack_ratio * dp->dccps_l_ack_ratio - |
286 | dp->dccps_l_ack_ratio; | 283 | dp->dccps_l_ack_ratio; |
287 | 284 | ||
288 | denom = hctx->ccid2hctx_cwnd * hctx->ccid2hctx_cwnd / denom; | 285 | denom = hc->tx_cwnd * hc->tx_cwnd / denom; |
289 | 286 | ||
290 | if (hctx->ccid2hctx_arsent >= denom) { | 287 | if (hc->tx_arsent >= denom) { |
291 | ccid2_change_l_ack_ratio(sk, dp->dccps_l_ack_ratio - 1); | 288 | ccid2_change_l_ack_ratio(sk, dp->dccps_l_ack_ratio - 1); |
292 | hctx->ccid2hctx_arsent = 0; | 289 | hc->tx_arsent = 0; |
293 | } | 290 | } |
294 | } else { | 291 | } else { |
295 | /* we can't increase ack ratio further [1] */ | 292 | /* we can't increase ack ratio further [1] */ |
296 | hctx->ccid2hctx_arsent = 0; /* or maybe set it to cwnd*/ | 293 | hc->tx_arsent = 0; /* or maybe set it to cwnd*/ |
297 | } | 294 | } |
298 | } | 295 | } |
299 | #endif | 296 | #endif |
300 | 297 | ||
301 | /* setup RTO timer */ | 298 | /* setup RTO timer */ |
302 | if (!timer_pending(&hctx->ccid2hctx_rtotimer)) | 299 | if (!timer_pending(&hc->tx_rtotimer)) |
303 | ccid2_start_rto_timer(sk); | 300 | ccid2_start_rto_timer(sk); |
304 | 301 | ||
305 | #ifdef CONFIG_IP_DCCP_CCID2_DEBUG | 302 | #ifdef CONFIG_IP_DCCP_CCID2_DEBUG |
306 | do { | 303 | do { |
307 | struct ccid2_seq *seqp = hctx->ccid2hctx_seqt; | 304 | struct ccid2_seq *seqp = hc->tx_seqt; |
308 | 305 | ||
309 | while (seqp != hctx->ccid2hctx_seqh) { | 306 | while (seqp != hc->tx_seqh) { |
310 | ccid2_pr_debug("out seq=%llu acked=%d time=%lu\n", | 307 | ccid2_pr_debug("out seq=%llu acked=%d time=%lu\n", |
311 | (unsigned long long)seqp->ccid2s_seq, | 308 | (unsigned long long)seqp->ccid2s_seq, |
312 | seqp->ccid2s_acked, seqp->ccid2s_sent); | 309 | seqp->ccid2s_acked, seqp->ccid2s_sent); |
@@ -314,7 +311,7 @@ static void ccid2_hc_tx_packet_sent(struct sock *sk, int more, unsigned int len) | |||
314 | } | 311 | } |
315 | } while (0); | 312 | } while (0); |
316 | ccid2_pr_debug("=========\n"); | 313 | ccid2_pr_debug("=========\n"); |
317 | ccid2_hc_tx_check_sanity(hctx); | 314 | ccid2_hc_tx_check_sanity(hc); |
318 | #endif | 315 | #endif |
319 | } | 316 | } |
320 | 317 | ||
@@ -382,9 +379,9 @@ out_invalid_option: | |||
382 | 379 | ||
383 | static void ccid2_hc_tx_kill_rto_timer(struct sock *sk) | 380 | static void ccid2_hc_tx_kill_rto_timer(struct sock *sk) |
384 | { | 381 | { |
385 | struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk); | 382 | struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); |
386 | 383 | ||
387 | sk_stop_timer(sk, &hctx->ccid2hctx_rtotimer); | 384 | sk_stop_timer(sk, &hc->tx_rtotimer); |
388 | ccid2_pr_debug("deleted RTO timer\n"); | 385 | ccid2_pr_debug("deleted RTO timer\n"); |
389 | } | 386 | } |
390 | 387 | ||
@@ -392,75 +389,75 @@ static inline void ccid2_new_ack(struct sock *sk, | |||
392 | struct ccid2_seq *seqp, | 389 | struct ccid2_seq *seqp, |
393 | unsigned int *maxincr) | 390 | unsigned int *maxincr) |
394 | { | 391 | { |
395 | struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk); | 392 | struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); |
396 | 393 | ||
397 | if (hctx->ccid2hctx_cwnd < hctx->ccid2hctx_ssthresh) { | 394 | if (hc->tx_cwnd < hc->tx_ssthresh) { |
398 | if (*maxincr > 0 && ++hctx->ccid2hctx_packets_acked == 2) { | 395 | if (*maxincr > 0 && ++hc->tx_packets_acked == 2) { |
399 | hctx->ccid2hctx_cwnd += 1; | 396 | hc->tx_cwnd += 1; |
400 | *maxincr -= 1; | 397 | *maxincr -= 1; |
401 | hctx->ccid2hctx_packets_acked = 0; | 398 | hc->tx_packets_acked = 0; |
402 | } | 399 | } |
403 | } else if (++hctx->ccid2hctx_packets_acked >= hctx->ccid2hctx_cwnd) { | 400 | } else if (++hc->tx_packets_acked >= hc->tx_cwnd) { |
404 | hctx->ccid2hctx_cwnd += 1; | 401 | hc->tx_cwnd += 1; |
405 | hctx->ccid2hctx_packets_acked = 0; | 402 | hc->tx_packets_acked = 0; |
406 | } | 403 | } |
407 | 404 | ||
408 | /* update RTO */ | 405 | /* update RTO */ |
409 | if (hctx->ccid2hctx_srtt == -1 || | 406 | if (hc->tx_srtt == -1 || |
410 | time_after(jiffies, hctx->ccid2hctx_lastrtt + hctx->ccid2hctx_srtt)) { | 407 | time_after(jiffies, hc->tx_lastrtt + hc->tx_srtt)) { |
411 | unsigned long r = (long)jiffies - (long)seqp->ccid2s_sent; | 408 | unsigned long r = (long)jiffies - (long)seqp->ccid2s_sent; |
412 | int s; | 409 | int s; |
413 | 410 | ||
414 | /* first measurement */ | 411 | /* first measurement */ |
415 | if (hctx->ccid2hctx_srtt == -1) { | 412 | if (hc->tx_srtt == -1) { |
416 | ccid2_pr_debug("R: %lu Time=%lu seq=%llu\n", | 413 | ccid2_pr_debug("R: %lu Time=%lu seq=%llu\n", |
417 | r, jiffies, | 414 | r, jiffies, |
418 | (unsigned long long)seqp->ccid2s_seq); | 415 | (unsigned long long)seqp->ccid2s_seq); |
419 | ccid2_change_srtt(hctx, r); | 416 | ccid2_change_srtt(hc, r); |
420 | hctx->ccid2hctx_rttvar = r >> 1; | 417 | hc->tx_rttvar = r >> 1; |
421 | } else { | 418 | } else { |
422 | /* RTTVAR */ | 419 | /* RTTVAR */ |
423 | long tmp = hctx->ccid2hctx_srtt - r; | 420 | long tmp = hc->tx_srtt - r; |
424 | long srtt; | 421 | long srtt; |
425 | 422 | ||
426 | if (tmp < 0) | 423 | if (tmp < 0) |
427 | tmp *= -1; | 424 | tmp *= -1; |
428 | 425 | ||
429 | tmp >>= 2; | 426 | tmp >>= 2; |
430 | hctx->ccid2hctx_rttvar *= 3; | 427 | hc->tx_rttvar *= 3; |
431 | hctx->ccid2hctx_rttvar >>= 2; | 428 | hc->tx_rttvar >>= 2; |
432 | hctx->ccid2hctx_rttvar += tmp; | 429 | hc->tx_rttvar += tmp; |
433 | 430 | ||
434 | /* SRTT */ | 431 | /* SRTT */ |
435 | srtt = hctx->ccid2hctx_srtt; | 432 | srtt = hc->tx_srtt; |
436 | srtt *= 7; | 433 | srtt *= 7; |
437 | srtt >>= 3; | 434 | srtt >>= 3; |
438 | tmp = r >> 3; | 435 | tmp = r >> 3; |
439 | srtt += tmp; | 436 | srtt += tmp; |
440 | ccid2_change_srtt(hctx, srtt); | 437 | ccid2_change_srtt(hc, srtt); |
441 | } | 438 | } |
442 | s = hctx->ccid2hctx_rttvar << 2; | 439 | s = hc->tx_rttvar << 2; |
443 | /* clock granularity is 1 when based on jiffies */ | 440 | /* clock granularity is 1 when based on jiffies */ |
444 | if (!s) | 441 | if (!s) |
445 | s = 1; | 442 | s = 1; |
446 | hctx->ccid2hctx_rto = hctx->ccid2hctx_srtt + s; | 443 | hc->tx_rto = hc->tx_srtt + s; |
447 | 444 | ||
448 | /* must be at least a second */ | 445 | /* must be at least a second */ |
449 | s = hctx->ccid2hctx_rto / HZ; | 446 | s = hc->tx_rto / HZ; |
450 | /* DCCP doesn't require this [but I like it cuz my code sux] */ | 447 | /* DCCP doesn't require this [but I like it cuz my code sux] */ |
451 | #if 1 | 448 | #if 1 |
452 | if (s < 1) | 449 | if (s < 1) |
453 | hctx->ccid2hctx_rto = HZ; | 450 | hc->tx_rto = HZ; |
454 | #endif | 451 | #endif |
455 | /* max 60 seconds */ | 452 | /* max 60 seconds */ |
456 | if (s > 60) | 453 | if (s > 60) |
457 | hctx->ccid2hctx_rto = HZ * 60; | 454 | hc->tx_rto = HZ * 60; |
458 | 455 | ||
459 | hctx->ccid2hctx_lastrtt = jiffies; | 456 | hc->tx_lastrtt = jiffies; |
460 | 457 | ||
461 | ccid2_pr_debug("srtt: %ld rttvar: %ld rto: %ld (HZ=%d) R=%lu\n", | 458 | ccid2_pr_debug("srtt: %ld rttvar: %ld rto: %ld (HZ=%d) R=%lu\n", |
462 | hctx->ccid2hctx_srtt, hctx->ccid2hctx_rttvar, | 459 | hc->tx_srtt, hc->tx_rttvar, |
463 | hctx->ccid2hctx_rto, HZ, r); | 460 | hc->tx_rto, HZ, r); |
464 | } | 461 | } |
465 | 462 | ||
466 | /* we got a new ack, so re-start RTO timer */ | 463 | /* we got a new ack, so re-start RTO timer */ |
@@ -470,40 +467,40 @@ static inline void ccid2_new_ack(struct sock *sk, | |||
470 | 467 | ||
471 | static void ccid2_hc_tx_dec_pipe(struct sock *sk) | 468 | static void ccid2_hc_tx_dec_pipe(struct sock *sk) |
472 | { | 469 | { |
473 | struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk); | 470 | struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); |
474 | 471 | ||
475 | if (hctx->ccid2hctx_pipe == 0) | 472 | if (hc->tx_pipe == 0) |
476 | DCCP_BUG("pipe == 0"); | 473 | DCCP_BUG("pipe == 0"); |
477 | else | 474 | else |
478 | hctx->ccid2hctx_pipe--; | 475 | hc->tx_pipe--; |
479 | 476 | ||
480 | if (hctx->ccid2hctx_pipe == 0) | 477 | if (hc->tx_pipe == 0) |
481 | ccid2_hc_tx_kill_rto_timer(sk); | 478 | ccid2_hc_tx_kill_rto_timer(sk); |
482 | } | 479 | } |
483 | 480 | ||
484 | static void ccid2_congestion_event(struct sock *sk, struct ccid2_seq *seqp) | 481 | static void ccid2_congestion_event(struct sock *sk, struct ccid2_seq *seqp) |
485 | { | 482 | { |
486 | struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk); | 483 | struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); |
487 | 484 | ||
488 | if (time_before(seqp->ccid2s_sent, hctx->ccid2hctx_last_cong)) { | 485 | if (time_before(seqp->ccid2s_sent, hc->tx_last_cong)) { |
489 | ccid2_pr_debug("Multiple losses in an RTT---treating as one\n"); | 486 | ccid2_pr_debug("Multiple losses in an RTT---treating as one\n"); |
490 | return; | 487 | return; |
491 | } | 488 | } |
492 | 489 | ||
493 | hctx->ccid2hctx_last_cong = jiffies; | 490 | hc->tx_last_cong = jiffies; |
494 | 491 | ||
495 | hctx->ccid2hctx_cwnd = hctx->ccid2hctx_cwnd / 2 ? : 1U; | 492 | hc->tx_cwnd = hc->tx_cwnd / 2 ? : 1U; |
496 | hctx->ccid2hctx_ssthresh = max(hctx->ccid2hctx_cwnd, 2U); | 493 | hc->tx_ssthresh = max(hc->tx_cwnd, 2U); |
497 | 494 | ||
498 | /* Avoid spurious timeouts resulting from Ack Ratio > cwnd */ | 495 | /* Avoid spurious timeouts resulting from Ack Ratio > cwnd */ |
499 | if (dccp_sk(sk)->dccps_l_ack_ratio > hctx->ccid2hctx_cwnd) | 496 | if (dccp_sk(sk)->dccps_l_ack_ratio > hc->tx_cwnd) |
500 | ccid2_change_l_ack_ratio(sk, hctx->ccid2hctx_cwnd); | 497 | ccid2_change_l_ack_ratio(sk, hc->tx_cwnd); |
501 | } | 498 | } |
502 | 499 | ||
503 | static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) | 500 | static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) |
504 | { | 501 | { |
505 | struct dccp_sock *dp = dccp_sk(sk); | 502 | struct dccp_sock *dp = dccp_sk(sk); |
506 | struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk); | 503 | struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); |
507 | u64 ackno, seqno; | 504 | u64 ackno, seqno; |
508 | struct ccid2_seq *seqp; | 505 | struct ccid2_seq *seqp; |
509 | unsigned char *vector; | 506 | unsigned char *vector; |
@@ -512,7 +509,7 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
512 | int done = 0; | 509 | int done = 0; |
513 | unsigned int maxincr = 0; | 510 | unsigned int maxincr = 0; |
514 | 511 | ||
515 | ccid2_hc_tx_check_sanity(hctx); | 512 | ccid2_hc_tx_check_sanity(hc); |
516 | /* check reverse path congestion */ | 513 | /* check reverse path congestion */ |
517 | seqno = DCCP_SKB_CB(skb)->dccpd_seq; | 514 | seqno = DCCP_SKB_CB(skb)->dccpd_seq; |
518 | 515 | ||
@@ -521,21 +518,21 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
521 | * -sorbo. | 518 | * -sorbo. |
522 | */ | 519 | */ |
523 | /* need to bootstrap */ | 520 | /* need to bootstrap */ |
524 | if (hctx->ccid2hctx_rpdupack == -1) { | 521 | if (hc->tx_rpdupack == -1) { |
525 | hctx->ccid2hctx_rpdupack = 0; | 522 | hc->tx_rpdupack = 0; |
526 | hctx->ccid2hctx_rpseq = seqno; | 523 | hc->tx_rpseq = seqno; |
527 | } else { | 524 | } else { |
528 | /* check if packet is consecutive */ | 525 | /* check if packet is consecutive */ |
529 | if (dccp_delta_seqno(hctx->ccid2hctx_rpseq, seqno) == 1) | 526 | if (dccp_delta_seqno(hc->tx_rpseq, seqno) == 1) |
530 | hctx->ccid2hctx_rpseq = seqno; | 527 | hc->tx_rpseq = seqno; |
531 | /* it's a later packet */ | 528 | /* it's a later packet */ |
532 | else if (after48(seqno, hctx->ccid2hctx_rpseq)) { | 529 | else if (after48(seqno, hc->tx_rpseq)) { |
533 | hctx->ccid2hctx_rpdupack++; | 530 | hc->tx_rpdupack++; |
534 | 531 | ||
535 | /* check if we got enough dupacks */ | 532 | /* check if we got enough dupacks */ |
536 | if (hctx->ccid2hctx_rpdupack >= NUMDUPACK) { | 533 | if (hc->tx_rpdupack >= NUMDUPACK) { |
537 | hctx->ccid2hctx_rpdupack = -1; /* XXX lame */ | 534 | hc->tx_rpdupack = -1; /* XXX lame */ |
538 | hctx->ccid2hctx_rpseq = 0; | 535 | hc->tx_rpseq = 0; |
539 | 536 | ||
540 | ccid2_change_l_ack_ratio(sk, 2 * dp->dccps_l_ack_ratio); | 537 | ccid2_change_l_ack_ratio(sk, 2 * dp->dccps_l_ack_ratio); |
541 | } | 538 | } |
@@ -544,7 +541,7 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
544 | 541 | ||
545 | /* check forward path congestion */ | 542 | /* check forward path congestion */ |
546 | /* still didn't send out new data packets */ | 543 | /* still didn't send out new data packets */ |
547 | if (hctx->ccid2hctx_seqh == hctx->ccid2hctx_seqt) | 544 | if (hc->tx_seqh == hc->tx_seqt) |
548 | return; | 545 | return; |
549 | 546 | ||
550 | switch (DCCP_SKB_CB(skb)->dccpd_type) { | 547 | switch (DCCP_SKB_CB(skb)->dccpd_type) { |
@@ -556,14 +553,14 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
556 | } | 553 | } |
557 | 554 | ||
558 | ackno = DCCP_SKB_CB(skb)->dccpd_ack_seq; | 555 | ackno = DCCP_SKB_CB(skb)->dccpd_ack_seq; |
559 | if (after48(ackno, hctx->ccid2hctx_high_ack)) | 556 | if (after48(ackno, hc->tx_high_ack)) |
560 | hctx->ccid2hctx_high_ack = ackno; | 557 | hc->tx_high_ack = ackno; |
561 | 558 | ||
562 | seqp = hctx->ccid2hctx_seqt; | 559 | seqp = hc->tx_seqt; |
563 | while (before48(seqp->ccid2s_seq, ackno)) { | 560 | while (before48(seqp->ccid2s_seq, ackno)) { |
564 | seqp = seqp->ccid2s_next; | 561 | seqp = seqp->ccid2s_next; |
565 | if (seqp == hctx->ccid2hctx_seqh) { | 562 | if (seqp == hc->tx_seqh) { |
566 | seqp = hctx->ccid2hctx_seqh->ccid2s_prev; | 563 | seqp = hc->tx_seqh->ccid2s_prev; |
567 | break; | 564 | break; |
568 | } | 565 | } |
569 | } | 566 | } |
@@ -573,7 +570,7 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
573 | * packets per acknowledgement. Rounding up avoids that cwnd is not | 570 | * packets per acknowledgement. Rounding up avoids that cwnd is not |
574 | * advanced when Ack Ratio is 1 and gives a slight edge otherwise. | 571 | * advanced when Ack Ratio is 1 and gives a slight edge otherwise. |
575 | */ | 572 | */ |
576 | if (hctx->ccid2hctx_cwnd < hctx->ccid2hctx_ssthresh) | 573 | if (hc->tx_cwnd < hc->tx_ssthresh) |
577 | maxincr = DIV_ROUND_UP(dp->dccps_l_ack_ratio, 2); | 574 | maxincr = DIV_ROUND_UP(dp->dccps_l_ack_ratio, 2); |
578 | 575 | ||
579 | /* go through all ack vectors */ | 576 | /* go through all ack vectors */ |
@@ -592,7 +589,7 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
592 | * seqnos. | 589 | * seqnos. |
593 | */ | 590 | */ |
594 | while (after48(seqp->ccid2s_seq, ackno)) { | 591 | while (after48(seqp->ccid2s_seq, ackno)) { |
595 | if (seqp == hctx->ccid2hctx_seqt) { | 592 | if (seqp == hc->tx_seqt) { |
596 | done = 1; | 593 | done = 1; |
597 | break; | 594 | break; |
598 | } | 595 | } |
@@ -624,7 +621,7 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
624 | (unsigned long long)seqp->ccid2s_seq); | 621 | (unsigned long long)seqp->ccid2s_seq); |
625 | ccid2_hc_tx_dec_pipe(sk); | 622 | ccid2_hc_tx_dec_pipe(sk); |
626 | } | 623 | } |
627 | if (seqp == hctx->ccid2hctx_seqt) { | 624 | if (seqp == hc->tx_seqt) { |
628 | done = 1; | 625 | done = 1; |
629 | break; | 626 | break; |
630 | } | 627 | } |
@@ -643,11 +640,11 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
643 | /* The state about what is acked should be correct now | 640 | /* The state about what is acked should be correct now |
644 | * Check for NUMDUPACK | 641 | * Check for NUMDUPACK |
645 | */ | 642 | */ |
646 | seqp = hctx->ccid2hctx_seqt; | 643 | seqp = hc->tx_seqt; |
647 | while (before48(seqp->ccid2s_seq, hctx->ccid2hctx_high_ack)) { | 644 | while (before48(seqp->ccid2s_seq, hc->tx_high_ack)) { |
648 | seqp = seqp->ccid2s_next; | 645 | seqp = seqp->ccid2s_next; |
649 | if (seqp == hctx->ccid2hctx_seqh) { | 646 | if (seqp == hc->tx_seqh) { |
650 | seqp = hctx->ccid2hctx_seqh->ccid2s_prev; | 647 | seqp = hc->tx_seqh->ccid2s_prev; |
651 | break; | 648 | break; |
652 | } | 649 | } |
653 | } | 650 | } |
@@ -658,7 +655,7 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
658 | if (done == NUMDUPACK) | 655 | if (done == NUMDUPACK) |
659 | break; | 656 | break; |
660 | } | 657 | } |
661 | if (seqp == hctx->ccid2hctx_seqt) | 658 | if (seqp == hc->tx_seqt) |
662 | break; | 659 | break; |
663 | seqp = seqp->ccid2s_prev; | 660 | seqp = seqp->ccid2s_prev; |
664 | } | 661 | } |
@@ -681,86 +678,86 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
681 | ccid2_congestion_event(sk, seqp); | 678 | ccid2_congestion_event(sk, seqp); |
682 | ccid2_hc_tx_dec_pipe(sk); | 679 | ccid2_hc_tx_dec_pipe(sk); |
683 | } | 680 | } |
684 | if (seqp == hctx->ccid2hctx_seqt) | 681 | if (seqp == hc->tx_seqt) |
685 | break; | 682 | break; |
686 | seqp = seqp->ccid2s_prev; | 683 | seqp = seqp->ccid2s_prev; |
687 | } | 684 | } |
688 | 685 | ||
689 | hctx->ccid2hctx_seqt = last_acked; | 686 | hc->tx_seqt = last_acked; |
690 | } | 687 | } |
691 | 688 | ||
692 | /* trim acked packets in tail */ | 689 | /* trim acked packets in tail */ |
693 | while (hctx->ccid2hctx_seqt != hctx->ccid2hctx_seqh) { | 690 | while (hc->tx_seqt != hc->tx_seqh) { |
694 | if (!hctx->ccid2hctx_seqt->ccid2s_acked) | 691 | if (!hc->tx_seqt->ccid2s_acked) |
695 | break; | 692 | break; |
696 | 693 | ||
697 | hctx->ccid2hctx_seqt = hctx->ccid2hctx_seqt->ccid2s_next; | 694 | hc->tx_seqt = hc->tx_seqt->ccid2s_next; |
698 | } | 695 | } |
699 | 696 | ||
700 | ccid2_hc_tx_check_sanity(hctx); | 697 | ccid2_hc_tx_check_sanity(hc); |
701 | } | 698 | } |
702 | 699 | ||
703 | static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk) | 700 | static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk) |
704 | { | 701 | { |
705 | struct ccid2_hc_tx_sock *hctx = ccid_priv(ccid); | 702 | struct ccid2_hc_tx_sock *hc = ccid_priv(ccid); |
706 | struct dccp_sock *dp = dccp_sk(sk); | 703 | struct dccp_sock *dp = dccp_sk(sk); |
707 | u32 max_ratio; | 704 | u32 max_ratio; |
708 | 705 | ||
709 | /* RFC 4341, 5: initialise ssthresh to arbitrarily high (max) value */ | 706 | /* RFC 4341, 5: initialise ssthresh to arbitrarily high (max) value */ |
710 | hctx->ccid2hctx_ssthresh = ~0U; | 707 | hc->tx_ssthresh = ~0U; |
711 | 708 | ||
712 | /* | 709 | /* |
713 | * RFC 4341, 5: "The cwnd parameter is initialized to at most four | 710 | * RFC 4341, 5: "The cwnd parameter is initialized to at most four |
714 | * packets for new connections, following the rules from [RFC3390]". | 711 | * packets for new connections, following the rules from [RFC3390]". |
715 | * We need to convert the bytes of RFC3390 into the packets of RFC 4341. | 712 | * We need to convert the bytes of RFC3390 into the packets of RFC 4341. |
716 | */ | 713 | */ |
717 | hctx->ccid2hctx_cwnd = clamp(4380U / dp->dccps_mss_cache, 2U, 4U); | 714 | hc->tx_cwnd = clamp(4380U / dp->dccps_mss_cache, 2U, 4U); |
718 | 715 | ||
719 | /* Make sure that Ack Ratio is enabled and within bounds. */ | 716 | /* Make sure that Ack Ratio is enabled and within bounds. */ |
720 | max_ratio = DIV_ROUND_UP(hctx->ccid2hctx_cwnd, 2); | 717 | max_ratio = DIV_ROUND_UP(hc->tx_cwnd, 2); |
721 | if (dp->dccps_l_ack_ratio == 0 || dp->dccps_l_ack_ratio > max_ratio) | 718 | if (dp->dccps_l_ack_ratio == 0 || dp->dccps_l_ack_ratio > max_ratio) |
722 | dp->dccps_l_ack_ratio = max_ratio; | 719 | dp->dccps_l_ack_ratio = max_ratio; |
723 | 720 | ||
724 | /* XXX init ~ to window size... */ | 721 | /* XXX init ~ to window size... */ |
725 | if (ccid2_hc_tx_alloc_seq(hctx)) | 722 | if (ccid2_hc_tx_alloc_seq(hc)) |
726 | return -ENOMEM; | 723 | return -ENOMEM; |
727 | 724 | ||
728 | hctx->ccid2hctx_rto = 3 * HZ; | 725 | hc->tx_rto = 3 * HZ; |
729 | ccid2_change_srtt(hctx, -1); | 726 | ccid2_change_srtt(hc, -1); |
730 | hctx->ccid2hctx_rttvar = -1; | 727 | hc->tx_rttvar = -1; |
731 | hctx->ccid2hctx_rpdupack = -1; | 728 | hc->tx_rpdupack = -1; |
732 | hctx->ccid2hctx_last_cong = jiffies; | 729 | hc->tx_last_cong = jiffies; |
733 | setup_timer(&hctx->ccid2hctx_rtotimer, ccid2_hc_tx_rto_expire, | 730 | setup_timer(&hc->tx_rtotimer, ccid2_hc_tx_rto_expire, |
734 | (unsigned long)sk); | 731 | (unsigned long)sk); |
735 | 732 | ||
736 | ccid2_hc_tx_check_sanity(hctx); | 733 | ccid2_hc_tx_check_sanity(hc); |
737 | return 0; | 734 | return 0; |
738 | } | 735 | } |
739 | 736 | ||
740 | static void ccid2_hc_tx_exit(struct sock *sk) | 737 | static void ccid2_hc_tx_exit(struct sock *sk) |
741 | { | 738 | { |
742 | struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk); | 739 | struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); |
743 | int i; | 740 | int i; |
744 | 741 | ||
745 | ccid2_hc_tx_kill_rto_timer(sk); | 742 | ccid2_hc_tx_kill_rto_timer(sk); |
746 | 743 | ||
747 | for (i = 0; i < hctx->ccid2hctx_seqbufc; i++) | 744 | for (i = 0; i < hc->tx_seqbufc; i++) |
748 | kfree(hctx->ccid2hctx_seqbuf[i]); | 745 | kfree(hc->tx_seqbuf[i]); |
749 | hctx->ccid2hctx_seqbufc = 0; | 746 | hc->tx_seqbufc = 0; |
750 | } | 747 | } |
751 | 748 | ||
752 | static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) | 749 | static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) |
753 | { | 750 | { |
754 | const struct dccp_sock *dp = dccp_sk(sk); | 751 | const struct dccp_sock *dp = dccp_sk(sk); |
755 | struct ccid2_hc_rx_sock *hcrx = ccid2_hc_rx_sk(sk); | 752 | struct ccid2_hc_rx_sock *hc = ccid2_hc_rx_sk(sk); |
756 | 753 | ||
757 | switch (DCCP_SKB_CB(skb)->dccpd_type) { | 754 | switch (DCCP_SKB_CB(skb)->dccpd_type) { |
758 | case DCCP_PKT_DATA: | 755 | case DCCP_PKT_DATA: |
759 | case DCCP_PKT_DATAACK: | 756 | case DCCP_PKT_DATAACK: |
760 | hcrx->ccid2hcrx_data++; | 757 | hc->rx_data++; |
761 | if (hcrx->ccid2hcrx_data >= dp->dccps_r_ack_ratio) { | 758 | if (hc->rx_data >= dp->dccps_r_ack_ratio) { |
762 | dccp_send_ack(sk); | 759 | dccp_send_ack(sk); |
763 | hcrx->ccid2hcrx_data = 0; | 760 | hc->rx_data = 0; |
764 | } | 761 | } |
765 | break; | 762 | break; |
766 | } | 763 | } |
diff --git a/net/dccp/ccids/ccid2.h b/net/dccp/ccids/ccid2.h index 326ac90fb909..1ec6a30103bb 100644 --- a/net/dccp/ccids/ccid2.h +++ b/net/dccp/ccids/ccid2.h | |||
@@ -40,34 +40,34 @@ struct ccid2_seq { | |||
40 | 40 | ||
41 | /** | 41 | /** |
42 | * struct ccid2_hc_tx_sock - CCID2 TX half connection | 42 | * struct ccid2_hc_tx_sock - CCID2 TX half connection |
43 | * @ccid2hctx_{cwnd,ssthresh,pipe}: as per RFC 4341, section 5 | 43 | * @tx_{cwnd,ssthresh,pipe}: as per RFC 4341, section 5 |
44 | * @ccid2hctx_packets_acked - Ack counter for deriving cwnd growth (RFC 3465) | 44 | * @tx_packets_acked: Ack counter for deriving cwnd growth (RFC 3465) |
45 | * @ccid2hctx_lastrtt -time RTT was last measured | 45 | * @tx_lastrtt: time RTT was last measured |
46 | * @ccid2hctx_rpseq - last consecutive seqno | 46 | * @tx_rpseq: last consecutive seqno |
47 | * @ccid2hctx_rpdupack - dupacks since rpseq | 47 | * @tx_rpdupack: dupacks since rpseq |
48 | */ | 48 | */ |
49 | struct ccid2_hc_tx_sock { | 49 | struct ccid2_hc_tx_sock { |
50 | u32 ccid2hctx_cwnd; | 50 | u32 tx_cwnd; |
51 | u32 ccid2hctx_ssthresh; | 51 | u32 tx_ssthresh; |
52 | u32 ccid2hctx_pipe; | 52 | u32 tx_pipe; |
53 | u32 ccid2hctx_packets_acked; | 53 | u32 tx_packets_acked; |
54 | struct ccid2_seq *ccid2hctx_seqbuf[CCID2_SEQBUF_MAX]; | 54 | struct ccid2_seq *tx_seqbuf[CCID2_SEQBUF_MAX]; |
55 | int ccid2hctx_seqbufc; | 55 | int tx_seqbufc; |
56 | struct ccid2_seq *ccid2hctx_seqh; | 56 | struct ccid2_seq *tx_seqh; |
57 | struct ccid2_seq *ccid2hctx_seqt; | 57 | struct ccid2_seq *tx_seqt; |
58 | long ccid2hctx_rto; | 58 | long tx_rto; |
59 | long ccid2hctx_srtt; | 59 | long tx_srtt; |
60 | long ccid2hctx_rttvar; | 60 | long tx_rttvar; |
61 | unsigned long ccid2hctx_lastrtt; | 61 | unsigned long tx_lastrtt; |
62 | struct timer_list ccid2hctx_rtotimer; | 62 | struct timer_list tx_rtotimer; |
63 | u64 ccid2hctx_rpseq; | 63 | u64 tx_rpseq; |
64 | int ccid2hctx_rpdupack; | 64 | int tx_rpdupack; |
65 | unsigned long ccid2hctx_last_cong; | 65 | unsigned long tx_last_cong; |
66 | u64 ccid2hctx_high_ack; | 66 | u64 tx_high_ack; |
67 | }; | 67 | }; |
68 | 68 | ||
69 | struct ccid2_hc_rx_sock { | 69 | struct ccid2_hc_rx_sock { |
70 | int ccid2hcrx_data; | 70 | int rx_data; |
71 | }; | 71 | }; |
72 | 72 | ||
73 | static inline struct ccid2_hc_tx_sock *ccid2_hc_tx_sk(const struct sock *sk) | 73 | static inline struct ccid2_hc_tx_sock *ccid2_hc_tx_sk(const struct sock *sk) |
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c index 34dcc798c457..bcd7632299f5 100644 --- a/net/dccp/ccids/ccid3.c +++ b/net/dccp/ccids/ccid3.c | |||
@@ -64,14 +64,14 @@ static const char *ccid3_tx_state_name(enum ccid3_hc_tx_states state) | |||
64 | static void ccid3_hc_tx_set_state(struct sock *sk, | 64 | static void ccid3_hc_tx_set_state(struct sock *sk, |
65 | enum ccid3_hc_tx_states state) | 65 | enum ccid3_hc_tx_states state) |
66 | { | 66 | { |
67 | struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); | 67 | struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk); |
68 | enum ccid3_hc_tx_states oldstate = hctx->ccid3hctx_state; | 68 | enum ccid3_hc_tx_states oldstate = hc->tx_state; |
69 | 69 | ||
70 | ccid3_pr_debug("%s(%p) %-8.8s -> %s\n", | 70 | ccid3_pr_debug("%s(%p) %-8.8s -> %s\n", |
71 | dccp_role(sk), sk, ccid3_tx_state_name(oldstate), | 71 | dccp_role(sk), sk, ccid3_tx_state_name(oldstate), |
72 | ccid3_tx_state_name(state)); | 72 | ccid3_tx_state_name(state)); |
73 | WARN_ON(state == oldstate); | 73 | WARN_ON(state == oldstate); |
74 | hctx->ccid3hctx_state = state; | 74 | hc->tx_state = state; |
75 | } | 75 | } |
76 | 76 | ||
77 | /* | 77 | /* |
@@ -85,37 +85,32 @@ static void ccid3_hc_tx_set_state(struct sock *sk, | |||
85 | */ | 85 | */ |
86 | static inline u64 rfc3390_initial_rate(struct sock *sk) | 86 | static inline u64 rfc3390_initial_rate(struct sock *sk) |
87 | { | 87 | { |
88 | const struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); | 88 | const struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk); |
89 | const __u32 w_init = clamp_t(__u32, 4380U, | 89 | const __u32 w_init = clamp_t(__u32, 4380U, 2 * hc->tx_s, 4 * hc->tx_s); |
90 | 2 * hctx->ccid3hctx_s, 4 * hctx->ccid3hctx_s); | ||
91 | 90 | ||
92 | return scaled_div(w_init << 6, hctx->ccid3hctx_rtt); | 91 | return scaled_div(w_init << 6, hc->tx_rtt); |
93 | } | 92 | } |
94 | 93 | ||
95 | /* | 94 | /* |
96 | * Recalculate t_ipi and delta (should be called whenever X changes) | 95 | * Recalculate t_ipi and delta (should be called whenever X changes) |
97 | */ | 96 | */ |
98 | static void ccid3_update_send_interval(struct ccid3_hc_tx_sock *hctx) | 97 | static void ccid3_update_send_interval(struct ccid3_hc_tx_sock *hc) |
99 | { | 98 | { |
100 | /* Calculate new t_ipi = s / X_inst (X_inst is in 64 * bytes/second) */ | 99 | /* Calculate new t_ipi = s / X_inst (X_inst is in 64 * bytes/second) */ |
101 | hctx->ccid3hctx_t_ipi = scaled_div32(((u64)hctx->ccid3hctx_s) << 6, | 100 | hc->tx_t_ipi = scaled_div32(((u64)hc->tx_s) << 6, hc->tx_x); |
102 | hctx->ccid3hctx_x); | ||
103 | 101 | ||
104 | /* Calculate new delta by delta = min(t_ipi / 2, t_gran / 2) */ | 102 | /* Calculate new delta by delta = min(t_ipi / 2, t_gran / 2) */ |
105 | hctx->ccid3hctx_delta = min_t(u32, hctx->ccid3hctx_t_ipi / 2, | 103 | hc->tx_delta = min_t(u32, hc->tx_t_ipi / 2, TFRC_OPSYS_HALF_TIME_GRAN); |
106 | TFRC_OPSYS_HALF_TIME_GRAN); | ||
107 | |||
108 | ccid3_pr_debug("t_ipi=%u, delta=%u, s=%u, X=%u\n", | ||
109 | hctx->ccid3hctx_t_ipi, hctx->ccid3hctx_delta, | ||
110 | hctx->ccid3hctx_s, (unsigned)(hctx->ccid3hctx_x >> 6)); | ||
111 | 104 | ||
105 | ccid3_pr_debug("t_ipi=%u, delta=%u, s=%u, X=%u\n", hc->tx_t_ipi, | ||
106 | hc->tx_delta, hc->tx_s, (unsigned)(hc->tx_x >> 6)); | ||
112 | } | 107 | } |
113 | 108 | ||
114 | static u32 ccid3_hc_tx_idle_rtt(struct ccid3_hc_tx_sock *hctx, ktime_t now) | 109 | static u32 ccid3_hc_tx_idle_rtt(struct ccid3_hc_tx_sock *hc, ktime_t now) |
115 | { | 110 | { |
116 | u32 delta = ktime_us_delta(now, hctx->ccid3hctx_t_last_win_count); | 111 | u32 delta = ktime_us_delta(now, hc->tx_t_last_win_count); |
117 | 112 | ||
118 | return delta / hctx->ccid3hctx_rtt; | 113 | return delta / hc->tx_rtt; |
119 | } | 114 | } |
120 | 115 | ||
121 | /** | 116 | /** |
@@ -130,9 +125,9 @@ static u32 ccid3_hc_tx_idle_rtt(struct ccid3_hc_tx_sock *hctx, ktime_t now) | |||
130 | */ | 125 | */ |
131 | static void ccid3_hc_tx_update_x(struct sock *sk, ktime_t *stamp) | 126 | static void ccid3_hc_tx_update_x(struct sock *sk, ktime_t *stamp) |
132 | { | 127 | { |
133 | struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); | 128 | struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk); |
134 | __u64 min_rate = 2 * hctx->ccid3hctx_x_recv; | 129 | __u64 min_rate = 2 * hc->tx_x_recv; |
135 | const __u64 old_x = hctx->ccid3hctx_x; | 130 | const __u64 old_x = hc->tx_x; |
136 | ktime_t now = stamp ? *stamp : ktime_get_real(); | 131 | ktime_t now = stamp ? *stamp : ktime_get_real(); |
137 | 132 | ||
138 | /* | 133 | /* |
@@ -141,37 +136,31 @@ static void ccid3_hc_tx_update_x(struct sock *sk, ktime_t *stamp) | |||
141 | * a sender is idle if it has not sent anything over a 2-RTT-period. | 136 | * a sender is idle if it has not sent anything over a 2-RTT-period. |
142 | * For consistency with X and X_recv, min_rate is also scaled by 2^6. | 137 | * For consistency with X and X_recv, min_rate is also scaled by 2^6. |
143 | */ | 138 | */ |
144 | if (ccid3_hc_tx_idle_rtt(hctx, now) >= 2) { | 139 | if (ccid3_hc_tx_idle_rtt(hc, now) >= 2) { |
145 | min_rate = rfc3390_initial_rate(sk); | 140 | min_rate = rfc3390_initial_rate(sk); |
146 | min_rate = max(min_rate, 2 * hctx->ccid3hctx_x_recv); | 141 | min_rate = max(min_rate, 2 * hc->tx_x_recv); |
147 | } | 142 | } |
148 | 143 | ||
149 | if (hctx->ccid3hctx_p > 0) { | 144 | if (hc->tx_p > 0) { |
150 | 145 | ||
151 | hctx->ccid3hctx_x = min(((__u64)hctx->ccid3hctx_x_calc) << 6, | 146 | hc->tx_x = min(((__u64)hc->tx_x_calc) << 6, min_rate); |
152 | min_rate); | 147 | hc->tx_x = max(hc->tx_x, (((__u64)hc->tx_s) << 6) / TFRC_T_MBI); |
153 | hctx->ccid3hctx_x = max(hctx->ccid3hctx_x, | ||
154 | (((__u64)hctx->ccid3hctx_s) << 6) / | ||
155 | TFRC_T_MBI); | ||
156 | 148 | ||
157 | } else if (ktime_us_delta(now, hctx->ccid3hctx_t_ld) | 149 | } else if (ktime_us_delta(now, hc->tx_t_ld) - (s64)hc->tx_rtt >= 0) { |
158 | - (s64)hctx->ccid3hctx_rtt >= 0) { | ||
159 | 150 | ||
160 | hctx->ccid3hctx_x = min(2 * hctx->ccid3hctx_x, min_rate); | 151 | hc->tx_x = min(2 * hc->tx_x, min_rate); |
161 | hctx->ccid3hctx_x = max(hctx->ccid3hctx_x, | 152 | hc->tx_x = max(hc->tx_x, |
162 | scaled_div(((__u64)hctx->ccid3hctx_s) << 6, | 153 | scaled_div(((__u64)hc->tx_s) << 6, hc->tx_rtt)); |
163 | hctx->ccid3hctx_rtt)); | 154 | hc->tx_t_ld = now; |
164 | hctx->ccid3hctx_t_ld = now; | ||
165 | } | 155 | } |
166 | 156 | ||
167 | if (hctx->ccid3hctx_x != old_x) { | 157 | if (hc->tx_x != old_x) { |
168 | ccid3_pr_debug("X_prev=%u, X_now=%u, X_calc=%u, " | 158 | ccid3_pr_debug("X_prev=%u, X_now=%u, X_calc=%u, " |
169 | "X_recv=%u\n", (unsigned)(old_x >> 6), | 159 | "X_recv=%u\n", (unsigned)(old_x >> 6), |
170 | (unsigned)(hctx->ccid3hctx_x >> 6), | 160 | (unsigned)(hc->tx_x >> 6), hc->tx_x_calc, |
171 | hctx->ccid3hctx_x_calc, | 161 | (unsigned)(hc->tx_x_recv >> 6)); |
172 | (unsigned)(hctx->ccid3hctx_x_recv >> 6)); | ||
173 | 162 | ||
174 | ccid3_update_send_interval(hctx); | 163 | ccid3_update_send_interval(hc); |
175 | } | 164 | } |
176 | } | 165 | } |
177 | 166 | ||
@@ -179,37 +168,37 @@ static void ccid3_hc_tx_update_x(struct sock *sk, ktime_t *stamp) | |||
179 | * Track the mean packet size `s' (cf. RFC 4342, 5.3 and RFC 3448, 4.1) | 168 | * Track the mean packet size `s' (cf. RFC 4342, 5.3 and RFC 3448, 4.1) |
180 | * @len: DCCP packet payload size in bytes | 169 | * @len: DCCP packet payload size in bytes |
181 | */ | 170 | */ |
182 | static inline void ccid3_hc_tx_update_s(struct ccid3_hc_tx_sock *hctx, int len) | 171 | static inline void ccid3_hc_tx_update_s(struct ccid3_hc_tx_sock *hc, int len) |
183 | { | 172 | { |
184 | const u16 old_s = hctx->ccid3hctx_s; | 173 | const u16 old_s = hc->tx_s; |
185 | 174 | ||
186 | hctx->ccid3hctx_s = tfrc_ewma(hctx->ccid3hctx_s, len, 9); | 175 | hc->tx_s = tfrc_ewma(hc->tx_s, len, 9); |
187 | 176 | ||
188 | if (hctx->ccid3hctx_s != old_s) | 177 | if (hc->tx_s != old_s) |
189 | ccid3_update_send_interval(hctx); | 178 | ccid3_update_send_interval(hc); |
190 | } | 179 | } |
191 | 180 | ||
192 | /* | 181 | /* |
193 | * Update Window Counter using the algorithm from [RFC 4342, 8.1]. | 182 | * Update Window Counter using the algorithm from [RFC 4342, 8.1]. |
194 | * As elsewhere, RTT > 0 is assumed by using dccp_sample_rtt(). | 183 | * As elsewhere, RTT > 0 is assumed by using dccp_sample_rtt(). |
195 | */ | 184 | */ |
196 | static inline void ccid3_hc_tx_update_win_count(struct ccid3_hc_tx_sock *hctx, | 185 | static inline void ccid3_hc_tx_update_win_count(struct ccid3_hc_tx_sock *hc, |
197 | ktime_t now) | 186 | ktime_t now) |
198 | { | 187 | { |
199 | u32 delta = ktime_us_delta(now, hctx->ccid3hctx_t_last_win_count), | 188 | u32 delta = ktime_us_delta(now, hc->tx_t_last_win_count), |
200 | quarter_rtts = (4 * delta) / hctx->ccid3hctx_rtt; | 189 | quarter_rtts = (4 * delta) / hc->tx_rtt; |
201 | 190 | ||
202 | if (quarter_rtts > 0) { | 191 | if (quarter_rtts > 0) { |
203 | hctx->ccid3hctx_t_last_win_count = now; | 192 | hc->tx_t_last_win_count = now; |
204 | hctx->ccid3hctx_last_win_count += min(quarter_rtts, 5U); | 193 | hc->tx_last_win_count += min(quarter_rtts, 5U); |
205 | hctx->ccid3hctx_last_win_count &= 0xF; /* mod 16 */ | 194 | hc->tx_last_win_count &= 0xF; /* mod 16 */ |
206 | } | 195 | } |
207 | } | 196 | } |
208 | 197 | ||
209 | static void ccid3_hc_tx_no_feedback_timer(unsigned long data) | 198 | static void ccid3_hc_tx_no_feedback_timer(unsigned long data) |
210 | { | 199 | { |
211 | struct sock *sk = (struct sock *)data; | 200 | struct sock *sk = (struct sock *)data; |
212 | struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); | 201 | struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk); |
213 | unsigned long t_nfb = USEC_PER_SEC / 5; | 202 | unsigned long t_nfb = USEC_PER_SEC / 5; |
214 | 203 | ||
215 | bh_lock_sock(sk); | 204 | bh_lock_sock(sk); |
@@ -220,24 +209,23 @@ static void ccid3_hc_tx_no_feedback_timer(unsigned long data) | |||
220 | } | 209 | } |
221 | 210 | ||
222 | ccid3_pr_debug("%s(%p, state=%s) - entry \n", dccp_role(sk), sk, | 211 | ccid3_pr_debug("%s(%p, state=%s) - entry \n", dccp_role(sk), sk, |
223 | ccid3_tx_state_name(hctx->ccid3hctx_state)); | 212 | ccid3_tx_state_name(hc->tx_state)); |
224 | 213 | ||
225 | if (hctx->ccid3hctx_state == TFRC_SSTATE_FBACK) | 214 | if (hc->tx_state == TFRC_SSTATE_FBACK) |
226 | ccid3_hc_tx_set_state(sk, TFRC_SSTATE_NO_FBACK); | 215 | ccid3_hc_tx_set_state(sk, TFRC_SSTATE_NO_FBACK); |
227 | else if (hctx->ccid3hctx_state != TFRC_SSTATE_NO_FBACK) | 216 | else if (hc->tx_state != TFRC_SSTATE_NO_FBACK) |
228 | goto out; | 217 | goto out; |
229 | 218 | ||
230 | /* | 219 | /* |
231 | * Determine new allowed sending rate X as per draft rfc3448bis-00, 4.4 | 220 | * Determine new allowed sending rate X as per draft rfc3448bis-00, 4.4 |
232 | */ | 221 | */ |
233 | if (hctx->ccid3hctx_t_rto == 0 || /* no feedback received yet */ | 222 | if (hc->tx_t_rto == 0 || /* no feedback received yet */ |
234 | hctx->ccid3hctx_p == 0) { | 223 | hc->tx_p == 0) { |
235 | 224 | ||
236 | /* halve send rate directly */ | 225 | /* halve send rate directly */ |
237 | hctx->ccid3hctx_x = max(hctx->ccid3hctx_x / 2, | 226 | hc->tx_x = max(hc->tx_x / 2, |
238 | (((__u64)hctx->ccid3hctx_s) << 6) / | 227 | (((__u64)hc->tx_s) << 6) / TFRC_T_MBI); |
239 | TFRC_T_MBI); | 228 | ccid3_update_send_interval(hc); |
240 | ccid3_update_send_interval(hctx); | ||
241 | } else { | 229 | } else { |
242 | /* | 230 | /* |
243 | * Modify the cached value of X_recv | 231 | * Modify the cached value of X_recv |
@@ -249,33 +237,32 @@ static void ccid3_hc_tx_no_feedback_timer(unsigned long data) | |||
249 | * | 237 | * |
250 | * Note that X_recv is scaled by 2^6 while X_calc is not | 238 | * Note that X_recv is scaled by 2^6 while X_calc is not |
251 | */ | 239 | */ |
252 | BUG_ON(hctx->ccid3hctx_p && !hctx->ccid3hctx_x_calc); | 240 | BUG_ON(hc->tx_p && !hc->tx_x_calc); |
253 | 241 | ||
254 | if (hctx->ccid3hctx_x_calc > (hctx->ccid3hctx_x_recv >> 5)) | 242 | if (hc->tx_x_calc > (hc->tx_x_recv >> 5)) |
255 | hctx->ccid3hctx_x_recv = | 243 | hc->tx_x_recv = |
256 | max(hctx->ccid3hctx_x_recv / 2, | 244 | max(hc->tx_x_recv / 2, |
257 | (((__u64)hctx->ccid3hctx_s) << 6) / | 245 | (((__u64)hc->tx_s) << 6) / (2*TFRC_T_MBI)); |
258 | (2 * TFRC_T_MBI)); | ||
259 | else { | 246 | else { |
260 | hctx->ccid3hctx_x_recv = hctx->ccid3hctx_x_calc; | 247 | hc->tx_x_recv = hc->tx_x_calc; |
261 | hctx->ccid3hctx_x_recv <<= 4; | 248 | hc->tx_x_recv <<= 4; |
262 | } | 249 | } |
263 | ccid3_hc_tx_update_x(sk, NULL); | 250 | ccid3_hc_tx_update_x(sk, NULL); |
264 | } | 251 | } |
265 | ccid3_pr_debug("Reduced X to %llu/64 bytes/sec\n", | 252 | ccid3_pr_debug("Reduced X to %llu/64 bytes/sec\n", |
266 | (unsigned long long)hctx->ccid3hctx_x); | 253 | (unsigned long long)hc->tx_x); |
267 | 254 | ||
268 | /* | 255 | /* |
269 | * Set new timeout for the nofeedback timer. | 256 | * Set new timeout for the nofeedback timer. |
270 | * See comments in packet_recv() regarding the value of t_RTO. | 257 | * See comments in packet_recv() regarding the value of t_RTO. |
271 | */ | 258 | */ |
272 | if (unlikely(hctx->ccid3hctx_t_rto == 0)) /* no feedback yet */ | 259 | if (unlikely(hc->tx_t_rto == 0)) /* no feedback yet */ |
273 | t_nfb = TFRC_INITIAL_TIMEOUT; | 260 | t_nfb = TFRC_INITIAL_TIMEOUT; |
274 | else | 261 | else |
275 | t_nfb = max(hctx->ccid3hctx_t_rto, 2 * hctx->ccid3hctx_t_ipi); | 262 | t_nfb = max(hc->tx_t_rto, 2 * hc->tx_t_ipi); |
276 | 263 | ||
277 | restart_timer: | 264 | restart_timer: |
278 | sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer, | 265 | sk_reset_timer(sk, &hc->tx_no_feedback_timer, |
279 | jiffies + usecs_to_jiffies(t_nfb)); | 266 | jiffies + usecs_to_jiffies(t_nfb)); |
280 | out: | 267 | out: |
281 | bh_unlock_sock(sk); | 268 | bh_unlock_sock(sk); |
@@ -291,7 +278,7 @@ out: | |||
291 | static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb) | 278 | static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb) |
292 | { | 279 | { |
293 | struct dccp_sock *dp = dccp_sk(sk); | 280 | struct dccp_sock *dp = dccp_sk(sk); |
294 | struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); | 281 | struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk); |
295 | ktime_t now = ktime_get_real(); | 282 | ktime_t now = ktime_get_real(); |
296 | s64 delay; | 283 | s64 delay; |
297 | 284 | ||
@@ -303,18 +290,17 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb) | |||
303 | if (unlikely(skb->len == 0)) | 290 | if (unlikely(skb->len == 0)) |
304 | return -EBADMSG; | 291 | return -EBADMSG; |
305 | 292 | ||
306 | switch (hctx->ccid3hctx_state) { | 293 | switch (hc->tx_state) { |
307 | case TFRC_SSTATE_NO_SENT: | 294 | case TFRC_SSTATE_NO_SENT: |
308 | sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer, | 295 | sk_reset_timer(sk, &hc->tx_no_feedback_timer, (jiffies + |
309 | (jiffies + | 296 | usecs_to_jiffies(TFRC_INITIAL_TIMEOUT))); |
310 | usecs_to_jiffies(TFRC_INITIAL_TIMEOUT))); | 297 | hc->tx_last_win_count = 0; |
311 | hctx->ccid3hctx_last_win_count = 0; | 298 | hc->tx_t_last_win_count = now; |
312 | hctx->ccid3hctx_t_last_win_count = now; | ||
313 | 299 | ||
314 | /* Set t_0 for initial packet */ | 300 | /* Set t_0 for initial packet */ |
315 | hctx->ccid3hctx_t_nom = now; | 301 | hc->tx_t_nom = now; |
316 | 302 | ||
317 | hctx->ccid3hctx_s = skb->len; | 303 | hc->tx_s = skb->len; |
318 | 304 | ||
319 | /* | 305 | /* |
320 | * Use initial RTT sample when available: recommended by erratum | 306 | * Use initial RTT sample when available: recommended by erratum |
@@ -323,9 +309,9 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb) | |||
323 | */ | 309 | */ |
324 | if (dp->dccps_syn_rtt) { | 310 | if (dp->dccps_syn_rtt) { |
325 | ccid3_pr_debug("SYN RTT = %uus\n", dp->dccps_syn_rtt); | 311 | ccid3_pr_debug("SYN RTT = %uus\n", dp->dccps_syn_rtt); |
326 | hctx->ccid3hctx_rtt = dp->dccps_syn_rtt; | 312 | hc->tx_rtt = dp->dccps_syn_rtt; |
327 | hctx->ccid3hctx_x = rfc3390_initial_rate(sk); | 313 | hc->tx_x = rfc3390_initial_rate(sk); |
328 | hctx->ccid3hctx_t_ld = now; | 314 | hc->tx_t_ld = now; |
329 | } else { | 315 | } else { |
330 | /* | 316 | /* |
331 | * Sender does not have RTT sample: | 317 | * Sender does not have RTT sample: |
@@ -333,17 +319,17 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb) | |||
333 | * is needed in several parts (e.g. window counter); | 319 | * is needed in several parts (e.g. window counter); |
334 | * - set sending rate X_pps = 1pps as per RFC 3448, 4.2. | 320 | * - set sending rate X_pps = 1pps as per RFC 3448, 4.2. |
335 | */ | 321 | */ |
336 | hctx->ccid3hctx_rtt = DCCP_FALLBACK_RTT; | 322 | hc->tx_rtt = DCCP_FALLBACK_RTT; |
337 | hctx->ccid3hctx_x = hctx->ccid3hctx_s; | 323 | hc->tx_x = hc->tx_s; |
338 | hctx->ccid3hctx_x <<= 6; | 324 | hc->tx_x <<= 6; |
339 | } | 325 | } |
340 | ccid3_update_send_interval(hctx); | 326 | ccid3_update_send_interval(hc); |
341 | 327 | ||
342 | ccid3_hc_tx_set_state(sk, TFRC_SSTATE_NO_FBACK); | 328 | ccid3_hc_tx_set_state(sk, TFRC_SSTATE_NO_FBACK); |
343 | break; | 329 | break; |
344 | case TFRC_SSTATE_NO_FBACK: | 330 | case TFRC_SSTATE_NO_FBACK: |
345 | case TFRC_SSTATE_FBACK: | 331 | case TFRC_SSTATE_FBACK: |
346 | delay = ktime_us_delta(hctx->ccid3hctx_t_nom, now); | 332 | delay = ktime_us_delta(hc->tx_t_nom, now); |
347 | ccid3_pr_debug("delay=%ld\n", (long)delay); | 333 | ccid3_pr_debug("delay=%ld\n", (long)delay); |
348 | /* | 334 | /* |
349 | * Scheduling of packet transmissions [RFC 3448, 4.6] | 335 | * Scheduling of packet transmissions [RFC 3448, 4.6] |
@@ -353,10 +339,10 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb) | |||
353 | * else | 339 | * else |
354 | * // send the packet in (t_nom - t_now) milliseconds. | 340 | * // send the packet in (t_nom - t_now) milliseconds. |
355 | */ | 341 | */ |
356 | if (delay - (s64)hctx->ccid3hctx_delta >= 1000) | 342 | if (delay - (s64)hc->tx_delta >= 1000) |
357 | return (u32)delay / 1000L; | 343 | return (u32)delay / 1000L; |
358 | 344 | ||
359 | ccid3_hc_tx_update_win_count(hctx, now); | 345 | ccid3_hc_tx_update_win_count(hc, now); |
360 | break; | 346 | break; |
361 | case TFRC_SSTATE_TERM: | 347 | case TFRC_SSTATE_TERM: |
362 | DCCP_BUG("%s(%p) - Illegal state TERM", dccp_role(sk), sk); | 348 | DCCP_BUG("%s(%p) - Illegal state TERM", dccp_role(sk), sk); |
@@ -365,28 +351,27 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb) | |||
365 | 351 | ||
366 | /* prepare to send now (add options etc.) */ | 352 | /* prepare to send now (add options etc.) */ |
367 | dp->dccps_hc_tx_insert_options = 1; | 353 | dp->dccps_hc_tx_insert_options = 1; |
368 | DCCP_SKB_CB(skb)->dccpd_ccval = hctx->ccid3hctx_last_win_count; | 354 | DCCP_SKB_CB(skb)->dccpd_ccval = hc->tx_last_win_count; |
369 | 355 | ||
370 | /* set the nominal send time for the next following packet */ | 356 | /* set the nominal send time for the next following packet */ |
371 | hctx->ccid3hctx_t_nom = ktime_add_us(hctx->ccid3hctx_t_nom, | 357 | hc->tx_t_nom = ktime_add_us(hc->tx_t_nom, hc->tx_t_ipi); |
372 | hctx->ccid3hctx_t_ipi); | ||
373 | return 0; | 358 | return 0; |
374 | } | 359 | } |
375 | 360 | ||
376 | static void ccid3_hc_tx_packet_sent(struct sock *sk, int more, | 361 | static void ccid3_hc_tx_packet_sent(struct sock *sk, int more, |
377 | unsigned int len) | 362 | unsigned int len) |
378 | { | 363 | { |
379 | struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); | 364 | struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk); |
380 | 365 | ||
381 | ccid3_hc_tx_update_s(hctx, len); | 366 | ccid3_hc_tx_update_s(hc, len); |
382 | 367 | ||
383 | if (tfrc_tx_hist_add(&hctx->ccid3hctx_hist, dccp_sk(sk)->dccps_gss)) | 368 | if (tfrc_tx_hist_add(&hc->tx_hist, dccp_sk(sk)->dccps_gss)) |
384 | DCCP_CRIT("packet history - out of memory!"); | 369 | DCCP_CRIT("packet history - out of memory!"); |
385 | } | 370 | } |
386 | 371 | ||
387 | static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) | 372 | static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) |
388 | { | 373 | { |
389 | struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); | 374 | struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk); |
390 | struct ccid3_options_received *opt_recv; | 375 | struct ccid3_options_received *opt_recv; |
391 | ktime_t now; | 376 | ktime_t now; |
392 | unsigned long t_nfb; | 377 | unsigned long t_nfb; |
@@ -397,15 +382,15 @@ static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
397 | DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_DATAACK)) | 382 | DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_DATAACK)) |
398 | return; | 383 | return; |
399 | /* ... and only in the established state */ | 384 | /* ... and only in the established state */ |
400 | if (hctx->ccid3hctx_state != TFRC_SSTATE_FBACK && | 385 | if (hc->tx_state != TFRC_SSTATE_FBACK && |
401 | hctx->ccid3hctx_state != TFRC_SSTATE_NO_FBACK) | 386 | hc->tx_state != TFRC_SSTATE_NO_FBACK) |
402 | return; | 387 | return; |
403 | 388 | ||
404 | opt_recv = &hctx->ccid3hctx_options_received; | 389 | opt_recv = &hc->tx_options_received; |
405 | now = ktime_get_real(); | 390 | now = ktime_get_real(); |
406 | 391 | ||
407 | /* Estimate RTT from history if ACK number is valid */ | 392 | /* Estimate RTT from history if ACK number is valid */ |
408 | r_sample = tfrc_tx_hist_rtt(hctx->ccid3hctx_hist, | 393 | r_sample = tfrc_tx_hist_rtt(hc->tx_hist, |
409 | DCCP_SKB_CB(skb)->dccpd_ack_seq, now); | 394 | DCCP_SKB_CB(skb)->dccpd_ack_seq, now); |
410 | if (r_sample == 0) { | 395 | if (r_sample == 0) { |
411 | DCCP_WARN("%s(%p): %s with bogus ACK-%llu\n", dccp_role(sk), sk, | 396 | DCCP_WARN("%s(%p): %s with bogus ACK-%llu\n", dccp_role(sk), sk, |
@@ -415,37 +400,37 @@ static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
415 | } | 400 | } |
416 | 401 | ||
417 | /* Update receive rate in units of 64 * bytes/second */ | 402 | /* Update receive rate in units of 64 * bytes/second */ |
418 | hctx->ccid3hctx_x_recv = opt_recv->ccid3or_receive_rate; | 403 | hc->tx_x_recv = opt_recv->ccid3or_receive_rate; |
419 | hctx->ccid3hctx_x_recv <<= 6; | 404 | hc->tx_x_recv <<= 6; |
420 | 405 | ||
421 | /* Update loss event rate (which is scaled by 1e6) */ | 406 | /* Update loss event rate (which is scaled by 1e6) */ |
422 | pinv = opt_recv->ccid3or_loss_event_rate; | 407 | pinv = opt_recv->ccid3or_loss_event_rate; |
423 | if (pinv == ~0U || pinv == 0) /* see RFC 4342, 8.5 */ | 408 | if (pinv == ~0U || pinv == 0) /* see RFC 4342, 8.5 */ |
424 | hctx->ccid3hctx_p = 0; | 409 | hc->tx_p = 0; |
425 | else /* can not exceed 100% */ | 410 | else /* can not exceed 100% */ |
426 | hctx->ccid3hctx_p = scaled_div(1, pinv); | 411 | hc->tx_p = scaled_div(1, pinv); |
427 | /* | 412 | /* |
428 | * Validate new RTT sample and update moving average | 413 | * Validate new RTT sample and update moving average |
429 | */ | 414 | */ |
430 | r_sample = dccp_sample_rtt(sk, r_sample); | 415 | r_sample = dccp_sample_rtt(sk, r_sample); |
431 | hctx->ccid3hctx_rtt = tfrc_ewma(hctx->ccid3hctx_rtt, r_sample, 9); | 416 | hc->tx_rtt = tfrc_ewma(hc->tx_rtt, r_sample, 9); |
432 | /* | 417 | /* |
433 | * Update allowed sending rate X as per draft rfc3448bis-00, 4.2/3 | 418 | * Update allowed sending rate X as per draft rfc3448bis-00, 4.2/3 |
434 | */ | 419 | */ |
435 | if (hctx->ccid3hctx_state == TFRC_SSTATE_NO_FBACK) { | 420 | if (hc->tx_state == TFRC_SSTATE_NO_FBACK) { |
436 | ccid3_hc_tx_set_state(sk, TFRC_SSTATE_FBACK); | 421 | ccid3_hc_tx_set_state(sk, TFRC_SSTATE_FBACK); |
437 | 422 | ||
438 | if (hctx->ccid3hctx_t_rto == 0) { | 423 | if (hc->tx_t_rto == 0) { |
439 | /* | 424 | /* |
440 | * Initial feedback packet: Larger Initial Windows (4.2) | 425 | * Initial feedback packet: Larger Initial Windows (4.2) |
441 | */ | 426 | */ |
442 | hctx->ccid3hctx_x = rfc3390_initial_rate(sk); | 427 | hc->tx_x = rfc3390_initial_rate(sk); |
443 | hctx->ccid3hctx_t_ld = now; | 428 | hc->tx_t_ld = now; |
444 | 429 | ||
445 | ccid3_update_send_interval(hctx); | 430 | ccid3_update_send_interval(hc); |
446 | 431 | ||
447 | goto done_computing_x; | 432 | goto done_computing_x; |
448 | } else if (hctx->ccid3hctx_p == 0) { | 433 | } else if (hc->tx_p == 0) { |
449 | /* | 434 | /* |
450 | * First feedback after nofeedback timer expiry (4.3) | 435 | * First feedback after nofeedback timer expiry (4.3) |
451 | */ | 436 | */ |
@@ -454,25 +439,20 @@ static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
454 | } | 439 | } |
455 | 440 | ||
456 | /* Update sending rate (step 4 of [RFC 3448, 4.3]) */ | 441 | /* Update sending rate (step 4 of [RFC 3448, 4.3]) */ |
457 | if (hctx->ccid3hctx_p > 0) | 442 | if (hc->tx_p > 0) |
458 | hctx->ccid3hctx_x_calc = | 443 | hc->tx_x_calc = tfrc_calc_x(hc->tx_s, hc->tx_rtt, hc->tx_p); |
459 | tfrc_calc_x(hctx->ccid3hctx_s, | ||
460 | hctx->ccid3hctx_rtt, | ||
461 | hctx->ccid3hctx_p); | ||
462 | ccid3_hc_tx_update_x(sk, &now); | 444 | ccid3_hc_tx_update_x(sk, &now); |
463 | 445 | ||
464 | done_computing_x: | 446 | done_computing_x: |
465 | ccid3_pr_debug("%s(%p), RTT=%uus (sample=%uus), s=%u, " | 447 | ccid3_pr_debug("%s(%p), RTT=%uus (sample=%uus), s=%u, " |
466 | "p=%u, X_calc=%u, X_recv=%u, X=%u\n", | 448 | "p=%u, X_calc=%u, X_recv=%u, X=%u\n", |
467 | dccp_role(sk), | 449 | dccp_role(sk), sk, hc->tx_rtt, r_sample, |
468 | sk, hctx->ccid3hctx_rtt, r_sample, | 450 | hc->tx_s, hc->tx_p, hc->tx_x_calc, |
469 | hctx->ccid3hctx_s, hctx->ccid3hctx_p, | 451 | (unsigned)(hc->tx_x_recv >> 6), |
470 | hctx->ccid3hctx_x_calc, | 452 | (unsigned)(hc->tx_x >> 6)); |
471 | (unsigned)(hctx->ccid3hctx_x_recv >> 6), | ||
472 | (unsigned)(hctx->ccid3hctx_x >> 6)); | ||
473 | 453 | ||
474 | /* unschedule no feedback timer */ | 454 | /* unschedule no feedback timer */ |
475 | sk_stop_timer(sk, &hctx->ccid3hctx_no_feedback_timer); | 455 | sk_stop_timer(sk, &hc->tx_no_feedback_timer); |
476 | 456 | ||
477 | /* | 457 | /* |
478 | * As we have calculated new ipi, delta, t_nom it is possible | 458 | * As we have calculated new ipi, delta, t_nom it is possible |
@@ -486,21 +466,19 @@ done_computing_x: | |||
486 | * This can help avoid triggering the nofeedback timer too | 466 | * This can help avoid triggering the nofeedback timer too |
487 | * often ('spinning') on LANs with small RTTs. | 467 | * often ('spinning') on LANs with small RTTs. |
488 | */ | 468 | */ |
489 | hctx->ccid3hctx_t_rto = max_t(u32, 4 * hctx->ccid3hctx_rtt, | 469 | hc->tx_t_rto = max_t(u32, 4 * hc->tx_rtt, (CONFIG_IP_DCCP_CCID3_RTO * |
490 | (CONFIG_IP_DCCP_CCID3_RTO * | 470 | (USEC_PER_SEC / 1000))); |
491 | (USEC_PER_SEC / 1000))); | ||
492 | /* | 471 | /* |
493 | * Schedule no feedback timer to expire in | 472 | * Schedule no feedback timer to expire in |
494 | * max(t_RTO, 2 * s/X) = max(t_RTO, 2 * t_ipi) | 473 | * max(t_RTO, 2 * s/X) = max(t_RTO, 2 * t_ipi) |
495 | */ | 474 | */ |
496 | t_nfb = max(hctx->ccid3hctx_t_rto, 2 * hctx->ccid3hctx_t_ipi); | 475 | t_nfb = max(hc->tx_t_rto, 2 * hc->tx_t_ipi); |
497 | 476 | ||
498 | ccid3_pr_debug("%s(%p), Scheduled no feedback timer to " | 477 | ccid3_pr_debug("%s(%p), Scheduled no feedback timer to " |
499 | "expire in %lu jiffies (%luus)\n", | 478 | "expire in %lu jiffies (%luus)\n", |
500 | dccp_role(sk), | 479 | dccp_role(sk), sk, usecs_to_jiffies(t_nfb), t_nfb); |
501 | sk, usecs_to_jiffies(t_nfb), t_nfb); | ||
502 | 480 | ||
503 | sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer, | 481 | sk_reset_timer(sk, &hc->tx_no_feedback_timer, |
504 | jiffies + usecs_to_jiffies(t_nfb)); | 482 | jiffies + usecs_to_jiffies(t_nfb)); |
505 | } | 483 | } |
506 | 484 | ||
@@ -510,11 +488,11 @@ static int ccid3_hc_tx_parse_options(struct sock *sk, unsigned char option, | |||
510 | { | 488 | { |
511 | int rc = 0; | 489 | int rc = 0; |
512 | const struct dccp_sock *dp = dccp_sk(sk); | 490 | const struct dccp_sock *dp = dccp_sk(sk); |
513 | struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); | 491 | struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk); |
514 | struct ccid3_options_received *opt_recv; | 492 | struct ccid3_options_received *opt_recv; |
515 | __be32 opt_val; | 493 | __be32 opt_val; |
516 | 494 | ||
517 | opt_recv = &hctx->ccid3hctx_options_received; | 495 | opt_recv = &hc->tx_options_received; |
518 | 496 | ||
519 | if (opt_recv->ccid3or_seqno != dp->dccps_gsr) { | 497 | if (opt_recv->ccid3or_seqno != dp->dccps_gsr) { |
520 | opt_recv->ccid3or_seqno = dp->dccps_gsr; | 498 | opt_recv->ccid3or_seqno = dp->dccps_gsr; |
@@ -568,56 +546,55 @@ static int ccid3_hc_tx_parse_options(struct sock *sk, unsigned char option, | |||
568 | 546 | ||
569 | static int ccid3_hc_tx_init(struct ccid *ccid, struct sock *sk) | 547 | static int ccid3_hc_tx_init(struct ccid *ccid, struct sock *sk) |
570 | { | 548 | { |
571 | struct ccid3_hc_tx_sock *hctx = ccid_priv(ccid); | 549 | struct ccid3_hc_tx_sock *hc = ccid_priv(ccid); |
572 | 550 | ||
573 | hctx->ccid3hctx_state = TFRC_SSTATE_NO_SENT; | 551 | hc->tx_state = TFRC_SSTATE_NO_SENT; |
574 | hctx->ccid3hctx_hist = NULL; | 552 | hc->tx_hist = NULL; |
575 | setup_timer(&hctx->ccid3hctx_no_feedback_timer, | 553 | setup_timer(&hc->tx_no_feedback_timer, |
576 | ccid3_hc_tx_no_feedback_timer, (unsigned long)sk); | 554 | ccid3_hc_tx_no_feedback_timer, (unsigned long)sk); |
577 | |||
578 | return 0; | 555 | return 0; |
579 | } | 556 | } |
580 | 557 | ||
581 | static void ccid3_hc_tx_exit(struct sock *sk) | 558 | static void ccid3_hc_tx_exit(struct sock *sk) |
582 | { | 559 | { |
583 | struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); | 560 | struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk); |
584 | 561 | ||
585 | ccid3_hc_tx_set_state(sk, TFRC_SSTATE_TERM); | 562 | ccid3_hc_tx_set_state(sk, TFRC_SSTATE_TERM); |
586 | sk_stop_timer(sk, &hctx->ccid3hctx_no_feedback_timer); | 563 | sk_stop_timer(sk, &hc->tx_no_feedback_timer); |
587 | 564 | ||
588 | tfrc_tx_hist_purge(&hctx->ccid3hctx_hist); | 565 | tfrc_tx_hist_purge(&hc->tx_hist); |
589 | } | 566 | } |
590 | 567 | ||
591 | static void ccid3_hc_tx_get_info(struct sock *sk, struct tcp_info *info) | 568 | static void ccid3_hc_tx_get_info(struct sock *sk, struct tcp_info *info) |
592 | { | 569 | { |
593 | struct ccid3_hc_tx_sock *hctx; | 570 | struct ccid3_hc_tx_sock *hc; |
594 | 571 | ||
595 | /* Listen socks don't have a private CCID block */ | 572 | /* Listen socks don't have a private CCID block */ |
596 | if (sk->sk_state == DCCP_LISTEN) | 573 | if (sk->sk_state == DCCP_LISTEN) |
597 | return; | 574 | return; |
598 | 575 | ||
599 | hctx = ccid3_hc_tx_sk(sk); | 576 | hc = ccid3_hc_tx_sk(sk); |
600 | info->tcpi_rto = hctx->ccid3hctx_t_rto; | 577 | info->tcpi_rto = hc->tx_t_rto; |
601 | info->tcpi_rtt = hctx->ccid3hctx_rtt; | 578 | info->tcpi_rtt = hc->tx_rtt; |
602 | } | 579 | } |
603 | 580 | ||
604 | static int ccid3_hc_tx_getsockopt(struct sock *sk, const int optname, int len, | 581 | static int ccid3_hc_tx_getsockopt(struct sock *sk, const int optname, int len, |
605 | u32 __user *optval, int __user *optlen) | 582 | u32 __user *optval, int __user *optlen) |
606 | { | 583 | { |
607 | const struct ccid3_hc_tx_sock *hctx; | 584 | const struct ccid3_hc_tx_sock *hc; |
608 | const void *val; | 585 | const void *val; |
609 | 586 | ||
611 | /* Listen socks don't have a private CCID block */ | 588 | /* Listen socks don't have a private CCID block */ |
611 | if (sk->sk_state == DCCP_LISTEN) | 588 | if (sk->sk_state == DCCP_LISTEN) |
612 | return -EINVAL; | 589 | return -EINVAL; |
613 | 590 | ||
614 | hctx = ccid3_hc_tx_sk(sk); | 591 | hc = ccid3_hc_tx_sk(sk); |
615 | switch (optname) { | 592 | switch (optname) { |
616 | case DCCP_SOCKOPT_CCID_TX_INFO: | 593 | case DCCP_SOCKOPT_CCID_TX_INFO: |
617 | if (len < sizeof(hctx->ccid3hctx_tfrc)) | 594 | if (len < sizeof(hc->tx_tfrc)) |
618 | return -EINVAL; | 595 | return -EINVAL; |
619 | len = sizeof(hctx->ccid3hctx_tfrc); | 596 | len = sizeof(hc->tx_tfrc); |
620 | val = &hctx->ccid3hctx_tfrc; | 597 | val = &hc->tx_tfrc; |
621 | break; | 598 | break; |
622 | default: | 599 | default: |
623 | return -ENOPROTOOPT; | 600 | return -ENOPROTOOPT; |
@@ -657,34 +634,34 @@ static const char *ccid3_rx_state_name(enum ccid3_hc_rx_states state) | |||
657 | static void ccid3_hc_rx_set_state(struct sock *sk, | 634 | static void ccid3_hc_rx_set_state(struct sock *sk, |
658 | enum ccid3_hc_rx_states state) | 635 | enum ccid3_hc_rx_states state) |
659 | { | 636 | { |
660 | struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk); | 637 | struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk); |
661 | enum ccid3_hc_rx_states oldstate = hcrx->ccid3hcrx_state; | 638 | enum ccid3_hc_rx_states oldstate = hc->rx_state; |
662 | 639 | ||
663 | ccid3_pr_debug("%s(%p) %-8.8s -> %s\n", | 640 | ccid3_pr_debug("%s(%p) %-8.8s -> %s\n", |
664 | dccp_role(sk), sk, ccid3_rx_state_name(oldstate), | 641 | dccp_role(sk), sk, ccid3_rx_state_name(oldstate), |
665 | ccid3_rx_state_name(state)); | 642 | ccid3_rx_state_name(state)); |
666 | WARN_ON(state == oldstate); | 643 | WARN_ON(state == oldstate); |
667 | hcrx->ccid3hcrx_state = state; | 644 | hc->rx_state = state; |
668 | } | 645 | } |
669 | 646 | ||
670 | static void ccid3_hc_rx_send_feedback(struct sock *sk, | 647 | static void ccid3_hc_rx_send_feedback(struct sock *sk, |
671 | const struct sk_buff *skb, | 648 | const struct sk_buff *skb, |
672 | enum ccid3_fback_type fbtype) | 649 | enum ccid3_fback_type fbtype) |
673 | { | 650 | { |
674 | struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk); | 651 | struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk); |
675 | struct dccp_sock *dp = dccp_sk(sk); | 652 | struct dccp_sock *dp = dccp_sk(sk); |
676 | ktime_t now; | 653 | ktime_t now; |
677 | s64 delta = 0; | 654 | s64 delta = 0; |
678 | 655 | ||
679 | if (unlikely(hcrx->ccid3hcrx_state == TFRC_RSTATE_TERM)) | 656 | if (unlikely(hc->rx_state == TFRC_RSTATE_TERM)) |
680 | return; | 657 | return; |
681 | 658 | ||
682 | now = ktime_get_real(); | 659 | now = ktime_get_real(); |
683 | 660 | ||
684 | switch (fbtype) { | 661 | switch (fbtype) { |
685 | case CCID3_FBACK_INITIAL: | 662 | case CCID3_FBACK_INITIAL: |
686 | hcrx->ccid3hcrx_x_recv = 0; | 663 | hc->rx_x_recv = 0; |
687 | hcrx->ccid3hcrx_pinv = ~0U; /* see RFC 4342, 8.5 */ | 664 | hc->rx_pinv = ~0U; /* see RFC 4342, 8.5 */ |
688 | break; | 665 | break; |
689 | case CCID3_FBACK_PARAM_CHANGE: | 666 | case CCID3_FBACK_PARAM_CHANGE: |
690 | /* | 667 | /* |
@@ -697,27 +674,26 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk, | |||
697 | * the number of bytes since last feedback. | 674 | * the number of bytes since last feedback. |
698 | * This is a safe fallback, since X is bounded above by X_calc. | 675 | * This is a safe fallback, since X is bounded above by X_calc. |
699 | */ | 676 | */ |
700 | if (hcrx->ccid3hcrx_x_recv > 0) | 677 | if (hc->rx_x_recv > 0) |
701 | break; | 678 | break; |
702 | /* fall through */ | 679 | /* fall through */ |
703 | case CCID3_FBACK_PERIODIC: | 680 | case CCID3_FBACK_PERIODIC: |
704 | delta = ktime_us_delta(now, hcrx->ccid3hcrx_tstamp_last_feedback); | 681 | delta = ktime_us_delta(now, hc->rx_tstamp_last_feedback); |
705 | if (delta <= 0) | 682 | if (delta <= 0) |
706 | DCCP_BUG("delta (%ld) <= 0", (long)delta); | 683 | DCCP_BUG("delta (%ld) <= 0", (long)delta); |
707 | else | 684 | else |
708 | hcrx->ccid3hcrx_x_recv = | 685 | hc->rx_x_recv = scaled_div32(hc->rx_bytes_recv, delta); |
709 | scaled_div32(hcrx->ccid3hcrx_bytes_recv, delta); | ||
710 | break; | 686 | break; |
711 | default: | 687 | default: |
712 | return; | 688 | return; |
713 | } | 689 | } |
714 | 690 | ||
715 | ccid3_pr_debug("Interval %ldusec, X_recv=%u, 1/p=%u\n", (long)delta, | 691 | ccid3_pr_debug("Interval %ldusec, X_recv=%u, 1/p=%u\n", (long)delta, |
716 | hcrx->ccid3hcrx_x_recv, hcrx->ccid3hcrx_pinv); | 692 | hc->rx_x_recv, hc->rx_pinv); |
717 | 693 | ||
718 | hcrx->ccid3hcrx_tstamp_last_feedback = now; | 694 | hc->rx_tstamp_last_feedback = now; |
719 | hcrx->ccid3hcrx_last_counter = dccp_hdr(skb)->dccph_ccval; | 695 | hc->rx_last_counter = dccp_hdr(skb)->dccph_ccval; |
720 | hcrx->ccid3hcrx_bytes_recv = 0; | 696 | hc->rx_bytes_recv = 0; |
721 | 697 | ||
722 | dp->dccps_hc_rx_insert_options = 1; | 698 | dp->dccps_hc_rx_insert_options = 1; |
723 | dccp_send_ack(sk); | 699 | dccp_send_ack(sk); |
@@ -725,19 +701,19 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk, | |||
725 | 701 | ||
726 | static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb) | 702 | static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb) |
727 | { | 703 | { |
728 | const struct ccid3_hc_rx_sock *hcrx; | 704 | const struct ccid3_hc_rx_sock *hc; |
729 | __be32 x_recv, pinv; | 705 | __be32 x_recv, pinv; |
730 | 706 | ||
731 | if (!(sk->sk_state == DCCP_OPEN || sk->sk_state == DCCP_PARTOPEN)) | 707 | if (!(sk->sk_state == DCCP_OPEN || sk->sk_state == DCCP_PARTOPEN)) |
732 | return 0; | 708 | return 0; |
733 | 709 | ||
734 | hcrx = ccid3_hc_rx_sk(sk); | 710 | hc = ccid3_hc_rx_sk(sk); |
735 | 711 | ||
736 | if (dccp_packet_without_ack(skb)) | 712 | if (dccp_packet_without_ack(skb)) |
737 | return 0; | 713 | return 0; |
738 | 714 | ||
739 | x_recv = htonl(hcrx->ccid3hcrx_x_recv); | 715 | x_recv = htonl(hc->rx_x_recv); |
740 | pinv = htonl(hcrx->ccid3hcrx_pinv); | 716 | pinv = htonl(hc->rx_pinv); |
741 | 717 | ||
742 | if (dccp_insert_option(sk, skb, TFRC_OPT_LOSS_EVENT_RATE, | 718 | if (dccp_insert_option(sk, skb, TFRC_OPT_LOSS_EVENT_RATE, |
743 | &pinv, sizeof(pinv)) || | 719 | &pinv, sizeof(pinv)) || |
@@ -760,26 +736,26 @@ static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb) | |||
760 | */ | 736 | */ |
761 | static u32 ccid3_first_li(struct sock *sk) | 737 | static u32 ccid3_first_li(struct sock *sk) |
762 | { | 738 | { |
763 | struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk); | 739 | struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk); |
764 | u32 x_recv, p, delta; | 740 | u32 x_recv, p, delta; |
765 | u64 fval; | 741 | u64 fval; |
766 | 742 | ||
767 | if (hcrx->ccid3hcrx_rtt == 0) { | 743 | if (hc->rx_rtt == 0) { |
768 | DCCP_WARN("No RTT estimate available, using fallback RTT\n"); | 744 | DCCP_WARN("No RTT estimate available, using fallback RTT\n"); |
769 | hcrx->ccid3hcrx_rtt = DCCP_FALLBACK_RTT; | 745 | hc->rx_rtt = DCCP_FALLBACK_RTT; |
770 | } | 746 | } |
771 | 747 | ||
772 | delta = ktime_to_us(net_timedelta(hcrx->ccid3hcrx_tstamp_last_feedback)); | 748 | delta = ktime_to_us(net_timedelta(hc->rx_tstamp_last_feedback)); |
773 | x_recv = scaled_div32(hcrx->ccid3hcrx_bytes_recv, delta); | 749 | x_recv = scaled_div32(hc->rx_bytes_recv, delta); |
774 | if (x_recv == 0) { /* would also trigger divide-by-zero */ | 750 | if (x_recv == 0) { /* would also trigger divide-by-zero */ |
775 | DCCP_WARN("X_recv==0\n"); | 751 | DCCP_WARN("X_recv==0\n"); |
776 | if ((x_recv = hcrx->ccid3hcrx_x_recv) == 0) { | 752 | if ((x_recv = hc->rx_x_recv) == 0) { |
777 | DCCP_BUG("stored value of X_recv is zero"); | 753 | DCCP_BUG("stored value of X_recv is zero"); |
778 | return ~0U; | 754 | return ~0U; |
779 | } | 755 | } |
780 | } | 756 | } |
781 | 757 | ||
782 | fval = scaled_div(hcrx->ccid3hcrx_s, hcrx->ccid3hcrx_rtt); | 758 | fval = scaled_div(hc->rx_s, hc->rx_rtt); |
783 | fval = scaled_div32(fval, x_recv); | 759 | fval = scaled_div32(fval, x_recv); |
784 | p = tfrc_calc_x_reverse_lookup(fval); | 760 | p = tfrc_calc_x_reverse_lookup(fval); |
785 | 761 | ||
@@ -791,19 +767,19 @@ static u32 ccid3_first_li(struct sock *sk) | |||
791 | 767 | ||
792 | static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) | 768 | static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) |
793 | { | 769 | { |
794 | struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk); | 770 | struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk); |
795 | enum ccid3_fback_type do_feedback = CCID3_FBACK_NONE; | 771 | enum ccid3_fback_type do_feedback = CCID3_FBACK_NONE; |
796 | const u64 ndp = dccp_sk(sk)->dccps_options_received.dccpor_ndp; | 772 | const u64 ndp = dccp_sk(sk)->dccps_options_received.dccpor_ndp; |
797 | const bool is_data_packet = dccp_data_packet(skb); | 773 | const bool is_data_packet = dccp_data_packet(skb); |
798 | 774 | ||
799 | if (unlikely(hcrx->ccid3hcrx_state == TFRC_RSTATE_NO_DATA)) { | 775 | if (unlikely(hc->rx_state == TFRC_RSTATE_NO_DATA)) { |
800 | if (is_data_packet) { | 776 | if (is_data_packet) { |
801 | const u32 payload = skb->len - dccp_hdr(skb)->dccph_doff * 4; | 777 | const u32 payload = skb->len - dccp_hdr(skb)->dccph_doff * 4; |
802 | do_feedback = CCID3_FBACK_INITIAL; | 778 | do_feedback = CCID3_FBACK_INITIAL; |
803 | ccid3_hc_rx_set_state(sk, TFRC_RSTATE_DATA); | 779 | ccid3_hc_rx_set_state(sk, TFRC_RSTATE_DATA); |
804 | hcrx->ccid3hcrx_s = payload; | 780 | hc->rx_s = payload; |
805 | /* | 781 | /* |
806 | * Not necessary to update ccid3hcrx_bytes_recv here, | 782 | * Not necessary to update rx_bytes_recv here, |
807 | * since X_recv = 0 for the first feedback packet (cf. | 783 | * since X_recv = 0 for the first feedback packet (cf. |
808 | * RFC 3448, 6.3) -- gerrit | 784 | * RFC 3448, 6.3) -- gerrit |
809 | */ | 785 | */ |
@@ -811,7 +787,7 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
811 | goto update_records; | 787 | goto update_records; |
812 | } | 788 | } |
813 | 789 | ||
814 | if (tfrc_rx_hist_duplicate(&hcrx->ccid3hcrx_hist, skb)) | 790 | if (tfrc_rx_hist_duplicate(&hc->rx_hist, skb)) |
815 | return; /* done receiving */ | 791 | return; /* done receiving */ |
816 | 792 | ||
817 | if (is_data_packet) { | 793 | if (is_data_packet) { |
@@ -819,20 +795,20 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
819 | /* | 795 | /* |
820 | * Update moving-average of s and the sum of received payload bytes | 796 | * Update moving-average of s and the sum of received payload bytes |
821 | */ | 797 | */ |
822 | hcrx->ccid3hcrx_s = tfrc_ewma(hcrx->ccid3hcrx_s, payload, 9); | 798 | hc->rx_s = tfrc_ewma(hc->rx_s, payload, 9); |
823 | hcrx->ccid3hcrx_bytes_recv += payload; | 799 | hc->rx_bytes_recv += payload; |
824 | } | 800 | } |
825 | 801 | ||
826 | /* | 802 | /* |
827 | * Perform loss detection and handle pending losses | 803 | * Perform loss detection and handle pending losses |
828 | */ | 804 | */ |
829 | if (tfrc_rx_handle_loss(&hcrx->ccid3hcrx_hist, &hcrx->ccid3hcrx_li_hist, | 805 | if (tfrc_rx_handle_loss(&hc->rx_hist, &hc->rx_li_hist, |
830 | skb, ndp, ccid3_first_li, sk)) { | 806 | skb, ndp, ccid3_first_li, sk)) { |
831 | do_feedback = CCID3_FBACK_PARAM_CHANGE; | 807 | do_feedback = CCID3_FBACK_PARAM_CHANGE; |
832 | goto done_receiving; | 808 | goto done_receiving; |
833 | } | 809 | } |
834 | 810 | ||
835 | if (tfrc_rx_hist_loss_pending(&hcrx->ccid3hcrx_hist)) | 811 | if (tfrc_rx_hist_loss_pending(&hc->rx_hist)) |
836 | return; /* done receiving */ | 812 | return; /* done receiving */ |
837 | 813 | ||
838 | /* | 814 | /* |
@@ -841,17 +817,17 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
841 | if (unlikely(!is_data_packet)) | 817 | if (unlikely(!is_data_packet)) |
842 | goto update_records; | 818 | goto update_records; |
843 | 819 | ||
844 | if (!tfrc_lh_is_initialised(&hcrx->ccid3hcrx_li_hist)) { | 820 | if (!tfrc_lh_is_initialised(&hc->rx_li_hist)) { |
845 | const u32 sample = tfrc_rx_hist_sample_rtt(&hcrx->ccid3hcrx_hist, skb); | 821 | const u32 sample = tfrc_rx_hist_sample_rtt(&hc->rx_hist, skb); |
846 | /* | 822 | /* |
847 | * Empty loss history: no loss so far, hence p stays 0. | 823 | * Empty loss history: no loss so far, hence p stays 0. |
848 | * Sample RTT values, since an RTT estimate is required for the | 824 | * Sample RTT values, since an RTT estimate is required for the |
849 | * computation of p when the first loss occurs; RFC 3448, 6.3.1. | 825 | * computation of p when the first loss occurs; RFC 3448, 6.3.1. |
850 | */ | 826 | */ |
851 | if (sample != 0) | 827 | if (sample != 0) |
852 | hcrx->ccid3hcrx_rtt = tfrc_ewma(hcrx->ccid3hcrx_rtt, sample, 9); | 828 | hc->rx_rtt = tfrc_ewma(hc->rx_rtt, sample, 9); |
853 | 829 | ||
854 | } else if (tfrc_lh_update_i_mean(&hcrx->ccid3hcrx_li_hist, skb)) { | 830 | } else if (tfrc_lh_update_i_mean(&hc->rx_li_hist, skb)) { |
855 | /* | 831 | /* |
856 | * Step (3) of [RFC 3448, 6.1]: Recompute I_mean and, if I_mean | 832 | * Step (3) of [RFC 3448, 6.1]: Recompute I_mean and, if I_mean |
857 | * has decreased (resp. p has increased), send feedback now. | 833 | * has decreased (resp. p has increased), send feedback now. |
@@ -862,11 +838,11 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
862 | /* | 838 | /* |
863 | * Check if the periodic once-per-RTT feedback is due; RFC 4342, 10.3 | 839 | * Check if the periodic once-per-RTT feedback is due; RFC 4342, 10.3 |
864 | */ | 840 | */ |
865 | if (SUB16(dccp_hdr(skb)->dccph_ccval, hcrx->ccid3hcrx_last_counter) > 3) | 841 | if (SUB16(dccp_hdr(skb)->dccph_ccval, hc->rx_last_counter) > 3) |
866 | do_feedback = CCID3_FBACK_PERIODIC; | 842 | do_feedback = CCID3_FBACK_PERIODIC; |
867 | 843 | ||
868 | update_records: | 844 | update_records: |
869 | tfrc_rx_hist_add_packet(&hcrx->ccid3hcrx_hist, skb, ndp); | 845 | tfrc_rx_hist_add_packet(&hc->rx_hist, skb, ndp); |
870 | 846 | ||
871 | done_receiving: | 847 | done_receiving: |
872 | if (do_feedback) | 848 | if (do_feedback) |
@@ -875,41 +851,41 @@ done_receiving: | |||
875 | 851 | ||
876 | static int ccid3_hc_rx_init(struct ccid *ccid, struct sock *sk) | 852 | static int ccid3_hc_rx_init(struct ccid *ccid, struct sock *sk) |
877 | { | 853 | { |
878 | struct ccid3_hc_rx_sock *hcrx = ccid_priv(ccid); | 854 | struct ccid3_hc_rx_sock *hc = ccid_priv(ccid); |
879 | 855 | ||
880 | hcrx->ccid3hcrx_state = TFRC_RSTATE_NO_DATA; | 856 | hc->rx_state = TFRC_RSTATE_NO_DATA; |
881 | tfrc_lh_init(&hcrx->ccid3hcrx_li_hist); | 857 | tfrc_lh_init(&hc->rx_li_hist); |
882 | return tfrc_rx_hist_alloc(&hcrx->ccid3hcrx_hist); | 858 | return tfrc_rx_hist_alloc(&hc->rx_hist); |
883 | } | 859 | } |
884 | 860 | ||
885 | static void ccid3_hc_rx_exit(struct sock *sk) | 861 | static void ccid3_hc_rx_exit(struct sock *sk) |
886 | { | 862 | { |
887 | struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk); | 863 | struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk); |
888 | 864 | ||
889 | ccid3_hc_rx_set_state(sk, TFRC_RSTATE_TERM); | 865 | ccid3_hc_rx_set_state(sk, TFRC_RSTATE_TERM); |
890 | 866 | ||
891 | tfrc_rx_hist_purge(&hcrx->ccid3hcrx_hist); | 867 | tfrc_rx_hist_purge(&hc->rx_hist); |
892 | tfrc_lh_cleanup(&hcrx->ccid3hcrx_li_hist); | 868 | tfrc_lh_cleanup(&hc->rx_li_hist); |
893 | } | 869 | } |
894 | 870 | ||
895 | static void ccid3_hc_rx_get_info(struct sock *sk, struct tcp_info *info) | 871 | static void ccid3_hc_rx_get_info(struct sock *sk, struct tcp_info *info) |
896 | { | 872 | { |
897 | const struct ccid3_hc_rx_sock *hcrx; | 873 | const struct ccid3_hc_rx_sock *hc; |
898 | 874 | ||
899 | /* Listen socks don't have a private CCID block */ | 875 | /* Listen socks don't have a private CCID block */ |
900 | if (sk->sk_state == DCCP_LISTEN) | 876 | if (sk->sk_state == DCCP_LISTEN) |
901 | return; | 877 | return; |
902 | 878 | ||
903 | hcrx = ccid3_hc_rx_sk(sk); | 879 | hc = ccid3_hc_rx_sk(sk); |
904 | info->tcpi_ca_state = hcrx->ccid3hcrx_state; | 880 | info->tcpi_ca_state = hc->rx_state; |
905 | info->tcpi_options |= TCPI_OPT_TIMESTAMPS; | 881 | info->tcpi_options |= TCPI_OPT_TIMESTAMPS; |
906 | info->tcpi_rcv_rtt = hcrx->ccid3hcrx_rtt; | 882 | info->tcpi_rcv_rtt = hc->rx_rtt; |
907 | } | 883 | } |
908 | 884 | ||
909 | static int ccid3_hc_rx_getsockopt(struct sock *sk, const int optname, int len, | 885 | static int ccid3_hc_rx_getsockopt(struct sock *sk, const int optname, int len, |
910 | u32 __user *optval, int __user *optlen) | 886 | u32 __user *optval, int __user *optlen) |
911 | { | 887 | { |
912 | const struct ccid3_hc_rx_sock *hcrx; | 888 | const struct ccid3_hc_rx_sock *hc; |
913 | struct tfrc_rx_info rx_info; | 889 | struct tfrc_rx_info rx_info; |
914 | const void *val; | 890 | const void *val; |
915 | 891 | ||
@@ -917,15 +893,15 @@ static int ccid3_hc_rx_getsockopt(struct sock *sk, const int optname, int len, | |||
917 | if (sk->sk_state == DCCP_LISTEN) | 893 | if (sk->sk_state == DCCP_LISTEN) |
918 | return -EINVAL; | 894 | return -EINVAL; |
919 | 895 | ||
920 | hcrx = ccid3_hc_rx_sk(sk); | 896 | hc = ccid3_hc_rx_sk(sk); |
921 | switch (optname) { | 897 | switch (optname) { |
922 | case DCCP_SOCKOPT_CCID_RX_INFO: | 898 | case DCCP_SOCKOPT_CCID_RX_INFO: |
923 | if (len < sizeof(rx_info)) | 899 | if (len < sizeof(rx_info)) |
924 | return -EINVAL; | 900 | return -EINVAL; |
925 | rx_info.tfrcrx_x_recv = hcrx->ccid3hcrx_x_recv; | 901 | rx_info.tfrcrx_x_recv = hc->rx_x_recv; |
926 | rx_info.tfrcrx_rtt = hcrx->ccid3hcrx_rtt; | 902 | rx_info.tfrcrx_rtt = hc->rx_rtt; |
927 | rx_info.tfrcrx_p = hcrx->ccid3hcrx_pinv == 0 ? ~0U : | 903 | rx_info.tfrcrx_p = hc->rx_pinv == 0 ? ~0U : |
928 | scaled_div(1, hcrx->ccid3hcrx_pinv); | 904 | scaled_div(1, hc->rx_pinv); |
929 | len = sizeof(rx_info); | 905 | len = sizeof(rx_info); |
930 | val = &rx_info; | 906 | val = &rx_info; |
931 | break; | 907 | break; |
diff --git a/net/dccp/ccids/ccid3.h b/net/dccp/ccids/ccid3.h index e5a244143846..032635776653 100644 --- a/net/dccp/ccids/ccid3.h +++ b/net/dccp/ccids/ccid3.h | |||
@@ -75,44 +75,44 @@ enum ccid3_hc_tx_states { | |||
75 | 75 | ||
76 | /** | 76 | /** |
77 | * struct ccid3_hc_tx_sock - CCID3 sender half-connection socket | 77 | * struct ccid3_hc_tx_sock - CCID3 sender half-connection socket |
78 | * @ccid3hctx_x - Current sending rate in 64 * bytes per second | 78 | * @tx_x: Current sending rate in 64 * bytes per second |
79 | * @ccid3hctx_x_recv - Receive rate in 64 * bytes per second | 79 | * @tx_x_recv: Receive rate in 64 * bytes per second |
80 | * @ccid3hctx_x_calc - Calculated rate in bytes per second | 80 | * @tx_x_calc: Calculated rate in bytes per second |
81 | * @ccid3hctx_rtt - Estimate of current round trip time in usecs | 81 | * @tx_rtt: Estimate of current round trip time in usecs |
82 | * @ccid3hctx_p - Current loss event rate (0-1) scaled by 1000000 | 82 | * @tx_p: Current loss event rate (0-1) scaled by 1000000 |
83 | * @ccid3hctx_s - Packet size in bytes | 83 | * @tx_s: Packet size in bytes |
84 | * @ccid3hctx_t_rto - Nofeedback Timer setting in usecs | 84 | * @tx_t_rto: Nofeedback Timer setting in usecs |
85 | * @ccid3hctx_t_ipi - Interpacket (send) interval (RFC 3448, 4.6) in usecs | 85 | * @tx_t_ipi: Interpacket (send) interval (RFC 3448, 4.6) in usecs |
86 | * @ccid3hctx_state - Sender state, one of %ccid3_hc_tx_states | 86 | * @tx_state: Sender state, one of %ccid3_hc_tx_states |
87 | * @ccid3hctx_last_win_count - Last window counter sent | 87 | * @tx_last_win_count: Last window counter sent |
88 | * @ccid3hctx_t_last_win_count - Timestamp of earliest packet | 88 | * @tx_t_last_win_count: Timestamp of earliest packet |
89 | * with last_win_count value sent | 89 | * with last_win_count value sent |
90 | * @ccid3hctx_no_feedback_timer - Handle to no feedback timer | 90 | * @tx_no_feedback_timer: Handle to no feedback timer |
91 | * @ccid3hctx_t_ld - Time last doubled during slow start | 91 | * @tx_t_ld: Time last doubled during slow start |
92 | * @ccid3hctx_t_nom - Nominal send time of next packet | 92 | * @tx_t_nom: Nominal send time of next packet |
93 | * @ccid3hctx_delta - Send timer delta (RFC 3448, 4.6) in usecs | 93 | * @tx_delta: Send timer delta (RFC 3448, 4.6) in usecs |
94 | * @ccid3hctx_hist - Packet history | 94 | * @tx_hist: Packet history |
95 | * @ccid3hctx_options_received - Parsed set of retrieved options | 95 | * @tx_options_received: Parsed set of retrieved options |
96 | */ | 96 | */ |
97 | struct ccid3_hc_tx_sock { | 97 | struct ccid3_hc_tx_sock { |
98 | struct tfrc_tx_info ccid3hctx_tfrc; | 98 | struct tfrc_tx_info tx_tfrc; |
99 | #define ccid3hctx_x ccid3hctx_tfrc.tfrctx_x | 99 | #define tx_x tx_tfrc.tfrctx_x |
100 | #define ccid3hctx_x_recv ccid3hctx_tfrc.tfrctx_x_recv | 100 | #define tx_x_recv tx_tfrc.tfrctx_x_recv |
101 | #define ccid3hctx_x_calc ccid3hctx_tfrc.tfrctx_x_calc | 101 | #define tx_x_calc tx_tfrc.tfrctx_x_calc |
102 | #define ccid3hctx_rtt ccid3hctx_tfrc.tfrctx_rtt | 102 | #define tx_rtt tx_tfrc.tfrctx_rtt |
103 | #define ccid3hctx_p ccid3hctx_tfrc.tfrctx_p | 103 | #define tx_p tx_tfrc.tfrctx_p |
104 | #define ccid3hctx_t_rto ccid3hctx_tfrc.tfrctx_rto | 104 | #define tx_t_rto tx_tfrc.tfrctx_rto |
105 | #define ccid3hctx_t_ipi ccid3hctx_tfrc.tfrctx_ipi | 105 | #define tx_t_ipi tx_tfrc.tfrctx_ipi |
106 | u16 ccid3hctx_s; | 106 | u16 tx_s; |
107 | enum ccid3_hc_tx_states ccid3hctx_state:8; | 107 | enum ccid3_hc_tx_states tx_state:8; |
108 | u8 ccid3hctx_last_win_count; | 108 | u8 tx_last_win_count; |
109 | ktime_t ccid3hctx_t_last_win_count; | 109 | ktime_t tx_t_last_win_count; |
110 | struct timer_list ccid3hctx_no_feedback_timer; | 110 | struct timer_list tx_no_feedback_timer; |
111 | ktime_t ccid3hctx_t_ld; | 111 | ktime_t tx_t_ld; |
112 | ktime_t ccid3hctx_t_nom; | 112 | ktime_t tx_t_nom; |
113 | u32 ccid3hctx_delta; | 113 | u32 tx_delta; |
114 | struct tfrc_tx_hist_entry *ccid3hctx_hist; | 114 | struct tfrc_tx_hist_entry *tx_hist; |
115 | struct ccid3_options_received ccid3hctx_options_received; | 115 | struct ccid3_options_received tx_options_received; |
116 | }; | 116 | }; |
117 | 117 | ||
118 | static inline struct ccid3_hc_tx_sock *ccid3_hc_tx_sk(const struct sock *sk) | 118 | static inline struct ccid3_hc_tx_sock *ccid3_hc_tx_sk(const struct sock *sk) |
@@ -131,32 +131,32 @@ enum ccid3_hc_rx_states { | |||
131 | 131 | ||
132 | /** | 132 | /** |
133 | * struct ccid3_hc_rx_sock - CCID3 receiver half-connection socket | 133 | * struct ccid3_hc_rx_sock - CCID3 receiver half-connection socket |
134 | * @ccid3hcrx_x_recv - Receiver estimate of send rate (RFC 3448 4.3) | 134 | * @rx_x_recv: Receiver estimate of send rate (RFC 3448 4.3) |
135 | * @ccid3hcrx_rtt - Receiver estimate of rtt (non-standard) | 135 | * @rx_rtt: Receiver estimate of rtt (non-standard) |
136 | * @ccid3hcrx_p - Current loss event rate (RFC 3448 5.4) | 136 | * @rx_p: Current loss event rate (RFC 3448 5.4) |
137 | * @ccid3hcrx_last_counter - Tracks window counter (RFC 4342, 8.1) | 137 | * @rx_last_counter: Tracks window counter (RFC 4342, 8.1) |
138 | * @ccid3hcrx_state - Receiver state, one of %ccid3_hc_rx_states | 138 | * @rx_state: Receiver state, one of %ccid3_hc_rx_states |
139 | * @ccid3hcrx_bytes_recv - Total sum of DCCP payload bytes | 139 | * @rx_bytes_recv: Total sum of DCCP payload bytes |
140 | * @ccid3hcrx_x_recv - Receiver estimate of send rate (RFC 3448, sec. 4.3) | 140 | * @rx_x_recv: Receiver estimate of send rate (RFC 3448, sec. 4.3) |
141 | * @ccid3hcrx_rtt - Receiver estimate of RTT | 141 | * @rx_rtt: Receiver estimate of RTT |
142 | * @ccid3hcrx_tstamp_last_feedback - Time at which last feedback was sent | 142 | * @rx_tstamp_last_feedback: Time at which last feedback was sent |
143 | * @ccid3hcrx_tstamp_last_ack - Time at which last feedback was sent | 143 | * @rx_tstamp_last_ack: Time at which last feedback was sent |
144 | * @ccid3hcrx_hist - Packet history (loss detection + RTT sampling) | 144 | * @rx_hist: Packet history (loss detection + RTT sampling) |
145 | * @ccid3hcrx_li_hist - Loss Interval database | 145 | * @rx_li_hist: Loss Interval database |
146 | * @ccid3hcrx_s - Received packet size in bytes | 146 | * @rx_s: Received packet size in bytes |
147 | * @ccid3hcrx_pinv - Inverse of Loss Event Rate (RFC 4342, sec. 8.5) | 147 | * @rx_pinv: Inverse of Loss Event Rate (RFC 4342, sec. 8.5) |
148 | */ | 148 | */ |
149 | struct ccid3_hc_rx_sock { | 149 | struct ccid3_hc_rx_sock { |
150 | u8 ccid3hcrx_last_counter:4; | 150 | u8 rx_last_counter:4; |
151 | enum ccid3_hc_rx_states ccid3hcrx_state:8; | 151 | enum ccid3_hc_rx_states rx_state:8; |
152 | u32 ccid3hcrx_bytes_recv; | 152 | u32 rx_bytes_recv; |
153 | u32 ccid3hcrx_x_recv; | 153 | u32 rx_x_recv; |
154 | u32 ccid3hcrx_rtt; | 154 | u32 rx_rtt; |
155 | ktime_t ccid3hcrx_tstamp_last_feedback; | 155 | ktime_t rx_tstamp_last_feedback; |
156 | struct tfrc_rx_hist ccid3hcrx_hist; | 156 | struct tfrc_rx_hist rx_hist; |
157 | struct tfrc_loss_hist ccid3hcrx_li_hist; | 157 | struct tfrc_loss_hist rx_li_hist; |
158 | u16 ccid3hcrx_s; | 158 | u16 rx_s; |
159 | #define ccid3hcrx_pinv ccid3hcrx_li_hist.i_mean | 159 | #define rx_pinv rx_li_hist.i_mean |
160 | }; | 160 | }; |
161 | 161 | ||
162 | static inline struct ccid3_hc_rx_sock *ccid3_hc_rx_sk(const struct sock *sk) | 162 | static inline struct ccid3_hc_rx_sock *ccid3_hc_rx_sk(const struct sock *sk) |
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index 7302e1498d46..2423a0866733 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c | |||
@@ -62,10 +62,10 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
62 | nexthop = inet->opt->faddr; | 62 | nexthop = inet->opt->faddr; |
63 | } | 63 | } |
64 | 64 | ||
65 | tmp = ip_route_connect(&rt, nexthop, inet->saddr, | 65 | tmp = ip_route_connect(&rt, nexthop, inet->inet_saddr, |
66 | RT_CONN_FLAGS(sk), sk->sk_bound_dev_if, | 66 | RT_CONN_FLAGS(sk), sk->sk_bound_dev_if, |
67 | IPPROTO_DCCP, | 67 | IPPROTO_DCCP, |
68 | inet->sport, usin->sin_port, sk, 1); | 68 | inet->inet_sport, usin->sin_port, sk, 1); |
69 | if (tmp < 0) | 69 | if (tmp < 0) |
70 | return tmp; | 70 | return tmp; |
71 | 71 | ||
@@ -77,12 +77,12 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
77 | if (inet->opt == NULL || !inet->opt->srr) | 77 | if (inet->opt == NULL || !inet->opt->srr) |
78 | daddr = rt->rt_dst; | 78 | daddr = rt->rt_dst; |
79 | 79 | ||
80 | if (inet->saddr == 0) | 80 | if (inet->inet_saddr == 0) |
81 | inet->saddr = rt->rt_src; | 81 | inet->inet_saddr = rt->rt_src; |
82 | inet->rcv_saddr = inet->saddr; | 82 | inet->inet_rcv_saddr = inet->inet_saddr; |
83 | 83 | ||
84 | inet->dport = usin->sin_port; | 84 | inet->inet_dport = usin->sin_port; |
85 | inet->daddr = daddr; | 85 | inet->inet_daddr = daddr; |
86 | 86 | ||
87 | inet_csk(sk)->icsk_ext_hdr_len = 0; | 87 | inet_csk(sk)->icsk_ext_hdr_len = 0; |
88 | if (inet->opt != NULL) | 88 | if (inet->opt != NULL) |
@@ -98,17 +98,19 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
98 | if (err != 0) | 98 | if (err != 0) |
99 | goto failure; | 99 | goto failure; |
100 | 100 | ||
101 | err = ip_route_newports(&rt, IPPROTO_DCCP, inet->sport, inet->dport, | 101 | err = ip_route_newports(&rt, IPPROTO_DCCP, inet->inet_sport, |
102 | sk); | 102 | inet->inet_dport, sk); |
103 | if (err != 0) | 103 | if (err != 0) |
104 | goto failure; | 104 | goto failure; |
105 | 105 | ||
106 | /* OK, now commit destination to socket. */ | 106 | /* OK, now commit destination to socket. */ |
107 | sk_setup_caps(sk, &rt->u.dst); | 107 | sk_setup_caps(sk, &rt->u.dst); |
108 | 108 | ||
109 | dp->dccps_iss = secure_dccp_sequence_number(inet->saddr, inet->daddr, | 109 | dp->dccps_iss = secure_dccp_sequence_number(inet->inet_saddr, |
110 | inet->sport, inet->dport); | 110 | inet->inet_daddr, |
111 | inet->id = dp->dccps_iss ^ jiffies; | 111 | inet->inet_sport, |
112 | inet->inet_dport); | ||
113 | inet->inet_id = dp->dccps_iss ^ jiffies; | ||
112 | 114 | ||
113 | err = dccp_connect(sk); | 115 | err = dccp_connect(sk); |
114 | rt = NULL; | 116 | rt = NULL; |
@@ -123,7 +125,7 @@ failure: | |||
123 | dccp_set_state(sk, DCCP_CLOSED); | 125 | dccp_set_state(sk, DCCP_CLOSED); |
124 | ip_rt_put(rt); | 126 | ip_rt_put(rt); |
125 | sk->sk_route_caps = 0; | 127 | sk->sk_route_caps = 0; |
126 | inet->dport = 0; | 128 | inet->inet_dport = 0; |
127 | goto out; | 129 | goto out; |
128 | } | 130 | } |
129 | 131 | ||
@@ -352,7 +354,9 @@ void dccp_v4_send_check(struct sock *sk, int unused, struct sk_buff *skb) | |||
352 | struct dccp_hdr *dh = dccp_hdr(skb); | 354 | struct dccp_hdr *dh = dccp_hdr(skb); |
353 | 355 | ||
354 | dccp_csum_outgoing(skb); | 356 | dccp_csum_outgoing(skb); |
355 | dh->dccph_checksum = dccp_v4_csum_finish(skb, inet->saddr, inet->daddr); | 357 | dh->dccph_checksum = dccp_v4_csum_finish(skb, |
358 | inet->inet_saddr, | ||
359 | inet->inet_daddr); | ||
356 | } | 360 | } |
357 | 361 | ||
358 | EXPORT_SYMBOL_GPL(dccp_v4_send_check); | 362 | EXPORT_SYMBOL_GPL(dccp_v4_send_check); |
@@ -393,14 +397,14 @@ struct sock *dccp_v4_request_recv_sock(struct sock *sk, struct sk_buff *skb, | |||
393 | 397 | ||
394 | newinet = inet_sk(newsk); | 398 | newinet = inet_sk(newsk); |
395 | ireq = inet_rsk(req); | 399 | ireq = inet_rsk(req); |
396 | newinet->daddr = ireq->rmt_addr; | 400 | newinet->inet_daddr = ireq->rmt_addr; |
397 | newinet->rcv_saddr = ireq->loc_addr; | 401 | newinet->inet_rcv_saddr = ireq->loc_addr; |
398 | newinet->saddr = ireq->loc_addr; | 402 | newinet->inet_saddr = ireq->loc_addr; |
399 | newinet->opt = ireq->opt; | 403 | newinet->opt = ireq->opt; |
400 | ireq->opt = NULL; | 404 | ireq->opt = NULL; |
401 | newinet->mc_index = inet_iif(skb); | 405 | newinet->mc_index = inet_iif(skb); |
402 | newinet->mc_ttl = ip_hdr(skb)->ttl; | 406 | newinet->mc_ttl = ip_hdr(skb)->ttl; |
403 | newinet->id = jiffies; | 407 | newinet->inet_id = jiffies; |
404 | 408 | ||
405 | dccp_sync_mss(newsk, dst_mtu(dst)); | 409 | dccp_sync_mss(newsk, dst_mtu(dst)); |
406 | 410 | ||
@@ -987,7 +991,6 @@ static struct inet_protosw dccp_v4_protosw = { | |||
987 | .protocol = IPPROTO_DCCP, | 991 | .protocol = IPPROTO_DCCP, |
988 | .prot = &dccp_v4_prot, | 992 | .prot = &dccp_v4_prot, |
989 | .ops = &inet_dccp_ops, | 993 | .ops = &inet_dccp_ops, |
990 | .capability = -1, | ||
991 | .no_check = 0, | 994 | .no_check = 0, |
992 | .flags = INET_PROTOSW_ICSK, | 995 | .flags = INET_PROTOSW_ICSK, |
993 | }; | 996 | }; |
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index e48ca5d45658..50ea91a77705 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c | |||
@@ -158,8 +158,8 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
158 | ipv6_addr_copy(&fl.fl6_dst, &np->daddr); | 158 | ipv6_addr_copy(&fl.fl6_dst, &np->daddr); |
159 | ipv6_addr_copy(&fl.fl6_src, &np->saddr); | 159 | ipv6_addr_copy(&fl.fl6_src, &np->saddr); |
160 | fl.oif = sk->sk_bound_dev_if; | 160 | fl.oif = sk->sk_bound_dev_if; |
161 | fl.fl_ip_dport = inet->dport; | 161 | fl.fl_ip_dport = inet->inet_dport; |
162 | fl.fl_ip_sport = inet->sport; | 162 | fl.fl_ip_sport = inet->inet_sport; |
163 | security_sk_classify_flow(sk, &fl); | 163 | security_sk_classify_flow(sk, &fl); |
164 | 164 | ||
165 | err = ip6_dst_lookup(sk, &dst, &fl); | 165 | err = ip6_dst_lookup(sk, &dst, &fl); |
@@ -510,11 +510,9 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk, | |||
510 | 510 | ||
511 | memcpy(newnp, np, sizeof(struct ipv6_pinfo)); | 511 | memcpy(newnp, np, sizeof(struct ipv6_pinfo)); |
512 | 512 | ||
513 | ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF), | 513 | ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr); |
514 | newinet->daddr); | ||
515 | 514 | ||
516 | ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF), | 515 | ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr); |
517 | newinet->saddr); | ||
518 | 516 | ||
519 | ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr); | 517 | ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr); |
520 | 518 | ||
@@ -642,7 +640,8 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk, | |||
642 | 640 | ||
643 | dccp_sync_mss(newsk, dst_mtu(dst)); | 641 | dccp_sync_mss(newsk, dst_mtu(dst)); |
644 | 642 | ||
645 | newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6; | 643 | newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6; |
644 | newinet->inet_rcv_saddr = LOOPBACK4_IPV6; | ||
646 | 645 | ||
647 | __inet6_hash(newsk); | 646 | __inet6_hash(newsk); |
648 | __inet_inherit_port(sk, newsk); | 647 | __inet_inherit_port(sk, newsk); |
@@ -970,12 +969,9 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr, | |||
970 | icsk->icsk_af_ops = &dccp_ipv6_af_ops; | 969 | icsk->icsk_af_ops = &dccp_ipv6_af_ops; |
971 | sk->sk_backlog_rcv = dccp_v6_do_rcv; | 970 | sk->sk_backlog_rcv = dccp_v6_do_rcv; |
972 | goto failure; | 971 | goto failure; |
973 | } else { | ||
974 | ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF), | ||
975 | inet->saddr); | ||
976 | ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF), | ||
977 | inet->rcv_saddr); | ||
978 | } | 972 | } |
973 | ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr); | ||
974 | ipv6_addr_set_v4mapped(inet->inet_rcv_saddr, &np->rcv_saddr); | ||
979 | 975 | ||
980 | return err; | 976 | return err; |
981 | } | 977 | } |
@@ -988,7 +984,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr, | |||
988 | ipv6_addr_copy(&fl.fl6_src, saddr ? saddr : &np->saddr); | 984 | ipv6_addr_copy(&fl.fl6_src, saddr ? saddr : &np->saddr); |
989 | fl.oif = sk->sk_bound_dev_if; | 985 | fl.oif = sk->sk_bound_dev_if; |
990 | fl.fl_ip_dport = usin->sin6_port; | 986 | fl.fl_ip_dport = usin->sin6_port; |
991 | fl.fl_ip_sport = inet->sport; | 987 | fl.fl_ip_sport = inet->inet_sport; |
992 | security_sk_classify_flow(sk, &fl); | 988 | security_sk_classify_flow(sk, &fl); |
993 | 989 | ||
994 | if (np->opt != NULL && np->opt->srcrt != NULL) { | 990 | if (np->opt != NULL && np->opt->srcrt != NULL) { |
@@ -1021,7 +1017,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr, | |||
1021 | 1017 | ||
1022 | /* set the source address */ | 1018 | /* set the source address */ |
1023 | ipv6_addr_copy(&np->saddr, saddr); | 1019 | ipv6_addr_copy(&np->saddr, saddr); |
1024 | inet->rcv_saddr = LOOPBACK4_IPV6; | 1020 | inet->inet_rcv_saddr = LOOPBACK4_IPV6; |
1025 | 1021 | ||
1026 | __ip6_dst_store(sk, dst, NULL, NULL); | 1022 | __ip6_dst_store(sk, dst, NULL, NULL); |
1027 | 1023 | ||
@@ -1030,7 +1026,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr, | |||
1030 | icsk->icsk_ext_hdr_len = (np->opt->opt_flen + | 1026 | icsk->icsk_ext_hdr_len = (np->opt->opt_flen + |
1031 | np->opt->opt_nflen); | 1027 | np->opt->opt_nflen); |
1032 | 1028 | ||
1033 | inet->dport = usin->sin6_port; | 1029 | inet->inet_dport = usin->sin6_port; |
1034 | 1030 | ||
1035 | dccp_set_state(sk, DCCP_REQUESTING); | 1031 | dccp_set_state(sk, DCCP_REQUESTING); |
1036 | err = inet6_hash_connect(&dccp_death_row, sk); | 1032 | err = inet6_hash_connect(&dccp_death_row, sk); |
@@ -1039,7 +1035,8 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr, | |||
1039 | 1035 | ||
1040 | dp->dccps_iss = secure_dccpv6_sequence_number(np->saddr.s6_addr32, | 1036 | dp->dccps_iss = secure_dccpv6_sequence_number(np->saddr.s6_addr32, |
1041 | np->daddr.s6_addr32, | 1037 | np->daddr.s6_addr32, |
1042 | inet->sport, inet->dport); | 1038 | inet->inet_sport, |
1039 | inet->inet_dport); | ||
1043 | err = dccp_connect(sk); | 1040 | err = dccp_connect(sk); |
1044 | if (err) | 1041 | if (err) |
1045 | goto late_failure; | 1042 | goto late_failure; |
@@ -1050,7 +1047,7 @@ late_failure: | |||
1050 | dccp_set_state(sk, DCCP_CLOSED); | 1047 | dccp_set_state(sk, DCCP_CLOSED); |
1051 | __sk_dst_reset(sk); | 1048 | __sk_dst_reset(sk); |
1052 | failure: | 1049 | failure: |
1053 | inet->dport = 0; | 1050 | inet->inet_dport = 0; |
1054 | sk->sk_route_caps = 0; | 1051 | sk->sk_route_caps = 0; |
1055 | return err; | 1052 | return err; |
1056 | } | 1053 | } |
@@ -1188,7 +1185,6 @@ static struct inet_protosw dccp_v6_protosw = { | |||
1188 | .protocol = IPPROTO_DCCP, | 1185 | .protocol = IPPROTO_DCCP, |
1189 | .prot = &dccp_v6_prot, | 1186 | .prot = &dccp_v6_prot, |
1190 | .ops = &inet6_dccp_ops, | 1187 | .ops = &inet6_dccp_ops, |
1191 | .capability = -1, | ||
1192 | .flags = INET_PROTOSW_ICSK, | 1188 | .flags = INET_PROTOSW_ICSK, |
1193 | }; | 1189 | }; |
1194 | 1190 | ||
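The ipv6.c hunks above replace the open-coded ipv6_addr_set(..., htonl(0x0000FFFF), addr) sequences with ipv6_addr_set_v4mapped(). A minimal sketch of that helper, written to match the calls it replaces:

	static inline void ipv6_addr_set_v4mapped(const __be32 addr,
						  struct in6_addr *v4mapped)
	{
		/* an IPv4-mapped IPv6 address has the form ::ffff:a.b.c.d */
		ipv6_addr_set(v4mapped, 0, 0, htonl(0x0000FFFF), addr);
	}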
diff --git a/net/dccp/output.c b/net/dccp/output.c index c96119fda688..d6bb753bf6ad 100644 --- a/net/dccp/output.c +++ b/net/dccp/output.c | |||
@@ -99,8 +99,8 @@ static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb) | |||
99 | /* Build DCCP header and checksum it. */ | 99 | /* Build DCCP header and checksum it. */ |
100 | dh = dccp_zeroed_hdr(skb, dccp_header_size); | 100 | dh = dccp_zeroed_hdr(skb, dccp_header_size); |
101 | dh->dccph_type = dcb->dccpd_type; | 101 | dh->dccph_type = dcb->dccpd_type; |
102 | dh->dccph_sport = inet->sport; | 102 | dh->dccph_sport = inet->inet_sport; |
103 | dh->dccph_dport = inet->dport; | 103 | dh->dccph_dport = inet->inet_dport; |
104 | dh->dccph_doff = (dccp_header_size + dcb->dccpd_opt_len) / 4; | 104 | dh->dccph_doff = (dccp_header_size + dcb->dccpd_opt_len) / 4; |
105 | dh->dccph_ccval = dcb->dccpd_ccval; | 105 | dh->dccph_ccval = dcb->dccpd_ccval; |
106 | dh->dccph_cscov = dp->dccps_pcslen; | 106 | dh->dccph_cscov = dp->dccps_pcslen; |
diff --git a/net/dccp/probe.c b/net/dccp/probe.c index 37731da41481..dc328425fa20 100644 --- a/net/dccp/probe.c +++ b/net/dccp/probe.c | |||
@@ -75,26 +75,25 @@ static int jdccp_sendmsg(struct kiocb *iocb, struct sock *sk, | |||
75 | struct msghdr *msg, size_t size) | 75 | struct msghdr *msg, size_t size) |
76 | { | 76 | { |
77 | const struct inet_sock *inet = inet_sk(sk); | 77 | const struct inet_sock *inet = inet_sk(sk); |
78 | struct ccid3_hc_tx_sock *hctx = NULL; | 78 | struct ccid3_hc_tx_sock *hc = NULL; |
79 | 79 | ||
80 | if (ccid_get_current_tx_ccid(dccp_sk(sk)) == DCCPC_CCID3) | 80 | if (ccid_get_current_tx_ccid(dccp_sk(sk)) == DCCPC_CCID3) |
81 | hctx = ccid3_hc_tx_sk(sk); | 81 | hc = ccid3_hc_tx_sk(sk); |
82 | 82 | ||
83 | if (port == 0 || ntohs(inet->dport) == port || | 83 | if (port == 0 || ntohs(inet->inet_dport) == port || |
84 | ntohs(inet->sport) == port) { | 84 | ntohs(inet->inet_sport) == port) { |
85 | if (hctx) | 85 | if (hc) |
86 | printl("%pI4:%u %pI4:%u %d %d %d %d %u " | 86 | printl("%pI4:%u %pI4:%u %d %d %d %d %u %llu %llu %d\n", |
87 | "%llu %llu %d\n", | 87 | &inet->inet_saddr, ntohs(inet->inet_sport), |
88 | &inet->saddr, ntohs(inet->sport), | 88 | &inet->inet_daddr, ntohs(inet->inet_dport), size, |
89 | &inet->daddr, ntohs(inet->dport), size, | 89 | hc->tx_s, hc->tx_rtt, hc->tx_p, |
90 | hctx->ccid3hctx_s, hctx->ccid3hctx_rtt, | 90 | hc->tx_x_calc, hc->tx_x_recv >> 6, |
91 | hctx->ccid3hctx_p, hctx->ccid3hctx_x_calc, | 91 | hc->tx_x >> 6, hc->tx_t_ipi); |
92 | hctx->ccid3hctx_x_recv >> 6, | ||
93 | hctx->ccid3hctx_x >> 6, hctx->ccid3hctx_t_ipi); | ||
94 | else | 92 | else |
95 | printl("%pI4:%u %pI4:%u %d\n", | 93 | printl("%pI4:%u %pI4:%u %d\n", |
96 | &inet->saddr, ntohs(inet->sport), | 94 | &inet->inet_saddr, ntohs(inet->inet_sport), |
97 | &inet->daddr, ntohs(inet->dport), size); | 95 | &inet->inet_daddr, ntohs(inet->inet_dport), |
96 | size); | ||
98 | } | 97 | } |
99 | 98 | ||
100 | jprobe_return(); | 99 | jprobe_return(); |
diff --git a/net/dccp/proto.c b/net/dccp/proto.c index a156319fd0ac..671cd1413d59 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c | |||
@@ -278,7 +278,7 @@ int dccp_disconnect(struct sock *sk, int flags) | |||
278 | sk->sk_send_head = NULL; | 278 | sk->sk_send_head = NULL; |
279 | } | 279 | } |
280 | 280 | ||
281 | inet->dport = 0; | 281 | inet->inet_dport = 0; |
282 | 282 | ||
283 | if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) | 283 | if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) |
284 | inet_reset_saddr(sk); | 284 | inet_reset_saddr(sk); |
@@ -290,7 +290,7 @@ int dccp_disconnect(struct sock *sk, int flags) | |||
290 | inet_csk_delack_init(sk); | 290 | inet_csk_delack_init(sk); |
291 | __sk_dst_reset(sk); | 291 | __sk_dst_reset(sk); |
292 | 292 | ||
293 | WARN_ON(inet->num && !icsk->icsk_bind_hash); | 293 | WARN_ON(inet->inet_num && !icsk->icsk_bind_hash); |
294 | 294 | ||
295 | sk->sk_error_report(sk); | 295 | sk->sk_error_report(sk); |
296 | return err; | 296 | return err; |
@@ -1060,11 +1060,12 @@ static int __init dccp_init(void) | |||
1060 | for (ehash_order = 0; (1UL << ehash_order) < goal; ehash_order++) | 1060 | for (ehash_order = 0; (1UL << ehash_order) < goal; ehash_order++) |
1061 | ; | 1061 | ; |
1062 | do { | 1062 | do { |
1063 | dccp_hashinfo.ehash_size = (1UL << ehash_order) * PAGE_SIZE / | 1063 | unsigned long hash_size = (1UL << ehash_order) * PAGE_SIZE / |
1064 | sizeof(struct inet_ehash_bucket); | 1064 | sizeof(struct inet_ehash_bucket); |
1065 | while (dccp_hashinfo.ehash_size & | 1065 | |
1066 | (dccp_hashinfo.ehash_size - 1)) | 1066 | while (hash_size & (hash_size - 1)) |
1067 | dccp_hashinfo.ehash_size--; | 1067 | hash_size--; |
1068 | dccp_hashinfo.ehash_mask = hash_size - 1; | ||
1068 | dccp_hashinfo.ehash = (struct inet_ehash_bucket *) | 1069 | dccp_hashinfo.ehash = (struct inet_ehash_bucket *) |
1069 | __get_free_pages(GFP_ATOMIC|__GFP_NOWARN, ehash_order); | 1070 | __get_free_pages(GFP_ATOMIC|__GFP_NOWARN, ehash_order); |
1070 | } while (!dccp_hashinfo.ehash && --ehash_order > 0); | 1071 | } while (!dccp_hashinfo.ehash && --ehash_order > 0); |
@@ -1074,7 +1075,7 @@ static int __init dccp_init(void) | |||
1074 | goto out_free_bind_bucket_cachep; | 1075 | goto out_free_bind_bucket_cachep; |
1075 | } | 1076 | } |
1076 | 1077 | ||
1077 | for (i = 0; i < dccp_hashinfo.ehash_size; i++) { | 1078 | for (i = 0; i <= dccp_hashinfo.ehash_mask; i++) { |
1078 | INIT_HLIST_NULLS_HEAD(&dccp_hashinfo.ehash[i].chain, i); | 1079 | INIT_HLIST_NULLS_HEAD(&dccp_hashinfo.ehash[i].chain, i); |
1079 | INIT_HLIST_NULLS_HEAD(&dccp_hashinfo.ehash[i].twchain, i); | 1080 | INIT_HLIST_NULLS_HEAD(&dccp_hashinfo.ehash[i].twchain, i); |
1080 | } | 1081 | } |
@@ -1153,7 +1154,7 @@ static void __exit dccp_fini(void) | |||
1153 | get_order(dccp_hashinfo.bhash_size * | 1154 | get_order(dccp_hashinfo.bhash_size * |
1154 | sizeof(struct inet_bind_hashbucket))); | 1155 | sizeof(struct inet_bind_hashbucket))); |
1155 | free_pages((unsigned long)dccp_hashinfo.ehash, | 1156 | free_pages((unsigned long)dccp_hashinfo.ehash, |
1156 | get_order(dccp_hashinfo.ehash_size * | 1157 | get_order((dccp_hashinfo.ehash_mask + 1) * |
1157 | sizeof(struct inet_ehash_bucket))); | 1158 | sizeof(struct inet_ehash_bucket))); |
1158 | inet_ehash_locks_free(&dccp_hashinfo); | 1159 | inet_ehash_locks_free(&dccp_hashinfo); |
1159 | kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep); | 1160 | kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep); |
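The proto.c change above stops storing the ehash table size and keeps a power-of-two mask instead (ehash_mask = size - 1). A hedged illustration of why: bucket selection becomes a single AND, and the size can still be recovered as ehash_mask + 1 on the free path. The function name below is illustrative only:

	static inline struct inet_ehash_bucket *
	dccp_ehash_bucket(struct inet_hashinfo *hashinfo, unsigned int hash)
	{
		/* the table size is a power of two, so (size - 1) acts as a mask */
		return &hashinfo->ehash[hash & hashinfo->ehash_mask];
	}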
diff --git a/net/dccp/timer.c b/net/dccp/timer.c index 162d1e683c39..bbfeb5eae46a 100644 --- a/net/dccp/timer.c +++ b/net/dccp/timer.c | |||
@@ -38,7 +38,7 @@ static int dccp_write_timeout(struct sock *sk) | |||
38 | 38 | ||
39 | if (sk->sk_state == DCCP_REQUESTING || sk->sk_state == DCCP_PARTOPEN) { | 39 | if (sk->sk_state == DCCP_REQUESTING || sk->sk_state == DCCP_PARTOPEN) { |
40 | if (icsk->icsk_retransmits != 0) | 40 | if (icsk->icsk_retransmits != 0) |
41 | dst_negative_advice(&sk->sk_dst_cache); | 41 | dst_negative_advice(&sk->sk_dst_cache, sk); |
42 | retry_until = icsk->icsk_syn_retries ? | 42 | retry_until = icsk->icsk_syn_retries ? |
43 | : sysctl_dccp_request_retries; | 43 | : sysctl_dccp_request_retries; |
44 | } else { | 44 | } else { |
@@ -63,7 +63,7 @@ static int dccp_write_timeout(struct sock *sk) | |||
63 | Golden words :-). | 63 | Golden words :-). |
64 | */ | 64 | */ |
65 | 65 | ||
66 | dst_negative_advice(&sk->sk_dst_cache); | 66 | dst_negative_advice(&sk->sk_dst_cache, sk); |
67 | } | 67 | } |
68 | 68 | ||
69 | retry_until = sysctl_dccp_retries2; | 69 | retry_until = sysctl_dccp_retries2; |
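The timer.c call sites now pass the socket to dst_negative_advice(). A hedged sketch of the signature these callers assume; the body is illustrative (the presumed intent is to drop per-socket state cached alongside the dst when the route is abandoned), not a copy of the real helper:

	static inline void dst_negative_advice(struct dst_entry **dst_p,
					       struct sock *sk)
	{
		struct dst_entry *dst = *dst_p;

		if (dst && dst->ops->negative_advice) {
			*dst_p = dst->ops->negative_advice(dst);
			if (*dst_p != dst)
				sk_tx_queue_clear(sk); /* assumption: reset cached tx queue */
		}
	}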
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c index 7a58c87baf17..9ade3a6de954 100644 --- a/net/decnet/af_decnet.c +++ b/net/decnet/af_decnet.c | |||
@@ -675,7 +675,8 @@ char *dn_addr2asc(__u16 addr, char *buf) | |||
675 | 675 | ||
676 | 676 | ||
677 | 677 | ||
678 | static int dn_create(struct net *net, struct socket *sock, int protocol) | 678 | static int dn_create(struct net *net, struct socket *sock, int protocol, |
679 | int kern) | ||
679 | { | 680 | { |
680 | struct sock *sk; | 681 | struct sock *sk; |
681 | 682 | ||
@@ -749,9 +750,9 @@ static int dn_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |||
749 | 750 | ||
750 | if (!(saddr->sdn_flags & SDF_WILD)) { | 751 | if (!(saddr->sdn_flags & SDF_WILD)) { |
751 | if (le16_to_cpu(saddr->sdn_nodeaddrl)) { | 752 | if (le16_to_cpu(saddr->sdn_nodeaddrl)) { |
752 | read_lock(&dev_base_lock); | 753 | rcu_read_lock(); |
753 | ldev = NULL; | 754 | ldev = NULL; |
754 | for_each_netdev(&init_net, dev) { | 755 | for_each_netdev_rcu(&init_net, dev) { |
755 | if (!dev->dn_ptr) | 756 | if (!dev->dn_ptr) |
756 | continue; | 757 | continue; |
757 | if (dn_dev_islocal(dev, dn_saddr2dn(saddr))) { | 758 | if (dn_dev_islocal(dev, dn_saddr2dn(saddr))) { |
@@ -759,7 +760,7 @@ static int dn_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |||
759 | break; | 760 | break; |
760 | } | 761 | } |
761 | } | 762 | } |
762 | read_unlock(&dev_base_lock); | 763 | rcu_read_unlock(); |
763 | if (ldev == NULL) | 764 | if (ldev == NULL) |
764 | return -EADDRNOTAVAIL; | 765 | return -EADDRNOTAVAIL; |
765 | } | 766 | } |
@@ -1955,7 +1956,7 @@ static int dn_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
1955 | } | 1956 | } |
1956 | 1957 | ||
1957 | if ((flags & MSG_TRYHARD) && sk->sk_dst_cache) | 1958 | if ((flags & MSG_TRYHARD) && sk->sk_dst_cache) |
1958 | dst_negative_advice(&sk->sk_dst_cache); | 1959 | dst_negative_advice(&sk->sk_dst_cache, sk); |
1959 | 1960 | ||
1960 | mss = scp->segsize_rem; | 1961 | mss = scp->segsize_rem; |
1961 | fctype = scp->services_rem & NSP_FC_MASK; | 1962 | fctype = scp->services_rem & NSP_FC_MASK; |
@@ -2325,7 +2326,7 @@ static const struct file_operations dn_socket_seq_fops = { | |||
2325 | }; | 2326 | }; |
2326 | #endif | 2327 | #endif |
2327 | 2328 | ||
2328 | static struct net_proto_family dn_family_ops = { | 2329 | static const struct net_proto_family dn_family_ops = { |
2329 | .family = AF_DECnet, | 2330 | .family = AF_DECnet, |
2330 | .create = dn_create, | 2331 | .create = dn_create, |
2331 | .owner = THIS_MODULE, | 2332 | .owner = THIS_MODULE, |
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c index 6e1f085db06a..6c916e2b8a84 100644 --- a/net/decnet/dn_dev.c +++ b/net/decnet/dn_dev.c | |||
@@ -68,7 +68,7 @@ extern struct neigh_table dn_neigh_table; | |||
68 | */ | 68 | */ |
69 | __le16 decnet_address = 0; | 69 | __le16 decnet_address = 0; |
70 | 70 | ||
71 | static DEFINE_RWLOCK(dndev_lock); | 71 | static DEFINE_SPINLOCK(dndev_lock); |
72 | static struct net_device *decnet_default_device; | 72 | static struct net_device *decnet_default_device; |
73 | static BLOCKING_NOTIFIER_HEAD(dnaddr_chain); | 73 | static BLOCKING_NOTIFIER_HEAD(dnaddr_chain); |
74 | 74 | ||
@@ -557,7 +557,8 @@ rarok: | |||
557 | struct net_device *dn_dev_get_default(void) | 557 | struct net_device *dn_dev_get_default(void) |
558 | { | 558 | { |
559 | struct net_device *dev; | 559 | struct net_device *dev; |
560 | read_lock(&dndev_lock); | 560 | |
561 | spin_lock(&dndev_lock); | ||
561 | dev = decnet_default_device; | 562 | dev = decnet_default_device; |
562 | if (dev) { | 563 | if (dev) { |
563 | if (dev->dn_ptr) | 564 | if (dev->dn_ptr) |
@@ -565,7 +566,8 @@ struct net_device *dn_dev_get_default(void) | |||
565 | else | 566 | else |
566 | dev = NULL; | 567 | dev = NULL; |
567 | } | 568 | } |
568 | read_unlock(&dndev_lock); | 569 | spin_unlock(&dndev_lock); |
570 | |||
569 | return dev; | 571 | return dev; |
570 | } | 572 | } |
571 | 573 | ||
@@ -575,13 +577,15 @@ int dn_dev_set_default(struct net_device *dev, int force) | |||
575 | int rv = -EBUSY; | 577 | int rv = -EBUSY; |
576 | if (!dev->dn_ptr) | 578 | if (!dev->dn_ptr) |
577 | return -ENODEV; | 579 | return -ENODEV; |
578 | write_lock(&dndev_lock); | 580 | |
581 | spin_lock(&dndev_lock); | ||
579 | if (force || decnet_default_device == NULL) { | 582 | if (force || decnet_default_device == NULL) { |
580 | old = decnet_default_device; | 583 | old = decnet_default_device; |
581 | decnet_default_device = dev; | 584 | decnet_default_device = dev; |
582 | rv = 0; | 585 | rv = 0; |
583 | } | 586 | } |
584 | write_unlock(&dndev_lock); | 587 | spin_unlock(&dndev_lock); |
588 | |||
585 | if (old) | 589 | if (old) |
586 | dev_put(old); | 590 | dev_put(old); |
587 | return rv; | 591 | return rv; |
@@ -589,26 +593,29 @@ int dn_dev_set_default(struct net_device *dev, int force) | |||
589 | 593 | ||
590 | static void dn_dev_check_default(struct net_device *dev) | 594 | static void dn_dev_check_default(struct net_device *dev) |
591 | { | 595 | { |
592 | write_lock(&dndev_lock); | 596 | spin_lock(&dndev_lock); |
593 | if (dev == decnet_default_device) { | 597 | if (dev == decnet_default_device) { |
594 | decnet_default_device = NULL; | 598 | decnet_default_device = NULL; |
595 | } else { | 599 | } else { |
596 | dev = NULL; | 600 | dev = NULL; |
597 | } | 601 | } |
598 | write_unlock(&dndev_lock); | 602 | spin_unlock(&dndev_lock); |
603 | |||
599 | if (dev) | 604 | if (dev) |
600 | dev_put(dev); | 605 | dev_put(dev); |
601 | } | 606 | } |
602 | 607 | ||
608 | /* | ||
609 | * Called with RTNL | ||
610 | */ | ||
603 | static struct dn_dev *dn_dev_by_index(int ifindex) | 611 | static struct dn_dev *dn_dev_by_index(int ifindex) |
604 | { | 612 | { |
605 | struct net_device *dev; | 613 | struct net_device *dev; |
606 | struct dn_dev *dn_dev = NULL; | 614 | struct dn_dev *dn_dev = NULL; |
607 | dev = dev_get_by_index(&init_net, ifindex); | 615 | |
608 | if (dev) { | 616 | dev = __dev_get_by_index(&init_net, ifindex); |
617 | if (dev) | ||
609 | dn_dev = dev->dn_ptr; | 618 | dn_dev = dev->dn_ptr; |
610 | dev_put(dev); | ||
611 | } | ||
612 | 619 | ||
613 | return dn_dev; | 620 | return dn_dev; |
614 | } | 621 | } |
@@ -826,13 +833,17 @@ static int dn_dev_get_first(struct net_device *dev, __le16 *addr) | |||
826 | struct dn_dev *dn_db = (struct dn_dev *)dev->dn_ptr; | 833 | struct dn_dev *dn_db = (struct dn_dev *)dev->dn_ptr; |
827 | struct dn_ifaddr *ifa; | 834 | struct dn_ifaddr *ifa; |
828 | int rv = -ENODEV; | 835 | int rv = -ENODEV; |
836 | |||
829 | if (dn_db == NULL) | 837 | if (dn_db == NULL) |
830 | goto out; | 838 | goto out; |
839 | |||
840 | rtnl_lock(); | ||
831 | ifa = dn_db->ifa_list; | 841 | ifa = dn_db->ifa_list; |
832 | if (ifa != NULL) { | 842 | if (ifa != NULL) { |
833 | *addr = ifa->ifa_local; | 843 | *addr = ifa->ifa_local; |
834 | rv = 0; | 844 | rv = 0; |
835 | } | 845 | } |
846 | rtnl_unlock(); | ||
836 | out: | 847 | out: |
837 | return rv; | 848 | return rv; |
838 | } | 849 | } |
@@ -854,9 +865,7 @@ int dn_dev_bind_default(__le16 *addr) | |||
854 | dev = dn_dev_get_default(); | 865 | dev = dn_dev_get_default(); |
855 | last_chance: | 866 | last_chance: |
856 | if (dev) { | 867 | if (dev) { |
857 | read_lock(&dev_base_lock); | ||
858 | rv = dn_dev_get_first(dev, addr); | 868 | rv = dn_dev_get_first(dev, addr); |
859 | read_unlock(&dev_base_lock); | ||
860 | dev_put(dev); | 869 | dev_put(dev); |
861 | if (rv == 0 || dev == init_net.loopback_dev) | 870 | if (rv == 0 || dev == init_net.loopback_dev) |
862 | return rv; | 871 | return rv; |
@@ -1321,18 +1330,18 @@ static inline int is_dn_dev(struct net_device *dev) | |||
1321 | } | 1330 | } |
1322 | 1331 | ||
1323 | static void *dn_dev_seq_start(struct seq_file *seq, loff_t *pos) | 1332 | static void *dn_dev_seq_start(struct seq_file *seq, loff_t *pos) |
1324 | __acquires(&dev_base_lock) | 1333 | __acquires(rcu) |
1325 | { | 1334 | { |
1326 | int i; | 1335 | int i; |
1327 | struct net_device *dev; | 1336 | struct net_device *dev; |
1328 | 1337 | ||
1329 | read_lock(&dev_base_lock); | 1338 | rcu_read_lock(); |
1330 | 1339 | ||
1331 | if (*pos == 0) | 1340 | if (*pos == 0) |
1332 | return SEQ_START_TOKEN; | 1341 | return SEQ_START_TOKEN; |
1333 | 1342 | ||
1334 | i = 1; | 1343 | i = 1; |
1335 | for_each_netdev(&init_net, dev) { | 1344 | for_each_netdev_rcu(&init_net, dev) { |
1336 | if (!is_dn_dev(dev)) | 1345 | if (!is_dn_dev(dev)) |
1337 | continue; | 1346 | continue; |
1338 | 1347 | ||
@@ -1353,7 +1362,7 @@ static void *dn_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
1353 | if (v == SEQ_START_TOKEN) | 1362 | if (v == SEQ_START_TOKEN) |
1354 | dev = net_device_entry(&init_net.dev_base_head); | 1363 | dev = net_device_entry(&init_net.dev_base_head); |
1355 | 1364 | ||
1356 | for_each_netdev_continue(&init_net, dev) { | 1365 | for_each_netdev_continue_rcu(&init_net, dev) { |
1357 | if (!is_dn_dev(dev)) | 1366 | if (!is_dn_dev(dev)) |
1358 | continue; | 1367 | continue; |
1359 | 1368 | ||
@@ -1364,9 +1373,9 @@ static void *dn_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
1364 | } | 1373 | } |
1365 | 1374 | ||
1366 | static void dn_dev_seq_stop(struct seq_file *seq, void *v) | 1375 | static void dn_dev_seq_stop(struct seq_file *seq, void *v) |
1367 | __releases(&dev_base_lock) | 1376 | __releases(rcu) |
1368 | { | 1377 | { |
1369 | read_unlock(&dev_base_lock); | 1378 | rcu_read_unlock(); |
1370 | } | 1379 | } |
1371 | 1380 | ||
1372 | static char *dn_type2asc(char type) | 1381 | static char *dn_type2asc(char type) |
diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c index 27ea2e9b080a..fd641f65e092 100644 --- a/net/decnet/dn_fib.c +++ b/net/decnet/dn_fib.c | |||
@@ -607,8 +607,8 @@ static void dn_fib_del_ifaddr(struct dn_ifaddr *ifa) | |||
607 | ASSERT_RTNL(); | 607 | ASSERT_RTNL(); |
608 | 608 | ||
609 | /* Scan device list */ | 609 | /* Scan device list */ |
610 | read_lock(&dev_base_lock); | 610 | rcu_read_lock(); |
611 | for_each_netdev(&init_net, dev) { | 611 | for_each_netdev_rcu(&init_net, dev) { |
612 | dn_db = dev->dn_ptr; | 612 | dn_db = dev->dn_ptr; |
613 | if (dn_db == NULL) | 613 | if (dn_db == NULL) |
614 | continue; | 614 | continue; |
@@ -619,7 +619,7 @@ static void dn_fib_del_ifaddr(struct dn_ifaddr *ifa) | |||
619 | } | 619 | } |
620 | } | 620 | } |
621 | } | 621 | } |
622 | read_unlock(&dev_base_lock); | 622 | rcu_read_unlock(); |
623 | 623 | ||
624 | if (found_it == 0) { | 624 | if (found_it == 0) { |
625 | fib_magic(RTM_DELROUTE, RTN_LOCAL, ifa->ifa_local, 16, ifa); | 625 | fib_magic(RTM_DELROUTE, RTN_LOCAL, ifa->ifa_local, 16, ifa); |
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c index 57662cabaf9b..860286a3921b 100644 --- a/net/decnet/dn_route.c +++ b/net/decnet/dn_route.c | |||
@@ -908,8 +908,8 @@ static int dn_route_output_slow(struct dst_entry **pprt, const struct flowi *old | |||
908 | dev_put(dev_out); | 908 | dev_put(dev_out); |
909 | goto out; | 909 | goto out; |
910 | } | 910 | } |
911 | read_lock(&dev_base_lock); | 911 | rcu_read_lock(); |
912 | for_each_netdev(&init_net, dev) { | 912 | for_each_netdev_rcu(&init_net, dev) { |
913 | if (!dev->dn_ptr) | 913 | if (!dev->dn_ptr) |
914 | continue; | 914 | continue; |
915 | if (!dn_dev_islocal(dev, oldflp->fld_src)) | 915 | if (!dn_dev_islocal(dev, oldflp->fld_src)) |
@@ -922,7 +922,7 @@ static int dn_route_output_slow(struct dst_entry **pprt, const struct flowi *old | |||
922 | dev_out = dev; | 922 | dev_out = dev; |
923 | break; | 923 | break; |
924 | } | 924 | } |
925 | read_unlock(&dev_base_lock); | 925 | rcu_read_unlock(); |
926 | if (dev_out == NULL) | 926 | if (dev_out == NULL) |
927 | goto out; | 927 | goto out; |
928 | dev_hold(dev_out); | 928 | dev_hold(dev_out); |
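The DECnet hunks above (af_decnet.c, dn_dev.c, dn_fib.c, dn_route.c) all follow the same pattern: the dev_base_lock rwlock around device-list walks is replaced by an RCU read-side section using for_each_netdev_rcu(). A minimal sketch of that pattern under those assumptions; the function name is illustrative:

	static struct net_device *dn_find_local_dev(__le16 addr)
	{
		struct net_device *dev, *found = NULL;

		rcu_read_lock();
		for_each_netdev_rcu(&init_net, dev) {
			if (!dev->dn_ptr)
				continue;
			if (dn_dev_islocal(dev, addr)) {
				dev_hold(dev);	/* take a reference before leaving the RCU section */
				found = dev;
				break;
			}
		}
		rcu_read_unlock();

		return found;
	}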
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c index 0e0254fd767d..596679803de5 100644 --- a/net/econet/af_econet.c +++ b/net/econet/af_econet.c | |||
@@ -457,15 +457,15 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
457 | iov[0].iov_len = size; | 457 | iov[0].iov_len = size; |
458 | for (i = 0; i < msg->msg_iovlen; i++) { | 458 | for (i = 0; i < msg->msg_iovlen; i++) { |
459 | void __user *base = msg->msg_iov[i].iov_base; | 459 | void __user *base = msg->msg_iov[i].iov_base; |
460 | size_t len = msg->msg_iov[i].iov_len; | 460 | size_t iov_len = msg->msg_iov[i].iov_len; |
461 | /* Check it now since we switch to KERNEL_DS later. */ | 461 | /* Check it now since we switch to KERNEL_DS later. */ |
462 | if (!access_ok(VERIFY_READ, base, len)) { | 462 | if (!access_ok(VERIFY_READ, base, iov_len)) { |
463 | mutex_unlock(&econet_mutex); | 463 | mutex_unlock(&econet_mutex); |
464 | return -EFAULT; | 464 | return -EFAULT; |
465 | } | 465 | } |
466 | iov[i+1].iov_base = base; | 466 | iov[i+1].iov_base = base; |
467 | iov[i+1].iov_len = len; | 467 | iov[i+1].iov_len = iov_len; |
468 | size += len; | 468 | size += iov_len; |
469 | } | 469 | } |
470 | 470 | ||
471 | /* Get a skbuff (no data, just holds our cb information) */ | 471 | /* Get a skbuff (no data, just holds our cb information) */ |
@@ -605,7 +605,8 @@ static struct proto econet_proto = { | |||
605 | * Create an Econet socket | 605 | * Create an Econet socket |
606 | */ | 606 | */ |
607 | 607 | ||
608 | static int econet_create(struct net *net, struct socket *sock, int protocol) | 608 | static int econet_create(struct net *net, struct socket *sock, int protocol, |
609 | int kern) | ||
609 | { | 610 | { |
610 | struct sock *sk; | 611 | struct sock *sk; |
611 | struct econet_sock *eo; | 612 | struct econet_sock *eo; |
@@ -742,7 +743,7 @@ static int econet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg | |||
742 | return 0; | 743 | return 0; |
743 | } | 744 | } |
744 | 745 | ||
745 | static struct net_proto_family econet_family_ops = { | 746 | static const struct net_proto_family econet_family_ops = { |
746 | .family = PF_ECONET, | 747 | .family = PF_ECONET, |
747 | .create = econet_create, | 748 | .create = econet_create, |
748 | .owner = THIS_MODULE, | 749 | .owner = THIS_MODULE, |
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c index 5a883affecd3..dd3db88f8f0a 100644 --- a/net/ethernet/eth.c +++ b/net/ethernet/eth.c | |||
@@ -393,10 +393,3 @@ ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len) | |||
393 | return ((ssize_t) l); | 393 | return ((ssize_t) l); |
394 | } | 394 | } |
395 | EXPORT_SYMBOL(sysfs_format_mac); | 395 | EXPORT_SYMBOL(sysfs_format_mac); |
396 | |||
397 | char *print_mac(char *buf, const unsigned char *addr) | ||
398 | { | ||
399 | _format_mac_addr(buf, MAC_BUF_SIZE, addr, ETH_ALEN); | ||
400 | return buf; | ||
401 | } | ||
402 | EXPORT_SYMBOL(print_mac); | ||
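print_mac() can go because printk and the pr_* helpers accept the %pM specifier for MAC addresses directly. An illustrative replacement call site (the helper name is hypothetical):

	static void example_log_mac(const struct net_device *dev)
	{
		/* %pM formats a 6-byte hardware address as xx:xx:xx:xx:xx:xx */
		pr_info("%s: hardware address %pM\n", dev->name, dev->dev_addr);
	}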
diff --git a/net/ieee802154/Makefile b/net/ieee802154/Makefile index 4068a9f5113e..ce2d33582859 100644 --- a/net/ieee802154/Makefile +++ b/net/ieee802154/Makefile | |||
@@ -1,5 +1,5 @@ | |||
1 | obj-$(CONFIG_IEEE802154) += nl802154.o af_802154.o wpan-class.o | 1 | obj-$(CONFIG_IEEE802154) += ieee802154.o af_802154.o |
2 | nl802154-y := netlink.o nl_policy.o | 2 | ieee802154-y := netlink.o nl-mac.o nl-phy.o nl_policy.o wpan-class.o |
3 | af_802154-y := af_ieee802154.o raw.o dgram.o | 3 | af_802154-y := af_ieee802154.o raw.o dgram.o |
4 | 4 | ||
5 | ccflags-y += -Wall -DDEBUG | 5 | ccflags-y += -Wall -DDEBUG |
diff --git a/net/ieee802154/af_ieee802154.c b/net/ieee802154/af_ieee802154.c index cd949d5e451b..de6e34d2a7f8 100644 --- a/net/ieee802154/af_ieee802154.c +++ b/net/ieee802154/af_ieee802154.c | |||
@@ -234,7 +234,7 @@ static const struct proto_ops ieee802154_dgram_ops = { | |||
234 | * set the state. | 234 | * set the state. |
235 | */ | 235 | */ |
236 | static int ieee802154_create(struct net *net, struct socket *sock, | 236 | static int ieee802154_create(struct net *net, struct socket *sock, |
237 | int protocol) | 237 | int protocol, int kern) |
238 | { | 238 | { |
239 | struct sock *sk; | 239 | struct sock *sk; |
240 | int rc; | 240 | int rc; |
@@ -285,7 +285,7 @@ out: | |||
285 | return rc; | 285 | return rc; |
286 | } | 286 | } |
287 | 287 | ||
288 | static struct net_proto_family ieee802154_family_ops = { | 288 | static const struct net_proto_family ieee802154_family_ops = { |
289 | .family = PF_IEEE802154, | 289 | .family = PF_IEEE802154, |
290 | .create = ieee802154_create, | 290 | .create = ieee802154_create, |
291 | .owner = THIS_MODULE, | 291 | .owner = THIS_MODULE, |
diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c index a413b1bf4465..9aac5aee1575 100644 --- a/net/ieee802154/dgram.c +++ b/net/ieee802154/dgram.c | |||
@@ -303,7 +303,7 @@ static int dgram_recvmsg(struct kiocb *iocb, struct sock *sk, | |||
303 | if (err) | 303 | if (err) |
304 | goto done; | 304 | goto done; |
305 | 305 | ||
306 | sock_recv_timestamp(msg, sk, skb); | 306 | sock_recv_ts_and_drops(msg, sk, skb); |
307 | 307 | ||
308 | if (flags & MSG_TRUNC) | 308 | if (flags & MSG_TRUNC) |
309 | copied = skb->len; | 309 | copied = skb->len; |
@@ -318,7 +318,6 @@ out: | |||
318 | static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb) | 318 | static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb) |
319 | { | 319 | { |
320 | if (sock_queue_rcv_skb(sk, skb) < 0) { | 320 | if (sock_queue_rcv_skb(sk, skb) < 0) { |
321 | atomic_inc(&sk->sk_drops); | ||
322 | kfree_skb(skb); | 321 | kfree_skb(skb); |
323 | return NET_RX_DROP; | 322 | return NET_RX_DROP; |
324 | } | 323 | } |
diff --git a/net/ieee802154/ieee802154.h b/net/ieee802154/ieee802154.h new file mode 100644 index 000000000000..aadec428e6ec --- /dev/null +++ b/net/ieee802154/ieee802154.h | |||
@@ -0,0 +1,53 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2007, 2008, 2009 Siemens AG | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 | ||
6 | * as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License along | ||
14 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
15 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
16 | * | ||
17 | */ | ||
18 | #ifndef IEEE_802154_LOCAL_H | ||
19 | #define IEEE_802154_LOCAL_H | ||
20 | |||
21 | int __init ieee802154_nl_init(void); | ||
22 | void __exit ieee802154_nl_exit(void); | ||
23 | |||
24 | #define IEEE802154_OP(_cmd, _func) \ | ||
25 | { \ | ||
26 | .cmd = _cmd, \ | ||
27 | .policy = ieee802154_policy, \ | ||
28 | .doit = _func, \ | ||
29 | .dumpit = NULL, \ | ||
30 | .flags = GENL_ADMIN_PERM, \ | ||
31 | } | ||
32 | |||
33 | #define IEEE802154_DUMP(_cmd, _func, _dump) \ | ||
34 | { \ | ||
35 | .cmd = _cmd, \ | ||
36 | .policy = ieee802154_policy, \ | ||
37 | .doit = _func, \ | ||
38 | .dumpit = _dump, \ | ||
39 | } | ||
40 | |||
41 | struct genl_info; | ||
42 | |||
43 | struct sk_buff *ieee802154_nl_create(int flags, u8 req); | ||
44 | int ieee802154_nl_mcast(struct sk_buff *msg, unsigned int group); | ||
45 | struct sk_buff *ieee802154_nl_new_reply(struct genl_info *info, | ||
46 | int flags, u8 req); | ||
47 | int ieee802154_nl_reply(struct sk_buff *msg, struct genl_info *info); | ||
48 | |||
49 | extern struct genl_family nl802154_family; | ||
50 | int nl802154_mac_register(void); | ||
51 | int nl802154_phy_register(void); | ||
52 | |||
53 | #endif | ||
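The IEEE802154_OP/IEEE802154_DUMP macros land in this new header so the split-out nl-mac.c and nl-phy.c can build their struct genl_ops tables the same way the old monolithic netlink.c did. A short usage sketch, reusing command names from the code removed below:

	static struct genl_ops ieee802154_coordinator_ops[] = {
		IEEE802154_OP(IEEE802154_ASSOCIATE_REQ, ieee802154_associate_req),
		IEEE802154_OP(IEEE802154_START_REQ, ieee802154_start_req),
		IEEE802154_DUMP(IEEE802154_LIST_IFACE, ieee802154_list_iface,
				ieee802154_dump_iface),
	};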
diff --git a/net/ieee802154/netlink.c b/net/ieee802154/netlink.c index ca767bde17a4..33137b99e471 100644 --- a/net/ieee802154/netlink.c +++ b/net/ieee802154/netlink.c | |||
@@ -23,21 +23,15 @@ | |||
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <linux/kernel.h> | 25 | #include <linux/kernel.h> |
26 | #include <linux/if_arp.h> | ||
27 | #include <linux/netdevice.h> | ||
28 | #include <net/netlink.h> | ||
29 | #include <net/genetlink.h> | 26 | #include <net/genetlink.h> |
30 | #include <net/sock.h> | ||
31 | #include <linux/nl802154.h> | 27 | #include <linux/nl802154.h> |
32 | #include <net/af_ieee802154.h> | 28 | |
33 | #include <net/nl802154.h> | 29 | #include "ieee802154.h" |
34 | #include <net/ieee802154.h> | ||
35 | #include <net/ieee802154_netdev.h> | ||
36 | 30 | ||
37 | static unsigned int ieee802154_seq_num; | 31 | static unsigned int ieee802154_seq_num; |
38 | static DEFINE_SPINLOCK(ieee802154_seq_lock); | 32 | static DEFINE_SPINLOCK(ieee802154_seq_lock); |
39 | 33 | ||
40 | static struct genl_family ieee802154_coordinator_family = { | 34 | struct genl_family nl802154_family = { |
41 | .id = GENL_ID_GENERATE, | 35 | .id = GENL_ID_GENERATE, |
42 | .hdrsize = 0, | 36 | .hdrsize = 0, |
43 | .name = IEEE802154_NL_NAME, | 37 | .name = IEEE802154_NL_NAME, |
@@ -45,16 +39,8 @@ static struct genl_family ieee802154_coordinator_family = { | |||
45 | .maxattr = IEEE802154_ATTR_MAX, | 39 | .maxattr = IEEE802154_ATTR_MAX, |
46 | }; | 40 | }; |
47 | 41 | ||
48 | static struct genl_multicast_group ieee802154_coord_mcgrp = { | ||
49 | .name = IEEE802154_MCAST_COORD_NAME, | ||
50 | }; | ||
51 | |||
52 | static struct genl_multicast_group ieee802154_beacon_mcgrp = { | ||
53 | .name = IEEE802154_MCAST_BEACON_NAME, | ||
54 | }; | ||
55 | |||
56 | /* Requests to userspace */ | 42 | /* Requests to userspace */ |
57 | static struct sk_buff *ieee802154_nl_create(int flags, u8 req) | 43 | struct sk_buff *ieee802154_nl_create(int flags, u8 req) |
58 | { | 44 | { |
59 | void *hdr; | 45 | void *hdr; |
60 | struct sk_buff *msg = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC); | 46 | struct sk_buff *msg = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC); |
@@ -65,7 +51,7 @@ static struct sk_buff *ieee802154_nl_create(int flags, u8 req) | |||
65 | 51 | ||
66 | spin_lock_irqsave(&ieee802154_seq_lock, f); | 52 | spin_lock_irqsave(&ieee802154_seq_lock, f); |
67 | hdr = genlmsg_put(msg, 0, ieee802154_seq_num++, | 53 | hdr = genlmsg_put(msg, 0, ieee802154_seq_num++, |
68 | &ieee802154_coordinator_family, flags, req); | 54 | &nl802154_family, flags, req); |
69 | spin_unlock_irqrestore(&ieee802154_seq_lock, f); | 55 | spin_unlock_irqrestore(&ieee802154_seq_lock, f); |
70 | if (!hdr) { | 56 | if (!hdr) { |
71 | nlmsg_free(msg); | 57 | nlmsg_free(msg); |
@@ -75,7 +61,7 @@ static struct sk_buff *ieee802154_nl_create(int flags, u8 req) | |||
75 | return msg; | 61 | return msg; |
76 | } | 62 | } |
77 | 63 | ||
78 | static int ieee802154_nl_finish(struct sk_buff *msg) | 64 | int ieee802154_nl_mcast(struct sk_buff *msg, unsigned int group) |
79 | { | 65 | { |
80 | /* XXX: nlh is right at the start of msg */ | 66 | /* XXX: nlh is right at the start of msg */ |
81 | void *hdr = genlmsg_data(NLMSG_DATA(msg->data)); | 67 | void *hdr = genlmsg_data(NLMSG_DATA(msg->data)); |
@@ -83,607 +69,70 @@ static int ieee802154_nl_finish(struct sk_buff *msg) | |||
83 | if (genlmsg_end(msg, hdr) < 0) | 69 | if (genlmsg_end(msg, hdr) < 0) |
84 | goto out; | 70 | goto out; |
85 | 71 | ||
86 | return genlmsg_multicast(msg, 0, ieee802154_coord_mcgrp.id, | 72 | return genlmsg_multicast(msg, 0, group, GFP_ATOMIC); |
87 | GFP_ATOMIC); | ||
88 | out: | 73 | out: |
89 | nlmsg_free(msg); | 74 | nlmsg_free(msg); |
90 | return -ENOBUFS; | 75 | return -ENOBUFS; |
91 | } | 76 | } |
92 | 77 | ||
93 | int ieee802154_nl_assoc_indic(struct net_device *dev, | 78 | struct sk_buff *ieee802154_nl_new_reply(struct genl_info *info, |
94 | struct ieee802154_addr *addr, u8 cap) | 79 | int flags, u8 req) |
95 | { | ||
96 | struct sk_buff *msg; | ||
97 | |||
98 | pr_debug("%s\n", __func__); | ||
99 | |||
100 | if (addr->addr_type != IEEE802154_ADDR_LONG) { | ||
101 | pr_err("%s: received non-long source address!\n", __func__); | ||
102 | return -EINVAL; | ||
103 | } | ||
104 | |||
105 | msg = ieee802154_nl_create(0, IEEE802154_ASSOCIATE_INDIC); | ||
106 | if (!msg) | ||
107 | return -ENOBUFS; | ||
108 | |||
109 | NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); | ||
110 | NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); | ||
111 | NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, | ||
112 | dev->dev_addr); | ||
113 | |||
114 | NLA_PUT(msg, IEEE802154_ATTR_SRC_HW_ADDR, IEEE802154_ADDR_LEN, | ||
115 | addr->hwaddr); | ||
116 | |||
117 | NLA_PUT_U8(msg, IEEE802154_ATTR_CAPABILITY, cap); | ||
118 | |||
119 | return ieee802154_nl_finish(msg); | ||
120 | |||
121 | nla_put_failure: | ||
122 | nlmsg_free(msg); | ||
123 | return -ENOBUFS; | ||
124 | } | ||
125 | EXPORT_SYMBOL(ieee802154_nl_assoc_indic); | ||
126 | |||
127 | int ieee802154_nl_assoc_confirm(struct net_device *dev, u16 short_addr, | ||
128 | u8 status) | ||
129 | { | ||
130 | struct sk_buff *msg; | ||
131 | |||
132 | pr_debug("%s\n", __func__); | ||
133 | |||
134 | msg = ieee802154_nl_create(0, IEEE802154_ASSOCIATE_CONF); | ||
135 | if (!msg) | ||
136 | return -ENOBUFS; | ||
137 | |||
138 | NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); | ||
139 | NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); | ||
140 | NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, | ||
141 | dev->dev_addr); | ||
142 | |||
143 | NLA_PUT_U16(msg, IEEE802154_ATTR_SHORT_ADDR, short_addr); | ||
144 | NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status); | ||
145 | |||
146 | return ieee802154_nl_finish(msg); | ||
147 | |||
148 | nla_put_failure: | ||
149 | nlmsg_free(msg); | ||
150 | return -ENOBUFS; | ||
151 | } | ||
152 | EXPORT_SYMBOL(ieee802154_nl_assoc_confirm); | ||
153 | |||
154 | int ieee802154_nl_disassoc_indic(struct net_device *dev, | ||
155 | struct ieee802154_addr *addr, u8 reason) | ||
156 | { | ||
157 | struct sk_buff *msg; | ||
158 | |||
159 | pr_debug("%s\n", __func__); | ||
160 | |||
161 | msg = ieee802154_nl_create(0, IEEE802154_DISASSOCIATE_INDIC); | ||
162 | if (!msg) | ||
163 | return -ENOBUFS; | ||
164 | |||
165 | NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); | ||
166 | NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); | ||
167 | NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, | ||
168 | dev->dev_addr); | ||
169 | |||
170 | if (addr->addr_type == IEEE802154_ADDR_LONG) | ||
171 | NLA_PUT(msg, IEEE802154_ATTR_SRC_HW_ADDR, IEEE802154_ADDR_LEN, | ||
172 | addr->hwaddr); | ||
173 | else | ||
174 | NLA_PUT_U16(msg, IEEE802154_ATTR_SRC_SHORT_ADDR, | ||
175 | addr->short_addr); | ||
176 | |||
177 | NLA_PUT_U8(msg, IEEE802154_ATTR_REASON, reason); | ||
178 | |||
179 | return ieee802154_nl_finish(msg); | ||
180 | |||
181 | nla_put_failure: | ||
182 | nlmsg_free(msg); | ||
183 | return -ENOBUFS; | ||
184 | } | ||
185 | EXPORT_SYMBOL(ieee802154_nl_disassoc_indic); | ||
186 | |||
187 | int ieee802154_nl_disassoc_confirm(struct net_device *dev, u8 status) | ||
188 | { | ||
189 | struct sk_buff *msg; | ||
190 | |||
191 | pr_debug("%s\n", __func__); | ||
192 | |||
193 | msg = ieee802154_nl_create(0, IEEE802154_DISASSOCIATE_CONF); | ||
194 | if (!msg) | ||
195 | return -ENOBUFS; | ||
196 | |||
197 | NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); | ||
198 | NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); | ||
199 | NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, | ||
200 | dev->dev_addr); | ||
201 | |||
202 | NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status); | ||
203 | |||
204 | return ieee802154_nl_finish(msg); | ||
205 | |||
206 | nla_put_failure: | ||
207 | nlmsg_free(msg); | ||
208 | return -ENOBUFS; | ||
209 | } | ||
210 | EXPORT_SYMBOL(ieee802154_nl_disassoc_confirm); | ||
211 | |||
212 | int ieee802154_nl_beacon_indic(struct net_device *dev, | ||
213 | u16 panid, u16 coord_addr) | ||
214 | { | ||
215 | struct sk_buff *msg; | ||
216 | |||
217 | pr_debug("%s\n", __func__); | ||
218 | |||
219 | msg = ieee802154_nl_create(0, IEEE802154_BEACON_NOTIFY_INDIC); | ||
220 | if (!msg) | ||
221 | return -ENOBUFS; | ||
222 | |||
223 | NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); | ||
224 | NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); | ||
225 | NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, | ||
226 | dev->dev_addr); | ||
227 | NLA_PUT_U16(msg, IEEE802154_ATTR_COORD_SHORT_ADDR, coord_addr); | ||
228 | NLA_PUT_U16(msg, IEEE802154_ATTR_COORD_PAN_ID, panid); | ||
229 | |||
230 | return ieee802154_nl_finish(msg); | ||
231 | |||
232 | nla_put_failure: | ||
233 | nlmsg_free(msg); | ||
234 | return -ENOBUFS; | ||
235 | } | ||
236 | EXPORT_SYMBOL(ieee802154_nl_beacon_indic); | ||
237 | |||
238 | int ieee802154_nl_scan_confirm(struct net_device *dev, | ||
239 | u8 status, u8 scan_type, u32 unscanned, u8 page, | ||
240 | u8 *edl/* , struct list_head *pan_desc_list */) | ||
241 | { | ||
242 | struct sk_buff *msg; | ||
243 | |||
244 | pr_debug("%s\n", __func__); | ||
245 | |||
246 | msg = ieee802154_nl_create(0, IEEE802154_SCAN_CONF); | ||
247 | if (!msg) | ||
248 | return -ENOBUFS; | ||
249 | |||
250 | NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); | ||
251 | NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); | ||
252 | NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, | ||
253 | dev->dev_addr); | ||
254 | |||
255 | NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status); | ||
256 | NLA_PUT_U8(msg, IEEE802154_ATTR_SCAN_TYPE, scan_type); | ||
257 | NLA_PUT_U32(msg, IEEE802154_ATTR_CHANNELS, unscanned); | ||
258 | NLA_PUT_U8(msg, IEEE802154_ATTR_PAGE, page); | ||
259 | |||
260 | if (edl) | ||
261 | NLA_PUT(msg, IEEE802154_ATTR_ED_LIST, 27, edl); | ||
262 | |||
263 | return ieee802154_nl_finish(msg); | ||
264 | |||
265 | nla_put_failure: | ||
266 | nlmsg_free(msg); | ||
267 | return -ENOBUFS; | ||
268 | } | ||
269 | EXPORT_SYMBOL(ieee802154_nl_scan_confirm); | ||
270 | |||
271 | int ieee802154_nl_start_confirm(struct net_device *dev, u8 status) | ||
272 | { | ||
273 | struct sk_buff *msg; | ||
274 | |||
275 | pr_debug("%s\n", __func__); | ||
276 | |||
277 | msg = ieee802154_nl_create(0, IEEE802154_START_CONF); | ||
278 | if (!msg) | ||
279 | return -ENOBUFS; | ||
280 | |||
281 | NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); | ||
282 | NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); | ||
283 | NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, | ||
284 | dev->dev_addr); | ||
285 | |||
286 | NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status); | ||
287 | |||
288 | return ieee802154_nl_finish(msg); | ||
289 | |||
290 | nla_put_failure: | ||
291 | nlmsg_free(msg); | ||
292 | return -ENOBUFS; | ||
293 | } | ||
294 | EXPORT_SYMBOL(ieee802154_nl_start_confirm); | ||
295 | |||
296 | static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 pid, | ||
297 | u32 seq, int flags, struct net_device *dev) | ||
298 | { | 80 | { |
299 | void *hdr; | 81 | void *hdr; |
82 | struct sk_buff *msg = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC); | ||
300 | 83 | ||
301 | pr_debug("%s\n", __func__); | 84 | if (!msg) |
302 | |||
303 | hdr = genlmsg_put(msg, 0, seq, &ieee802154_coordinator_family, flags, | ||
304 | IEEE802154_LIST_IFACE); | ||
305 | if (!hdr) | ||
306 | goto out; | ||
307 | |||
308 | NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); | ||
309 | NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); | ||
310 | |||
311 | NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, | ||
312 | dev->dev_addr); | ||
313 | NLA_PUT_U16(msg, IEEE802154_ATTR_SHORT_ADDR, | ||
314 | ieee802154_mlme_ops(dev)->get_short_addr(dev)); | ||
315 | NLA_PUT_U16(msg, IEEE802154_ATTR_PAN_ID, | ||
316 | ieee802154_mlme_ops(dev)->get_pan_id(dev)); | ||
317 | return genlmsg_end(msg, hdr); | ||
318 | |||
319 | nla_put_failure: | ||
320 | genlmsg_cancel(msg, hdr); | ||
321 | out: | ||
322 | return -EMSGSIZE; | ||
323 | } | ||
324 | |||
325 | /* Requests from userspace */ | ||
326 | static struct net_device *ieee802154_nl_get_dev(struct genl_info *info) | ||
327 | { | ||
328 | struct net_device *dev; | ||
329 | |||
330 | if (info->attrs[IEEE802154_ATTR_DEV_NAME]) { | ||
331 | char name[IFNAMSIZ + 1]; | ||
332 | nla_strlcpy(name, info->attrs[IEEE802154_ATTR_DEV_NAME], | ||
333 | sizeof(name)); | ||
334 | dev = dev_get_by_name(&init_net, name); | ||
335 | } else if (info->attrs[IEEE802154_ATTR_DEV_INDEX]) | ||
336 | dev = dev_get_by_index(&init_net, | ||
337 | nla_get_u32(info->attrs[IEEE802154_ATTR_DEV_INDEX])); | ||
338 | else | ||
339 | return NULL; | ||
340 | |||
341 | if (!dev) | ||
342 | return NULL; | 85 | return NULL; |
343 | 86 | ||
344 | if (dev->type != ARPHRD_IEEE802154) { | 87 | hdr = genlmsg_put_reply(msg, info, |
345 | dev_put(dev); | 88 | &nl802154_family, flags, req); |
89 | if (!hdr) { | ||
90 | nlmsg_free(msg); | ||
346 | return NULL; | 91 | return NULL; |
347 | } | 92 | } |
348 | 93 | ||
349 | return dev; | 94 | return msg; |
350 | } | ||
351 | |||
352 | static int ieee802154_associate_req(struct sk_buff *skb, | ||
353 | struct genl_info *info) | ||
354 | { | ||
355 | struct net_device *dev; | ||
356 | struct ieee802154_addr addr; | ||
357 | u8 page; | ||
358 | int ret = -EINVAL; | ||
359 | |||
360 | if (!info->attrs[IEEE802154_ATTR_CHANNEL] || | ||
361 | !info->attrs[IEEE802154_ATTR_COORD_PAN_ID] || | ||
362 | (!info->attrs[IEEE802154_ATTR_COORD_HW_ADDR] && | ||
363 | !info->attrs[IEEE802154_ATTR_COORD_SHORT_ADDR]) || | ||
364 | !info->attrs[IEEE802154_ATTR_CAPABILITY]) | ||
365 | return -EINVAL; | ||
366 | |||
367 | dev = ieee802154_nl_get_dev(info); | ||
368 | if (!dev) | ||
369 | return -ENODEV; | ||
370 | |||
371 | if (info->attrs[IEEE802154_ATTR_COORD_HW_ADDR]) { | ||
372 | addr.addr_type = IEEE802154_ADDR_LONG; | ||
373 | nla_memcpy(addr.hwaddr, | ||
374 | info->attrs[IEEE802154_ATTR_COORD_HW_ADDR], | ||
375 | IEEE802154_ADDR_LEN); | ||
376 | } else { | ||
377 | addr.addr_type = IEEE802154_ADDR_SHORT; | ||
378 | addr.short_addr = nla_get_u16( | ||
379 | info->attrs[IEEE802154_ATTR_COORD_SHORT_ADDR]); | ||
380 | } | ||
381 | addr.pan_id = nla_get_u16(info->attrs[IEEE802154_ATTR_COORD_PAN_ID]); | ||
382 | |||
383 | if (info->attrs[IEEE802154_ATTR_PAGE]) | ||
384 | page = nla_get_u8(info->attrs[IEEE802154_ATTR_PAGE]); | ||
385 | else | ||
386 | page = 0; | ||
387 | |||
388 | ret = ieee802154_mlme_ops(dev)->assoc_req(dev, &addr, | ||
389 | nla_get_u8(info->attrs[IEEE802154_ATTR_CHANNEL]), | ||
390 | page, | ||
391 | nla_get_u8(info->attrs[IEEE802154_ATTR_CAPABILITY])); | ||
392 | |||
393 | dev_put(dev); | ||
394 | return ret; | ||
395 | } | ||
396 | |||
397 | static int ieee802154_associate_resp(struct sk_buff *skb, | ||
398 | struct genl_info *info) | ||
399 | { | ||
400 | struct net_device *dev; | ||
401 | struct ieee802154_addr addr; | ||
402 | int ret = -EINVAL; | ||
403 | |||
404 | if (!info->attrs[IEEE802154_ATTR_STATUS] || | ||
405 | !info->attrs[IEEE802154_ATTR_DEST_HW_ADDR] || | ||
406 | !info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]) | ||
407 | return -EINVAL; | ||
408 | |||
409 | dev = ieee802154_nl_get_dev(info); | ||
410 | if (!dev) | ||
411 | return -ENODEV; | ||
412 | |||
413 | addr.addr_type = IEEE802154_ADDR_LONG; | ||
414 | nla_memcpy(addr.hwaddr, info->attrs[IEEE802154_ATTR_DEST_HW_ADDR], | ||
415 | IEEE802154_ADDR_LEN); | ||
416 | addr.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev); | ||
417 | |||
418 | |||
419 | ret = ieee802154_mlme_ops(dev)->assoc_resp(dev, &addr, | ||
420 | nla_get_u16(info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]), | ||
421 | nla_get_u8(info->attrs[IEEE802154_ATTR_STATUS])); | ||
422 | |||
423 | dev_put(dev); | ||
424 | return ret; | ||
425 | } | ||
426 | |||
427 | static int ieee802154_disassociate_req(struct sk_buff *skb, | ||
428 | struct genl_info *info) | ||
429 | { | ||
430 | struct net_device *dev; | ||
431 | struct ieee802154_addr addr; | ||
432 | int ret = -EINVAL; | ||
433 | |||
434 | if ((!info->attrs[IEEE802154_ATTR_DEST_HW_ADDR] && | ||
435 | !info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]) || | ||
436 | !info->attrs[IEEE802154_ATTR_REASON]) | ||
437 | return -EINVAL; | ||
438 | |||
439 | dev = ieee802154_nl_get_dev(info); | ||
440 | if (!dev) | ||
441 | return -ENODEV; | ||
442 | |||
443 | if (info->attrs[IEEE802154_ATTR_DEST_HW_ADDR]) { | ||
444 | addr.addr_type = IEEE802154_ADDR_LONG; | ||
445 | nla_memcpy(addr.hwaddr, | ||
446 | info->attrs[IEEE802154_ATTR_DEST_HW_ADDR], | ||
447 | IEEE802154_ADDR_LEN); | ||
448 | } else { | ||
449 | addr.addr_type = IEEE802154_ADDR_SHORT; | ||
450 | addr.short_addr = nla_get_u16( | ||
451 | info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]); | ||
452 | } | ||
453 | addr.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev); | ||
454 | |||
455 | ret = ieee802154_mlme_ops(dev)->disassoc_req(dev, &addr, | ||
456 | nla_get_u8(info->attrs[IEEE802154_ATTR_REASON])); | ||
457 | |||
458 | dev_put(dev); | ||
459 | return ret; | ||
460 | } | ||
461 | |||
462 | /* | ||
463 | * PANid, channel, beacon_order = 15, superframe_order = 15, | ||
464 | * PAN_coordinator, battery_life_extension = 0, | ||
465 | * coord_realignment = 0, security_enable = 0 | ||
466 | */ | ||
467 | static int ieee802154_start_req(struct sk_buff *skb, struct genl_info *info) | ||
468 | { | ||
469 | struct net_device *dev; | ||
470 | struct ieee802154_addr addr; | ||
471 | |||
472 | u8 channel, bcn_ord, sf_ord; | ||
473 | u8 page; | ||
474 | int pan_coord, blx, coord_realign; | ||
475 | int ret; | ||
476 | |||
477 | if (!info->attrs[IEEE802154_ATTR_COORD_PAN_ID] || | ||
478 | !info->attrs[IEEE802154_ATTR_COORD_SHORT_ADDR] || | ||
479 | !info->attrs[IEEE802154_ATTR_CHANNEL] || | ||
480 | !info->attrs[IEEE802154_ATTR_BCN_ORD] || | ||
481 | !info->attrs[IEEE802154_ATTR_SF_ORD] || | ||
482 | !info->attrs[IEEE802154_ATTR_PAN_COORD] || | ||
483 | !info->attrs[IEEE802154_ATTR_BAT_EXT] || | ||
484 | !info->attrs[IEEE802154_ATTR_COORD_REALIGN] | ||
485 | ) | ||
486 | return -EINVAL; | ||
487 | |||
488 | dev = ieee802154_nl_get_dev(info); | ||
489 | if (!dev) | ||
490 | return -ENODEV; | ||
491 | |||
492 | addr.addr_type = IEEE802154_ADDR_SHORT; | ||
493 | addr.short_addr = nla_get_u16( | ||
494 | info->attrs[IEEE802154_ATTR_COORD_SHORT_ADDR]); | ||
495 | addr.pan_id = nla_get_u16(info->attrs[IEEE802154_ATTR_COORD_PAN_ID]); | ||
496 | |||
497 | channel = nla_get_u8(info->attrs[IEEE802154_ATTR_CHANNEL]); | ||
498 | bcn_ord = nla_get_u8(info->attrs[IEEE802154_ATTR_BCN_ORD]); | ||
499 | sf_ord = nla_get_u8(info->attrs[IEEE802154_ATTR_SF_ORD]); | ||
500 | pan_coord = nla_get_u8(info->attrs[IEEE802154_ATTR_PAN_COORD]); | ||
501 | blx = nla_get_u8(info->attrs[IEEE802154_ATTR_BAT_EXT]); | ||
502 | coord_realign = nla_get_u8(info->attrs[IEEE802154_ATTR_COORD_REALIGN]); | ||
503 | |||
504 | if (info->attrs[IEEE802154_ATTR_PAGE]) | ||
505 | page = nla_get_u8(info->attrs[IEEE802154_ATTR_PAGE]); | ||
506 | else | ||
507 | page = 0; | ||
508 | |||
509 | |||
510 | if (addr.short_addr == IEEE802154_ADDR_BROADCAST) { | ||
511 | ieee802154_nl_start_confirm(dev, IEEE802154_NO_SHORT_ADDRESS); | ||
512 | dev_put(dev); | ||
513 | return -EINVAL; | ||
514 | } | ||
515 | |||
516 | ret = ieee802154_mlme_ops(dev)->start_req(dev, &addr, channel, page, | ||
517 | bcn_ord, sf_ord, pan_coord, blx, coord_realign); | ||
518 | |||
519 | dev_put(dev); | ||
520 | return ret; | ||
521 | } | ||
522 | |||
523 | static int ieee802154_scan_req(struct sk_buff *skb, struct genl_info *info) | ||
524 | { | ||
525 | struct net_device *dev; | ||
526 | int ret; | ||
527 | u8 type; | ||
528 | u32 channels; | ||
529 | u8 duration; | ||
530 | u8 page; | ||
531 | |||
532 | if (!info->attrs[IEEE802154_ATTR_SCAN_TYPE] || | ||
533 | !info->attrs[IEEE802154_ATTR_CHANNELS] || | ||
534 | !info->attrs[IEEE802154_ATTR_DURATION]) | ||
535 | return -EINVAL; | ||
536 | |||
537 | dev = ieee802154_nl_get_dev(info); | ||
538 | if (!dev) | ||
539 | return -ENODEV; | ||
540 | |||
541 | type = nla_get_u8(info->attrs[IEEE802154_ATTR_SCAN_TYPE]); | ||
542 | channels = nla_get_u32(info->attrs[IEEE802154_ATTR_CHANNELS]); | ||
543 | duration = nla_get_u8(info->attrs[IEEE802154_ATTR_DURATION]); | ||
544 | |||
545 | if (info->attrs[IEEE802154_ATTR_PAGE]) | ||
546 | page = nla_get_u8(info->attrs[IEEE802154_ATTR_PAGE]); | ||
547 | else | ||
548 | page = 0; | ||
549 | |||
550 | |||
551 | ret = ieee802154_mlme_ops(dev)->scan_req(dev, type, channels, page, | ||
552 | duration); | ||
553 | |||
554 | dev_put(dev); | ||
555 | return ret; | ||
556 | } | 95 | } |
557 | 96 | ||
558 | static int ieee802154_list_iface(struct sk_buff *skb, | 97 | int ieee802154_nl_reply(struct sk_buff *msg, struct genl_info *info) |
559 | struct genl_info *info) | ||
560 | { | 98 | { |
561 | /* Request for interface name, index, type, IEEE address, | 99 | /* XXX: nlh is right at the start of msg */ |
562 | PAN Id, short address */ | 100 | void *hdr = genlmsg_data(NLMSG_DATA(msg->data)); |
563 | struct sk_buff *msg; | ||
564 | struct net_device *dev = NULL; | ||
565 | int rc = -ENOBUFS; | ||
566 | |||
567 | pr_debug("%s\n", __func__); | ||
568 | |||
569 | dev = ieee802154_nl_get_dev(info); | ||
570 | if (!dev) | ||
571 | return -ENODEV; | ||
572 | |||
573 | msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); | ||
574 | if (!msg) | ||
575 | goto out_dev; | ||
576 | |||
577 | rc = ieee802154_nl_fill_iface(msg, info->snd_pid, info->snd_seq, | ||
578 | 0, dev); | ||
579 | if (rc < 0) | ||
580 | goto out_free; | ||
581 | 101 | ||
582 | dev_put(dev); | 102 | if (genlmsg_end(msg, hdr) < 0) |
103 | goto out; | ||
583 | 104 | ||
584 | return genlmsg_unicast(&init_net, msg, info->snd_pid); | 105 | return genlmsg_reply(msg, info); |
585 | out_free: | 106 | out: |
586 | nlmsg_free(msg); | 107 | nlmsg_free(msg); |
587 | out_dev: | 108 | return -ENOBUFS; |
588 | dev_put(dev); | ||
589 | return rc; | ||
590 | |||
591 | } | ||
592 | |||
593 | static int ieee802154_dump_iface(struct sk_buff *skb, | ||
594 | struct netlink_callback *cb) | ||
595 | { | ||
596 | struct net *net = sock_net(skb->sk); | ||
597 | struct net_device *dev; | ||
598 | int idx; | ||
599 | int s_idx = cb->args[0]; | ||
600 | |||
601 | pr_debug("%s\n", __func__); | ||
602 | |||
603 | idx = 0; | ||
604 | for_each_netdev(net, dev) { | ||
605 | if (idx < s_idx || (dev->type != ARPHRD_IEEE802154)) | ||
606 | goto cont; | ||
607 | |||
608 | if (ieee802154_nl_fill_iface(skb, NETLINK_CB(cb->skb).pid, | ||
609 | cb->nlh->nlmsg_seq, NLM_F_MULTI, dev) < 0) | ||
610 | break; | ||
611 | cont: | ||
612 | idx++; | ||
613 | } | ||
614 | cb->args[0] = idx; | ||
615 | |||
616 | return skb->len; | ||
617 | } | 109 | } |
618 | 110 | ||
619 | #define IEEE802154_OP(_cmd, _func) \ | 111 | int __init ieee802154_nl_init(void) |
620 | { \ | ||
621 | .cmd = _cmd, \ | ||
622 | .policy = ieee802154_policy, \ | ||
623 | .doit = _func, \ | ||
624 | .dumpit = NULL, \ | ||
625 | .flags = GENL_ADMIN_PERM, \ | ||
626 | } | ||
627 | |||
628 | #define IEEE802154_DUMP(_cmd, _func, _dump) \ | ||
629 | { \ | ||
630 | .cmd = _cmd, \ | ||
631 | .policy = ieee802154_policy, \ | ||
632 | .doit = _func, \ | ||
633 | .dumpit = _dump, \ | ||
634 | } | ||
635 | |||
636 | static struct genl_ops ieee802154_coordinator_ops[] = { | ||
637 | IEEE802154_OP(IEEE802154_ASSOCIATE_REQ, ieee802154_associate_req), | ||
638 | IEEE802154_OP(IEEE802154_ASSOCIATE_RESP, ieee802154_associate_resp), | ||
639 | IEEE802154_OP(IEEE802154_DISASSOCIATE_REQ, ieee802154_disassociate_req), | ||
640 | IEEE802154_OP(IEEE802154_SCAN_REQ, ieee802154_scan_req), | ||
641 | IEEE802154_OP(IEEE802154_START_REQ, ieee802154_start_req), | ||
642 | IEEE802154_DUMP(IEEE802154_LIST_IFACE, ieee802154_list_iface, | ||
643 | ieee802154_dump_iface), | ||
644 | }; | ||
645 | |||
646 | static int __init ieee802154_nl_init(void) | ||
647 | { | 112 | { |
648 | int rc; | 113 | int rc; |
649 | int i; | ||
650 | 114 | ||
651 | rc = genl_register_family(&ieee802154_coordinator_family); | 115 | rc = genl_register_family(&nl802154_family); |
652 | if (rc) | 116 | if (rc) |
653 | goto fail; | 117 | goto fail; |
654 | 118 | ||
655 | rc = genl_register_mc_group(&ieee802154_coordinator_family, | 119 | rc = nl802154_mac_register(); |
656 | &ieee802154_coord_mcgrp); | ||
657 | if (rc) | 120 | if (rc) |
658 | goto fail; | 121 | goto fail; |
659 | 122 | ||
660 | rc = genl_register_mc_group(&ieee802154_coordinator_family, | 123 | rc = nl802154_phy_register(); |
661 | &ieee802154_beacon_mcgrp); | ||
662 | if (rc) | 124 | if (rc) |
663 | goto fail; | 125 | goto fail; |
664 | 126 | ||
665 | |||
666 | for (i = 0; i < ARRAY_SIZE(ieee802154_coordinator_ops); i++) { | ||
667 | rc = genl_register_ops(&ieee802154_coordinator_family, | ||
668 | &ieee802154_coordinator_ops[i]); | ||
669 | if (rc) | ||
670 | goto fail; | ||
671 | } | ||
672 | |||
673 | return 0; | 127 | return 0; |
674 | 128 | ||
675 | fail: | 129 | fail: |
676 | genl_unregister_family(&ieee802154_coordinator_family); | 130 | genl_unregister_family(&nl802154_family); |
677 | return rc; | 131 | return rc; |
678 | } | 132 | } |
679 | module_init(ieee802154_nl_init); | ||
680 | 133 | ||
681 | static void __exit ieee802154_nl_exit(void) | 134 | void __exit ieee802154_nl_exit(void) |
682 | { | 135 | { |
683 | genl_unregister_family(&ieee802154_coordinator_family); | 136 | genl_unregister_family(&nl802154_family); |
684 | } | 137 | } |
685 | module_exit(ieee802154_nl_exit); | ||
686 | |||
687 | MODULE_LICENSE("GPL v2"); | ||
688 | MODULE_DESCRIPTION("ieee 802.15.4 configuration interface"); | ||
689 | 138 | ||
diff --git a/net/ieee802154/nl-mac.c b/net/ieee802154/nl-mac.c new file mode 100644 index 000000000000..135c1678fb11 --- /dev/null +++ b/net/ieee802154/nl-mac.c | |||
@@ -0,0 +1,617 @@ | |||
1 | /* | ||
2 | * Netlink interface for IEEE 802.15.4 stack | ||
3 | * | ||
4 | * Copyright 2007, 2008 Siemens AG | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 | ||
8 | * as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License along | ||
16 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
17 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | * | ||
19 | * Written by: | ||
20 | * Sergey Lapin <slapin@ossfans.org> | ||
21 | * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> | ||
22 | * Maxim Osipov <maxim.osipov@siemens.com> | ||
23 | */ | ||
24 | |||
25 | #include <linux/kernel.h> | ||
26 | #include <linux/if_arp.h> | ||
27 | #include <linux/netdevice.h> | ||
28 | #include <net/netlink.h> | ||
29 | #include <net/genetlink.h> | ||
30 | #include <net/sock.h> | ||
31 | #include <linux/nl802154.h> | ||
32 | #include <net/af_ieee802154.h> | ||
33 | #include <net/nl802154.h> | ||
34 | #include <net/ieee802154.h> | ||
35 | #include <net/ieee802154_netdev.h> | ||
36 | #include <net/wpan-phy.h> | ||
37 | |||
38 | #include "ieee802154.h" | ||
39 | |||
40 | static struct genl_multicast_group ieee802154_coord_mcgrp = { | ||
41 | .name = IEEE802154_MCAST_COORD_NAME, | ||
42 | }; | ||
43 | |||
44 | static struct genl_multicast_group ieee802154_beacon_mcgrp = { | ||
45 | .name = IEEE802154_MCAST_BEACON_NAME, | ||
46 | }; | ||
47 | |||
48 | int ieee802154_nl_assoc_indic(struct net_device *dev, | ||
49 | struct ieee802154_addr *addr, u8 cap) | ||
50 | { | ||
51 | struct sk_buff *msg; | ||
52 | |||
53 | pr_debug("%s\n", __func__); | ||
54 | |||
55 | if (addr->addr_type != IEEE802154_ADDR_LONG) { | ||
56 | pr_err("%s: received non-long source address!\n", __func__); | ||
57 | return -EINVAL; | ||
58 | } | ||
59 | |||
60 | msg = ieee802154_nl_create(0, IEEE802154_ASSOCIATE_INDIC); | ||
61 | if (!msg) | ||
62 | return -ENOBUFS; | ||
63 | |||
64 | NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); | ||
65 | NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); | ||
66 | NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, | ||
67 | dev->dev_addr); | ||
68 | |||
69 | NLA_PUT(msg, IEEE802154_ATTR_SRC_HW_ADDR, IEEE802154_ADDR_LEN, | ||
70 | addr->hwaddr); | ||
71 | |||
72 | NLA_PUT_U8(msg, IEEE802154_ATTR_CAPABILITY, cap); | ||
73 | |||
74 | return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id); | ||
75 | |||
76 | nla_put_failure: | ||
77 | nlmsg_free(msg); | ||
78 | return -ENOBUFS; | ||
79 | } | ||
80 | EXPORT_SYMBOL(ieee802154_nl_assoc_indic); | ||
81 | |||
82 | int ieee802154_nl_assoc_confirm(struct net_device *dev, u16 short_addr, | ||
83 | u8 status) | ||
84 | { | ||
85 | struct sk_buff *msg; | ||
86 | |||
87 | pr_debug("%s\n", __func__); | ||
88 | |||
89 | msg = ieee802154_nl_create(0, IEEE802154_ASSOCIATE_CONF); | ||
90 | if (!msg) | ||
91 | return -ENOBUFS; | ||
92 | |||
93 | NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); | ||
94 | NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); | ||
95 | NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, | ||
96 | dev->dev_addr); | ||
97 | |||
98 | NLA_PUT_U16(msg, IEEE802154_ATTR_SHORT_ADDR, short_addr); | ||
99 | NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status); | ||
100 | |||
101 | return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id); | ||
102 | |||
103 | nla_put_failure: | ||
104 | nlmsg_free(msg); | ||
105 | return -ENOBUFS; | ||
106 | } | ||
107 | EXPORT_SYMBOL(ieee802154_nl_assoc_confirm); | ||
108 | |||
109 | int ieee802154_nl_disassoc_indic(struct net_device *dev, | ||
110 | struct ieee802154_addr *addr, u8 reason) | ||
111 | { | ||
112 | struct sk_buff *msg; | ||
113 | |||
114 | pr_debug("%s\n", __func__); | ||
115 | |||
116 | msg = ieee802154_nl_create(0, IEEE802154_DISASSOCIATE_INDIC); | ||
117 | if (!msg) | ||
118 | return -ENOBUFS; | ||
119 | |||
120 | NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); | ||
121 | NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); | ||
122 | NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, | ||
123 | dev->dev_addr); | ||
124 | |||
125 | if (addr->addr_type == IEEE802154_ADDR_LONG) | ||
126 | NLA_PUT(msg, IEEE802154_ATTR_SRC_HW_ADDR, IEEE802154_ADDR_LEN, | ||
127 | addr->hwaddr); | ||
128 | else | ||
129 | NLA_PUT_U16(msg, IEEE802154_ATTR_SRC_SHORT_ADDR, | ||
130 | addr->short_addr); | ||
131 | |||
132 | NLA_PUT_U8(msg, IEEE802154_ATTR_REASON, reason); | ||
133 | |||
134 | return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id); | ||
135 | |||
136 | nla_put_failure: | ||
137 | nlmsg_free(msg); | ||
138 | return -ENOBUFS; | ||
139 | } | ||
140 | EXPORT_SYMBOL(ieee802154_nl_disassoc_indic); | ||
141 | |||
142 | int ieee802154_nl_disassoc_confirm(struct net_device *dev, u8 status) | ||
143 | { | ||
144 | struct sk_buff *msg; | ||
145 | |||
146 | pr_debug("%s\n", __func__); | ||
147 | |||
148 | msg = ieee802154_nl_create(0, IEEE802154_DISASSOCIATE_CONF); | ||
149 | if (!msg) | ||
150 | return -ENOBUFS; | ||
151 | |||
152 | NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); | ||
153 | NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); | ||
154 | NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, | ||
155 | dev->dev_addr); | ||
156 | |||
157 | NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status); | ||
158 | |||
159 | return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id); | ||
160 | |||
161 | nla_put_failure: | ||
162 | nlmsg_free(msg); | ||
163 | return -ENOBUFS; | ||
164 | } | ||
165 | EXPORT_SYMBOL(ieee802154_nl_disassoc_confirm); | ||
166 | |||
167 | int ieee802154_nl_beacon_indic(struct net_device *dev, | ||
168 | u16 panid, u16 coord_addr) | ||
169 | { | ||
170 | struct sk_buff *msg; | ||
171 | |||
172 | pr_debug("%s\n", __func__); | ||
173 | |||
174 | msg = ieee802154_nl_create(0, IEEE802154_BEACON_NOTIFY_INDIC); | ||
175 | if (!msg) | ||
176 | return -ENOBUFS; | ||
177 | |||
178 | NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); | ||
179 | NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); | ||
180 | NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, | ||
181 | dev->dev_addr); | ||
182 | NLA_PUT_U16(msg, IEEE802154_ATTR_COORD_SHORT_ADDR, coord_addr); | ||
183 | NLA_PUT_U16(msg, IEEE802154_ATTR_COORD_PAN_ID, panid); | ||
184 | |||
185 | return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id); | ||
186 | |||
187 | nla_put_failure: | ||
188 | nlmsg_free(msg); | ||
189 | return -ENOBUFS; | ||
190 | } | ||
191 | EXPORT_SYMBOL(ieee802154_nl_beacon_indic); | ||
192 | |||
193 | int ieee802154_nl_scan_confirm(struct net_device *dev, | ||
194 | u8 status, u8 scan_type, u32 unscanned, u8 page, | ||
195 | u8 *edl/* , struct list_head *pan_desc_list */) | ||
196 | { | ||
197 | struct sk_buff *msg; | ||
198 | |||
199 | pr_debug("%s\n", __func__); | ||
200 | |||
201 | msg = ieee802154_nl_create(0, IEEE802154_SCAN_CONF); | ||
202 | if (!msg) | ||
203 | return -ENOBUFS; | ||
204 | |||
205 | NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); | ||
206 | NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); | ||
207 | NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, | ||
208 | dev->dev_addr); | ||
209 | |||
210 | NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status); | ||
211 | NLA_PUT_U8(msg, IEEE802154_ATTR_SCAN_TYPE, scan_type); | ||
212 | NLA_PUT_U32(msg, IEEE802154_ATTR_CHANNELS, unscanned); | ||
213 | NLA_PUT_U8(msg, IEEE802154_ATTR_PAGE, page); | ||
214 | |||
215 | if (edl) | ||
216 | NLA_PUT(msg, IEEE802154_ATTR_ED_LIST, 27, edl); | ||
217 | |||
218 | return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id); | ||
219 | |||
220 | nla_put_failure: | ||
221 | nlmsg_free(msg); | ||
222 | return -ENOBUFS; | ||
223 | } | ||
224 | EXPORT_SYMBOL(ieee802154_nl_scan_confirm); | ||
225 | |||
226 | int ieee802154_nl_start_confirm(struct net_device *dev, u8 status) | ||
227 | { | ||
228 | struct sk_buff *msg; | ||
229 | |||
230 | pr_debug("%s\n", __func__); | ||
231 | |||
232 | msg = ieee802154_nl_create(0, IEEE802154_START_CONF); | ||
233 | if (!msg) | ||
234 | return -ENOBUFS; | ||
235 | |||
236 | NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); | ||
237 | NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); | ||
238 | NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, | ||
239 | dev->dev_addr); | ||
240 | |||
241 | NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status); | ||
242 | |||
243 | return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id); | ||
244 | |||
245 | nla_put_failure: | ||
246 | nlmsg_free(msg); | ||
247 | return -ENOBUFS; | ||
248 | } | ||
249 | EXPORT_SYMBOL(ieee802154_nl_start_confirm); | ||
250 | |||
251 | static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 pid, | ||
252 | u32 seq, int flags, struct net_device *dev) | ||
253 | { | ||
254 | void *hdr; | ||
255 | struct wpan_phy *phy; | ||
256 | |||
257 | pr_debug("%s\n", __func__); | ||
258 | |||
259 | hdr = genlmsg_put(msg, 0, seq, &nl802154_family, flags, | ||
260 | IEEE802154_LIST_IFACE); | ||
261 | if (!hdr) | ||
262 | goto out; | ||
263 | |||
264 | phy = ieee802154_mlme_ops(dev)->get_phy(dev); | ||
265 | BUG_ON(!phy); | ||
266 | |||
267 | NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); | ||
268 | NLA_PUT_STRING(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)); | ||
269 | NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); | ||
270 | |||
271 | NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, | ||
272 | dev->dev_addr); | ||
273 | NLA_PUT_U16(msg, IEEE802154_ATTR_SHORT_ADDR, | ||
274 | ieee802154_mlme_ops(dev)->get_short_addr(dev)); | ||
275 | NLA_PUT_U16(msg, IEEE802154_ATTR_PAN_ID, | ||
276 | ieee802154_mlme_ops(dev)->get_pan_id(dev)); | ||
277 | wpan_phy_put(phy); | ||
278 | return genlmsg_end(msg, hdr); | ||
279 | |||
280 | nla_put_failure: | ||
281 | wpan_phy_put(phy); | ||
282 | genlmsg_cancel(msg, hdr); | ||
283 | out: | ||
284 | return -EMSGSIZE; | ||
285 | } | ||
286 | |||
287 | /* Requests from userspace */ | ||
288 | static struct net_device *ieee802154_nl_get_dev(struct genl_info *info) | ||
289 | { | ||
290 | struct net_device *dev; | ||
291 | |||
292 | if (info->attrs[IEEE802154_ATTR_DEV_NAME]) { | ||
293 | char name[IFNAMSIZ + 1]; | ||
294 | nla_strlcpy(name, info->attrs[IEEE802154_ATTR_DEV_NAME], | ||
295 | sizeof(name)); | ||
296 | dev = dev_get_by_name(&init_net, name); | ||
297 | } else if (info->attrs[IEEE802154_ATTR_DEV_INDEX]) | ||
298 | dev = dev_get_by_index(&init_net, | ||
299 | nla_get_u32(info->attrs[IEEE802154_ATTR_DEV_INDEX])); | ||
300 | else | ||
301 | return NULL; | ||
302 | |||
303 | if (!dev) | ||
304 | return NULL; | ||
305 | |||
306 | if (dev->type != ARPHRD_IEEE802154) { | ||
307 | dev_put(dev); | ||
308 | return NULL; | ||
309 | } | ||
310 | |||
311 | return dev; | ||
312 | } | ||
313 | |||
314 | static int ieee802154_associate_req(struct sk_buff *skb, | ||
315 | struct genl_info *info) | ||
316 | { | ||
317 | struct net_device *dev; | ||
318 | struct ieee802154_addr addr; | ||
319 | u8 page; | ||
320 | int ret = -EINVAL; | ||
321 | |||
322 | if (!info->attrs[IEEE802154_ATTR_CHANNEL] || | ||
323 | !info->attrs[IEEE802154_ATTR_COORD_PAN_ID] || | ||
324 | (!info->attrs[IEEE802154_ATTR_COORD_HW_ADDR] && | ||
325 | !info->attrs[IEEE802154_ATTR_COORD_SHORT_ADDR]) || | ||
326 | !info->attrs[IEEE802154_ATTR_CAPABILITY]) | ||
327 | return -EINVAL; | ||
328 | |||
329 | dev = ieee802154_nl_get_dev(info); | ||
330 | if (!dev) | ||
331 | return -ENODEV; | ||
332 | |||
333 | if (info->attrs[IEEE802154_ATTR_COORD_HW_ADDR]) { | ||
334 | addr.addr_type = IEEE802154_ADDR_LONG; | ||
335 | nla_memcpy(addr.hwaddr, | ||
336 | info->attrs[IEEE802154_ATTR_COORD_HW_ADDR], | ||
337 | IEEE802154_ADDR_LEN); | ||
338 | } else { | ||
339 | addr.addr_type = IEEE802154_ADDR_SHORT; | ||
340 | addr.short_addr = nla_get_u16( | ||
341 | info->attrs[IEEE802154_ATTR_COORD_SHORT_ADDR]); | ||
342 | } | ||
343 | addr.pan_id = nla_get_u16(info->attrs[IEEE802154_ATTR_COORD_PAN_ID]); | ||
344 | |||
345 | if (info->attrs[IEEE802154_ATTR_PAGE]) | ||
346 | page = nla_get_u8(info->attrs[IEEE802154_ATTR_PAGE]); | ||
347 | else | ||
348 | page = 0; | ||
349 | |||
350 | ret = ieee802154_mlme_ops(dev)->assoc_req(dev, &addr, | ||
351 | nla_get_u8(info->attrs[IEEE802154_ATTR_CHANNEL]), | ||
352 | page, | ||
353 | nla_get_u8(info->attrs[IEEE802154_ATTR_CAPABILITY])); | ||
354 | |||
355 | dev_put(dev); | ||
356 | return ret; | ||
357 | } | ||
358 | |||
359 | static int ieee802154_associate_resp(struct sk_buff *skb, | ||
360 | struct genl_info *info) | ||
361 | { | ||
362 | struct net_device *dev; | ||
363 | struct ieee802154_addr addr; | ||
364 | int ret = -EINVAL; | ||
365 | |||
366 | if (!info->attrs[IEEE802154_ATTR_STATUS] || | ||
367 | !info->attrs[IEEE802154_ATTR_DEST_HW_ADDR] || | ||
368 | !info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]) | ||
369 | return -EINVAL; | ||
370 | |||
371 | dev = ieee802154_nl_get_dev(info); | ||
372 | if (!dev) | ||
373 | return -ENODEV; | ||
374 | |||
375 | addr.addr_type = IEEE802154_ADDR_LONG; | ||
376 | nla_memcpy(addr.hwaddr, info->attrs[IEEE802154_ATTR_DEST_HW_ADDR], | ||
377 | IEEE802154_ADDR_LEN); | ||
378 | addr.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev); | ||
379 | |||
380 | |||
381 | ret = ieee802154_mlme_ops(dev)->assoc_resp(dev, &addr, | ||
382 | nla_get_u16(info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]), | ||
383 | nla_get_u8(info->attrs[IEEE802154_ATTR_STATUS])); | ||
384 | |||
385 | dev_put(dev); | ||
386 | return ret; | ||
387 | } | ||
388 | |||
389 | static int ieee802154_disassociate_req(struct sk_buff *skb, | ||
390 | struct genl_info *info) | ||
391 | { | ||
392 | struct net_device *dev; | ||
393 | struct ieee802154_addr addr; | ||
394 | int ret = -EINVAL; | ||
395 | |||
396 | if ((!info->attrs[IEEE802154_ATTR_DEST_HW_ADDR] && | ||
397 | !info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]) || | ||
398 | !info->attrs[IEEE802154_ATTR_REASON]) | ||
399 | return -EINVAL; | ||
400 | |||
401 | dev = ieee802154_nl_get_dev(info); | ||
402 | if (!dev) | ||
403 | return -ENODEV; | ||
404 | |||
405 | if (info->attrs[IEEE802154_ATTR_DEST_HW_ADDR]) { | ||
406 | addr.addr_type = IEEE802154_ADDR_LONG; | ||
407 | nla_memcpy(addr.hwaddr, | ||
408 | info->attrs[IEEE802154_ATTR_DEST_HW_ADDR], | ||
409 | IEEE802154_ADDR_LEN); | ||
410 | } else { | ||
411 | addr.addr_type = IEEE802154_ADDR_SHORT; | ||
412 | addr.short_addr = nla_get_u16( | ||
413 | info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]); | ||
414 | } | ||
415 | addr.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev); | ||
416 | |||
417 | ret = ieee802154_mlme_ops(dev)->disassoc_req(dev, &addr, | ||
418 | nla_get_u8(info->attrs[IEEE802154_ATTR_REASON])); | ||
419 | |||
420 | dev_put(dev); | ||
421 | return ret; | ||
422 | } | ||
423 | |||
424 | /* | ||
425 | * PANid, channel, beacon_order = 15, superframe_order = 15, | ||
426 | * PAN_coordinator, battery_life_extension = 0, | ||
427 | * coord_realignment = 0, security_enable = 0 | ||
428 | */ | ||
429 | static int ieee802154_start_req(struct sk_buff *skb, struct genl_info *info) | ||
430 | { | ||
431 | struct net_device *dev; | ||
432 | struct ieee802154_addr addr; | ||
433 | |||
434 | u8 channel, bcn_ord, sf_ord; | ||
435 | u8 page; | ||
436 | int pan_coord, blx, coord_realign; | ||
437 | int ret; | ||
438 | |||
439 | if (!info->attrs[IEEE802154_ATTR_COORD_PAN_ID] || | ||
440 | !info->attrs[IEEE802154_ATTR_COORD_SHORT_ADDR] || | ||
441 | !info->attrs[IEEE802154_ATTR_CHANNEL] || | ||
442 | !info->attrs[IEEE802154_ATTR_BCN_ORD] || | ||
443 | !info->attrs[IEEE802154_ATTR_SF_ORD] || | ||
444 | !info->attrs[IEEE802154_ATTR_PAN_COORD] || | ||
445 | !info->attrs[IEEE802154_ATTR_BAT_EXT] || | ||
446 | !info->attrs[IEEE802154_ATTR_COORD_REALIGN] | ||
447 | ) | ||
448 | return -EINVAL; | ||
449 | |||
450 | dev = ieee802154_nl_get_dev(info); | ||
451 | if (!dev) | ||
452 | return -ENODEV; | ||
453 | |||
454 | addr.addr_type = IEEE802154_ADDR_SHORT; | ||
455 | addr.short_addr = nla_get_u16( | ||
456 | info->attrs[IEEE802154_ATTR_COORD_SHORT_ADDR]); | ||
457 | addr.pan_id = nla_get_u16(info->attrs[IEEE802154_ATTR_COORD_PAN_ID]); | ||
458 | |||
459 | channel = nla_get_u8(info->attrs[IEEE802154_ATTR_CHANNEL]); | ||
460 | bcn_ord = nla_get_u8(info->attrs[IEEE802154_ATTR_BCN_ORD]); | ||
461 | sf_ord = nla_get_u8(info->attrs[IEEE802154_ATTR_SF_ORD]); | ||
462 | pan_coord = nla_get_u8(info->attrs[IEEE802154_ATTR_PAN_COORD]); | ||
463 | blx = nla_get_u8(info->attrs[IEEE802154_ATTR_BAT_EXT]); | ||
464 | coord_realign = nla_get_u8(info->attrs[IEEE802154_ATTR_COORD_REALIGN]); | ||
465 | |||
466 | if (info->attrs[IEEE802154_ATTR_PAGE]) | ||
467 | page = nla_get_u8(info->attrs[IEEE802154_ATTR_PAGE]); | ||
468 | else | ||
469 | page = 0; | ||
470 | |||
471 | |||
472 | if (addr.short_addr == IEEE802154_ADDR_BROADCAST) { | ||
473 | ieee802154_nl_start_confirm(dev, IEEE802154_NO_SHORT_ADDRESS); | ||
474 | dev_put(dev); | ||
475 | return -EINVAL; | ||
476 | } | ||
477 | |||
478 | ret = ieee802154_mlme_ops(dev)->start_req(dev, &addr, channel, page, | ||
479 | bcn_ord, sf_ord, pan_coord, blx, coord_realign); | ||
480 | |||
481 | dev_put(dev); | ||
482 | return ret; | ||
483 | } | ||
484 | |||
485 | static int ieee802154_scan_req(struct sk_buff *skb, struct genl_info *info) | ||
486 | { | ||
487 | struct net_device *dev; | ||
488 | int ret; | ||
489 | u8 type; | ||
490 | u32 channels; | ||
491 | u8 duration; | ||
492 | u8 page; | ||
493 | |||
494 | if (!info->attrs[IEEE802154_ATTR_SCAN_TYPE] || | ||
495 | !info->attrs[IEEE802154_ATTR_CHANNELS] || | ||
496 | !info->attrs[IEEE802154_ATTR_DURATION]) | ||
497 | return -EINVAL; | ||
498 | |||
499 | dev = ieee802154_nl_get_dev(info); | ||
500 | if (!dev) | ||
501 | return -ENODEV; | ||
502 | |||
503 | type = nla_get_u8(info->attrs[IEEE802154_ATTR_SCAN_TYPE]); | ||
504 | channels = nla_get_u32(info->attrs[IEEE802154_ATTR_CHANNELS]); | ||
505 | duration = nla_get_u8(info->attrs[IEEE802154_ATTR_DURATION]); | ||
506 | |||
507 | if (info->attrs[IEEE802154_ATTR_PAGE]) | ||
508 | page = nla_get_u8(info->attrs[IEEE802154_ATTR_PAGE]); | ||
509 | else | ||
510 | page = 0; | ||
511 | |||
512 | |||
513 | ret = ieee802154_mlme_ops(dev)->scan_req(dev, type, channels, page, | ||
514 | duration); | ||
515 | |||
516 | dev_put(dev); | ||
517 | return ret; | ||
518 | } | ||
519 | |||
520 | static int ieee802154_list_iface(struct sk_buff *skb, | ||
521 | struct genl_info *info) | ||
522 | { | ||
523 | /* Request for interface name, index, type, IEEE address, | ||
524 | PAN Id, short address */ | ||
525 | struct sk_buff *msg; | ||
526 | struct net_device *dev = NULL; | ||
527 | int rc = -ENOBUFS; | ||
528 | |||
529 | pr_debug("%s\n", __func__); | ||
530 | |||
531 | dev = ieee802154_nl_get_dev(info); | ||
532 | if (!dev) | ||
533 | return -ENODEV; | ||
534 | |||
535 | msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); | ||
536 | if (!msg) | ||
537 | goto out_dev; | ||
538 | |||
539 | rc = ieee802154_nl_fill_iface(msg, info->snd_pid, info->snd_seq, | ||
540 | 0, dev); | ||
541 | if (rc < 0) | ||
542 | goto out_free; | ||
543 | |||
544 | dev_put(dev); | ||
545 | |||
546 | return genlmsg_reply(msg, info); | ||
547 | out_free: | ||
548 | nlmsg_free(msg); | ||
549 | out_dev: | ||
550 | dev_put(dev); | ||
551 | return rc; | ||
552 | |||
553 | } | ||
554 | |||
555 | static int ieee802154_dump_iface(struct sk_buff *skb, | ||
556 | struct netlink_callback *cb) | ||
557 | { | ||
558 | struct net *net = sock_net(skb->sk); | ||
559 | struct net_device *dev; | ||
560 | int idx; | ||
561 | int s_idx = cb->args[0]; | ||
562 | |||
563 | pr_debug("%s\n", __func__); | ||
564 | |||
565 | idx = 0; | ||
566 | for_each_netdev(net, dev) { | ||
567 | if (idx < s_idx || (dev->type != ARPHRD_IEEE802154)) | ||
568 | goto cont; | ||
569 | |||
570 | if (ieee802154_nl_fill_iface(skb, NETLINK_CB(cb->skb).pid, | ||
571 | cb->nlh->nlmsg_seq, NLM_F_MULTI, dev) < 0) | ||
572 | break; | ||
573 | cont: | ||
574 | idx++; | ||
575 | } | ||
576 | cb->args[0] = idx; | ||
577 | |||
578 | return skb->len; | ||
579 | } | ||
580 | |||
581 | static struct genl_ops ieee802154_coordinator_ops[] = { | ||
582 | IEEE802154_OP(IEEE802154_ASSOCIATE_REQ, ieee802154_associate_req), | ||
583 | IEEE802154_OP(IEEE802154_ASSOCIATE_RESP, ieee802154_associate_resp), | ||
584 | IEEE802154_OP(IEEE802154_DISASSOCIATE_REQ, ieee802154_disassociate_req), | ||
585 | IEEE802154_OP(IEEE802154_SCAN_REQ, ieee802154_scan_req), | ||
586 | IEEE802154_OP(IEEE802154_START_REQ, ieee802154_start_req), | ||
587 | IEEE802154_DUMP(IEEE802154_LIST_IFACE, ieee802154_list_iface, | ||
588 | ieee802154_dump_iface), | ||
589 | }; | ||
590 | |||
591 | /* | ||
592 | * No need to unregister as family unregistration will do it. | ||
593 | */ | ||
594 | int nl802154_mac_register(void) | ||
595 | { | ||
596 | int i; | ||
597 | int rc; | ||
598 | |||
599 | rc = genl_register_mc_group(&nl802154_family, | ||
600 | &ieee802154_coord_mcgrp); | ||
601 | if (rc) | ||
602 | return rc; | ||
603 | |||
604 | rc = genl_register_mc_group(&nl802154_family, | ||
605 | &ieee802154_beacon_mcgrp); | ||
606 | if (rc) | ||
607 | return rc; | ||
608 | |||
609 | for (i = 0; i < ARRAY_SIZE(ieee802154_coordinator_ops); i++) { | ||
610 | rc = genl_register_ops(&nl802154_family, | ||
611 | &ieee802154_coordinator_ops[i]); | ||
612 | if (rc) | ||
613 | return rc; | ||
614 | } | ||
615 | |||
616 | return 0; | ||
617 | } | ||
diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c new file mode 100644 index 000000000000..199a2d9d12f9 --- /dev/null +++ b/net/ieee802154/nl-phy.c | |||
@@ -0,0 +1,344 @@ | |||
1 | /* | ||
2 | * Netlink interface for IEEE 802.15.4 stack | ||
3 | * | ||
4 | * Copyright 2007, 2008 Siemens AG | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 | ||
8 | * as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License along | ||
16 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
17 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | * | ||
19 | * Written by: | ||
20 | * Sergey Lapin <slapin@ossfans.org> | ||
21 | * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> | ||
22 | * Maxim Osipov <maxim.osipov@siemens.com> | ||
23 | */ | ||
24 | |||
25 | #include <linux/kernel.h> | ||
26 | #include <net/netlink.h> | ||
27 | #include <net/genetlink.h> | ||
28 | #include <net/wpan-phy.h> | ||
29 | #include <net/af_ieee802154.h> | ||
30 | #include <net/ieee802154_netdev.h> | ||
31 | #include <net/rtnetlink.h> /* for rtnl_{un,}lock */ | ||
32 | #include <linux/nl802154.h> | ||
33 | |||
34 | #include "ieee802154.h" | ||
35 | |||
36 | static int ieee802154_nl_fill_phy(struct sk_buff *msg, u32 pid, | ||
37 | u32 seq, int flags, struct wpan_phy *phy) | ||
38 | { | ||
39 | void *hdr; | ||
40 | int i, pages = 0; | ||
41 | uint32_t *buf = kzalloc(32 * sizeof(uint32_t), GFP_KERNEL); | ||
42 | |||
43 | pr_debug("%s\n", __func__); | ||
44 | |||
45 | if (!buf) | ||
46 | goto out; | ||
47 | |||
48 | hdr = genlmsg_put(msg, 0, seq, &nl802154_family, flags, | ||
49 | IEEE802154_LIST_PHY); | ||
50 | if (!hdr) | ||
51 | goto out; | ||
52 | |||
53 | mutex_lock(&phy->pib_lock); | ||
54 | NLA_PUT_STRING(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)); | ||
55 | |||
56 | NLA_PUT_U8(msg, IEEE802154_ATTR_PAGE, phy->current_page); | ||
57 | NLA_PUT_U8(msg, IEEE802154_ATTR_CHANNEL, phy->current_channel); | ||
58 | for (i = 0; i < 32; i++) { | ||
59 | if (phy->channels_supported[i]) | ||
60 | buf[pages++] = phy->channels_supported[i] | (i << 27); | ||
61 | } | ||
62 | if (pages) | ||
63 | NLA_PUT(msg, IEEE802154_ATTR_CHANNEL_PAGE_LIST, | ||
64 | pages * sizeof(uint32_t), buf); | ||
65 | |||
66 | mutex_unlock(&phy->pib_lock); | ||
67 | return genlmsg_end(msg, hdr); | ||
68 | |||
69 | nla_put_failure: | ||
70 | mutex_unlock(&phy->pib_lock); | ||
71 | genlmsg_cancel(msg, hdr); | ||
72 | out: | ||
73 | kfree(buf); | ||
74 | return -EMSGSIZE; | ||
75 | } | ||
76 | |||
77 | static int ieee802154_list_phy(struct sk_buff *skb, | ||
78 | struct genl_info *info) | ||
79 | { | ||
80 | /* Request for PHY name, current page and channel, | ||
81 | plus the list of supported channels/pages */ | ||
82 | struct sk_buff *msg; | ||
83 | struct wpan_phy *phy; | ||
84 | const char *name; | ||
85 | int rc = -ENOBUFS; | ||
86 | |||
87 | pr_debug("%s\n", __func__); | ||
88 | |||
89 | if (!info->attrs[IEEE802154_ATTR_PHY_NAME]) | ||
90 | return -EINVAL; | ||
91 | |||
92 | name = nla_data(info->attrs[IEEE802154_ATTR_PHY_NAME]); | ||
93 | if (name[nla_len(info->attrs[IEEE802154_ATTR_PHY_NAME]) - 1] != '\0') | ||
94 | return -EINVAL; /* phy name should be null-terminated */ | ||
95 | |||
96 | |||
97 | phy = wpan_phy_find(name); | ||
98 | if (!phy) | ||
99 | return -ENODEV; | ||
100 | |||
101 | msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); | ||
102 | if (!msg) | ||
103 | goto out_dev; | ||
104 | |||
105 | rc = ieee802154_nl_fill_phy(msg, info->snd_pid, info->snd_seq, | ||
106 | 0, phy); | ||
107 | if (rc < 0) | ||
108 | goto out_free; | ||
109 | |||
110 | wpan_phy_put(phy); | ||
111 | |||
112 | return genlmsg_reply(msg, info); | ||
113 | out_free: | ||
114 | nlmsg_free(msg); | ||
115 | out_dev: | ||
116 | wpan_phy_put(phy); | ||
117 | return rc; | ||
118 | |||
119 | } | ||
120 | |||
121 | struct dump_phy_data { | ||
122 | struct sk_buff *skb; | ||
123 | struct netlink_callback *cb; | ||
124 | int idx, s_idx; | ||
125 | }; | ||
126 | |||
127 | static int ieee802154_dump_phy_iter(struct wpan_phy *phy, void *_data) | ||
128 | { | ||
129 | int rc; | ||
130 | struct dump_phy_data *data = _data; | ||
131 | |||
132 | pr_debug("%s\n", __func__); | ||
133 | |||
134 | if (data->idx++ < data->s_idx) | ||
135 | return 0; | ||
136 | |||
137 | rc = ieee802154_nl_fill_phy(data->skb, | ||
138 | NETLINK_CB(data->cb->skb).pid, | ||
139 | data->cb->nlh->nlmsg_seq, | ||
140 | NLM_F_MULTI, | ||
141 | phy); | ||
142 | |||
143 | if (rc < 0) { | ||
144 | data->idx--; | ||
145 | return rc; | ||
146 | } | ||
147 | |||
148 | return 0; | ||
149 | } | ||
150 | |||
151 | static int ieee802154_dump_phy(struct sk_buff *skb, | ||
152 | struct netlink_callback *cb) | ||
153 | { | ||
154 | struct dump_phy_data data = { | ||
155 | .cb = cb, | ||
156 | .skb = skb, | ||
157 | .s_idx = cb->args[0], | ||
158 | .idx = 0, | ||
159 | }; | ||
160 | |||
161 | pr_debug("%s\n", __func__); | ||
162 | |||
163 | wpan_phy_for_each(ieee802154_dump_phy_iter, &data); | ||
164 | |||
165 | cb->args[0] = data.idx; | ||
166 | |||
167 | return skb->len; | ||
168 | } | ||
169 | |||
170 | static int ieee802154_add_iface(struct sk_buff *skb, | ||
171 | struct genl_info *info) | ||
172 | { | ||
173 | struct sk_buff *msg; | ||
174 | struct wpan_phy *phy; | ||
175 | const char *name; | ||
176 | const char *devname; | ||
177 | int rc = -ENOBUFS; | ||
178 | struct net_device *dev; | ||
179 | |||
180 | pr_debug("%s\n", __func__); | ||
181 | |||
182 | if (!info->attrs[IEEE802154_ATTR_PHY_NAME]) | ||
183 | return -EINVAL; | ||
184 | |||
185 | name = nla_data(info->attrs[IEEE802154_ATTR_PHY_NAME]); | ||
186 | if (name[nla_len(info->attrs[IEEE802154_ATTR_PHY_NAME]) - 1] != '\0') | ||
187 | return -EINVAL; /* phy name should be null-terminated */ | ||
188 | |||
189 | if (info->attrs[IEEE802154_ATTR_DEV_NAME]) { | ||
190 | devname = nla_data(info->attrs[IEEE802154_ATTR_DEV_NAME]); | ||
191 | if (devname[nla_len(info->attrs[IEEE802154_ATTR_DEV_NAME]) - 1] | ||
192 | != '\0') | ||
193 | return -EINVAL; /* dev name should be null-terminated */ | ||
194 | } else { | ||
195 | devname = "wpan%d"; | ||
196 | } | ||
197 | |||
198 | if (strlen(devname) >= IFNAMSIZ) | ||
199 | return -ENAMETOOLONG; | ||
200 | |||
201 | phy = wpan_phy_find(name); | ||
202 | if (!phy) | ||
203 | return -ENODEV; | ||
204 | |||
205 | msg = ieee802154_nl_new_reply(info, 0, IEEE802154_ADD_IFACE); | ||
206 | if (!msg) | ||
207 | goto out_dev; | ||
208 | |||
209 | if (!phy->add_iface) { | ||
210 | rc = -EINVAL; | ||
211 | goto nla_put_failure; | ||
212 | } | ||
213 | |||
214 | dev = phy->add_iface(phy, devname); | ||
215 | if (IS_ERR(dev)) { | ||
216 | rc = PTR_ERR(dev); | ||
217 | goto nla_put_failure; | ||
218 | } | ||
219 | |||
220 | NLA_PUT_STRING(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)); | ||
221 | NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); | ||
222 | |||
223 | dev_put(dev); | ||
224 | |||
225 | wpan_phy_put(phy); | ||
226 | |||
227 | return ieee802154_nl_reply(msg, info); | ||
228 | |||
229 | nla_put_failure: | ||
230 | nlmsg_free(msg); | ||
231 | out_dev: | ||
232 | wpan_phy_put(phy); | ||
233 | return rc; | ||
234 | } | ||
235 | |||
236 | static int ieee802154_del_iface(struct sk_buff *skb, | ||
237 | struct genl_info *info) | ||
238 | { | ||
239 | struct sk_buff *msg; | ||
240 | struct wpan_phy *phy; | ||
241 | const char *name; | ||
242 | int rc; | ||
243 | struct net_device *dev; | ||
244 | |||
245 | pr_debug("%s\n", __func__); | ||
246 | |||
247 | if (!info->attrs[IEEE802154_ATTR_DEV_NAME]) | ||
248 | return -EINVAL; | ||
249 | |||
250 | name = nla_data(info->attrs[IEEE802154_ATTR_DEV_NAME]); | ||
251 | if (name[nla_len(info->attrs[IEEE802154_ATTR_DEV_NAME]) - 1] != '\0') | ||
252 | return -EINVAL; /* name should be null-terminated */ | ||
253 | |||
254 | dev = dev_get_by_name(genl_info_net(info), name); | ||
255 | if (!dev) | ||
256 | return -ENODEV; | ||
257 | |||
258 | phy = ieee802154_mlme_ops(dev)->get_phy(dev); | ||
259 | BUG_ON(!phy); | ||
260 | |||
261 | rc = -EINVAL; | ||
262 | /* phy name is optional, but should be checked if it's given */ | ||
263 | if (info->attrs[IEEE802154_ATTR_PHY_NAME]) { | ||
264 | struct wpan_phy *phy2; | ||
265 | |||
266 | const char *pname = | ||
267 | nla_data(info->attrs[IEEE802154_ATTR_PHY_NAME]); | ||
268 | if (pname[nla_len(info->attrs[IEEE802154_ATTR_PHY_NAME]) - 1] | ||
269 | != '\0') | ||
270 | /* name should be null-terminated */ | ||
271 | goto out_dev; | ||
272 | |||
273 | phy2 = wpan_phy_find(pname); | ||
274 | if (!phy2) | ||
275 | goto out_dev; | ||
276 | |||
277 | if (phy != phy2) { | ||
278 | wpan_phy_put(phy2); | ||
279 | goto out_dev; | ||
280 | } | ||
281 | } | ||
282 | |||
283 | rc = -ENOBUFS; | ||
284 | |||
285 | msg = ieee802154_nl_new_reply(info, 0, IEEE802154_DEL_IFACE); | ||
286 | if (!msg) | ||
287 | goto out_dev; | ||
288 | |||
289 | if (!phy->del_iface) { | ||
290 | rc = -EINVAL; | ||
291 | goto nla_put_failure; | ||
292 | } | ||
293 | |||
294 | rtnl_lock(); | ||
295 | phy->del_iface(phy, dev); | ||
296 | |||
297 | /* We don't have the device anymore */ | ||
298 | dev_put(dev); | ||
299 | dev = NULL; | ||
300 | |||
301 | rtnl_unlock(); | ||
302 | |||
303 | |||
304 | NLA_PUT_STRING(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)); | ||
305 | NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, name); | ||
306 | |||
307 | wpan_phy_put(phy); | ||
308 | |||
309 | return ieee802154_nl_reply(msg, info); | ||
310 | |||
311 | nla_put_failure: | ||
312 | nlmsg_free(msg); | ||
313 | out_dev: | ||
314 | wpan_phy_put(phy); | ||
315 | if (dev) | ||
316 | dev_put(dev); | ||
317 | |||
318 | return rc; | ||
319 | } | ||
320 | |||
321 | static struct genl_ops ieee802154_phy_ops[] = { | ||
322 | IEEE802154_DUMP(IEEE802154_LIST_PHY, ieee802154_list_phy, | ||
323 | ieee802154_dump_phy), | ||
324 | IEEE802154_OP(IEEE802154_ADD_IFACE, ieee802154_add_iface), | ||
325 | IEEE802154_OP(IEEE802154_DEL_IFACE, ieee802154_del_iface), | ||
326 | }; | ||
327 | |||
328 | /* | ||
329 | * No need to unregister as family unregistration will do it. | ||
330 | */ | ||
331 | int nl802154_phy_register(void) | ||
332 | { | ||
333 | int i; | ||
334 | int rc; | ||
335 | |||
336 | for (i = 0; i < ARRAY_SIZE(ieee802154_phy_ops); i++) { | ||
337 | rc = genl_register_ops(&nl802154_family, | ||
338 | &ieee802154_phy_ops[i]); | ||
339 | if (rc) | ||
340 | return rc; | ||
341 | } | ||
342 | |||
343 | return 0; | ||
344 | } | ||
diff --git a/net/ieee802154/nl_policy.c b/net/ieee802154/nl_policy.c index 2363ebee02e7..6adda4d46f95 100644 --- a/net/ieee802154/nl_policy.c +++ b/net/ieee802154/nl_policy.c | |||
@@ -27,6 +27,7 @@ | |||
27 | const struct nla_policy ieee802154_policy[IEEE802154_ATTR_MAX + 1] = { | 27 | const struct nla_policy ieee802154_policy[IEEE802154_ATTR_MAX + 1] = { |
28 | [IEEE802154_ATTR_DEV_NAME] = { .type = NLA_STRING, }, | 28 | [IEEE802154_ATTR_DEV_NAME] = { .type = NLA_STRING, }, |
29 | [IEEE802154_ATTR_DEV_INDEX] = { .type = NLA_U32, }, | 29 | [IEEE802154_ATTR_DEV_INDEX] = { .type = NLA_U32, }, |
30 | [IEEE802154_ATTR_PHY_NAME] = { .type = NLA_STRING, }, | ||
30 | 31 | ||
31 | [IEEE802154_ATTR_STATUS] = { .type = NLA_U8, }, | 32 | [IEEE802154_ATTR_STATUS] = { .type = NLA_U8, }, |
32 | [IEEE802154_ATTR_SHORT_ADDR] = { .type = NLA_U16, }, | 33 | [IEEE802154_ATTR_SHORT_ADDR] = { .type = NLA_U16, }, |
@@ -50,5 +51,6 @@ const struct nla_policy ieee802154_policy[IEEE802154_ATTR_MAX + 1] = { | |||
50 | [IEEE802154_ATTR_CHANNELS] = { .type = NLA_U32, }, | 51 | [IEEE802154_ATTR_CHANNELS] = { .type = NLA_U32, }, |
51 | [IEEE802154_ATTR_DURATION] = { .type = NLA_U8, }, | 52 | [IEEE802154_ATTR_DURATION] = { .type = NLA_U8, }, |
52 | [IEEE802154_ATTR_ED_LIST] = { .len = 27 }, | 53 | [IEEE802154_ATTR_ED_LIST] = { .len = 27 }, |
54 | [IEEE802154_ATTR_CHANNEL_PAGE_LIST] = { .len = 32 * 4, }, | ||
53 | }; | 55 | }; |
54 | 56 | ||
diff --git a/net/ieee802154/raw.c b/net/ieee802154/raw.c index 30e74eee07d6..9c9b85c00033 100644 --- a/net/ieee802154/raw.c +++ b/net/ieee802154/raw.c | |||
@@ -191,7 +191,7 @@ static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
191 | if (err) | 191 | if (err) |
192 | goto done; | 192 | goto done; |
193 | 193 | ||
194 | sock_recv_timestamp(msg, sk, skb); | 194 | sock_recv_ts_and_drops(msg, sk, skb); |
195 | 195 | ||
196 | if (flags & MSG_TRUNC) | 196 | if (flags & MSG_TRUNC) |
197 | copied = skb->len; | 197 | copied = skb->len; |
@@ -206,7 +206,6 @@ out: | |||
206 | static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb) | 206 | static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb) |
207 | { | 207 | { |
208 | if (sock_queue_rcv_skb(sk, skb) < 0) { | 208 | if (sock_queue_rcv_skb(sk, skb) < 0) { |
209 | atomic_inc(&sk->sk_drops); | ||
210 | kfree_skb(skb); | 209 | kfree_skb(skb); |
211 | return NET_RX_DROP; | 210 | return NET_RX_DROP; |
212 | } | 211 | } |
diff --git a/net/ieee802154/wpan-class.c b/net/ieee802154/wpan-class.c index f306604da67a..268691256a6d 100644 --- a/net/ieee802154/wpan-class.c +++ b/net/ieee802154/wpan-class.c | |||
@@ -22,6 +22,8 @@ | |||
22 | 22 | ||
23 | #include <net/wpan-phy.h> | 23 | #include <net/wpan-phy.h> |
24 | 24 | ||
25 | #include "ieee802154.h" | ||
26 | |||
25 | #define MASTER_SHOW_COMPLEX(name, format_string, args...) \ | 27 | #define MASTER_SHOW_COMPLEX(name, format_string, args...) \ |
26 | static ssize_t name ## _show(struct device *dev, \ | 28 | static ssize_t name ## _show(struct device *dev, \ |
27 | struct device_attribute *attr, char *buf) \ | 29 | struct device_attribute *attr, char *buf) \ |
@@ -30,7 +32,7 @@ static ssize_t name ## _show(struct device *dev, \ | |||
30 | int ret; \ | 32 | int ret; \ |
31 | \ | 33 | \ |
32 | mutex_lock(&phy->pib_lock); \ | 34 | mutex_lock(&phy->pib_lock); \ |
33 | ret = sprintf(buf, format_string "\n", args); \ | 35 | ret = snprintf(buf, PAGE_SIZE, format_string "\n", args); \ |
34 | mutex_unlock(&phy->pib_lock); \ | 36 | mutex_unlock(&phy->pib_lock); \ |
35 | return ret; \ | 37 | return ret; \ |
36 | } | 38 | } |
@@ -40,12 +42,30 @@ static ssize_t name ## _show(struct device *dev, \ | |||
40 | 42 | ||
41 | MASTER_SHOW(current_channel, "%d"); | 43 | MASTER_SHOW(current_channel, "%d"); |
42 | MASTER_SHOW(current_page, "%d"); | 44 | MASTER_SHOW(current_page, "%d"); |
43 | MASTER_SHOW(channels_supported, "%#x"); | ||
44 | MASTER_SHOW_COMPLEX(transmit_power, "%d +- %d dB", | 45 | MASTER_SHOW_COMPLEX(transmit_power, "%d +- %d dB", |
45 | ((signed char) (phy->transmit_power << 2)) >> 2, | 46 | ((signed char) (phy->transmit_power << 2)) >> 2, |
46 | (phy->transmit_power >> 6) ? (phy->transmit_power >> 6) * 3 : 1 ); | 47 | (phy->transmit_power >> 6) ? (phy->transmit_power >> 6) * 3 : 1 ); |
47 | MASTER_SHOW(cca_mode, "%d"); | 48 | MASTER_SHOW(cca_mode, "%d"); |
48 | 49 | ||
50 | static ssize_t channels_supported_show(struct device *dev, | ||
51 | struct device_attribute *attr, char *buf) | ||
52 | { | ||
53 | struct wpan_phy *phy = container_of(dev, struct wpan_phy, dev); | ||
54 | int ret; | ||
55 | int i, len = 0; | ||
56 | |||
57 | mutex_lock(&phy->pib_lock); | ||
58 | for (i = 0; i < 32; i++) { | ||
59 | ret = snprintf(buf + len, PAGE_SIZE - len, | ||
60 | "%#09x\n", phy->channels_supported[i]); | ||
61 | if (ret < 0) | ||
62 | break; | ||
63 | len += ret; | ||
64 | } | ||
65 | mutex_unlock(&phy->pib_lock); | ||
66 | return len; | ||
67 | } | ||
68 | |||
49 | static struct device_attribute pmib_attrs[] = { | 69 | static struct device_attribute pmib_attrs[] = { |
50 | __ATTR_RO(current_channel), | 70 | __ATTR_RO(current_channel), |
51 | __ATTR_RO(current_page), | 71 | __ATTR_RO(current_page), |
@@ -91,6 +111,31 @@ struct wpan_phy *wpan_phy_find(const char *str) | |||
91 | } | 111 | } |
92 | EXPORT_SYMBOL(wpan_phy_find); | 112 | EXPORT_SYMBOL(wpan_phy_find); |
93 | 113 | ||
114 | struct wpan_phy_iter_data { | ||
115 | int (*fn)(struct wpan_phy *phy, void *data); | ||
116 | void *data; | ||
117 | }; | ||
118 | |||
119 | static int wpan_phy_iter(struct device *dev, void *_data) | ||
120 | { | ||
121 | struct wpan_phy_iter_data *wpid = _data; | ||
122 | struct wpan_phy *phy = container_of(dev, struct wpan_phy, dev); | ||
123 | return wpid->fn(phy, wpid->data); | ||
124 | } | ||
125 | |||
126 | int wpan_phy_for_each(int (*fn)(struct wpan_phy *phy, void *data), | ||
127 | void *data) | ||
128 | { | ||
129 | struct wpan_phy_iter_data wpid = { | ||
130 | .fn = fn, | ||
131 | .data = data, | ||
132 | }; | ||
133 | |||
134 | return class_for_each_device(&wpan_phy_class, NULL, | ||
135 | &wpid, wpan_phy_iter); | ||
136 | } | ||
137 | EXPORT_SYMBOL(wpan_phy_for_each); | ||
138 | |||
94 | static int wpan_phy_idx_valid(int idx) | 139 | static int wpan_phy_idx_valid(int idx) |
95 | { | 140 | { |
96 | return idx >= 0; | 141 | return idx >= 0; |
@@ -118,14 +163,15 @@ struct wpan_phy *wpan_phy_alloc(size_t priv_size) | |||
118 | 163 | ||
119 | phy->dev.class = &wpan_phy_class; | 164 | phy->dev.class = &wpan_phy_class; |
120 | 165 | ||
166 | phy->current_channel = -1; /* not initialised */ | ||
167 | phy->current_page = 0; /* for compatibility */ | ||
168 | |||
121 | return phy; | 169 | return phy; |
122 | } | 170 | } |
123 | EXPORT_SYMBOL(wpan_phy_alloc); | 171 | EXPORT_SYMBOL(wpan_phy_alloc); |
124 | 172 | ||
125 | int wpan_phy_register(struct device *parent, struct wpan_phy *phy) | 173 | int wpan_phy_register(struct wpan_phy *phy) |
126 | { | 174 | { |
127 | phy->dev.parent = parent; | ||
128 | |||
129 | return device_add(&phy->dev); | 175 | return device_add(&phy->dev); |
130 | } | 176 | } |
131 | EXPORT_SYMBOL(wpan_phy_register); | 177 | EXPORT_SYMBOL(wpan_phy_register); |
@@ -144,16 +190,31 @@ EXPORT_SYMBOL(wpan_phy_free); | |||
144 | 190 | ||
145 | static int __init wpan_phy_class_init(void) | 191 | static int __init wpan_phy_class_init(void) |
146 | { | 192 | { |
147 | return class_register(&wpan_phy_class); | 193 | int rc; |
194 | rc = class_register(&wpan_phy_class); | ||
195 | if (rc) | ||
196 | goto err; | ||
197 | |||
198 | rc = ieee802154_nl_init(); | ||
199 | if (rc) | ||
200 | goto err_nl; | ||
201 | |||
202 | return 0; | ||
203 | err_nl: | ||
204 | class_unregister(&wpan_phy_class); | ||
205 | err: | ||
206 | return rc; | ||
148 | } | 207 | } |
149 | subsys_initcall(wpan_phy_class_init); | 208 | subsys_initcall(wpan_phy_class_init); |
150 | 209 | ||
151 | static void __exit wpan_phy_class_exit(void) | 210 | static void __exit wpan_phy_class_exit(void) |
152 | { | 211 | { |
212 | ieee802154_nl_exit(); | ||
153 | class_unregister(&wpan_phy_class); | 213 | class_unregister(&wpan_phy_class); |
154 | } | 214 | } |
155 | module_exit(wpan_phy_class_exit); | 215 | module_exit(wpan_phy_class_exit); |
156 | 216 | ||
157 | MODULE_DESCRIPTION("IEEE 802.15.4 device class"); | ||
158 | MODULE_LICENSE("GPL v2"); | 217 | MODULE_LICENSE("GPL v2"); |
218 | MODULE_DESCRIPTION("IEEE 802.15.4 configuration interface"); | ||
219 | MODULE_AUTHOR("Dmitry Eremin-Solenikov"); | ||
159 | 220 | ||
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 57737b8d1711..7d12c6a9b19b 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c | |||
@@ -174,12 +174,12 @@ static int inet_autobind(struct sock *sk) | |||
174 | /* We may need to bind the socket. */ | 174 | /* We may need to bind the socket. */ |
175 | lock_sock(sk); | 175 | lock_sock(sk); |
176 | inet = inet_sk(sk); | 176 | inet = inet_sk(sk); |
177 | if (!inet->num) { | 177 | if (!inet->inet_num) { |
178 | if (sk->sk_prot->get_port(sk, 0)) { | 178 | if (sk->sk_prot->get_port(sk, 0)) { |
179 | release_sock(sk); | 179 | release_sock(sk); |
180 | return -EAGAIN; | 180 | return -EAGAIN; |
181 | } | 181 | } |
182 | inet->sport = htons(inet->num); | 182 | inet->inet_sport = htons(inet->inet_num); |
183 | } | 183 | } |
184 | release_sock(sk); | 184 | release_sock(sk); |
185 | return 0; | 185 | return 0; |
@@ -262,7 +262,8 @@ static inline int inet_netns_ok(struct net *net, int protocol) | |||
262 | * Create an inet socket. | 262 | * Create an inet socket. |
263 | */ | 263 | */ |
264 | 264 | ||
265 | static int inet_create(struct net *net, struct socket *sock, int protocol) | 265 | static int inet_create(struct net *net, struct socket *sock, int protocol, |
266 | int kern) | ||
266 | { | 267 | { |
267 | struct sock *sk; | 268 | struct sock *sk; |
268 | struct inet_protosw *answer; | 269 | struct inet_protosw *answer; |
@@ -325,7 +326,7 @@ lookup_protocol: | |||
325 | } | 326 | } |
326 | 327 | ||
327 | err = -EPERM; | 328 | err = -EPERM; |
328 | if (answer->capability > 0 && !capable(answer->capability)) | 329 | if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW)) |
329 | goto out_rcu_unlock; | 330 | goto out_rcu_unlock; |
330 | 331 | ||
331 | err = -EAFNOSUPPORT; | 332 | err = -EAFNOSUPPORT; |
@@ -354,7 +355,7 @@ lookup_protocol: | |||
354 | inet->is_icsk = (INET_PROTOSW_ICSK & answer_flags) != 0; | 355 | inet->is_icsk = (INET_PROTOSW_ICSK & answer_flags) != 0; |
355 | 356 | ||
356 | if (SOCK_RAW == sock->type) { | 357 | if (SOCK_RAW == sock->type) { |
357 | inet->num = protocol; | 358 | inet->inet_num = protocol; |
358 | if (IPPROTO_RAW == protocol) | 359 | if (IPPROTO_RAW == protocol) |
359 | inet->hdrincl = 1; | 360 | inet->hdrincl = 1; |
360 | } | 361 | } |
@@ -364,7 +365,7 @@ lookup_protocol: | |||
364 | else | 365 | else |
365 | inet->pmtudisc = IP_PMTUDISC_WANT; | 366 | inet->pmtudisc = IP_PMTUDISC_WANT; |
366 | 367 | ||
367 | inet->id = 0; | 368 | inet->inet_id = 0; |
368 | 369 | ||
369 | sock_init_data(sock, sk); | 370 | sock_init_data(sock, sk); |
370 | 371 | ||
@@ -381,13 +382,13 @@ lookup_protocol: | |||
381 | 382 | ||
382 | sk_refcnt_debug_inc(sk); | 383 | sk_refcnt_debug_inc(sk); |
383 | 384 | ||
384 | if (inet->num) { | 385 | if (inet->inet_num) { |
385 | /* It assumes that any protocol which allows | 386 | /* It assumes that any protocol which allows |
386 | * the user to assign a number at socket | 387 | * the user to assign a number at socket |
387 | * creation time automatically | 388 | * creation time automatically |
388 | * shares. | 389 | * shares. |
389 | */ | 390 | */ |
390 | inet->sport = htons(inet->num); | 391 | inet->inet_sport = htons(inet->inet_num); |
391 | /* Add to protocol hash chains. */ | 392 | /* Add to protocol hash chains. */ |
392 | sk->sk_prot->hash(sk); | 393 | sk->sk_prot->hash(sk); |
393 | } | 394 | } |
@@ -494,27 +495,27 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |||
494 | 495 | ||
495 | /* Check these errors (active socket, double bind). */ | 496 | /* Check these errors (active socket, double bind). */ |
496 | err = -EINVAL; | 497 | err = -EINVAL; |
497 | if (sk->sk_state != TCP_CLOSE || inet->num) | 498 | if (sk->sk_state != TCP_CLOSE || inet->inet_num) |
498 | goto out_release_sock; | 499 | goto out_release_sock; |
499 | 500 | ||
500 | inet->rcv_saddr = inet->saddr = addr->sin_addr.s_addr; | 501 | inet->inet_rcv_saddr = inet->inet_saddr = addr->sin_addr.s_addr; |
501 | if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST) | 502 | if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST) |
502 | inet->saddr = 0; /* Use device */ | 503 | inet->inet_saddr = 0; /* Use device */ |
503 | 504 | ||
504 | /* Make sure we are allowed to bind here. */ | 505 | /* Make sure we are allowed to bind here. */ |
505 | if (sk->sk_prot->get_port(sk, snum)) { | 506 | if (sk->sk_prot->get_port(sk, snum)) { |
506 | inet->saddr = inet->rcv_saddr = 0; | 507 | inet->inet_saddr = inet->inet_rcv_saddr = 0; |
507 | err = -EADDRINUSE; | 508 | err = -EADDRINUSE; |
508 | goto out_release_sock; | 509 | goto out_release_sock; |
509 | } | 510 | } |
510 | 511 | ||
511 | if (inet->rcv_saddr) | 512 | if (inet->inet_rcv_saddr) |
512 | sk->sk_userlocks |= SOCK_BINDADDR_LOCK; | 513 | sk->sk_userlocks |= SOCK_BINDADDR_LOCK; |
513 | if (snum) | 514 | if (snum) |
514 | sk->sk_userlocks |= SOCK_BINDPORT_LOCK; | 515 | sk->sk_userlocks |= SOCK_BINDPORT_LOCK; |
515 | inet->sport = htons(inet->num); | 516 | inet->inet_sport = htons(inet->inet_num); |
516 | inet->daddr = 0; | 517 | inet->inet_daddr = 0; |
517 | inet->dport = 0; | 518 | inet->inet_dport = 0; |
518 | sk_dst_reset(sk); | 519 | sk_dst_reset(sk); |
519 | err = 0; | 520 | err = 0; |
520 | out_release_sock: | 521 | out_release_sock: |
@@ -532,7 +533,7 @@ int inet_dgram_connect(struct socket *sock, struct sockaddr * uaddr, | |||
532 | if (uaddr->sa_family == AF_UNSPEC) | 533 | if (uaddr->sa_family == AF_UNSPEC) |
533 | return sk->sk_prot->disconnect(sk, flags); | 534 | return sk->sk_prot->disconnect(sk, flags); |
534 | 535 | ||
535 | if (!inet_sk(sk)->num && inet_autobind(sk)) | 536 | if (!inet_sk(sk)->inet_num && inet_autobind(sk)) |
536 | return -EAGAIN; | 537 | return -EAGAIN; |
537 | return sk->sk_prot->connect(sk, (struct sockaddr *)uaddr, addr_len); | 538 | return sk->sk_prot->connect(sk, (struct sockaddr *)uaddr, addr_len); |
538 | } | 539 | } |
@@ -685,21 +686,21 @@ int inet_getname(struct socket *sock, struct sockaddr *uaddr, | |||
685 | { | 686 | { |
686 | struct sock *sk = sock->sk; | 687 | struct sock *sk = sock->sk; |
687 | struct inet_sock *inet = inet_sk(sk); | 688 | struct inet_sock *inet = inet_sk(sk); |
688 | struct sockaddr_in *sin = (struct sockaddr_in *)uaddr; | 689 | DECLARE_SOCKADDR(struct sockaddr_in *, sin, uaddr); |
689 | 690 | ||
690 | sin->sin_family = AF_INET; | 691 | sin->sin_family = AF_INET; |
691 | if (peer) { | 692 | if (peer) { |
692 | if (!inet->dport || | 693 | if (!inet->inet_dport || |
693 | (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)) && | 694 | (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)) && |
694 | peer == 1)) | 695 | peer == 1)) |
695 | return -ENOTCONN; | 696 | return -ENOTCONN; |
696 | sin->sin_port = inet->dport; | 697 | sin->sin_port = inet->inet_dport; |
697 | sin->sin_addr.s_addr = inet->daddr; | 698 | sin->sin_addr.s_addr = inet->inet_daddr; |
698 | } else { | 699 | } else { |
699 | __be32 addr = inet->rcv_saddr; | 700 | __be32 addr = inet->inet_rcv_saddr; |
700 | if (!addr) | 701 | if (!addr) |
701 | addr = inet->saddr; | 702 | addr = inet->inet_saddr; |
702 | sin->sin_port = inet->sport; | 703 | sin->sin_port = inet->inet_sport; |
703 | sin->sin_addr.s_addr = addr; | 704 | sin->sin_addr.s_addr = addr; |
704 | } | 705 | } |
705 | memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); | 706 | memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); |
@@ -714,7 +715,7 @@ int inet_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, | |||
714 | struct sock *sk = sock->sk; | 715 | struct sock *sk = sock->sk; |
715 | 716 | ||
716 | /* We may need to bind the socket. */ | 717 | /* We may need to bind the socket. */ |
717 | if (!inet_sk(sk)->num && inet_autobind(sk)) | 718 | if (!inet_sk(sk)->inet_num && inet_autobind(sk)) |
718 | return -EAGAIN; | 719 | return -EAGAIN; |
719 | 720 | ||
720 | return sk->sk_prot->sendmsg(iocb, sk, msg, size); | 721 | return sk->sk_prot->sendmsg(iocb, sk, msg, size); |
@@ -728,7 +729,7 @@ static ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset, | |||
728 | struct sock *sk = sock->sk; | 729 | struct sock *sk = sock->sk; |
729 | 730 | ||
730 | /* We may need to bind the socket. */ | 731 | /* We may need to bind the socket. */ |
731 | if (!inet_sk(sk)->num && inet_autobind(sk)) | 732 | if (!inet_sk(sk)->inet_num && inet_autobind(sk)) |
732 | return -EAGAIN; | 733 | return -EAGAIN; |
733 | 734 | ||
734 | if (sk->sk_prot->sendpage) | 735 | if (sk->sk_prot->sendpage) |
@@ -931,7 +932,7 @@ static const struct proto_ops inet_sockraw_ops = { | |||
931 | #endif | 932 | #endif |
932 | }; | 933 | }; |
933 | 934 | ||
934 | static struct net_proto_family inet_family_ops = { | 935 | static const struct net_proto_family inet_family_ops = { |
935 | .family = PF_INET, | 936 | .family = PF_INET, |
936 | .create = inet_create, | 937 | .create = inet_create, |
937 | .owner = THIS_MODULE, | 938 | .owner = THIS_MODULE, |
@@ -947,7 +948,6 @@ static struct inet_protosw inetsw_array[] = | |||
947 | .protocol = IPPROTO_TCP, | 948 | .protocol = IPPROTO_TCP, |
948 | .prot = &tcp_prot, | 949 | .prot = &tcp_prot, |
949 | .ops = &inet_stream_ops, | 950 | .ops = &inet_stream_ops, |
950 | .capability = -1, | ||
951 | .no_check = 0, | 951 | .no_check = 0, |
952 | .flags = INET_PROTOSW_PERMANENT | | 952 | .flags = INET_PROTOSW_PERMANENT | |
953 | INET_PROTOSW_ICSK, | 953 | INET_PROTOSW_ICSK, |
@@ -958,7 +958,6 @@ static struct inet_protosw inetsw_array[] = | |||
958 | .protocol = IPPROTO_UDP, | 958 | .protocol = IPPROTO_UDP, |
959 | .prot = &udp_prot, | 959 | .prot = &udp_prot, |
960 | .ops = &inet_dgram_ops, | 960 | .ops = &inet_dgram_ops, |
961 | .capability = -1, | ||
962 | .no_check = UDP_CSUM_DEFAULT, | 961 | .no_check = UDP_CSUM_DEFAULT, |
963 | .flags = INET_PROTOSW_PERMANENT, | 962 | .flags = INET_PROTOSW_PERMANENT, |
964 | }, | 963 | }, |
@@ -969,7 +968,6 @@ static struct inet_protosw inetsw_array[] = | |||
969 | .protocol = IPPROTO_IP, /* wild card */ | 968 | .protocol = IPPROTO_IP, /* wild card */ |
970 | .prot = &raw_prot, | 969 | .prot = &raw_prot, |
971 | .ops = &inet_sockraw_ops, | 970 | .ops = &inet_sockraw_ops, |
972 | .capability = CAP_NET_RAW, | ||
973 | .no_check = UDP_CSUM_DEFAULT, | 971 | .no_check = UDP_CSUM_DEFAULT, |
974 | .flags = INET_PROTOSW_REUSE, | 972 | .flags = INET_PROTOSW_REUSE, |
975 | } | 973 | } |
@@ -1059,9 +1057,9 @@ static int inet_sk_reselect_saddr(struct sock *sk) | |||
1059 | struct inet_sock *inet = inet_sk(sk); | 1057 | struct inet_sock *inet = inet_sk(sk); |
1060 | int err; | 1058 | int err; |
1061 | struct rtable *rt; | 1059 | struct rtable *rt; |
1062 | __be32 old_saddr = inet->saddr; | 1060 | __be32 old_saddr = inet->inet_saddr; |
1063 | __be32 new_saddr; | 1061 | __be32 new_saddr; |
1064 | __be32 daddr = inet->daddr; | 1062 | __be32 daddr = inet->inet_daddr; |
1065 | 1063 | ||
1066 | if (inet->opt && inet->opt->srr) | 1064 | if (inet->opt && inet->opt->srr) |
1067 | daddr = inet->opt->faddr; | 1065 | daddr = inet->opt->faddr; |
@@ -1071,7 +1069,7 @@ static int inet_sk_reselect_saddr(struct sock *sk) | |||
1071 | RT_CONN_FLAGS(sk), | 1069 | RT_CONN_FLAGS(sk), |
1072 | sk->sk_bound_dev_if, | 1070 | sk->sk_bound_dev_if, |
1073 | sk->sk_protocol, | 1071 | sk->sk_protocol, |
1074 | inet->sport, inet->dport, sk, 0); | 1072 | inet->inet_sport, inet->inet_dport, sk, 0); |
1075 | if (err) | 1073 | if (err) |
1076 | return err; | 1074 | return err; |
1077 | 1075 | ||
@@ -1087,7 +1085,7 @@ static int inet_sk_reselect_saddr(struct sock *sk) | |||
1087 | __func__, &old_saddr, &new_saddr); | 1085 | __func__, &old_saddr, &new_saddr); |
1088 | } | 1086 | } |
1089 | 1087 | ||
1090 | inet->saddr = inet->rcv_saddr = new_saddr; | 1088 | inet->inet_saddr = inet->inet_rcv_saddr = new_saddr; |
1091 | 1089 | ||
1092 | /* | 1090 | /* |
1093 | * XXX The only one ugly spot where we need to | 1091 | * XXX The only one ugly spot where we need to |
@@ -1113,7 +1111,7 @@ int inet_sk_rebuild_header(struct sock *sk) | |||
1113 | return 0; | 1111 | return 0; |
1114 | 1112 | ||
1115 | /* Reroute. */ | 1113 | /* Reroute. */ |
1116 | daddr = inet->daddr; | 1114 | daddr = inet->inet_daddr; |
1117 | if (inet->opt && inet->opt->srr) | 1115 | if (inet->opt && inet->opt->srr) |
1118 | daddr = inet->opt->faddr; | 1116 | daddr = inet->opt->faddr; |
1119 | { | 1117 | { |
@@ -1123,7 +1121,7 @@ int inet_sk_rebuild_header(struct sock *sk) | |||
1123 | .nl_u = { | 1121 | .nl_u = { |
1124 | .ip4_u = { | 1122 | .ip4_u = { |
1125 | .daddr = daddr, | 1123 | .daddr = daddr, |
1126 | .saddr = inet->saddr, | 1124 | .saddr = inet->inet_saddr, |
1127 | .tos = RT_CONN_FLAGS(sk), | 1125 | .tos = RT_CONN_FLAGS(sk), |
1128 | }, | 1126 | }, |
1129 | }, | 1127 | }, |
@@ -1131,8 +1129,8 @@ int inet_sk_rebuild_header(struct sock *sk) | |||
1131 | .flags = inet_sk_flowi_flags(sk), | 1129 | .flags = inet_sk_flowi_flags(sk), |
1132 | .uli_u = { | 1130 | .uli_u = { |
1133 | .ports = { | 1131 | .ports = { |
1134 | .sport = inet->sport, | 1132 | .sport = inet->inet_sport, |
1135 | .dport = inet->dport, | 1133 | .dport = inet->inet_dport, |
1136 | }, | 1134 | }, |
1137 | }, | 1135 | }, |
1138 | }; | 1136 | }; |
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c index 5c662703eb1e..d07b0c1dd350 100644 --- a/net/ipv4/ah4.c +++ b/net/ipv4/ah4.c | |||
@@ -1,3 +1,4 @@ | |||
1 | #include <crypto/hash.h> | ||
1 | #include <linux/err.h> | 2 | #include <linux/err.h> |
2 | #include <linux/module.h> | 3 | #include <linux/module.h> |
3 | #include <net/ip.h> | 4 | #include <net/ip.h> |
@@ -5,10 +6,67 @@ | |||
5 | #include <net/ah.h> | 6 | #include <net/ah.h> |
6 | #include <linux/crypto.h> | 7 | #include <linux/crypto.h> |
7 | #include <linux/pfkeyv2.h> | 8 | #include <linux/pfkeyv2.h> |
8 | #include <linux/spinlock.h> | 9 | #include <linux/scatterlist.h> |
9 | #include <net/icmp.h> | 10 | #include <net/icmp.h> |
10 | #include <net/protocol.h> | 11 | #include <net/protocol.h> |
11 | 12 | ||
13 | struct ah_skb_cb { | ||
14 | struct xfrm_skb_cb xfrm; | ||
15 | void *tmp; | ||
16 | }; | ||
17 | |||
18 | #define AH_SKB_CB(__skb) ((struct ah_skb_cb *)&((__skb)->cb[0])) | ||
19 | |||
20 | static void *ah_alloc_tmp(struct crypto_ahash *ahash, int nfrags, | ||
21 | unsigned int size) | ||
22 | { | ||
23 | unsigned int len; | ||
24 | |||
25 | len = size + crypto_ahash_digestsize(ahash) + | ||
26 | (crypto_ahash_alignmask(ahash) & | ||
27 | ~(crypto_tfm_ctx_alignment() - 1)); | ||
28 | |||
29 | len = ALIGN(len, crypto_tfm_ctx_alignment()); | ||
30 | |||
31 | len += sizeof(struct ahash_request) + crypto_ahash_reqsize(ahash); | ||
32 | len = ALIGN(len, __alignof__(struct scatterlist)); | ||
33 | |||
34 | len += sizeof(struct scatterlist) * nfrags; | ||
35 | |||
36 | return kmalloc(len, GFP_ATOMIC); | ||
37 | } | ||
38 | |||
39 | static inline u8 *ah_tmp_auth(void *tmp, unsigned int offset) | ||
40 | { | ||
41 | return tmp + offset; | ||
42 | } | ||
43 | |||
44 | static inline u8 *ah_tmp_icv(struct crypto_ahash *ahash, void *tmp, | ||
45 | unsigned int offset) | ||
46 | { | ||
47 | return PTR_ALIGN((u8 *)tmp + offset, crypto_ahash_alignmask(ahash) + 1); | ||
48 | } | ||
49 | |||
50 | static inline struct ahash_request *ah_tmp_req(struct crypto_ahash *ahash, | ||
51 | u8 *icv) | ||
52 | { | ||
53 | struct ahash_request *req; | ||
54 | |||
55 | req = (void *)PTR_ALIGN(icv + crypto_ahash_digestsize(ahash), | ||
56 | crypto_tfm_ctx_alignment()); | ||
57 | |||
58 | ahash_request_set_tfm(req, ahash); | ||
59 | |||
60 | return req; | ||
61 | } | ||
62 | |||
63 | static inline struct scatterlist *ah_req_sg(struct crypto_ahash *ahash, | ||
64 | struct ahash_request *req) | ||
65 | { | ||
66 | return (void *)ALIGN((unsigned long)(req + 1) + | ||
67 | crypto_ahash_reqsize(ahash), | ||
68 | __alignof__(struct scatterlist)); | ||
69 | } | ||
12 | 70 | ||
13 | /* Clear mutable options and find final destination to substitute | 71 | /* Clear mutable options and find final destination to substitute |
14 | * into IP header for icv calculation. Options are already checked | 72 | * into IP header for icv calculation. Options are already checked |
@@ -54,20 +112,72 @@ static int ip_clear_mutable_options(struct iphdr *iph, __be32 *daddr) | |||
54 | return 0; | 112 | return 0; |
55 | } | 113 | } |
56 | 114 | ||
115 | static void ah_output_done(struct crypto_async_request *base, int err) | ||
116 | { | ||
117 | u8 *icv; | ||
118 | struct iphdr *iph; | ||
119 | struct sk_buff *skb = base->data; | ||
120 | struct xfrm_state *x = skb_dst(skb)->xfrm; | ||
121 | struct ah_data *ahp = x->data; | ||
122 | struct iphdr *top_iph = ip_hdr(skb); | ||
123 | struct ip_auth_hdr *ah = ip_auth_hdr(skb); | ||
124 | int ihl = ip_hdrlen(skb); | ||
125 | |||
126 | iph = AH_SKB_CB(skb)->tmp; | ||
127 | icv = ah_tmp_icv(ahp->ahash, iph, ihl); | ||
128 | memcpy(ah->auth_data, icv, ahp->icv_trunc_len); | ||
129 | |||
130 | top_iph->tos = iph->tos; | ||
131 | top_iph->ttl = iph->ttl; | ||
132 | top_iph->frag_off = iph->frag_off; | ||
133 | if (top_iph->ihl != 5) { | ||
134 | top_iph->daddr = iph->daddr; | ||
135 | memcpy(top_iph+1, iph+1, top_iph->ihl*4 - sizeof(struct iphdr)); | ||
136 | } | ||
137 | |||
138 | err = ah->nexthdr; | ||
139 | |||
140 | kfree(AH_SKB_CB(skb)->tmp); | ||
141 | xfrm_output_resume(skb, err); | ||
142 | } | ||
143 | |||
57 | static int ah_output(struct xfrm_state *x, struct sk_buff *skb) | 144 | static int ah_output(struct xfrm_state *x, struct sk_buff *skb) |
58 | { | 145 | { |
59 | int err; | 146 | int err; |
147 | int nfrags; | ||
148 | int ihl; | ||
149 | u8 *icv; | ||
150 | struct sk_buff *trailer; | ||
151 | struct crypto_ahash *ahash; | ||
152 | struct ahash_request *req; | ||
153 | struct scatterlist *sg; | ||
60 | struct iphdr *iph, *top_iph; | 154 | struct iphdr *iph, *top_iph; |
61 | struct ip_auth_hdr *ah; | 155 | struct ip_auth_hdr *ah; |
62 | struct ah_data *ahp; | 156 | struct ah_data *ahp; |
63 | union { | 157 | |
64 | struct iphdr iph; | 158 | ahp = x->data; |
65 | char buf[60]; | 159 | ahash = ahp->ahash; |
66 | } tmp_iph; | 160 | |
161 | if ((err = skb_cow_data(skb, 0, &trailer)) < 0) | ||
162 | goto out; | ||
163 | nfrags = err; | ||
67 | 164 | ||
68 | skb_push(skb, -skb_network_offset(skb)); | 165 | skb_push(skb, -skb_network_offset(skb)); |
166 | ah = ip_auth_hdr(skb); | ||
167 | ihl = ip_hdrlen(skb); | ||
168 | |||
169 | err = -ENOMEM; | ||
170 | iph = ah_alloc_tmp(ahash, nfrags, ihl); | ||
171 | if (!iph) | ||
172 | goto out; | ||
173 | |||
174 | icv = ah_tmp_icv(ahash, iph, ihl); | ||
175 | req = ah_tmp_req(ahash, icv); | ||
176 | sg = ah_req_sg(ahash, req); | ||
177 | |||
178 | memset(ah->auth_data, 0, ahp->icv_trunc_len); | ||
179 | |||
69 | top_iph = ip_hdr(skb); | 180 | top_iph = ip_hdr(skb); |
70 | iph = &tmp_iph.iph; | ||
71 | 181 | ||
72 | iph->tos = top_iph->tos; | 182 | iph->tos = top_iph->tos; |
73 | iph->ttl = top_iph->ttl; | 183 | iph->ttl = top_iph->ttl; |
@@ -78,10 +188,9 @@ static int ah_output(struct xfrm_state *x, struct sk_buff *skb) | |||
78 | memcpy(iph+1, top_iph+1, top_iph->ihl*4 - sizeof(struct iphdr)); | 188 | memcpy(iph+1, top_iph+1, top_iph->ihl*4 - sizeof(struct iphdr)); |
79 | err = ip_clear_mutable_options(top_iph, &top_iph->daddr); | 189 | err = ip_clear_mutable_options(top_iph, &top_iph->daddr); |
80 | if (err) | 190 | if (err) |
81 | goto error; | 191 | goto out_free; |
82 | } | 192 | } |
83 | 193 | ||
84 | ah = ip_auth_hdr(skb); | ||
85 | ah->nexthdr = *skb_mac_header(skb); | 194 | ah->nexthdr = *skb_mac_header(skb); |
86 | *skb_mac_header(skb) = IPPROTO_AH; | 195 | *skb_mac_header(skb) = IPPROTO_AH; |
87 | 196 | ||
@@ -91,20 +200,31 @@ static int ah_output(struct xfrm_state *x, struct sk_buff *skb) | |||
91 | top_iph->ttl = 0; | 200 | top_iph->ttl = 0; |
92 | top_iph->check = 0; | 201 | top_iph->check = 0; |
93 | 202 | ||
94 | ahp = x->data; | ||
95 | ah->hdrlen = (XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2; | 203 | ah->hdrlen = (XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2; |
96 | 204 | ||
97 | ah->reserved = 0; | 205 | ah->reserved = 0; |
98 | ah->spi = x->id.spi; | 206 | ah->spi = x->id.spi; |
99 | ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output); | 207 | ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output); |
100 | 208 | ||
101 | spin_lock_bh(&x->lock); | 209 | sg_init_table(sg, nfrags); |
102 | err = ah_mac_digest(ahp, skb, ah->auth_data); | 210 | skb_to_sgvec(skb, sg, 0, skb->len); |
103 | memcpy(ah->auth_data, ahp->work_icv, ahp->icv_trunc_len); | ||
104 | spin_unlock_bh(&x->lock); | ||
105 | 211 | ||
106 | if (err) | 212 | ahash_request_set_crypt(req, sg, icv, skb->len); |
107 | goto error; | 213 | ahash_request_set_callback(req, 0, ah_output_done, skb); |
214 | |||
215 | AH_SKB_CB(skb)->tmp = iph; | ||
216 | |||
217 | err = crypto_ahash_digest(req); | ||
218 | if (err) { | ||
219 | if (err == -EINPROGRESS) | ||
220 | goto out; | ||
221 | |||
222 | if (err == -EBUSY) | ||
223 | err = NET_XMIT_DROP; | ||
224 | goto out_free; | ||
225 | } | ||
226 | |||
227 | memcpy(ah->auth_data, icv, ahp->icv_trunc_len); | ||
108 | 228 | ||
109 | top_iph->tos = iph->tos; | 229 | top_iph->tos = iph->tos; |
110 | top_iph->ttl = iph->ttl; | 230 | top_iph->ttl = iph->ttl; |
@@ -114,28 +234,67 @@ static int ah_output(struct xfrm_state *x, struct sk_buff *skb) | |||
114 | memcpy(top_iph+1, iph+1, top_iph->ihl*4 - sizeof(struct iphdr)); | 234 | memcpy(top_iph+1, iph+1, top_iph->ihl*4 - sizeof(struct iphdr)); |
115 | } | 235 | } |
116 | 236 | ||
117 | err = 0; | 237 | out_free: |
118 | 238 | kfree(iph); | |
119 | error: | 239 | out: |
120 | return err; | 240 | return err; |
121 | } | 241 | } |
122 | 242 | ||
243 | static void ah_input_done(struct crypto_async_request *base, int err) | ||
244 | { | ||
245 | u8 *auth_data; | ||
246 | u8 *icv; | ||
247 | struct iphdr *work_iph; | ||
248 | struct sk_buff *skb = base->data; | ||
249 | struct xfrm_state *x = xfrm_input_state(skb); | ||
250 | struct ah_data *ahp = x->data; | ||
251 | struct ip_auth_hdr *ah = ip_auth_hdr(skb); | ||
252 | int ihl = ip_hdrlen(skb); | ||
253 | int ah_hlen = (ah->hdrlen + 2) << 2; | ||
254 | |||
255 | work_iph = AH_SKB_CB(skb)->tmp; | ||
256 | auth_data = ah_tmp_auth(work_iph, ihl); | ||
257 | icv = ah_tmp_icv(ahp->ahash, auth_data, ahp->icv_trunc_len); | ||
258 | |||
259 | err = memcmp(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG: 0; | ||
260 | if (err) | ||
261 | goto out; | ||
262 | |||
263 | skb->network_header += ah_hlen; | ||
264 | memcpy(skb_network_header(skb), work_iph, ihl); | ||
265 | __skb_pull(skb, ah_hlen + ihl); | ||
266 | skb_set_transport_header(skb, -ihl); | ||
267 | |||
268 | err = ah->nexthdr; | ||
269 | out: | ||
270 | kfree(AH_SKB_CB(skb)->tmp); | ||
271 | xfrm_input_resume(skb, err); | ||
272 | } | ||
273 | |||
123 | static int ah_input(struct xfrm_state *x, struct sk_buff *skb) | 274 | static int ah_input(struct xfrm_state *x, struct sk_buff *skb) |
124 | { | 275 | { |
125 | int ah_hlen; | 276 | int ah_hlen; |
126 | int ihl; | 277 | int ihl; |
127 | int nexthdr; | 278 | int nexthdr; |
128 | int err = -EINVAL; | 279 | int nfrags; |
129 | struct iphdr *iph; | 280 | u8 *auth_data; |
281 | u8 *icv; | ||
282 | struct sk_buff *trailer; | ||
283 | struct crypto_ahash *ahash; | ||
284 | struct ahash_request *req; | ||
285 | struct scatterlist *sg; | ||
286 | struct iphdr *iph, *work_iph; | ||
130 | struct ip_auth_hdr *ah; | 287 | struct ip_auth_hdr *ah; |
131 | struct ah_data *ahp; | 288 | struct ah_data *ahp; |
132 | char work_buf[60]; | 289 | int err = -ENOMEM; |
133 | 290 | ||
134 | if (!pskb_may_pull(skb, sizeof(*ah))) | 291 | if (!pskb_may_pull(skb, sizeof(*ah))) |
135 | goto out; | 292 | goto out; |
136 | 293 | ||
137 | ah = (struct ip_auth_hdr *)skb->data; | 294 | ah = (struct ip_auth_hdr *)skb->data; |
138 | ahp = x->data; | 295 | ahp = x->data; |
296 | ahash = ahp->ahash; | ||
297 | |||
139 | nexthdr = ah->nexthdr; | 298 | nexthdr = ah->nexthdr; |
140 | ah_hlen = (ah->hdrlen + 2) << 2; | 299 | ah_hlen = (ah->hdrlen + 2) << 2; |
141 | 300 | ||
@@ -156,9 +315,24 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb) | |||
156 | 315 | ||
157 | ah = (struct ip_auth_hdr *)skb->data; | 316 | ah = (struct ip_auth_hdr *)skb->data; |
158 | iph = ip_hdr(skb); | 317 | iph = ip_hdr(skb); |
318 | ihl = ip_hdrlen(skb); | ||
319 | |||
320 | if ((err = skb_cow_data(skb, 0, &trailer)) < 0) | ||
321 | goto out; | ||
322 | nfrags = err; | ||
323 | |||
324 | work_iph = ah_alloc_tmp(ahash, nfrags, ihl + ahp->icv_trunc_len); | ||
325 | if (!work_iph) | ||
326 | goto out; | ||
327 | |||
328 | auth_data = ah_tmp_auth(work_iph, ihl); | ||
329 | icv = ah_tmp_icv(ahash, auth_data, ahp->icv_trunc_len); | ||
330 | req = ah_tmp_req(ahash, icv); | ||
331 | sg = ah_req_sg(ahash, req); | ||
159 | 332 | ||
160 | ihl = skb->data - skb_network_header(skb); | 333 | memcpy(work_iph, iph, ihl); |
161 | memcpy(work_buf, iph, ihl); | 334 | memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len); |
335 | memset(ah->auth_data, 0, ahp->icv_trunc_len); | ||
162 | 336 | ||
163 | iph->ttl = 0; | 337 | iph->ttl = 0; |
164 | iph->tos = 0; | 338 | iph->tos = 0; |
@@ -166,35 +340,44 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb) | |||
166 | iph->check = 0; | 340 | iph->check = 0; |
167 | if (ihl > sizeof(*iph)) { | 341 | if (ihl > sizeof(*iph)) { |
168 | __be32 dummy; | 342 | __be32 dummy; |
169 | if (ip_clear_mutable_options(iph, &dummy)) | 343 | err = ip_clear_mutable_options(iph, &dummy); |
170 | goto out; | 344 | if (err) |
345 | goto out_free; | ||
171 | } | 346 | } |
172 | 347 | ||
173 | spin_lock(&x->lock); | 348 | skb_push(skb, ihl); |
174 | { | ||
175 | u8 auth_data[MAX_AH_AUTH_LEN]; | ||
176 | 349 | ||
177 | memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len); | 350 | sg_init_table(sg, nfrags); |
178 | skb_push(skb, ihl); | 351 | skb_to_sgvec(skb, sg, 0, skb->len); |
179 | err = ah_mac_digest(ahp, skb, ah->auth_data); | 352 | |
180 | if (err) | 353 | ahash_request_set_crypt(req, sg, icv, skb->len); |
181 | goto unlock; | 354 | ahash_request_set_callback(req, 0, ah_input_done, skb); |
182 | if (memcmp(ahp->work_icv, auth_data, ahp->icv_trunc_len)) | 355 | |
183 | err = -EBADMSG; | 356 | AH_SKB_CB(skb)->tmp = work_iph; |
357 | |||
358 | err = crypto_ahash_digest(req); | ||
359 | if (err) { | ||
360 | if (err == -EINPROGRESS) | ||
361 | goto out; | ||
362 | |||
363 | if (err == -EBUSY) | ||
364 | err = NET_XMIT_DROP; | ||
365 | goto out_free; | ||
184 | } | 366 | } |
185 | unlock: | ||
186 | spin_unlock(&x->lock); | ||
187 | 367 | ||
368 | err = memcmp(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG: 0; | ||
188 | if (err) | 369 | if (err) |
189 | goto out; | 370 | goto out_free; |
190 | 371 | ||
191 | skb->network_header += ah_hlen; | 372 | skb->network_header += ah_hlen; |
192 | memcpy(skb_network_header(skb), work_buf, ihl); | 373 | memcpy(skb_network_header(skb), work_iph, ihl); |
193 | skb->transport_header = skb->network_header; | ||
194 | __skb_pull(skb, ah_hlen + ihl); | 374 | __skb_pull(skb, ah_hlen + ihl); |
375 | skb_set_transport_header(skb, -ihl); | ||
195 | 376 | ||
196 | return nexthdr; | 377 | err = nexthdr; |
197 | 378 | ||
379 | out_free: | ||
380 | kfree (work_iph); | ||
198 | out: | 381 | out: |
199 | return err; | 382 | return err; |
200 | } | 383 | } |
@@ -222,7 +405,7 @@ static int ah_init_state(struct xfrm_state *x) | |||
222 | { | 405 | { |
223 | struct ah_data *ahp = NULL; | 406 | struct ah_data *ahp = NULL; |
224 | struct xfrm_algo_desc *aalg_desc; | 407 | struct xfrm_algo_desc *aalg_desc; |
225 | struct crypto_hash *tfm; | 408 | struct crypto_ahash *ahash; |
226 | 409 | ||
227 | if (!x->aalg) | 410 | if (!x->aalg) |
228 | goto error; | 411 | goto error; |
@@ -231,31 +414,31 @@ static int ah_init_state(struct xfrm_state *x) | |||
231 | goto error; | 414 | goto error; |
232 | 415 | ||
233 | ahp = kzalloc(sizeof(*ahp), GFP_KERNEL); | 416 | ahp = kzalloc(sizeof(*ahp), GFP_KERNEL); |
234 | if (ahp == NULL) | 417 | if (!ahp) |
235 | return -ENOMEM; | 418 | return -ENOMEM; |
236 | 419 | ||
237 | tfm = crypto_alloc_hash(x->aalg->alg_name, 0, CRYPTO_ALG_ASYNC); | 420 | ahash = crypto_alloc_ahash(x->aalg->alg_name, 0, 0); |
238 | if (IS_ERR(tfm)) | 421 | if (IS_ERR(ahash)) |
239 | goto error; | 422 | goto error; |
240 | 423 | ||
241 | ahp->tfm = tfm; | 424 | ahp->ahash = ahash; |
242 | if (crypto_hash_setkey(tfm, x->aalg->alg_key, | 425 | if (crypto_ahash_setkey(ahash, x->aalg->alg_key, |
243 | (x->aalg->alg_key_len + 7) / 8)) | 426 | (x->aalg->alg_key_len + 7) / 8)) |
244 | goto error; | 427 | goto error; |
245 | 428 | ||
246 | /* | 429 | /* |
247 | * Lookup the algorithm description maintained by xfrm_algo, | 430 | * Lookup the algorithm description maintained by xfrm_algo, |
248 | * verify crypto transform properties, and store information | 431 | * verify crypto transform properties, and store information |
249 | * we need for AH processing. This lookup cannot fail here | 432 | * we need for AH processing. This lookup cannot fail here |
250 | * after a successful crypto_alloc_hash(). | 433 | * after a successful crypto_alloc_ahash(). |
251 | */ | 434 | */ |
252 | aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0); | 435 | aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0); |
253 | BUG_ON(!aalg_desc); | 436 | BUG_ON(!aalg_desc); |
254 | 437 | ||
255 | if (aalg_desc->uinfo.auth.icv_fullbits/8 != | 438 | if (aalg_desc->uinfo.auth.icv_fullbits/8 != |
256 | crypto_hash_digestsize(tfm)) { | 439 | crypto_ahash_digestsize(ahash)) { |
257 | printk(KERN_INFO "AH: %s digestsize %u != %hu\n", | 440 | printk(KERN_INFO "AH: %s digestsize %u != %hu\n", |
258 | x->aalg->alg_name, crypto_hash_digestsize(tfm), | 441 | x->aalg->alg_name, crypto_ahash_digestsize(ahash), |
259 | aalg_desc->uinfo.auth.icv_fullbits/8); | 442 | aalg_desc->uinfo.auth.icv_fullbits/8); |
260 | goto error; | 443 | goto error; |
261 | } | 444 | } |
@@ -265,10 +448,6 @@ static int ah_init_state(struct xfrm_state *x) | |||
265 | 448 | ||
266 | BUG_ON(ahp->icv_trunc_len > MAX_AH_AUTH_LEN); | 449 | BUG_ON(ahp->icv_trunc_len > MAX_AH_AUTH_LEN); |
267 | 450 | ||
268 | ahp->work_icv = kmalloc(ahp->icv_full_len, GFP_KERNEL); | ||
269 | if (!ahp->work_icv) | ||
270 | goto error; | ||
271 | |||
272 | x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) + | 451 | x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) + |
273 | ahp->icv_trunc_len); | 452 | ahp->icv_trunc_len); |
274 | if (x->props.mode == XFRM_MODE_TUNNEL) | 453 | if (x->props.mode == XFRM_MODE_TUNNEL) |
@@ -279,8 +458,7 @@ static int ah_init_state(struct xfrm_state *x) | |||
279 | 458 | ||
280 | error: | 459 | error: |
281 | if (ahp) { | 460 | if (ahp) { |
282 | kfree(ahp->work_icv); | 461 | crypto_free_ahash(ahp->ahash); |
283 | crypto_free_hash(ahp->tfm); | ||
284 | kfree(ahp); | 462 | kfree(ahp); |
285 | } | 463 | } |
286 | return -EINVAL; | 464 | return -EINVAL; |
@@ -293,8 +471,7 @@ static void ah_destroy(struct xfrm_state *x) | |||
293 | if (!ahp) | 471 | if (!ahp) |
294 | return; | 472 | return; |
295 | 473 | ||
296 | kfree(ahp->work_icv); | 474 | crypto_free_ahash(ahp->ahash); |
297 | crypto_free_hash(ahp->tfm); | ||
298 | kfree(ahp); | 475 | kfree(ahp); |
299 | } | 476 | } |
300 | 477 | ||
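The ah4.c change above moves IPv4 AH from the synchronous crypto_hash interface (per-state work_icv buffer, digest computed under x->lock) to the asynchronous ahash interface: one kmalloc'd scratch area packs the saved IP header, the ICV, the ahash_request and the scatterlist, and crypto_ahash_digest() may complete later, in which case ah_output_done()/ah_input_done() finish the packet via xfrm_output_resume()/xfrm_input_resume(). A minimal sketch of the calling convention, with hypothetical my_* names and kernel context assumed:

/* Sketch of the async digest pattern used by the hunks above. */
static void my_done(struct crypto_async_request *base, int err)
{
        struct sk_buff *skb = base->data;

        /* work that was deferred when -EINPROGRESS was returned happens
         * here, then the skb is handed back to the xfrm layer */
        kfree(AH_SKB_CB(skb)->tmp);
        xfrm_output_resume(skb, err);
}

static int my_digest(struct ahash_request *req, struct scatterlist *sg,
                     u8 *icv, struct sk_buff *skb)
{
        int err;

        ahash_request_set_crypt(req, sg, icv, skb->len);
        ahash_request_set_callback(req, 0, my_done, skb);

        err = crypto_ahash_digest(req);
        if (err == -EINPROGRESS)        /* my_done() will run on completion */
                return err;
        if (err == -EBUSY)              /* backlog full: drop the packet */
                err = NET_XMIT_DROP;
        return err;                     /* 0: ICV already written to icv[] */
}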
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c index 039cc1ffe977..1e029dc75455 100644 --- a/net/ipv4/cipso_ipv4.c +++ b/net/ipv4/cipso_ipv4.c | |||
@@ -2017,7 +2017,7 @@ req_setattr_failure: | |||
2017 | * values on failure. | 2017 | * values on failure. |
2018 | * | 2018 | * |
2019 | */ | 2019 | */ |
2020 | int cipso_v4_delopt(struct ip_options **opt_ptr) | 2020 | static int cipso_v4_delopt(struct ip_options **opt_ptr) |
2021 | { | 2021 | { |
2022 | int hdr_delta = 0; | 2022 | int hdr_delta = 0; |
2023 | struct ip_options *opt = *opt_ptr; | 2023 | struct ip_options *opt = *opt_ptr; |
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c index 5e6c5a0f3fde..fb2465811b48 100644 --- a/net/ipv4/datagram.c +++ b/net/ipv4/datagram.c | |||
@@ -39,7 +39,7 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
39 | sk_dst_reset(sk); | 39 | sk_dst_reset(sk); |
40 | 40 | ||
41 | oif = sk->sk_bound_dev_if; | 41 | oif = sk->sk_bound_dev_if; |
42 | saddr = inet->saddr; | 42 | saddr = inet->inet_saddr; |
43 | if (ipv4_is_multicast(usin->sin_addr.s_addr)) { | 43 | if (ipv4_is_multicast(usin->sin_addr.s_addr)) { |
44 | if (!oif) | 44 | if (!oif) |
45 | oif = inet->mc_index; | 45 | oif = inet->mc_index; |
@@ -49,7 +49,7 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
49 | err = ip_route_connect(&rt, usin->sin_addr.s_addr, saddr, | 49 | err = ip_route_connect(&rt, usin->sin_addr.s_addr, saddr, |
50 | RT_CONN_FLAGS(sk), oif, | 50 | RT_CONN_FLAGS(sk), oif, |
51 | sk->sk_protocol, | 51 | sk->sk_protocol, |
52 | inet->sport, usin->sin_port, sk, 1); | 52 | inet->inet_sport, usin->sin_port, sk, 1); |
53 | if (err) { | 53 | if (err) { |
54 | if (err == -ENETUNREACH) | 54 | if (err == -ENETUNREACH) |
55 | IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES); | 55 | IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES); |
@@ -60,14 +60,14 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
60 | ip_rt_put(rt); | 60 | ip_rt_put(rt); |
61 | return -EACCES; | 61 | return -EACCES; |
62 | } | 62 | } |
63 | if (!inet->saddr) | 63 | if (!inet->inet_saddr) |
64 | inet->saddr = rt->rt_src; /* Update source address */ | 64 | inet->inet_saddr = rt->rt_src; /* Update source address */ |
65 | if (!inet->rcv_saddr) | 65 | if (!inet->inet_rcv_saddr) |
66 | inet->rcv_saddr = rt->rt_src; | 66 | inet->inet_rcv_saddr = rt->rt_src; |
67 | inet->daddr = rt->rt_dst; | 67 | inet->inet_daddr = rt->rt_dst; |
68 | inet->dport = usin->sin_port; | 68 | inet->inet_dport = usin->sin_port; |
69 | sk->sk_state = TCP_ESTABLISHED; | 69 | sk->sk_state = TCP_ESTABLISHED; |
70 | inet->id = jiffies; | 70 | inet->inet_id = jiffies; |
71 | 71 | ||
72 | sk_dst_set(sk, &rt->u.dst); | 72 | sk_dst_set(sk, &rt->u.dst); |
73 | return(0); | 73 | return(0); |
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 5df2f6a0b0f0..7620382058a0 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c | |||
@@ -140,11 +140,11 @@ void in_dev_finish_destroy(struct in_device *idev) | |||
140 | #endif | 140 | #endif |
141 | dev_put(dev); | 141 | dev_put(dev); |
142 | if (!idev->dead) | 142 | if (!idev->dead) |
143 | printk("Freeing alive in_device %p\n", idev); | 143 | pr_err("Freeing alive in_device %p\n", idev); |
144 | else { | 144 | else |
145 | kfree(idev); | 145 | kfree(idev); |
146 | } | ||
147 | } | 146 | } |
147 | EXPORT_SYMBOL(in_dev_finish_destroy); | ||
148 | 148 | ||
149 | static struct in_device *inetdev_init(struct net_device *dev) | 149 | static struct in_device *inetdev_init(struct net_device *dev) |
150 | { | 150 | { |
@@ -159,7 +159,8 @@ static struct in_device *inetdev_init(struct net_device *dev) | |||
159 | sizeof(in_dev->cnf)); | 159 | sizeof(in_dev->cnf)); |
160 | in_dev->cnf.sysctl = NULL; | 160 | in_dev->cnf.sysctl = NULL; |
161 | in_dev->dev = dev; | 161 | in_dev->dev = dev; |
162 | if ((in_dev->arp_parms = neigh_parms_alloc(dev, &arp_tbl)) == NULL) | 162 | in_dev->arp_parms = neigh_parms_alloc(dev, &arp_tbl); |
163 | if (!in_dev->arp_parms) | ||
163 | goto out_kfree; | 164 | goto out_kfree; |
164 | if (IPV4_DEVCONF(in_dev->cnf, FORWARDING)) | 165 | if (IPV4_DEVCONF(in_dev->cnf, FORWARDING)) |
165 | dev_disable_lro(dev); | 166 | dev_disable_lro(dev); |
@@ -405,13 +406,15 @@ struct in_device *inetdev_by_index(struct net *net, int ifindex) | |||
405 | { | 406 | { |
406 | struct net_device *dev; | 407 | struct net_device *dev; |
407 | struct in_device *in_dev = NULL; | 408 | struct in_device *in_dev = NULL; |
408 | read_lock(&dev_base_lock); | 409 | |
409 | dev = __dev_get_by_index(net, ifindex); | 410 | rcu_read_lock(); |
411 | dev = dev_get_by_index_rcu(net, ifindex); | ||
410 | if (dev) | 412 | if (dev) |
411 | in_dev = in_dev_get(dev); | 413 | in_dev = in_dev_get(dev); |
412 | read_unlock(&dev_base_lock); | 414 | rcu_read_unlock(); |
413 | return in_dev; | 415 | return in_dev; |
414 | } | 416 | } |
417 | EXPORT_SYMBOL(inetdev_by_index); | ||
415 | 418 | ||
416 | /* Called only from RTNL semaphored context. No locks. */ | 419 | /* Called only from RTNL semaphored context. No locks. */ |
417 | 420 | ||
@@ -557,7 +560,7 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg | |||
557 | * Determine a default network mask, based on the IP address. | 560 | * Determine a default network mask, based on the IP address. |
558 | */ | 561 | */ |
559 | 562 | ||
560 | static __inline__ int inet_abc_len(__be32 addr) | 563 | static inline int inet_abc_len(__be32 addr) |
561 | { | 564 | { |
562 | int rc = -1; /* Something else, probably a multicast. */ | 565 | int rc = -1; /* Something else, probably a multicast. */ |
563 | 566 | ||
@@ -646,13 +649,15 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg) | |||
646 | rtnl_lock(); | 649 | rtnl_lock(); |
647 | 650 | ||
648 | ret = -ENODEV; | 651 | ret = -ENODEV; |
649 | if ((dev = __dev_get_by_name(net, ifr.ifr_name)) == NULL) | 652 | dev = __dev_get_by_name(net, ifr.ifr_name); |
653 | if (!dev) | ||
650 | goto done; | 654 | goto done; |
651 | 655 | ||
652 | if (colon) | 656 | if (colon) |
653 | *colon = ':'; | 657 | *colon = ':'; |
654 | 658 | ||
655 | if ((in_dev = __in_dev_get_rtnl(dev)) != NULL) { | 659 | in_dev = __in_dev_get_rtnl(dev); |
660 | if (in_dev) { | ||
656 | if (tryaddrmatch) { | 661 | if (tryaddrmatch) { |
657 | /* Matthias Andree */ | 662 | /* Matthias Andree */ |
658 | /* compare label and address (4.4BSD style) */ | 663 | /* compare label and address (4.4BSD style) */ |
@@ -720,7 +725,8 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg) | |||
720 | 725 | ||
721 | if (!ifa) { | 726 | if (!ifa) { |
722 | ret = -ENOBUFS; | 727 | ret = -ENOBUFS; |
723 | if ((ifa = inet_alloc_ifa()) == NULL) | 728 | ifa = inet_alloc_ifa(); |
729 | if (!ifa) | ||
724 | break; | 730 | break; |
725 | if (colon) | 731 | if (colon) |
726 | memcpy(ifa->ifa_label, ifr.ifr_name, IFNAMSIZ); | 732 | memcpy(ifa->ifa_label, ifr.ifr_name, IFNAMSIZ); |
@@ -822,10 +828,10 @@ static int inet_gifconf(struct net_device *dev, char __user *buf, int len) | |||
822 | struct ifreq ifr; | 828 | struct ifreq ifr; |
823 | int done = 0; | 829 | int done = 0; |
824 | 830 | ||
825 | if (!in_dev || (ifa = in_dev->ifa_list) == NULL) | 831 | if (!in_dev) |
826 | goto out; | 832 | goto out; |
827 | 833 | ||
828 | for (; ifa; ifa = ifa->ifa_next) { | 834 | for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) { |
829 | if (!buf) { | 835 | if (!buf) { |
830 | done += sizeof(ifr); | 836 | done += sizeof(ifr); |
831 | continue; | 837 | continue; |
@@ -875,36 +881,33 @@ __be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope) | |||
875 | if (!addr) | 881 | if (!addr) |
876 | addr = ifa->ifa_local; | 882 | addr = ifa->ifa_local; |
877 | } endfor_ifa(in_dev); | 883 | } endfor_ifa(in_dev); |
878 | no_in_dev: | ||
879 | rcu_read_unlock(); | ||
880 | 884 | ||
881 | if (addr) | 885 | if (addr) |
882 | goto out; | 886 | goto out_unlock; |
887 | no_in_dev: | ||
883 | 888 | ||
884 | /* Not loopback addresses on loopback should be preferred | 889 | /* Not loopback addresses on loopback should be preferred |
885 | in this case. It is importnat that lo is the first interface | 890 | in this case. It is importnat that lo is the first interface |
886 | in dev_base list. | 891 | in dev_base list. |
887 | */ | 892 | */ |
888 | read_lock(&dev_base_lock); | 893 | for_each_netdev_rcu(net, dev) { |
889 | rcu_read_lock(); | 894 | in_dev = __in_dev_get_rcu(dev); |
890 | for_each_netdev(net, dev) { | 895 | if (!in_dev) |
891 | if ((in_dev = __in_dev_get_rcu(dev)) == NULL) | ||
892 | continue; | 896 | continue; |
893 | 897 | ||
894 | for_primary_ifa(in_dev) { | 898 | for_primary_ifa(in_dev) { |
895 | if (ifa->ifa_scope != RT_SCOPE_LINK && | 899 | if (ifa->ifa_scope != RT_SCOPE_LINK && |
896 | ifa->ifa_scope <= scope) { | 900 | ifa->ifa_scope <= scope) { |
897 | addr = ifa->ifa_local; | 901 | addr = ifa->ifa_local; |
898 | goto out_unlock_both; | 902 | goto out_unlock; |
899 | } | 903 | } |
900 | } endfor_ifa(in_dev); | 904 | } endfor_ifa(in_dev); |
901 | } | 905 | } |
902 | out_unlock_both: | 906 | out_unlock: |
903 | read_unlock(&dev_base_lock); | ||
904 | rcu_read_unlock(); | 907 | rcu_read_unlock(); |
905 | out: | ||
906 | return addr; | 908 | return addr; |
907 | } | 909 | } |
910 | EXPORT_SYMBOL(inet_select_addr); | ||
908 | 911 | ||
909 | static __be32 confirm_addr_indev(struct in_device *in_dev, __be32 dst, | 912 | static __be32 confirm_addr_indev(struct in_device *in_dev, __be32 dst, |
910 | __be32 local, int scope) | 913 | __be32 local, int scope) |
@@ -940,7 +943,7 @@ static __be32 confirm_addr_indev(struct in_device *in_dev, __be32 dst, | |||
940 | } | 943 | } |
941 | } endfor_ifa(in_dev); | 944 | } endfor_ifa(in_dev); |
942 | 945 | ||
943 | return same? addr : 0; | 946 | return same ? addr : 0; |
944 | } | 947 | } |
945 | 948 | ||
946 | /* | 949 | /* |
@@ -961,17 +964,16 @@ __be32 inet_confirm_addr(struct in_device *in_dev, | |||
961 | return confirm_addr_indev(in_dev, dst, local, scope); | 964 | return confirm_addr_indev(in_dev, dst, local, scope); |
962 | 965 | ||
963 | net = dev_net(in_dev->dev); | 966 | net = dev_net(in_dev->dev); |
964 | read_lock(&dev_base_lock); | ||
965 | rcu_read_lock(); | 967 | rcu_read_lock(); |
966 | for_each_netdev(net, dev) { | 968 | for_each_netdev_rcu(net, dev) { |
967 | if ((in_dev = __in_dev_get_rcu(dev))) { | 969 | in_dev = __in_dev_get_rcu(dev); |
970 | if (in_dev) { | ||
968 | addr = confirm_addr_indev(in_dev, dst, local, scope); | 971 | addr = confirm_addr_indev(in_dev, dst, local, scope); |
969 | if (addr) | 972 | if (addr) |
970 | break; | 973 | break; |
971 | } | 974 | } |
972 | } | 975 | } |
973 | rcu_read_unlock(); | 976 | rcu_read_unlock(); |
974 | read_unlock(&dev_base_lock); | ||
975 | 977 | ||
976 | return addr; | 978 | return addr; |
977 | } | 979 | } |
@@ -984,14 +986,16 @@ int register_inetaddr_notifier(struct notifier_block *nb) | |||
984 | { | 986 | { |
985 | return blocking_notifier_chain_register(&inetaddr_chain, nb); | 987 | return blocking_notifier_chain_register(&inetaddr_chain, nb); |
986 | } | 988 | } |
989 | EXPORT_SYMBOL(register_inetaddr_notifier); | ||
987 | 990 | ||
988 | int unregister_inetaddr_notifier(struct notifier_block *nb) | 991 | int unregister_inetaddr_notifier(struct notifier_block *nb) |
989 | { | 992 | { |
990 | return blocking_notifier_chain_unregister(&inetaddr_chain, nb); | 993 | return blocking_notifier_chain_unregister(&inetaddr_chain, nb); |
991 | } | 994 | } |
995 | EXPORT_SYMBOL(unregister_inetaddr_notifier); | ||
992 | 996 | ||
993 | /* Rename ifa_labels for a device name change. Make some effort to preserve existing | 997 | /* Rename ifa_labels for a device name change. Make some effort to preserve |
994 | * alias numbering and to create unique labels if possible. | 998 | * existing alias numbering and to create unique labels if possible. |
995 | */ | 999 | */ |
996 | static void inetdev_changename(struct net_device *dev, struct in_device *in_dev) | 1000 | static void inetdev_changename(struct net_device *dev, struct in_device *in_dev) |
997 | { | 1001 | { |
@@ -1010,11 +1014,10 @@ static void inetdev_changename(struct net_device *dev, struct in_device *in_dev) | |||
1010 | sprintf(old, ":%d", named); | 1014 | sprintf(old, ":%d", named); |
1011 | dot = old; | 1015 | dot = old; |
1012 | } | 1016 | } |
1013 | if (strlen(dot) + strlen(dev->name) < IFNAMSIZ) { | 1017 | if (strlen(dot) + strlen(dev->name) < IFNAMSIZ) |
1014 | strcat(ifa->ifa_label, dot); | 1018 | strcat(ifa->ifa_label, dot); |
1015 | } else { | 1019 | else |
1016 | strcpy(ifa->ifa_label + (IFNAMSIZ - strlen(dot) - 1), dot); | 1020 | strcpy(ifa->ifa_label + (IFNAMSIZ - strlen(dot) - 1), dot); |
1017 | } | ||
1018 | skip: | 1021 | skip: |
1019 | rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0); | 1022 | rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0); |
1020 | } | 1023 | } |
@@ -1061,8 +1064,9 @@ static int inetdev_event(struct notifier_block *this, unsigned long event, | |||
1061 | if (!inetdev_valid_mtu(dev->mtu)) | 1064 | if (!inetdev_valid_mtu(dev->mtu)) |
1062 | break; | 1065 | break; |
1063 | if (dev->flags & IFF_LOOPBACK) { | 1066 | if (dev->flags & IFF_LOOPBACK) { |
1064 | struct in_ifaddr *ifa; | 1067 | struct in_ifaddr *ifa = inet_alloc_ifa(); |
1065 | if ((ifa = inet_alloc_ifa()) != NULL) { | 1068 | |
1069 | if (ifa) { | ||
1066 | ifa->ifa_local = | 1070 | ifa->ifa_local = |
1067 | ifa->ifa_address = htonl(INADDR_LOOPBACK); | 1071 | ifa->ifa_address = htonl(INADDR_LOOPBACK); |
1068 | ifa->ifa_prefixlen = 8; | 1072 | ifa->ifa_prefixlen = 8; |
@@ -1170,38 +1174,54 @@ nla_put_failure: | |||
1170 | static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb) | 1174 | static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb) |
1171 | { | 1175 | { |
1172 | struct net *net = sock_net(skb->sk); | 1176 | struct net *net = sock_net(skb->sk); |
1173 | int idx, ip_idx; | 1177 | int h, s_h; |
1178 | int idx, s_idx; | ||
1179 | int ip_idx, s_ip_idx; | ||
1174 | struct net_device *dev; | 1180 | struct net_device *dev; |
1175 | struct in_device *in_dev; | 1181 | struct in_device *in_dev; |
1176 | struct in_ifaddr *ifa; | 1182 | struct in_ifaddr *ifa; |
1177 | int s_ip_idx, s_idx = cb->args[0]; | 1183 | struct hlist_head *head; |
1184 | struct hlist_node *node; | ||
1178 | 1185 | ||
1179 | s_ip_idx = ip_idx = cb->args[1]; | 1186 | s_h = cb->args[0]; |
1180 | idx = 0; | 1187 | s_idx = idx = cb->args[1]; |
1181 | for_each_netdev(net, dev) { | 1188 | s_ip_idx = ip_idx = cb->args[2]; |
1182 | if (idx < s_idx) | 1189 | |
1183 | goto cont; | 1190 | for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { |
1184 | if (idx > s_idx) | 1191 | idx = 0; |
1185 | s_ip_idx = 0; | 1192 | head = &net->dev_index_head[h]; |
1186 | if ((in_dev = __in_dev_get_rtnl(dev)) == NULL) | 1193 | rcu_read_lock(); |
1187 | goto cont; | 1194 | hlist_for_each_entry_rcu(dev, node, head, index_hlist) { |
1188 | 1195 | if (idx < s_idx) | |
1189 | for (ifa = in_dev->ifa_list, ip_idx = 0; ifa; | 1196 | goto cont; |
1190 | ifa = ifa->ifa_next, ip_idx++) { | 1197 | if (idx > s_idx) |
1191 | if (ip_idx < s_ip_idx) | 1198 | s_ip_idx = 0; |
1192 | continue; | 1199 | in_dev = __in_dev_get_rcu(dev); |
1193 | if (inet_fill_ifaddr(skb, ifa, NETLINK_CB(cb->skb).pid, | 1200 | if (!in_dev) |
1201 | goto cont; | ||
1202 | |||
1203 | for (ifa = in_dev->ifa_list, ip_idx = 0; ifa; | ||
1204 | ifa = ifa->ifa_next, ip_idx++) { | ||
1205 | if (ip_idx < s_ip_idx) | ||
1206 | continue; | ||
1207 | if (inet_fill_ifaddr(skb, ifa, | ||
1208 | NETLINK_CB(cb->skb).pid, | ||
1194 | cb->nlh->nlmsg_seq, | 1209 | cb->nlh->nlmsg_seq, |
1195 | RTM_NEWADDR, NLM_F_MULTI) <= 0) | 1210 | RTM_NEWADDR, NLM_F_MULTI) <= 0) { |
1196 | goto done; | 1211 | rcu_read_unlock(); |
1197 | } | 1212 | goto done; |
1213 | } | ||
1214 | } | ||
1198 | cont: | 1215 | cont: |
1199 | idx++; | 1216 | idx++; |
1217 | } | ||
1218 | rcu_read_unlock(); | ||
1200 | } | 1219 | } |
1201 | 1220 | ||
1202 | done: | 1221 | done: |
1203 | cb->args[0] = idx; | 1222 | cb->args[0] = h; |
1204 | cb->args[1] = ip_idx; | 1223 | cb->args[1] = idx; |
1224 | cb->args[2] = ip_idx; | ||
1205 | 1225 | ||
1206 | return skb->len; | 1226 | return skb->len; |
1207 | } | 1227 | } |
@@ -1239,18 +1259,18 @@ static void devinet_copy_dflt_conf(struct net *net, int i) | |||
1239 | { | 1259 | { |
1240 | struct net_device *dev; | 1260 | struct net_device *dev; |
1241 | 1261 | ||
1242 | read_lock(&dev_base_lock); | 1262 | rcu_read_lock(); |
1243 | for_each_netdev(net, dev) { | 1263 | for_each_netdev_rcu(net, dev) { |
1244 | struct in_device *in_dev; | 1264 | struct in_device *in_dev; |
1245 | rcu_read_lock(); | 1265 | |
1246 | in_dev = __in_dev_get_rcu(dev); | 1266 | in_dev = __in_dev_get_rcu(dev); |
1247 | if (in_dev && !test_bit(i, in_dev->cnf.state)) | 1267 | if (in_dev && !test_bit(i, in_dev->cnf.state)) |
1248 | in_dev->cnf.data[i] = net->ipv4.devconf_dflt->data[i]; | 1268 | in_dev->cnf.data[i] = net->ipv4.devconf_dflt->data[i]; |
1249 | rcu_read_unlock(); | ||
1250 | } | 1269 | } |
1251 | read_unlock(&dev_base_lock); | 1270 | rcu_read_unlock(); |
1252 | } | 1271 | } |
1253 | 1272 | ||
1273 | /* called with RTNL locked */ | ||
1254 | static void inet_forward_change(struct net *net) | 1274 | static void inet_forward_change(struct net *net) |
1255 | { | 1275 | { |
1256 | struct net_device *dev; | 1276 | struct net_device *dev; |
@@ -1259,7 +1279,6 @@ static void inet_forward_change(struct net *net) | |||
1259 | IPV4_DEVCONF_ALL(net, ACCEPT_REDIRECTS) = !on; | 1279 | IPV4_DEVCONF_ALL(net, ACCEPT_REDIRECTS) = !on; |
1260 | IPV4_DEVCONF_DFLT(net, FORWARDING) = on; | 1280 | IPV4_DEVCONF_DFLT(net, FORWARDING) = on; |
1261 | 1281 | ||
1262 | read_lock(&dev_base_lock); | ||
1263 | for_each_netdev(net, dev) { | 1282 | for_each_netdev(net, dev) { |
1264 | struct in_device *in_dev; | 1283 | struct in_device *in_dev; |
1265 | if (on) | 1284 | if (on) |
@@ -1270,7 +1289,6 @@ static void inet_forward_change(struct net *net) | |||
1270 | IN_DEV_CONF_SET(in_dev, FORWARDING, on); | 1289 | IN_DEV_CONF_SET(in_dev, FORWARDING, on); |
1271 | rcu_read_unlock(); | 1290 | rcu_read_unlock(); |
1272 | } | 1291 | } |
1273 | read_unlock(&dev_base_lock); | ||
1274 | } | 1292 | } |
1275 | 1293 | ||
1276 | static int devinet_conf_proc(ctl_table *ctl, int write, | 1294 | static int devinet_conf_proc(ctl_table *ctl, int write, |
@@ -1680,8 +1698,3 @@ void __init devinet_init(void) | |||
1680 | rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr); | 1698 | rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr); |
1681 | } | 1699 | } |
1682 | 1700 | ||
1683 | EXPORT_SYMBOL(in_dev_finish_destroy); | ||
1684 | EXPORT_SYMBOL(inet_select_addr); | ||
1685 | EXPORT_SYMBOL(inetdev_by_index); | ||
1686 | EXPORT_SYMBOL(register_inetaddr_notifier); | ||
1687 | EXPORT_SYMBOL(unregister_inetaddr_notifier); | ||
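The devinet.c hunks drop dev_base_lock in favour of RCU: readers take rcu_read_lock() and walk with for_each_netdev_rcu()/__in_dev_get_rcu() (or look up with dev_get_by_index_rcu()), and inet_dump_ifaddr() now walks the per-net ifindex hash, saving three resume cookies (hash bucket, device index, address index) in cb->args[]. The basic read-side pattern, as a sketch with a hypothetical function name:

/* Sketch of the RCU walk that replaces the dev_base_lock readers above. */
static __be32 example_first_primary_addr(struct net *net)
{
        struct net_device *dev;
        __be32 addr = 0;

        rcu_read_lock();
        for_each_netdev_rcu(net, dev) {
                struct in_device *in_dev = __in_dev_get_rcu(dev);

                if (!in_dev)
                        continue;
                for_primary_ifa(in_dev) {
                        addr = ifa->ifa_local;
                        goto unlock;
                } endfor_ifa(in_dev);
        }
unlock:
        rcu_read_unlock();
        return addr;
}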
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index aa00398be80e..6c1e56aef1f4 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c | |||
@@ -125,7 +125,7 @@ void fib_select_default(struct net *net, | |||
125 | #endif | 125 | #endif |
126 | tb = fib_get_table(net, table); | 126 | tb = fib_get_table(net, table); |
127 | if (FIB_RES_GW(*res) && FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK) | 127 | if (FIB_RES_GW(*res) && FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK) |
128 | tb->tb_select_default(tb, flp, res); | 128 | fib_table_select_default(tb, flp, res); |
129 | } | 129 | } |
130 | 130 | ||
131 | static void fib_flush(struct net *net) | 131 | static void fib_flush(struct net *net) |
@@ -139,7 +139,7 @@ static void fib_flush(struct net *net) | |||
139 | for (h = 0; h < FIB_TABLE_HASHSZ; h++) { | 139 | for (h = 0; h < FIB_TABLE_HASHSZ; h++) { |
140 | head = &net->ipv4.fib_table_hash[h]; | 140 | head = &net->ipv4.fib_table_hash[h]; |
141 | hlist_for_each_entry(tb, node, head, tb_hlist) | 141 | hlist_for_each_entry(tb, node, head, tb_hlist) |
142 | flushed += tb->tb_flush(tb); | 142 | flushed += fib_table_flush(tb); |
143 | } | 143 | } |
144 | 144 | ||
145 | if (flushed) | 145 | if (flushed) |
@@ -162,7 +162,7 @@ struct net_device * ip_dev_find(struct net *net, __be32 addr) | |||
162 | #endif | 162 | #endif |
163 | 163 | ||
164 | local_table = fib_get_table(net, RT_TABLE_LOCAL); | 164 | local_table = fib_get_table(net, RT_TABLE_LOCAL); |
165 | if (!local_table || local_table->tb_lookup(local_table, &fl, &res)) | 165 | if (!local_table || fib_table_lookup(local_table, &fl, &res)) |
166 | return NULL; | 166 | return NULL; |
167 | if (res.type != RTN_LOCAL) | 167 | if (res.type != RTN_LOCAL) |
168 | goto out; | 168 | goto out; |
@@ -200,7 +200,7 @@ static inline unsigned __inet_dev_addr_type(struct net *net, | |||
200 | local_table = fib_get_table(net, RT_TABLE_LOCAL); | 200 | local_table = fib_get_table(net, RT_TABLE_LOCAL); |
201 | if (local_table) { | 201 | if (local_table) { |
202 | ret = RTN_UNICAST; | 202 | ret = RTN_UNICAST; |
203 | if (!local_table->tb_lookup(local_table, &fl, &res)) { | 203 | if (!fib_table_lookup(local_table, &fl, &res)) { |
204 | if (!dev || dev == res.fi->fib_dev) | 204 | if (!dev || dev == res.fi->fib_dev) |
205 | ret = res.type; | 205 | ret = res.type; |
206 | fib_res_put(&res); | 206 | fib_res_put(&res); |
@@ -476,13 +476,13 @@ int ip_rt_ioctl(struct net *net, unsigned int cmd, void __user *arg) | |||
476 | if (cmd == SIOCDELRT) { | 476 | if (cmd == SIOCDELRT) { |
477 | tb = fib_get_table(net, cfg.fc_table); | 477 | tb = fib_get_table(net, cfg.fc_table); |
478 | if (tb) | 478 | if (tb) |
479 | err = tb->tb_delete(tb, &cfg); | 479 | err = fib_table_delete(tb, &cfg); |
480 | else | 480 | else |
481 | err = -ESRCH; | 481 | err = -ESRCH; |
482 | } else { | 482 | } else { |
483 | tb = fib_new_table(net, cfg.fc_table); | 483 | tb = fib_new_table(net, cfg.fc_table); |
484 | if (tb) | 484 | if (tb) |
485 | err = tb->tb_insert(tb, &cfg); | 485 | err = fib_table_insert(tb, &cfg); |
486 | else | 486 | else |
487 | err = -ENOBUFS; | 487 | err = -ENOBUFS; |
488 | } | 488 | } |
@@ -597,7 +597,7 @@ static int inet_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *ar | |||
597 | goto errout; | 597 | goto errout; |
598 | } | 598 | } |
599 | 599 | ||
600 | err = tb->tb_delete(tb, &cfg); | 600 | err = fib_table_delete(tb, &cfg); |
601 | errout: | 601 | errout: |
602 | return err; | 602 | return err; |
603 | } | 603 | } |
@@ -619,7 +619,7 @@ static int inet_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *ar | |||
619 | goto errout; | 619 | goto errout; |
620 | } | 620 | } |
621 | 621 | ||
622 | err = tb->tb_insert(tb, &cfg); | 622 | err = fib_table_insert(tb, &cfg); |
623 | errout: | 623 | errout: |
624 | return err; | 624 | return err; |
625 | } | 625 | } |
@@ -650,7 +650,7 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb) | |||
650 | if (dumped) | 650 | if (dumped) |
651 | memset(&cb->args[2], 0, sizeof(cb->args) - | 651 | memset(&cb->args[2], 0, sizeof(cb->args) - |
652 | 2 * sizeof(cb->args[0])); | 652 | 2 * sizeof(cb->args[0])); |
653 | if (tb->tb_dump(tb, skb, cb) < 0) | 653 | if (fib_table_dump(tb, skb, cb) < 0) |
654 | goto out; | 654 | goto out; |
655 | dumped = 1; | 655 | dumped = 1; |
656 | next: | 656 | next: |
@@ -704,9 +704,9 @@ static void fib_magic(int cmd, int type, __be32 dst, int dst_len, struct in_ifad | |||
704 | cfg.fc_scope = RT_SCOPE_HOST; | 704 | cfg.fc_scope = RT_SCOPE_HOST; |
705 | 705 | ||
706 | if (cmd == RTM_NEWROUTE) | 706 | if (cmd == RTM_NEWROUTE) |
707 | tb->tb_insert(tb, &cfg); | 707 | fib_table_insert(tb, &cfg); |
708 | else | 708 | else |
709 | tb->tb_delete(tb, &cfg); | 709 | fib_table_delete(tb, &cfg); |
710 | } | 710 | } |
711 | 711 | ||
712 | void fib_add_ifaddr(struct in_ifaddr *ifa) | 712 | void fib_add_ifaddr(struct in_ifaddr *ifa) |
@@ -835,7 +835,7 @@ static void nl_fib_lookup(struct fib_result_nl *frn, struct fib_table *tb ) | |||
835 | local_bh_disable(); | 835 | local_bh_disable(); |
836 | 836 | ||
837 | frn->tb_id = tb->tb_id; | 837 | frn->tb_id = tb->tb_id; |
838 | frn->err = tb->tb_lookup(tb, &fl, &res); | 838 | frn->err = fib_table_lookup(tb, &fl, &res); |
839 | 839 | ||
840 | if (!frn->err) { | 840 | if (!frn->err) { |
841 | frn->prefixlen = res.prefixlen; | 841 | frn->prefixlen = res.prefixlen; |
@@ -895,11 +895,11 @@ static void nl_fib_lookup_exit(struct net *net) | |||
895 | net->ipv4.fibnl = NULL; | 895 | net->ipv4.fibnl = NULL; |
896 | } | 896 | } |
897 | 897 | ||
898 | static void fib_disable_ip(struct net_device *dev, int force) | 898 | static void fib_disable_ip(struct net_device *dev, int force, int delay) |
899 | { | 899 | { |
900 | if (fib_sync_down_dev(dev, force)) | 900 | if (fib_sync_down_dev(dev, force)) |
901 | fib_flush(dev_net(dev)); | 901 | fib_flush(dev_net(dev)); |
902 | rt_cache_flush(dev_net(dev), 0); | 902 | rt_cache_flush(dev_net(dev), delay); |
903 | arp_ifdown(dev); | 903 | arp_ifdown(dev); |
904 | } | 904 | } |
905 | 905 | ||
@@ -922,7 +922,7 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event, | |||
922 | /* Last address was deleted from this interface. | 922 | /* Last address was deleted from this interface. |
923 | Disable IP. | 923 | Disable IP. |
924 | */ | 924 | */ |
925 | fib_disable_ip(dev, 1); | 925 | fib_disable_ip(dev, 1, 0); |
926 | } else { | 926 | } else { |
927 | rt_cache_flush(dev_net(dev), -1); | 927 | rt_cache_flush(dev_net(dev), -1); |
928 | } | 928 | } |
@@ -937,7 +937,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo | |||
937 | struct in_device *in_dev = __in_dev_get_rtnl(dev); | 937 | struct in_device *in_dev = __in_dev_get_rtnl(dev); |
938 | 938 | ||
939 | if (event == NETDEV_UNREGISTER) { | 939 | if (event == NETDEV_UNREGISTER) { |
940 | fib_disable_ip(dev, 2); | 940 | fib_disable_ip(dev, 2, -1); |
941 | return NOTIFY_DONE; | 941 | return NOTIFY_DONE; |
942 | } | 942 | } |
943 | 943 | ||
@@ -955,10 +955,11 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo | |||
955 | rt_cache_flush(dev_net(dev), -1); | 955 | rt_cache_flush(dev_net(dev), -1); |
956 | break; | 956 | break; |
957 | case NETDEV_DOWN: | 957 | case NETDEV_DOWN: |
958 | fib_disable_ip(dev, 0); | 958 | fib_disable_ip(dev, 0, 0); |
959 | break; | 959 | break; |
960 | case NETDEV_CHANGEMTU: | 960 | case NETDEV_CHANGEMTU: |
961 | case NETDEV_CHANGE: | 961 | case NETDEV_CHANGE: |
962 | case NETDEV_UNREGISTER_PERNET: | ||
962 | rt_cache_flush(dev_net(dev), 0); | 963 | rt_cache_flush(dev_net(dev), 0); |
963 | break; | 964 | break; |
964 | } | 965 | } |
@@ -1012,7 +1013,7 @@ static void __net_exit ip_fib_net_exit(struct net *net) | |||
1012 | head = &net->ipv4.fib_table_hash[i]; | 1013 | head = &net->ipv4.fib_table_hash[i]; |
1013 | hlist_for_each_entry_safe(tb, node, tmp, head, tb_hlist) { | 1014 | hlist_for_each_entry_safe(tb, node, tmp, head, tb_hlist) { |
1014 | hlist_del(node); | 1015 | hlist_del(node); |
1015 | tb->tb_flush(tb); | 1016 | fib_table_flush(tb); |
1016 | kfree(tb); | 1017 | kfree(tb); |
1017 | } | 1018 | } |
1018 | } | 1019 | } |
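The FIB changes in fib_frontend.c and, below, fib_hash.c, fib_rules.c and fib_trie.c replace the per-table function pointers (tb->tb_lookup, tb->tb_insert, tb->tb_delete, tb->tb_flush, tb->tb_select_default, tb->tb_dump) with direct calls; whichever backend is compiled in (hash or trie) supplies the fib_table_*() symbols. Caller-side, the change amounts to (sketch):

/* before: indirect call through per-table ops (removed by this series) */
err = tb->tb_lookup(tb, &fl, &res);

/* after: direct call; fib_hash.c or fib_trie.c provides the symbol,
 * depending on which FIB backend is built */
err = fib_table_lookup(tb, &fl, &res);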
diff --git a/net/ipv4/fib_hash.c b/net/ipv4/fib_hash.c index ecd39454235c..14972017b9c2 100644 --- a/net/ipv4/fib_hash.c +++ b/net/ipv4/fib_hash.c | |||
@@ -242,8 +242,8 @@ fn_new_zone(struct fn_hash *table, int z) | |||
242 | return fz; | 242 | return fz; |
243 | } | 243 | } |
244 | 244 | ||
245 | static int | 245 | int fib_table_lookup(struct fib_table *tb, |
246 | fn_hash_lookup(struct fib_table *tb, const struct flowi *flp, struct fib_result *res) | 246 | const struct flowi *flp, struct fib_result *res) |
247 | { | 247 | { |
248 | int err; | 248 | int err; |
249 | struct fn_zone *fz; | 249 | struct fn_zone *fz; |
@@ -274,8 +274,8 @@ out: | |||
274 | return err; | 274 | return err; |
275 | } | 275 | } |
276 | 276 | ||
277 | static void | 277 | void fib_table_select_default(struct fib_table *tb, |
278 | fn_hash_select_default(struct fib_table *tb, const struct flowi *flp, struct fib_result *res) | 278 | const struct flowi *flp, struct fib_result *res) |
279 | { | 279 | { |
280 | int order, last_idx; | 280 | int order, last_idx; |
281 | struct hlist_node *node; | 281 | struct hlist_node *node; |
@@ -366,7 +366,7 @@ static struct fib_node *fib_find_node(struct fn_zone *fz, __be32 key) | |||
366 | return NULL; | 366 | return NULL; |
367 | } | 367 | } |
368 | 368 | ||
369 | static int fn_hash_insert(struct fib_table *tb, struct fib_config *cfg) | 369 | int fib_table_insert(struct fib_table *tb, struct fib_config *cfg) |
370 | { | 370 | { |
371 | struct fn_hash *table = (struct fn_hash *) tb->tb_data; | 371 | struct fn_hash *table = (struct fn_hash *) tb->tb_data; |
372 | struct fib_node *new_f = NULL; | 372 | struct fib_node *new_f = NULL; |
@@ -544,8 +544,7 @@ out: | |||
544 | return err; | 544 | return err; |
545 | } | 545 | } |
546 | 546 | ||
547 | 547 | int fib_table_delete(struct fib_table *tb, struct fib_config *cfg) | |
548 | static int fn_hash_delete(struct fib_table *tb, struct fib_config *cfg) | ||
549 | { | 548 | { |
550 | struct fn_hash *table = (struct fn_hash *)tb->tb_data; | 549 | struct fn_hash *table = (struct fn_hash *)tb->tb_data; |
551 | struct fib_node *f; | 550 | struct fib_node *f; |
@@ -662,7 +661,7 @@ static int fn_flush_list(struct fn_zone *fz, int idx) | |||
662 | return found; | 661 | return found; |
663 | } | 662 | } |
664 | 663 | ||
665 | static int fn_hash_flush(struct fib_table *tb) | 664 | int fib_table_flush(struct fib_table *tb) |
666 | { | 665 | { |
667 | struct fn_hash *table = (struct fn_hash *) tb->tb_data; | 666 | struct fn_hash *table = (struct fn_hash *) tb->tb_data; |
668 | struct fn_zone *fz; | 667 | struct fn_zone *fz; |
@@ -743,7 +742,8 @@ fn_hash_dump_zone(struct sk_buff *skb, struct netlink_callback *cb, | |||
743 | return skb->len; | 742 | return skb->len; |
744 | } | 743 | } |
745 | 744 | ||
746 | static int fn_hash_dump(struct fib_table *tb, struct sk_buff *skb, struct netlink_callback *cb) | 745 | int fib_table_dump(struct fib_table *tb, struct sk_buff *skb, |
746 | struct netlink_callback *cb) | ||
747 | { | 747 | { |
748 | int m, s_m; | 748 | int m, s_m; |
749 | struct fn_zone *fz; | 749 | struct fn_zone *fz; |
@@ -787,12 +787,7 @@ struct fib_table *fib_hash_table(u32 id) | |||
787 | 787 | ||
788 | tb->tb_id = id; | 788 | tb->tb_id = id; |
789 | tb->tb_default = -1; | 789 | tb->tb_default = -1; |
790 | tb->tb_lookup = fn_hash_lookup; | 790 | |
791 | tb->tb_insert = fn_hash_insert; | ||
792 | tb->tb_delete = fn_hash_delete; | ||
793 | tb->tb_flush = fn_hash_flush; | ||
794 | tb->tb_select_default = fn_hash_select_default; | ||
795 | tb->tb_dump = fn_hash_dump; | ||
796 | memset(tb->tb_data, 0, sizeof(struct fn_hash)); | 791 | memset(tb->tb_data, 0, sizeof(struct fn_hash)); |
797 | return tb; | 792 | return tb; |
798 | } | 793 | } |
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c index 92d9d97ec5e3..835262c2b867 100644 --- a/net/ipv4/fib_rules.c +++ b/net/ipv4/fib_rules.c | |||
@@ -94,7 +94,7 @@ static int fib4_rule_action(struct fib_rule *rule, struct flowi *flp, | |||
94 | if ((tbl = fib_get_table(rule->fr_net, rule->table)) == NULL) | 94 | if ((tbl = fib_get_table(rule->fr_net, rule->table)) == NULL) |
95 | goto errout; | 95 | goto errout; |
96 | 96 | ||
97 | err = tbl->tb_lookup(tbl, flp, (struct fib_result *) arg->result); | 97 | err = fib_table_lookup(tbl, flp, (struct fib_result *) arg->result); |
98 | if (err > 0) | 98 | if (err > 0) |
99 | err = -EAGAIN; | 99 | err = -EAGAIN; |
100 | errout: | 100 | errout: |
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 291bdf50a21f..af5d89792860 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c | |||
@@ -1174,7 +1174,7 @@ done: | |||
1174 | /* | 1174 | /* |
1175 | * Caller must hold RTNL. | 1175 | * Caller must hold RTNL. |
1176 | */ | 1176 | */ |
1177 | static int fn_trie_insert(struct fib_table *tb, struct fib_config *cfg) | 1177 | int fib_table_insert(struct fib_table *tb, struct fib_config *cfg) |
1178 | { | 1178 | { |
1179 | struct trie *t = (struct trie *) tb->tb_data; | 1179 | struct trie *t = (struct trie *) tb->tb_data; |
1180 | struct fib_alias *fa, *new_fa; | 1180 | struct fib_alias *fa, *new_fa; |
@@ -1373,8 +1373,8 @@ static int check_leaf(struct trie *t, struct leaf *l, | |||
1373 | return 1; | 1373 | return 1; |
1374 | } | 1374 | } |
1375 | 1375 | ||
1376 | static int fn_trie_lookup(struct fib_table *tb, const struct flowi *flp, | 1376 | int fib_table_lookup(struct fib_table *tb, const struct flowi *flp, |
1377 | struct fib_result *res) | 1377 | struct fib_result *res) |
1378 | { | 1378 | { |
1379 | struct trie *t = (struct trie *) tb->tb_data; | 1379 | struct trie *t = (struct trie *) tb->tb_data; |
1380 | int ret; | 1380 | int ret; |
@@ -1595,7 +1595,7 @@ static void trie_leaf_remove(struct trie *t, struct leaf *l) | |||
1595 | /* | 1595 | /* |
1596 | * Caller must hold RTNL. | 1596 | * Caller must hold RTNL. |
1597 | */ | 1597 | */ |
1598 | static int fn_trie_delete(struct fib_table *tb, struct fib_config *cfg) | 1598 | int fib_table_delete(struct fib_table *tb, struct fib_config *cfg) |
1599 | { | 1599 | { |
1600 | struct trie *t = (struct trie *) tb->tb_data; | 1600 | struct trie *t = (struct trie *) tb->tb_data; |
1601 | u32 key, mask; | 1601 | u32 key, mask; |
@@ -1786,7 +1786,7 @@ static struct leaf *trie_leafindex(struct trie *t, int index) | |||
1786 | /* | 1786 | /* |
1787 | * Caller must hold RTNL. | 1787 | * Caller must hold RTNL. |
1788 | */ | 1788 | */ |
1789 | static int fn_trie_flush(struct fib_table *tb) | 1789 | int fib_table_flush(struct fib_table *tb) |
1790 | { | 1790 | { |
1791 | struct trie *t = (struct trie *) tb->tb_data; | 1791 | struct trie *t = (struct trie *) tb->tb_data; |
1792 | struct leaf *l, *ll = NULL; | 1792 | struct leaf *l, *ll = NULL; |
@@ -1807,9 +1807,9 @@ static int fn_trie_flush(struct fib_table *tb) | |||
1807 | return found; | 1807 | return found; |
1808 | } | 1808 | } |
1809 | 1809 | ||
1810 | static void fn_trie_select_default(struct fib_table *tb, | 1810 | void fib_table_select_default(struct fib_table *tb, |
1811 | const struct flowi *flp, | 1811 | const struct flowi *flp, |
1812 | struct fib_result *res) | 1812 | struct fib_result *res) |
1813 | { | 1813 | { |
1814 | struct trie *t = (struct trie *) tb->tb_data; | 1814 | struct trie *t = (struct trie *) tb->tb_data; |
1815 | int order, last_idx; | 1815 | int order, last_idx; |
@@ -1952,8 +1952,8 @@ static int fn_trie_dump_leaf(struct leaf *l, struct fib_table *tb, | |||
1952 | return skb->len; | 1952 | return skb->len; |
1953 | } | 1953 | } |
1954 | 1954 | ||
1955 | static int fn_trie_dump(struct fib_table *tb, struct sk_buff *skb, | 1955 | int fib_table_dump(struct fib_table *tb, struct sk_buff *skb, |
1956 | struct netlink_callback *cb) | 1956 | struct netlink_callback *cb) |
1957 | { | 1957 | { |
1958 | struct leaf *l; | 1958 | struct leaf *l; |
1959 | struct trie *t = (struct trie *) tb->tb_data; | 1959 | struct trie *t = (struct trie *) tb->tb_data; |
@@ -2020,12 +2020,6 @@ struct fib_table *fib_hash_table(u32 id) | |||
2020 | 2020 | ||
2021 | tb->tb_id = id; | 2021 | tb->tb_id = id; |
2022 | tb->tb_default = -1; | 2022 | tb->tb_default = -1; |
2023 | tb->tb_lookup = fn_trie_lookup; | ||
2024 | tb->tb_insert = fn_trie_insert; | ||
2025 | tb->tb_delete = fn_trie_delete; | ||
2026 | tb->tb_flush = fn_trie_flush; | ||
2027 | tb->tb_select_default = fn_trie_select_default; | ||
2028 | tb->tb_dump = fn_trie_dump; | ||
2029 | 2023 | ||
2030 | t = (struct trie *) tb->tb_data; | 2024 | t = (struct trie *) tb->tb_data; |
2031 | memset(t, 0, sizeof(*t)); | 2025 | memset(t, 0, sizeof(*t)); |
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 5bc13fe816d1..fe11f60ce41b 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c | |||
@@ -501,15 +501,16 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info) | |||
501 | if (!(rt->rt_flags & RTCF_LOCAL)) { | 501 | if (!(rt->rt_flags & RTCF_LOCAL)) { |
502 | struct net_device *dev = NULL; | 502 | struct net_device *dev = NULL; |
503 | 503 | ||
504 | rcu_read_lock(); | ||
504 | if (rt->fl.iif && | 505 | if (rt->fl.iif && |
505 | net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr) | 506 | net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr) |
506 | dev = dev_get_by_index(net, rt->fl.iif); | 507 | dev = dev_get_by_index_rcu(net, rt->fl.iif); |
507 | 508 | ||
508 | if (dev) { | 509 | if (dev) |
509 | saddr = inet_select_addr(dev, 0, RT_SCOPE_LINK); | 510 | saddr = inet_select_addr(dev, 0, RT_SCOPE_LINK); |
510 | dev_put(dev); | 511 | else |
511 | } else | ||
512 | saddr = 0; | 512 | saddr = 0; |
513 | rcu_read_unlock(); | ||
513 | } | 514 | } |
514 | 515 | ||
515 | tos = icmp_pointers[type].error ? ((iph->tos & IPTOS_TOS_MASK) | | 516 | tos = icmp_pointers[type].error ? ((iph->tos & IPTOS_TOS_MASK) | |
@@ -1165,6 +1166,10 @@ static int __net_init icmp_sk_init(struct net *net) | |||
1165 | sk->sk_sndbuf = | 1166 | sk->sk_sndbuf = |
1166 | (2 * ((64 * 1024) + sizeof(struct sk_buff))); | 1167 | (2 * ((64 * 1024) + sizeof(struct sk_buff))); |
1167 | 1168 | ||
1169 | /* | ||
1170 | * Speedup sock_wfree() | ||
1171 | */ | ||
1172 | sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); | ||
1168 | inet_sk(sk)->pmtudisc = IP_PMTUDISC_DONT; | 1173 | inet_sk(sk)->pmtudisc = IP_PMTUDISC_DONT; |
1169 | } | 1174 | } |
1170 | 1175 | ||
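The icmp.c hunks swap the refcounted dev_get_by_index()/dev_put() pair for dev_get_by_index_rcu() inside an RCU read-side section, and icmp_sk_init() marks its per-cpu sockets with SOCK_USE_WRITE_QUEUE so sock_wfree() takes a shorter path (the in-tree comment: "Speedup sock_wfree()"). The two device-lookup styles, side by side (sketch, kernel context assumed):

/* refcounted lookup: the reference must be released */
dev = dev_get_by_index(net, ifindex);
if (dev) {
        saddr = inet_select_addr(dev, 0, RT_SCOPE_LINK);
        dev_put(dev);
}

/* RCU lookup: dev is only valid inside the critical section,
 * no reference is taken (the pattern used in the hunk above) */
rcu_read_lock();
dev = dev_get_by_index_rcu(net, ifindex);
if (dev)
        saddr = inet_select_addr(dev, 0, RT_SCOPE_LINK);
rcu_read_unlock();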
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index d41e5de79a82..6110c6d6e613 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c | |||
@@ -2311,9 +2311,10 @@ static inline struct ip_mc_list *igmp_mc_get_first(struct seq_file *seq) | |||
2311 | struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq); | 2311 | struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq); |
2312 | 2312 | ||
2313 | state->in_dev = NULL; | 2313 | state->in_dev = NULL; |
2314 | for_each_netdev(net, state->dev) { | 2314 | for_each_netdev_rcu(net, state->dev) { |
2315 | struct in_device *in_dev; | 2315 | struct in_device *in_dev; |
2316 | in_dev = in_dev_get(state->dev); | 2316 | |
2317 | in_dev = __in_dev_get_rcu(state->dev); | ||
2317 | if (!in_dev) | 2318 | if (!in_dev) |
2318 | continue; | 2319 | continue; |
2319 | read_lock(&in_dev->mc_list_lock); | 2320 | read_lock(&in_dev->mc_list_lock); |
@@ -2323,7 +2324,6 @@ static inline struct ip_mc_list *igmp_mc_get_first(struct seq_file *seq) | |||
2323 | break; | 2324 | break; |
2324 | } | 2325 | } |
2325 | read_unlock(&in_dev->mc_list_lock); | 2326 | read_unlock(&in_dev->mc_list_lock); |
2326 | in_dev_put(in_dev); | ||
2327 | } | 2327 | } |
2328 | return im; | 2328 | return im; |
2329 | } | 2329 | } |
@@ -2333,16 +2333,15 @@ static struct ip_mc_list *igmp_mc_get_next(struct seq_file *seq, struct ip_mc_li | |||
2333 | struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq); | 2333 | struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq); |
2334 | im = im->next; | 2334 | im = im->next; |
2335 | while (!im) { | 2335 | while (!im) { |
2336 | if (likely(state->in_dev != NULL)) { | 2336 | if (likely(state->in_dev != NULL)) |
2337 | read_unlock(&state->in_dev->mc_list_lock); | 2337 | read_unlock(&state->in_dev->mc_list_lock); |
2338 | in_dev_put(state->in_dev); | 2338 | |
2339 | } | 2339 | state->dev = next_net_device_rcu(state->dev); |
2340 | state->dev = next_net_device(state->dev); | ||
2341 | if (!state->dev) { | 2340 | if (!state->dev) { |
2342 | state->in_dev = NULL; | 2341 | state->in_dev = NULL; |
2343 | break; | 2342 | break; |
2344 | } | 2343 | } |
2345 | state->in_dev = in_dev_get(state->dev); | 2344 | state->in_dev = __in_dev_get_rcu(state->dev); |
2346 | if (!state->in_dev) | 2345 | if (!state->in_dev) |
2347 | continue; | 2346 | continue; |
2348 | read_lock(&state->in_dev->mc_list_lock); | 2347 | read_lock(&state->in_dev->mc_list_lock); |
@@ -2361,9 +2360,9 @@ static struct ip_mc_list *igmp_mc_get_idx(struct seq_file *seq, loff_t pos) | |||
2361 | } | 2360 | } |
2362 | 2361 | ||
2363 | static void *igmp_mc_seq_start(struct seq_file *seq, loff_t *pos) | 2362 | static void *igmp_mc_seq_start(struct seq_file *seq, loff_t *pos) |
2364 | __acquires(dev_base_lock) | 2363 | __acquires(rcu) |
2365 | { | 2364 | { |
2366 | read_lock(&dev_base_lock); | 2365 | rcu_read_lock(); |
2367 | return *pos ? igmp_mc_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; | 2366 | return *pos ? igmp_mc_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; |
2368 | } | 2367 | } |
2369 | 2368 | ||
@@ -2379,16 +2378,15 @@ static void *igmp_mc_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
2379 | } | 2378 | } |
2380 | 2379 | ||
2381 | static void igmp_mc_seq_stop(struct seq_file *seq, void *v) | 2380 | static void igmp_mc_seq_stop(struct seq_file *seq, void *v) |
2382 | __releases(dev_base_lock) | 2381 | __releases(rcu) |
2383 | { | 2382 | { |
2384 | struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq); | 2383 | struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq); |
2385 | if (likely(state->in_dev != NULL)) { | 2384 | if (likely(state->in_dev != NULL)) { |
2386 | read_unlock(&state->in_dev->mc_list_lock); | 2385 | read_unlock(&state->in_dev->mc_list_lock); |
2387 | in_dev_put(state->in_dev); | ||
2388 | state->in_dev = NULL; | 2386 | state->in_dev = NULL; |
2389 | } | 2387 | } |
2390 | state->dev = NULL; | 2388 | state->dev = NULL; |
2391 | read_unlock(&dev_base_lock); | 2389 | rcu_read_unlock(); |
2392 | } | 2390 | } |
2393 | 2391 | ||
2394 | static int igmp_mc_seq_show(struct seq_file *seq, void *v) | 2392 | static int igmp_mc_seq_show(struct seq_file *seq, void *v) |
@@ -2462,9 +2460,9 @@ static inline struct ip_sf_list *igmp_mcf_get_first(struct seq_file *seq) | |||
2462 | 2460 | ||
2463 | state->idev = NULL; | 2461 | state->idev = NULL; |
2464 | state->im = NULL; | 2462 | state->im = NULL; |
2465 | for_each_netdev(net, state->dev) { | 2463 | for_each_netdev_rcu(net, state->dev) { |
2466 | struct in_device *idev; | 2464 | struct in_device *idev; |
2467 | idev = in_dev_get(state->dev); | 2465 | idev = __in_dev_get_rcu(state->dev); |
2468 | if (unlikely(idev == NULL)) | 2466 | if (unlikely(idev == NULL)) |
2469 | continue; | 2467 | continue; |
2470 | read_lock(&idev->mc_list_lock); | 2468 | read_lock(&idev->mc_list_lock); |
@@ -2480,7 +2478,6 @@ static inline struct ip_sf_list *igmp_mcf_get_first(struct seq_file *seq) | |||
2480 | spin_unlock_bh(&im->lock); | 2478 | spin_unlock_bh(&im->lock); |
2481 | } | 2479 | } |
2482 | read_unlock(&idev->mc_list_lock); | 2480 | read_unlock(&idev->mc_list_lock); |
2483 | in_dev_put(idev); | ||
2484 | } | 2481 | } |
2485 | return psf; | 2482 | return psf; |
2486 | } | 2483 | } |
@@ -2494,16 +2491,15 @@ static struct ip_sf_list *igmp_mcf_get_next(struct seq_file *seq, struct ip_sf_l | |||
2494 | spin_unlock_bh(&state->im->lock); | 2491 | spin_unlock_bh(&state->im->lock); |
2495 | state->im = state->im->next; | 2492 | state->im = state->im->next; |
2496 | while (!state->im) { | 2493 | while (!state->im) { |
2497 | if (likely(state->idev != NULL)) { | 2494 | if (likely(state->idev != NULL)) |
2498 | read_unlock(&state->idev->mc_list_lock); | 2495 | read_unlock(&state->idev->mc_list_lock); |
2499 | in_dev_put(state->idev); | 2496 | |
2500 | } | 2497 | state->dev = next_net_device_rcu(state->dev); |
2501 | state->dev = next_net_device(state->dev); | ||
2502 | if (!state->dev) { | 2498 | if (!state->dev) { |
2503 | state->idev = NULL; | 2499 | state->idev = NULL; |
2504 | goto out; | 2500 | goto out; |
2505 | } | 2501 | } |
2506 | state->idev = in_dev_get(state->dev); | 2502 | state->idev = __in_dev_get_rcu(state->dev); |
2507 | if (!state->idev) | 2503 | if (!state->idev) |
2508 | continue; | 2504 | continue; |
2509 | read_lock(&state->idev->mc_list_lock); | 2505 | read_lock(&state->idev->mc_list_lock); |
@@ -2528,8 +2524,9 @@ static struct ip_sf_list *igmp_mcf_get_idx(struct seq_file *seq, loff_t pos) | |||
2528 | } | 2524 | } |
2529 | 2525 | ||
2530 | static void *igmp_mcf_seq_start(struct seq_file *seq, loff_t *pos) | 2526 | static void *igmp_mcf_seq_start(struct seq_file *seq, loff_t *pos) |
2527 | __acquires(rcu) | ||
2531 | { | 2528 | { |
2532 | read_lock(&dev_base_lock); | 2529 | rcu_read_lock(); |
2533 | return *pos ? igmp_mcf_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; | 2530 | return *pos ? igmp_mcf_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; |
2534 | } | 2531 | } |
2535 | 2532 | ||
@@ -2545,6 +2542,7 @@ static void *igmp_mcf_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
2545 | } | 2542 | } |
2546 | 2543 | ||
2547 | static void igmp_mcf_seq_stop(struct seq_file *seq, void *v) | 2544 | static void igmp_mcf_seq_stop(struct seq_file *seq, void *v) |
2545 | __releases(rcu) | ||
2548 | { | 2546 | { |
2549 | struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq); | 2547 | struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq); |
2550 | if (likely(state->im != NULL)) { | 2548 | if (likely(state->im != NULL)) { |
@@ -2553,11 +2551,10 @@ static void igmp_mcf_seq_stop(struct seq_file *seq, void *v) | |||
2553 | } | 2551 | } |
2554 | if (likely(state->idev != NULL)) { | 2552 | if (likely(state->idev != NULL)) { |
2555 | read_unlock(&state->idev->mc_list_lock); | 2553 | read_unlock(&state->idev->mc_list_lock); |
2556 | in_dev_put(state->idev); | ||
2557 | state->idev = NULL; | 2554 | state->idev = NULL; |
2558 | } | 2555 | } |
2559 | state->dev = NULL; | 2556 | state->dev = NULL; |
2560 | read_unlock(&dev_base_lock); | 2557 | rcu_read_unlock(); |
2561 | } | 2558 | } |
2562 | 2559 | ||
2563 | static int igmp_mcf_seq_show(struct seq_file *seq, void *v) | 2560 | static int igmp_mcf_seq_show(struct seq_file *seq, void *v) |
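The igmp /proc iterators above switch from holding dev_base_lock plus per-device in_dev_get()/in_dev_put() references to a pure RCU walk. A minimal sketch of the resulting pattern, using the same net/dev/in_dev names as the hunks:

        rcu_read_lock();
        for_each_netdev_rcu(net, dev) {
                struct in_device *in_dev = __in_dev_get_rcu(dev);

                if (!in_dev)
                        continue;
                /* walk in_dev->mc_list under in_dev->mc_list_lock, as before */
        }
        rcu_read_unlock();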
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 537731b3bcb3..26fb50e91311 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c | |||
@@ -358,6 +358,7 @@ struct dst_entry *inet_csk_route_req(struct sock *sk, | |||
358 | const struct inet_request_sock *ireq = inet_rsk(req); | 358 | const struct inet_request_sock *ireq = inet_rsk(req); |
359 | struct ip_options *opt = inet_rsk(req)->opt; | 359 | struct ip_options *opt = inet_rsk(req)->opt; |
360 | struct flowi fl = { .oif = sk->sk_bound_dev_if, | 360 | struct flowi fl = { .oif = sk->sk_bound_dev_if, |
361 | .mark = sk->sk_mark, | ||
361 | .nl_u = { .ip4_u = | 362 | .nl_u = { .ip4_u = |
362 | { .daddr = ((opt && opt->srr) ? | 363 | { .daddr = ((opt && opt->srr) ? |
363 | opt->faddr : | 364 | opt->faddr : |
@@ -367,7 +368,7 @@ struct dst_entry *inet_csk_route_req(struct sock *sk, | |||
367 | .proto = sk->sk_protocol, | 368 | .proto = sk->sk_protocol, |
368 | .flags = inet_sk_flowi_flags(sk), | 369 | .flags = inet_sk_flowi_flags(sk), |
369 | .uli_u = { .ports = | 370 | .uli_u = { .ports = |
370 | { .sport = inet_sk(sk)->sport, | 371 | { .sport = inet_sk(sk)->inet_sport, |
371 | .dport = ireq->rmt_port } } }; | 372 | .dport = ireq->rmt_port } } }; |
372 | struct net *net = sock_net(sk); | 373 | struct net *net = sock_net(sk); |
373 | 374 | ||
@@ -574,9 +575,9 @@ struct sock *inet_csk_clone(struct sock *sk, const struct request_sock *req, | |||
574 | newsk->sk_state = TCP_SYN_RECV; | 575 | newsk->sk_state = TCP_SYN_RECV; |
575 | newicsk->icsk_bind_hash = NULL; | 576 | newicsk->icsk_bind_hash = NULL; |
576 | 577 | ||
577 | inet_sk(newsk)->dport = inet_rsk(req)->rmt_port; | 578 | inet_sk(newsk)->inet_dport = inet_rsk(req)->rmt_port; |
578 | inet_sk(newsk)->num = ntohs(inet_rsk(req)->loc_port); | 579 | inet_sk(newsk)->inet_num = ntohs(inet_rsk(req)->loc_port); |
579 | inet_sk(newsk)->sport = inet_rsk(req)->loc_port; | 580 | inet_sk(newsk)->inet_sport = inet_rsk(req)->loc_port; |
580 | newsk->sk_write_space = sk_stream_write_space; | 581 | newsk->sk_write_space = sk_stream_write_space; |
581 | 582 | ||
582 | newicsk->icsk_retransmits = 0; | 583 | newicsk->icsk_retransmits = 0; |
@@ -607,8 +608,8 @@ void inet_csk_destroy_sock(struct sock *sk) | |||
607 | /* It cannot be in hash table! */ | 608 | /* It cannot be in hash table! */ |
608 | WARN_ON(!sk_unhashed(sk)); | 609 | WARN_ON(!sk_unhashed(sk)); |
609 | 610 | ||
610 | /* If it has not 0 inet_sk(sk)->num, it must be bound */ | 611 | /* If it has not 0 inet_sk(sk)->inet_num, it must be bound */ |
611 | WARN_ON(inet_sk(sk)->num && !inet_csk(sk)->icsk_bind_hash); | 612 | WARN_ON(inet_sk(sk)->inet_num && !inet_csk(sk)->icsk_bind_hash); |
612 | 613 | ||
613 | sk->sk_prot->destroy(sk); | 614 | sk->sk_prot->destroy(sk); |
614 | 615 | ||
@@ -643,8 +644,8 @@ int inet_csk_listen_start(struct sock *sk, const int nr_table_entries) | |||
643 | * after validation is complete. | 644 | * after validation is complete. |
644 | */ | 645 | */ |
645 | sk->sk_state = TCP_LISTEN; | 646 | sk->sk_state = TCP_LISTEN; |
646 | if (!sk->sk_prot->get_port(sk, inet->num)) { | 647 | if (!sk->sk_prot->get_port(sk, inet->inet_num)) { |
647 | inet->sport = htons(inet->num); | 648 | inet->inet_sport = htons(inet->inet_num); |
648 | 649 | ||
649 | sk_dst_reset(sk); | 650 | sk_dst_reset(sk); |
650 | sk->sk_prot->hash(sk); | 651 | sk->sk_prot->hash(sk); |
@@ -720,8 +721,8 @@ void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr) | |||
720 | const struct inet_sock *inet = inet_sk(sk); | 721 | const struct inet_sock *inet = inet_sk(sk); |
721 | 722 | ||
722 | sin->sin_family = AF_INET; | 723 | sin->sin_family = AF_INET; |
723 | sin->sin_addr.s_addr = inet->daddr; | 724 | sin->sin_addr.s_addr = inet->inet_daddr; |
724 | sin->sin_port = inet->dport; | 725 | sin->sin_port = inet->inet_dport; |
725 | } | 726 | } |
726 | 727 | ||
727 | EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr); | 728 | EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr); |
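Most of the churn in this file (and in the rest of the series) is the mechanical struct inet_sock field rename; accesses change like this (sketch):

        struct inet_sock *inet = inet_sk(sk);

        __be16 sport = inet->inet_sport;        /* was inet->sport */
        __be16 dport = inet->inet_dport;        /* was inet->dport */
        __be32 daddr = inet->inet_daddr;        /* was inet->daddr */
        __u16  num   = inet->inet_num;          /* was inet->num   */

The one functional change here is the added .mark = sk->sk_mark in the flowi, so route lookups for request sockets honour the socket mark.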
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index a706a47f4dbb..bdb78dd180ce 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c | |||
@@ -116,10 +116,10 @@ static int inet_csk_diag_fill(struct sock *sk, | |||
116 | r->id.idiag_cookie[0] = (u32)(unsigned long)sk; | 116 | r->id.idiag_cookie[0] = (u32)(unsigned long)sk; |
117 | r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1); | 117 | r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1); |
118 | 118 | ||
119 | r->id.idiag_sport = inet->sport; | 119 | r->id.idiag_sport = inet->inet_sport; |
120 | r->id.idiag_dport = inet->dport; | 120 | r->id.idiag_dport = inet->inet_dport; |
121 | r->id.idiag_src[0] = inet->rcv_saddr; | 121 | r->id.idiag_src[0] = inet->inet_rcv_saddr; |
122 | r->id.idiag_dst[0] = inet->daddr; | 122 | r->id.idiag_dst[0] = inet->inet_daddr; |
123 | 123 | ||
124 | #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) | 124 | #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) |
125 | if (r->idiag_family == AF_INET6) { | 125 | if (r->idiag_family == AF_INET6) { |
@@ -504,11 +504,11 @@ static int inet_csk_diag_dump(struct sock *sk, | |||
504 | } else | 504 | } else |
505 | #endif | 505 | #endif |
506 | { | 506 | { |
507 | entry.saddr = &inet->rcv_saddr; | 507 | entry.saddr = &inet->inet_rcv_saddr; |
508 | entry.daddr = &inet->daddr; | 508 | entry.daddr = &inet->inet_daddr; |
509 | } | 509 | } |
510 | entry.sport = inet->num; | 510 | entry.sport = inet->inet_num; |
511 | entry.dport = ntohs(inet->dport); | 511 | entry.dport = ntohs(inet->inet_dport); |
512 | entry.userlocks = sk->sk_userlocks; | 512 | entry.userlocks = sk->sk_userlocks; |
513 | 513 | ||
514 | if (!inet_diag_bc_run(RTA_DATA(bc), RTA_PAYLOAD(bc), &entry)) | 514 | if (!inet_diag_bc_run(RTA_DATA(bc), RTA_PAYLOAD(bc), &entry)) |
@@ -584,7 +584,7 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk, | |||
584 | if (tmo < 0) | 584 | if (tmo < 0) |
585 | tmo = 0; | 585 | tmo = 0; |
586 | 586 | ||
587 | r->id.idiag_sport = inet->sport; | 587 | r->id.idiag_sport = inet->inet_sport; |
588 | r->id.idiag_dport = ireq->rmt_port; | 588 | r->id.idiag_dport = ireq->rmt_port; |
589 | r->id.idiag_src[0] = ireq->loc_addr; | 589 | r->id.idiag_src[0] = ireq->loc_addr; |
590 | r->id.idiag_dst[0] = ireq->rmt_addr; | 590 | r->id.idiag_dst[0] = ireq->rmt_addr; |
@@ -639,7 +639,7 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk, | |||
639 | 639 | ||
640 | if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) { | 640 | if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) { |
641 | bc = (struct rtattr *)(r + 1); | 641 | bc = (struct rtattr *)(r + 1); |
642 | entry.sport = inet->num; | 642 | entry.sport = inet->inet_num; |
643 | entry.userlocks = sk->sk_userlocks; | 643 | entry.userlocks = sk->sk_userlocks; |
644 | } | 644 | } |
645 | 645 | ||
@@ -732,7 +732,7 @@ static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
732 | continue; | 732 | continue; |
733 | } | 733 | } |
734 | 734 | ||
735 | if (r->id.idiag_sport != inet->sport && | 735 | if (r->id.idiag_sport != inet->inet_sport && |
736 | r->id.idiag_sport) | 736 | r->id.idiag_sport) |
737 | goto next_listen; | 737 | goto next_listen; |
738 | 738 | ||
@@ -774,7 +774,7 @@ skip_listen_ht: | |||
774 | if (!(r->idiag_states & ~(TCPF_LISTEN | TCPF_SYN_RECV))) | 774 | if (!(r->idiag_states & ~(TCPF_LISTEN | TCPF_SYN_RECV))) |
775 | goto unlock; | 775 | goto unlock; |
776 | 776 | ||
777 | for (i = s_i; i < hashinfo->ehash_size; i++) { | 777 | for (i = s_i; i <= hashinfo->ehash_mask; i++) { |
778 | struct inet_ehash_bucket *head = &hashinfo->ehash[i]; | 778 | struct inet_ehash_bucket *head = &hashinfo->ehash[i]; |
779 | spinlock_t *lock = inet_ehash_lockp(hashinfo, i); | 779 | spinlock_t *lock = inet_ehash_lockp(hashinfo, i); |
780 | struct sock *sk; | 780 | struct sock *sk; |
@@ -797,10 +797,10 @@ skip_listen_ht: | |||
797 | goto next_normal; | 797 | goto next_normal; |
798 | if (!(r->idiag_states & (1 << sk->sk_state))) | 798 | if (!(r->idiag_states & (1 << sk->sk_state))) |
799 | goto next_normal; | 799 | goto next_normal; |
800 | if (r->id.idiag_sport != inet->sport && | 800 | if (r->id.idiag_sport != inet->inet_sport && |
801 | r->id.idiag_sport) | 801 | r->id.idiag_sport) |
802 | goto next_normal; | 802 | goto next_normal; |
803 | if (r->id.idiag_dport != inet->dport && | 803 | if (r->id.idiag_dport != inet->inet_dport && |
804 | r->id.idiag_dport) | 804 | r->id.idiag_dport) |
805 | goto next_normal; | 805 | goto next_normal; |
806 | if (inet_csk_diag_dump(sk, skb, cb) < 0) { | 806 | if (inet_csk_diag_dump(sk, skb, cb) < 0) { |
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index 625cc5f64c94..47ad7aab51e3 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c | |||
@@ -64,7 +64,7 @@ void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb, | |||
64 | 64 | ||
65 | atomic_inc(&hashinfo->bsockets); | 65 | atomic_inc(&hashinfo->bsockets); |
66 | 66 | ||
67 | inet_sk(sk)->num = snum; | 67 | inet_sk(sk)->inet_num = snum; |
68 | sk_add_bind_node(sk, &tb->owners); | 68 | sk_add_bind_node(sk, &tb->owners); |
69 | tb->num_owners++; | 69 | tb->num_owners++; |
70 | inet_csk(sk)->icsk_bind_hash = tb; | 70 | inet_csk(sk)->icsk_bind_hash = tb; |
@@ -76,7 +76,7 @@ void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb, | |||
76 | static void __inet_put_port(struct sock *sk) | 76 | static void __inet_put_port(struct sock *sk) |
77 | { | 77 | { |
78 | struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo; | 78 | struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo; |
79 | const int bhash = inet_bhashfn(sock_net(sk), inet_sk(sk)->num, | 79 | const int bhash = inet_bhashfn(sock_net(sk), inet_sk(sk)->inet_num, |
80 | hashinfo->bhash_size); | 80 | hashinfo->bhash_size); |
81 | struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash]; | 81 | struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash]; |
82 | struct inet_bind_bucket *tb; | 82 | struct inet_bind_bucket *tb; |
@@ -88,7 +88,7 @@ static void __inet_put_port(struct sock *sk) | |||
88 | __sk_del_bind_node(sk); | 88 | __sk_del_bind_node(sk); |
89 | tb->num_owners--; | 89 | tb->num_owners--; |
90 | inet_csk(sk)->icsk_bind_hash = NULL; | 90 | inet_csk(sk)->icsk_bind_hash = NULL; |
91 | inet_sk(sk)->num = 0; | 91 | inet_sk(sk)->inet_num = 0; |
92 | inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb); | 92 | inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb); |
93 | spin_unlock(&head->lock); | 93 | spin_unlock(&head->lock); |
94 | } | 94 | } |
@@ -105,7 +105,7 @@ EXPORT_SYMBOL(inet_put_port); | |||
105 | void __inet_inherit_port(struct sock *sk, struct sock *child) | 105 | void __inet_inherit_port(struct sock *sk, struct sock *child) |
106 | { | 106 | { |
107 | struct inet_hashinfo *table = sk->sk_prot->h.hashinfo; | 107 | struct inet_hashinfo *table = sk->sk_prot->h.hashinfo; |
108 | const int bhash = inet_bhashfn(sock_net(sk), inet_sk(child)->num, | 108 | const int bhash = inet_bhashfn(sock_net(sk), inet_sk(child)->inet_num, |
109 | table->bhash_size); | 109 | table->bhash_size); |
110 | struct inet_bind_hashbucket *head = &table->bhash[bhash]; | 110 | struct inet_bind_hashbucket *head = &table->bhash[bhash]; |
111 | struct inet_bind_bucket *tb; | 111 | struct inet_bind_bucket *tb; |
@@ -126,9 +126,9 @@ static inline int compute_score(struct sock *sk, struct net *net, | |||
126 | int score = -1; | 126 | int score = -1; |
127 | struct inet_sock *inet = inet_sk(sk); | 127 | struct inet_sock *inet = inet_sk(sk); |
128 | 128 | ||
129 | if (net_eq(sock_net(sk), net) && inet->num == hnum && | 129 | if (net_eq(sock_net(sk), net) && inet->inet_num == hnum && |
130 | !ipv6_only_sock(sk)) { | 130 | !ipv6_only_sock(sk)) { |
131 | __be32 rcv_saddr = inet->rcv_saddr; | 131 | __be32 rcv_saddr = inet->inet_rcv_saddr; |
132 | score = sk->sk_family == PF_INET ? 1 : 0; | 132 | score = sk->sk_family == PF_INET ? 1 : 0; |
133 | if (rcv_saddr) { | 133 | if (rcv_saddr) { |
134 | if (rcv_saddr != daddr) | 134 | if (rcv_saddr != daddr) |
@@ -209,7 +209,7 @@ struct sock * __inet_lookup_established(struct net *net, | |||
209 | * have wildcards anyways. | 209 | * have wildcards anyways. |
210 | */ | 210 | */ |
211 | unsigned int hash = inet_ehashfn(net, daddr, hnum, saddr, sport); | 211 | unsigned int hash = inet_ehashfn(net, daddr, hnum, saddr, sport); |
212 | unsigned int slot = hash & (hashinfo->ehash_size - 1); | 212 | unsigned int slot = hash & hashinfo->ehash_mask; |
213 | struct inet_ehash_bucket *head = &hashinfo->ehash[slot]; | 213 | struct inet_ehash_bucket *head = &hashinfo->ehash[slot]; |
214 | 214 | ||
215 | rcu_read_lock(); | 215 | rcu_read_lock(); |
@@ -273,13 +273,14 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row, | |||
273 | { | 273 | { |
274 | struct inet_hashinfo *hinfo = death_row->hashinfo; | 274 | struct inet_hashinfo *hinfo = death_row->hashinfo; |
275 | struct inet_sock *inet = inet_sk(sk); | 275 | struct inet_sock *inet = inet_sk(sk); |
276 | __be32 daddr = inet->rcv_saddr; | 276 | __be32 daddr = inet->inet_rcv_saddr; |
277 | __be32 saddr = inet->daddr; | 277 | __be32 saddr = inet->inet_daddr; |
278 | int dif = sk->sk_bound_dev_if; | 278 | int dif = sk->sk_bound_dev_if; |
279 | INET_ADDR_COOKIE(acookie, saddr, daddr) | 279 | INET_ADDR_COOKIE(acookie, saddr, daddr) |
280 | const __portpair ports = INET_COMBINED_PORTS(inet->dport, lport); | 280 | const __portpair ports = INET_COMBINED_PORTS(inet->inet_dport, lport); |
281 | struct net *net = sock_net(sk); | 281 | struct net *net = sock_net(sk); |
282 | unsigned int hash = inet_ehashfn(net, daddr, lport, saddr, inet->dport); | 282 | unsigned int hash = inet_ehashfn(net, daddr, lport, |
283 | saddr, inet->inet_dport); | ||
283 | struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash); | 284 | struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash); |
284 | spinlock_t *lock = inet_ehash_lockp(hinfo, hash); | 285 | spinlock_t *lock = inet_ehash_lockp(hinfo, hash); |
285 | struct sock *sk2; | 286 | struct sock *sk2; |
@@ -312,8 +313,8 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row, | |||
312 | unique: | 313 | unique: |
313 | /* Must record num and sport now. Otherwise we will see | 314 | /* Must record num and sport now. Otherwise we will see |
314 | * in hash table socket with a funny identity. */ | 315 | * in hash table socket with a funny identity. */ |
315 | inet->num = lport; | 316 | inet->inet_num = lport; |
316 | inet->sport = htons(lport); | 317 | inet->inet_sport = htons(lport); |
317 | sk->sk_hash = hash; | 318 | sk->sk_hash = hash; |
318 | WARN_ON(!sk_unhashed(sk)); | 319 | WARN_ON(!sk_unhashed(sk)); |
319 | __sk_nulls_add_node_rcu(sk, &head->chain); | 320 | __sk_nulls_add_node_rcu(sk, &head->chain); |
@@ -341,8 +342,9 @@ not_unique: | |||
341 | static inline u32 inet_sk_port_offset(const struct sock *sk) | 342 | static inline u32 inet_sk_port_offset(const struct sock *sk) |
342 | { | 343 | { |
343 | const struct inet_sock *inet = inet_sk(sk); | 344 | const struct inet_sock *inet = inet_sk(sk); |
344 | return secure_ipv4_port_ephemeral(inet->rcv_saddr, inet->daddr, | 345 | return secure_ipv4_port_ephemeral(inet->inet_rcv_saddr, |
345 | inet->dport); | 346 | inet->inet_daddr, |
347 | inet->inet_dport); | ||
346 | } | 348 | } |
347 | 349 | ||
348 | void __inet_hash_nolisten(struct sock *sk) | 350 | void __inet_hash_nolisten(struct sock *sk) |
@@ -424,7 +426,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row, | |||
424 | void (*hash)(struct sock *sk)) | 426 | void (*hash)(struct sock *sk)) |
425 | { | 427 | { |
426 | struct inet_hashinfo *hinfo = death_row->hashinfo; | 428 | struct inet_hashinfo *hinfo = death_row->hashinfo; |
427 | const unsigned short snum = inet_sk(sk)->num; | 429 | const unsigned short snum = inet_sk(sk)->inet_num; |
428 | struct inet_bind_hashbucket *head; | 430 | struct inet_bind_hashbucket *head; |
429 | struct inet_bind_bucket *tb; | 431 | struct inet_bind_bucket *tb; |
430 | int ret; | 432 | int ret; |
@@ -485,7 +487,7 @@ ok: | |||
485 | /* Head lock still held and bh's disabled */ | 487 | /* Head lock still held and bh's disabled */ |
486 | inet_bind_hash(sk, tb, port); | 488 | inet_bind_hash(sk, tb, port); |
487 | if (sk_unhashed(sk)) { | 489 | if (sk_unhashed(sk)) { |
488 | inet_sk(sk)->sport = htons(port); | 490 | inet_sk(sk)->inet_sport = htons(port); |
489 | hash(sk); | 491 | hash(sk); |
490 | } | 492 | } |
491 | spin_unlock(&head->lock); | 493 | spin_unlock(&head->lock); |
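The established-hash sizing also changes from ehash_size to ehash_mask (table size minus one, the size being a power of two), which gives the two idioms used throughout this series. Sketched here with a hypothetical scan_bucket() helper standing in for the real per-bucket work:

        unsigned int slot = hash & hashinfo->ehash_mask;        /* bucket selection */

        for (i = 0; i <= hashinfo->ehash_mask; i++)             /* full table walk */
                scan_bucket(&hashinfo->ehash[i]);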
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c index 13f0781f35cd..1f5d508bb18b 100644 --- a/net/ipv4/inet_timewait_sock.c +++ b/net/ipv4/inet_timewait_sock.c | |||
@@ -86,7 +86,7 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk, | |||
86 | Note, that any socket with inet->num != 0 MUST be bound in | 86 | Note, that any socket with inet->num != 0 MUST be bound in |
87 | binding cache, even if it is closed. | 87 | binding cache, even if it is closed. |
88 | */ | 88 | */ |
89 | bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), inet->num, | 89 | bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), inet->inet_num, |
90 | hashinfo->bhash_size)]; | 90 | hashinfo->bhash_size)]; |
91 | spin_lock(&bhead->lock); | 91 | spin_lock(&bhead->lock); |
92 | tw->tw_tb = icsk->icsk_bind_hash; | 92 | tw->tw_tb = icsk->icsk_bind_hash; |
@@ -124,14 +124,14 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int stat | |||
124 | kmemcheck_annotate_bitfield(tw, flags); | 124 | kmemcheck_annotate_bitfield(tw, flags); |
125 | 125 | ||
126 | /* Give us an identity. */ | 126 | /* Give us an identity. */ |
127 | tw->tw_daddr = inet->daddr; | 127 | tw->tw_daddr = inet->inet_daddr; |
128 | tw->tw_rcv_saddr = inet->rcv_saddr; | 128 | tw->tw_rcv_saddr = inet->inet_rcv_saddr; |
129 | tw->tw_bound_dev_if = sk->sk_bound_dev_if; | 129 | tw->tw_bound_dev_if = sk->sk_bound_dev_if; |
130 | tw->tw_num = inet->num; | 130 | tw->tw_num = inet->inet_num; |
131 | tw->tw_state = TCP_TIME_WAIT; | 131 | tw->tw_state = TCP_TIME_WAIT; |
132 | tw->tw_substate = state; | 132 | tw->tw_substate = state; |
133 | tw->tw_sport = inet->sport; | 133 | tw->tw_sport = inet->inet_sport; |
134 | tw->tw_dport = inet->dport; | 134 | tw->tw_dport = inet->inet_dport; |
135 | tw->tw_family = sk->sk_family; | 135 | tw->tw_family = sk->sk_family; |
136 | tw->tw_reuse = sk->sk_reuse; | 136 | tw->tw_reuse = sk->sk_reuse; |
137 | tw->tw_hash = sk->sk_hash; | 137 | tw->tw_hash = sk->sk_hash; |
@@ -430,7 +430,7 @@ void inet_twsk_purge(struct net *net, struct inet_hashinfo *hashinfo, | |||
430 | int h; | 430 | int h; |
431 | 431 | ||
432 | local_bh_disable(); | 432 | local_bh_disable(); |
433 | for (h = 0; h < (hashinfo->ehash_size); h++) { | 433 | for (h = 0; h <= hashinfo->ehash_mask; h++) { |
434 | struct inet_ehash_bucket *head = | 434 | struct inet_ehash_bucket *head = |
435 | inet_ehash_bucket(hashinfo, h); | 435 | inet_ehash_bucket(hashinfo, h); |
436 | spinlock_t *lock = inet_ehash_lockp(hashinfo, h); | 436 | spinlock_t *lock = inet_ehash_lockp(hashinfo, h); |
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c index b1fbe18feb5a..6bcfe52a9c87 100644 --- a/net/ipv4/inetpeer.c +++ b/net/ipv4/inetpeer.c | |||
@@ -67,9 +67,6 @@ | |||
67 | * ip_id_count: idlock | 67 | * ip_id_count: idlock |
68 | */ | 68 | */ |
69 | 69 | ||
70 | /* Exported for inet_getid inline function. */ | ||
71 | DEFINE_SPINLOCK(inet_peer_idlock); | ||
72 | |||
73 | static struct kmem_cache *peer_cachep __read_mostly; | 70 | static struct kmem_cache *peer_cachep __read_mostly; |
74 | 71 | ||
75 | #define node_height(x) x->avl_height | 72 | #define node_height(x) x->avl_height |
@@ -390,7 +387,7 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create) | |||
390 | n->v4daddr = daddr; | 387 | n->v4daddr = daddr; |
391 | atomic_set(&n->refcnt, 1); | 388 | atomic_set(&n->refcnt, 1); |
392 | atomic_set(&n->rid, 0); | 389 | atomic_set(&n->rid, 0); |
393 | n->ip_id_count = secure_ip_id(daddr); | 390 | atomic_set(&n->ip_id_count, secure_ip_id(daddr)); |
394 | n->tcp_ts_stamp = 0; | 391 | n->tcp_ts_stamp = 0; |
395 | 392 | ||
396 | write_lock_bh(&peer_pool_lock); | 393 | write_lock_bh(&peer_pool_lock); |
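With ip_id_count initialised as an atomic_t at peer creation, the separate inet_peer_idlock spinlock can go away; inet_getid() presumably collapses to a single atomic operation along these lines (a sketch, not a quote of the header):

        static inline __u16 inet_getid(struct inet_peer *p, int more)
        {
                more++;
                return atomic_add_return(more, &p->ip_id_count) - more;
        }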
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index 575f9bd51ccd..b007f8af6e1f 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c | |||
@@ -206,10 +206,11 @@ static void ip_expire(unsigned long arg) | |||
206 | struct sk_buff *head = qp->q.fragments; | 206 | struct sk_buff *head = qp->q.fragments; |
207 | 207 | ||
208 | /* Send an ICMP "Fragment Reassembly Timeout" message. */ | 208 | /* Send an ICMP "Fragment Reassembly Timeout" message. */ |
209 | if ((head->dev = dev_get_by_index(net, qp->iif)) != NULL) { | 209 | rcu_read_lock(); |
210 | head->dev = dev_get_by_index_rcu(net, qp->iif); | ||
211 | if (head->dev) | ||
210 | icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0); | 212 | icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0); |
211 | dev_put(head->dev); | 213 | rcu_read_unlock(); |
212 | } | ||
213 | } | 214 | } |
214 | out: | 215 | out: |
215 | spin_unlock(&qp->q.lock); | 216 | spin_unlock(&qp->q.lock); |
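ip_expire() moves from dev_get_by_index(), which takes a device reference the caller must drop, to the RCU variant, which takes none and is only valid inside the read-side section. The general pattern, with use() standing in for whatever the caller does with the device:

        /* old style: reference counted */
        dev = dev_get_by_index(net, ifindex);
        if (dev) {
                use(dev);
                dev_put(dev);
        }

        /* new style: no reference taken, must stay inside the RCU section */
        rcu_read_lock();
        dev = dev_get_by_index_rcu(net, ifindex);
        if (dev)
                use(dev);
        rcu_read_unlock();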
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 143333852624..c5f6af5d0f34 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c | |||
@@ -125,7 +125,7 @@ static int ipgre_tunnel_bind_dev(struct net_device *dev); | |||
125 | 125 | ||
126 | #define HASH_SIZE 16 | 126 | #define HASH_SIZE 16 |
127 | 127 | ||
128 | static int ipgre_net_id; | 128 | static int ipgre_net_id __read_mostly; |
129 | struct ipgre_net { | 129 | struct ipgre_net { |
130 | struct ip_tunnel *tunnels[4][HASH_SIZE]; | 130 | struct ip_tunnel *tunnels[4][HASH_SIZE]; |
131 | 131 | ||
@@ -156,8 +156,13 @@ struct ipgre_net { | |||
156 | #define tunnels_r tunnels[2] | 156 | #define tunnels_r tunnels[2] |
157 | #define tunnels_l tunnels[1] | 157 | #define tunnels_l tunnels[1] |
158 | #define tunnels_wc tunnels[0] | 158 | #define tunnels_wc tunnels[0] |
159 | /* | ||
160 | * Locking : hash tables are protected by RCU and a spinlock | ||
161 | */ | ||
162 | static DEFINE_SPINLOCK(ipgre_lock); | ||
159 | 163 | ||
160 | static DEFINE_RWLOCK(ipgre_lock); | 164 | #define for_each_ip_tunnel_rcu(start) \ |
165 | for (t = rcu_dereference(start); t; t = rcu_dereference(t->next)) | ||
161 | 166 | ||
162 | /* Given src, dst and key, find appropriate for input tunnel. */ | 167 | /* Given src, dst and key, find appropriate for input tunnel. */ |
163 | 168 | ||
@@ -175,7 +180,7 @@ static struct ip_tunnel * ipgre_tunnel_lookup(struct net_device *dev, | |||
175 | ARPHRD_ETHER : ARPHRD_IPGRE; | 180 | ARPHRD_ETHER : ARPHRD_IPGRE; |
176 | int score, cand_score = 4; | 181 | int score, cand_score = 4; |
177 | 182 | ||
178 | for (t = ign->tunnels_r_l[h0^h1]; t; t = t->next) { | 183 | for_each_ip_tunnel_rcu(ign->tunnels_r_l[h0 ^ h1]) { |
179 | if (local != t->parms.iph.saddr || | 184 | if (local != t->parms.iph.saddr || |
180 | remote != t->parms.iph.daddr || | 185 | remote != t->parms.iph.daddr || |
181 | key != t->parms.i_key || | 186 | key != t->parms.i_key || |
@@ -200,7 +205,7 @@ static struct ip_tunnel * ipgre_tunnel_lookup(struct net_device *dev, | |||
200 | } | 205 | } |
201 | } | 206 | } |
202 | 207 | ||
203 | for (t = ign->tunnels_r[h0^h1]; t; t = t->next) { | 208 | for_each_ip_tunnel_rcu(ign->tunnels_r[h0 ^ h1]) { |
204 | if (remote != t->parms.iph.daddr || | 209 | if (remote != t->parms.iph.daddr || |
205 | key != t->parms.i_key || | 210 | key != t->parms.i_key || |
206 | !(t->dev->flags & IFF_UP)) | 211 | !(t->dev->flags & IFF_UP)) |
@@ -224,7 +229,7 @@ static struct ip_tunnel * ipgre_tunnel_lookup(struct net_device *dev, | |||
224 | } | 229 | } |
225 | } | 230 | } |
226 | 231 | ||
227 | for (t = ign->tunnels_l[h1]; t; t = t->next) { | 232 | for_each_ip_tunnel_rcu(ign->tunnels_l[h1]) { |
228 | if ((local != t->parms.iph.saddr && | 233 | if ((local != t->parms.iph.saddr && |
229 | (local != t->parms.iph.daddr || | 234 | (local != t->parms.iph.daddr || |
230 | !ipv4_is_multicast(local))) || | 235 | !ipv4_is_multicast(local))) || |
@@ -250,7 +255,7 @@ static struct ip_tunnel * ipgre_tunnel_lookup(struct net_device *dev, | |||
250 | } | 255 | } |
251 | } | 256 | } |
252 | 257 | ||
253 | for (t = ign->tunnels_wc[h1]; t; t = t->next) { | 258 | for_each_ip_tunnel_rcu(ign->tunnels_wc[h1]) { |
254 | if (t->parms.i_key != key || | 259 | if (t->parms.i_key != key || |
255 | !(t->dev->flags & IFF_UP)) | 260 | !(t->dev->flags & IFF_UP)) |
256 | continue; | 261 | continue; |
@@ -276,8 +281,9 @@ static struct ip_tunnel * ipgre_tunnel_lookup(struct net_device *dev, | |||
276 | if (cand != NULL) | 281 | if (cand != NULL) |
277 | return cand; | 282 | return cand; |
278 | 283 | ||
279 | if (ign->fb_tunnel_dev->flags & IFF_UP) | 284 | dev = ign->fb_tunnel_dev; |
280 | return netdev_priv(ign->fb_tunnel_dev); | 285 | if (dev->flags & IFF_UP) |
286 | return netdev_priv(dev); | ||
281 | 287 | ||
282 | return NULL; | 288 | return NULL; |
283 | } | 289 | } |
@@ -311,10 +317,10 @@ static void ipgre_tunnel_link(struct ipgre_net *ign, struct ip_tunnel *t) | |||
311 | { | 317 | { |
312 | struct ip_tunnel **tp = ipgre_bucket(ign, t); | 318 | struct ip_tunnel **tp = ipgre_bucket(ign, t); |
313 | 319 | ||
320 | spin_lock_bh(&ipgre_lock); | ||
314 | t->next = *tp; | 321 | t->next = *tp; |
315 | write_lock_bh(&ipgre_lock); | 322 | rcu_assign_pointer(*tp, t); |
316 | *tp = t; | 323 | spin_unlock_bh(&ipgre_lock); |
317 | write_unlock_bh(&ipgre_lock); | ||
318 | } | 324 | } |
319 | 325 | ||
320 | static void ipgre_tunnel_unlink(struct ipgre_net *ign, struct ip_tunnel *t) | 326 | static void ipgre_tunnel_unlink(struct ipgre_net *ign, struct ip_tunnel *t) |
@@ -323,9 +329,9 @@ static void ipgre_tunnel_unlink(struct ipgre_net *ign, struct ip_tunnel *t) | |||
323 | 329 | ||
324 | for (tp = ipgre_bucket(ign, t); *tp; tp = &(*tp)->next) { | 330 | for (tp = ipgre_bucket(ign, t); *tp; tp = &(*tp)->next) { |
325 | if (t == *tp) { | 331 | if (t == *tp) { |
326 | write_lock_bh(&ipgre_lock); | 332 | spin_lock_bh(&ipgre_lock); |
327 | *tp = t->next; | 333 | *tp = t->next; |
328 | write_unlock_bh(&ipgre_lock); | 334 | spin_unlock_bh(&ipgre_lock); |
329 | break; | 335 | break; |
330 | } | 336 | } |
331 | } | 337 | } |
@@ -476,7 +482,7 @@ static void ipgre_err(struct sk_buff *skb, u32 info) | |||
476 | break; | 482 | break; |
477 | } | 483 | } |
478 | 484 | ||
479 | read_lock(&ipgre_lock); | 485 | rcu_read_lock(); |
480 | t = ipgre_tunnel_lookup(skb->dev, iph->daddr, iph->saddr, | 486 | t = ipgre_tunnel_lookup(skb->dev, iph->daddr, iph->saddr, |
481 | flags & GRE_KEY ? | 487 | flags & GRE_KEY ? |
482 | *(((__be32 *)p) + (grehlen / 4) - 1) : 0, | 488 | *(((__be32 *)p) + (grehlen / 4) - 1) : 0, |
@@ -494,7 +500,7 @@ static void ipgre_err(struct sk_buff *skb, u32 info) | |||
494 | t->err_count = 1; | 500 | t->err_count = 1; |
495 | t->err_time = jiffies; | 501 | t->err_time = jiffies; |
496 | out: | 502 | out: |
497 | read_unlock(&ipgre_lock); | 503 | rcu_read_unlock(); |
498 | return; | 504 | return; |
499 | } | 505 | } |
500 | 506 | ||
@@ -573,7 +579,7 @@ static int ipgre_rcv(struct sk_buff *skb) | |||
573 | 579 | ||
574 | gre_proto = *(__be16 *)(h + 2); | 580 | gre_proto = *(__be16 *)(h + 2); |
575 | 581 | ||
576 | read_lock(&ipgre_lock); | 582 | rcu_read_lock(); |
577 | if ((tunnel = ipgre_tunnel_lookup(skb->dev, | 583 | if ((tunnel = ipgre_tunnel_lookup(skb->dev, |
578 | iph->saddr, iph->daddr, key, | 584 | iph->saddr, iph->daddr, key, |
579 | gre_proto))) { | 585 | gre_proto))) { |
@@ -647,13 +653,13 @@ static int ipgre_rcv(struct sk_buff *skb) | |||
647 | ipgre_ecn_decapsulate(iph, skb); | 653 | ipgre_ecn_decapsulate(iph, skb); |
648 | 654 | ||
649 | netif_rx(skb); | 655 | netif_rx(skb); |
650 | read_unlock(&ipgre_lock); | 656 | rcu_read_unlock(); |
651 | return(0); | 657 | return(0); |
652 | } | 658 | } |
653 | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); | 659 | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); |
654 | 660 | ||
655 | drop: | 661 | drop: |
656 | read_unlock(&ipgre_lock); | 662 | rcu_read_unlock(); |
657 | drop_nolock: | 663 | drop_nolock: |
658 | kfree_skb(skb); | 664 | kfree_skb(skb); |
659 | return(0); | 665 | return(0); |
@@ -662,7 +668,8 @@ drop_nolock: | |||
662 | static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | 668 | static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) |
663 | { | 669 | { |
664 | struct ip_tunnel *tunnel = netdev_priv(dev); | 670 | struct ip_tunnel *tunnel = netdev_priv(dev); |
665 | struct net_device_stats *stats = &tunnel->dev->stats; | 671 | struct net_device_stats *stats = &dev->stats; |
672 | struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); | ||
666 | struct iphdr *old_iph = ip_hdr(skb); | 673 | struct iphdr *old_iph = ip_hdr(skb); |
667 | struct iphdr *tiph; | 674 | struct iphdr *tiph; |
668 | u8 tos; | 675 | u8 tos; |
@@ -810,7 +817,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev | |||
810 | struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom); | 817 | struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom); |
811 | if (!new_skb) { | 818 | if (!new_skb) { |
812 | ip_rt_put(rt); | 819 | ip_rt_put(rt); |
813 | stats->tx_dropped++; | 820 | txq->tx_dropped++; |
814 | dev_kfree_skb(skb); | 821 | dev_kfree_skb(skb); |
815 | return NETDEV_TX_OK; | 822 | return NETDEV_TX_OK; |
816 | } | 823 | } |
@@ -1283,16 +1290,19 @@ static const struct net_protocol ipgre_protocol = { | |||
1283 | .netns_ok = 1, | 1290 | .netns_ok = 1, |
1284 | }; | 1291 | }; |
1285 | 1292 | ||
1286 | static void ipgre_destroy_tunnels(struct ipgre_net *ign) | 1293 | static void ipgre_destroy_tunnels(struct ipgre_net *ign, struct list_head *head) |
1287 | { | 1294 | { |
1288 | int prio; | 1295 | int prio; |
1289 | 1296 | ||
1290 | for (prio = 0; prio < 4; prio++) { | 1297 | for (prio = 0; prio < 4; prio++) { |
1291 | int h; | 1298 | int h; |
1292 | for (h = 0; h < HASH_SIZE; h++) { | 1299 | for (h = 0; h < HASH_SIZE; h++) { |
1293 | struct ip_tunnel *t; | 1300 | struct ip_tunnel *t = ign->tunnels[prio][h]; |
1294 | while ((t = ign->tunnels[prio][h]) != NULL) | 1301 | |
1295 | unregister_netdevice(t->dev); | 1302 | while (t != NULL) { |
1303 | unregister_netdevice_queue(t->dev, head); | ||
1304 | t = t->next; | ||
1305 | } | ||
1296 | } | 1306 | } |
1297 | } | 1307 | } |
1298 | } | 1308 | } |
@@ -1340,10 +1350,12 @@ err_alloc: | |||
1340 | static void ipgre_exit_net(struct net *net) | 1350 | static void ipgre_exit_net(struct net *net) |
1341 | { | 1351 | { |
1342 | struct ipgre_net *ign; | 1352 | struct ipgre_net *ign; |
1353 | LIST_HEAD(list); | ||
1343 | 1354 | ||
1344 | ign = net_generic(net, ipgre_net_id); | 1355 | ign = net_generic(net, ipgre_net_id); |
1345 | rtnl_lock(); | 1356 | rtnl_lock(); |
1346 | ipgre_destroy_tunnels(ign); | 1357 | ipgre_destroy_tunnels(ign, &list); |
1358 | unregister_netdevice_many(&list); | ||
1347 | rtnl_unlock(); | 1359 | rtnl_unlock(); |
1348 | kfree(ign); | 1360 | kfree(ign); |
1349 | } | 1361 | } |
@@ -1471,7 +1483,7 @@ static void ipgre_tap_setup(struct net_device *dev) | |||
1471 | dev->features |= NETIF_F_NETNS_LOCAL; | 1483 | dev->features |= NETIF_F_NETNS_LOCAL; |
1472 | } | 1484 | } |
1473 | 1485 | ||
1474 | static int ipgre_newlink(struct net_device *dev, struct nlattr *tb[], | 1486 | static int ipgre_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], |
1475 | struct nlattr *data[]) | 1487 | struct nlattr *data[]) |
1476 | { | 1488 | { |
1477 | struct ip_tunnel *nt; | 1489 | struct ip_tunnel *nt; |
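The GRE hash tables move from a reader/writer lock to RCU: writers still serialize on a spinlock and publish with rcu_assign_pointer(), while the error and receive paths only take rcu_read_lock(). A condensed sketch of the two sides, using the names from the hunks above:

        /* writer side (ipgre_tunnel_link) */
        spin_lock_bh(&ipgre_lock);
        t->next = *tp;
        rcu_assign_pointer(*tp, t);
        spin_unlock_bh(&ipgre_lock);

        /* reader side (ipgre_tunnel_lookup and friends) */
        rcu_read_lock();
        for_each_ip_tunnel_rcu(ign->tunnels_r_l[h0 ^ h1]) {
                /* compare saddr/daddr/key, remember the best candidate */
        }
        rcu_read_unlock();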
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index 6c98b43badf4..fdf51badc8e5 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c | |||
@@ -161,7 +161,7 @@ int ip_call_ra_chain(struct sk_buff *skb) | |||
161 | /* If socket is bound to an interface, only report | 161 | /* If socket is bound to an interface, only report |
162 | * the packet if it came from that interface. | 162 | * the packet if it came from that interface. |
163 | */ | 163 | */ |
164 | if (sk && inet_sk(sk)->num == protocol && | 164 | if (sk && inet_sk(sk)->inet_num == protocol && |
165 | (!sk->sk_bound_dev_if || | 165 | (!sk->sk_bound_dev_if || |
166 | sk->sk_bound_dev_if == dev->ifindex) && | 166 | sk->sk_bound_dev_if == dev->ifindex) && |
167 | sock_net(sk) == dev_net(dev)) { | 167 | sock_net(sk) == dev_net(dev)) { |
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index f9895180f481..322b40864ac0 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c | |||
@@ -329,7 +329,7 @@ int ip_queue_xmit(struct sk_buff *skb, int ipfragok) | |||
329 | __be32 daddr; | 329 | __be32 daddr; |
330 | 330 | ||
331 | /* Use correct destination address if we have options. */ | 331 | /* Use correct destination address if we have options. */ |
332 | daddr = inet->daddr; | 332 | daddr = inet->inet_daddr; |
333 | if(opt && opt->srr) | 333 | if(opt && opt->srr) |
334 | daddr = opt->faddr; | 334 | daddr = opt->faddr; |
335 | 335 | ||
@@ -338,13 +338,13 @@ int ip_queue_xmit(struct sk_buff *skb, int ipfragok) | |||
338 | .mark = sk->sk_mark, | 338 | .mark = sk->sk_mark, |
339 | .nl_u = { .ip4_u = | 339 | .nl_u = { .ip4_u = |
340 | { .daddr = daddr, | 340 | { .daddr = daddr, |
341 | .saddr = inet->saddr, | 341 | .saddr = inet->inet_saddr, |
342 | .tos = RT_CONN_FLAGS(sk) } }, | 342 | .tos = RT_CONN_FLAGS(sk) } }, |
343 | .proto = sk->sk_protocol, | 343 | .proto = sk->sk_protocol, |
344 | .flags = inet_sk_flowi_flags(sk), | 344 | .flags = inet_sk_flowi_flags(sk), |
345 | .uli_u = { .ports = | 345 | .uli_u = { .ports = |
346 | { .sport = inet->sport, | 346 | { .sport = inet->inet_sport, |
347 | .dport = inet->dport } } }; | 347 | .dport = inet->inet_dport } } }; |
348 | 348 | ||
349 | /* If this fails, retransmit mechanism of transport layer will | 349 | /* If this fails, retransmit mechanism of transport layer will |
350 | * keep trying until route appears or the connection times | 350 | * keep trying until route appears or the connection times |
@@ -379,7 +379,7 @@ packet_routed: | |||
379 | 379 | ||
380 | if (opt && opt->optlen) { | 380 | if (opt && opt->optlen) { |
381 | iph->ihl += opt->optlen >> 2; | 381 | iph->ihl += opt->optlen >> 2; |
382 | ip_options_build(skb, opt, inet->daddr, rt, 0); | 382 | ip_options_build(skb, opt, inet->inet_daddr, rt, 0); |
383 | } | 383 | } |
384 | 384 | ||
385 | ip_select_ident_more(iph, &rt->u.dst, sk, | 385 | ip_select_ident_more(iph, &rt->u.dst, sk, |
@@ -846,7 +846,8 @@ int ip_append_data(struct sock *sk, | |||
846 | maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen; | 846 | maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen; |
847 | 847 | ||
848 | if (inet->cork.length + length > 0xFFFF - fragheaderlen) { | 848 | if (inet->cork.length + length > 0xFFFF - fragheaderlen) { |
849 | ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu-exthdrlen); | 849 | ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->inet_dport, |
850 | mtu-exthdrlen); | ||
850 | return -EMSGSIZE; | 851 | return -EMSGSIZE; |
851 | } | 852 | } |
852 | 853 | ||
@@ -1100,7 +1101,7 @@ ssize_t ip_append_page(struct sock *sk, struct page *page, | |||
1100 | maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen; | 1101 | maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen; |
1101 | 1102 | ||
1102 | if (inet->cork.length + size > 0xFFFF - fragheaderlen) { | 1103 | if (inet->cork.length + size > 0xFFFF - fragheaderlen) { |
1103 | ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu); | 1104 | ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->inet_dport, mtu); |
1104 | return -EMSGSIZE; | 1105 | return -EMSGSIZE; |
1105 | } | 1106 | } |
1106 | 1107 | ||
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index e982b5c1ee17..cafad9baff03 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c | |||
@@ -245,7 +245,7 @@ int ip_ra_control(struct sock *sk, unsigned char on, | |||
245 | { | 245 | { |
246 | struct ip_ra_chain *ra, *new_ra, **rap; | 246 | struct ip_ra_chain *ra, *new_ra, **rap; |
247 | 247 | ||
248 | if (sk->sk_type != SOCK_RAW || inet_sk(sk)->num == IPPROTO_RAW) | 248 | if (sk->sk_type != SOCK_RAW || inet_sk(sk)->inet_num == IPPROTO_RAW) |
249 | return -EINVAL; | 249 | return -EINVAL; |
250 | 250 | ||
251 | new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL; | 251 | new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL; |
@@ -480,7 +480,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, | |||
480 | case IP_OPTIONS: | 480 | case IP_OPTIONS: |
481 | { | 481 | { |
482 | struct ip_options *opt = NULL; | 482 | struct ip_options *opt = NULL; |
483 | if (optlen > 40 || optlen < 0) | 483 | if (optlen > 40) |
484 | goto e_inval; | 484 | goto e_inval; |
485 | err = ip_options_get_from_user(sock_net(sk), &opt, | 485 | err = ip_options_get_from_user(sock_net(sk), &opt, |
486 | optval, optlen); | 486 | optval, optlen); |
@@ -492,7 +492,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, | |||
492 | if (sk->sk_family == PF_INET || | 492 | if (sk->sk_family == PF_INET || |
493 | (!((1 << sk->sk_state) & | 493 | (!((1 << sk->sk_state) & |
494 | (TCPF_LISTEN | TCPF_CLOSE)) && | 494 | (TCPF_LISTEN | TCPF_CLOSE)) && |
495 | inet->daddr != LOOPBACK4_IPV6)) { | 495 | inet->inet_daddr != LOOPBACK4_IPV6)) { |
496 | #endif | 496 | #endif |
497 | if (inet->opt) | 497 | if (inet->opt) |
498 | icsk->icsk_ext_hdr_len -= inet->opt->optlen; | 498 | icsk->icsk_ext_hdr_len -= inet->opt->optlen; |
@@ -575,7 +575,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, | |||
575 | inet->hdrincl = val ? 1 : 0; | 575 | inet->hdrincl = val ? 1 : 0; |
576 | break; | 576 | break; |
577 | case IP_MTU_DISCOVER: | 577 | case IP_MTU_DISCOVER: |
578 | if (val < 0 || val > 3) | 578 | if (val < IP_PMTUDISC_DONT || val > IP_PMTUDISC_PROBE) |
579 | goto e_inval; | 579 | goto e_inval; |
580 | inet->pmtudisc = val; | 580 | inet->pmtudisc = val; |
581 | break; | 581 | break; |
@@ -1180,8 +1180,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname, | |||
1180 | if (inet->cmsg_flags & IP_CMSG_PKTINFO) { | 1180 | if (inet->cmsg_flags & IP_CMSG_PKTINFO) { |
1181 | struct in_pktinfo info; | 1181 | struct in_pktinfo info; |
1182 | 1182 | ||
1183 | info.ipi_addr.s_addr = inet->rcv_saddr; | 1183 | info.ipi_addr.s_addr = inet->inet_rcv_saddr; |
1184 | info.ipi_spec_dst.s_addr = inet->rcv_saddr; | 1184 | info.ipi_spec_dst.s_addr = inet->inet_rcv_saddr; |
1185 | info.ipi_ifindex = inet->mc_index; | 1185 | info.ipi_ifindex = inet->mc_index; |
1186 | put_cmsg(&msg, SOL_IP, IP_PKTINFO, sizeof(info), &info); | 1186 | put_cmsg(&msg, SOL_IP, IP_PKTINFO, sizeof(info), &info); |
1187 | } | 1187 | } |
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index ae40ed1ba560..7242ffcc44e5 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c | |||
@@ -119,7 +119,7 @@ | |||
119 | #define HASH_SIZE 16 | 119 | #define HASH_SIZE 16 |
120 | #define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF) | 120 | #define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF) |
121 | 121 | ||
122 | static int ipip_net_id; | 122 | static int ipip_net_id __read_mostly; |
123 | struct ipip_net { | 123 | struct ipip_net { |
124 | struct ip_tunnel *tunnels_r_l[HASH_SIZE]; | 124 | struct ip_tunnel *tunnels_r_l[HASH_SIZE]; |
125 | struct ip_tunnel *tunnels_r[HASH_SIZE]; | 125 | struct ip_tunnel *tunnels_r[HASH_SIZE]; |
@@ -134,7 +134,13 @@ static void ipip_fb_tunnel_init(struct net_device *dev); | |||
134 | static void ipip_tunnel_init(struct net_device *dev); | 134 | static void ipip_tunnel_init(struct net_device *dev); |
135 | static void ipip_tunnel_setup(struct net_device *dev); | 135 | static void ipip_tunnel_setup(struct net_device *dev); |
136 | 136 | ||
137 | static DEFINE_RWLOCK(ipip_lock); | 137 | /* |
138 | * Locking : hash tables are protected by RCU and a spinlock | ||
139 | */ | ||
140 | static DEFINE_SPINLOCK(ipip_lock); | ||
141 | |||
142 | #define for_each_ip_tunnel_rcu(start) \ | ||
143 | for (t = rcu_dereference(start); t; t = rcu_dereference(t->next)) | ||
138 | 144 | ||
139 | static struct ip_tunnel * ipip_tunnel_lookup(struct net *net, | 145 | static struct ip_tunnel * ipip_tunnel_lookup(struct net *net, |
140 | __be32 remote, __be32 local) | 146 | __be32 remote, __be32 local) |
@@ -144,20 +150,21 @@ static struct ip_tunnel * ipip_tunnel_lookup(struct net *net, | |||
144 | struct ip_tunnel *t; | 150 | struct ip_tunnel *t; |
145 | struct ipip_net *ipn = net_generic(net, ipip_net_id); | 151 | struct ipip_net *ipn = net_generic(net, ipip_net_id); |
146 | 152 | ||
147 | for (t = ipn->tunnels_r_l[h0^h1]; t; t = t->next) { | 153 | for_each_ip_tunnel_rcu(ipn->tunnels_r_l[h0 ^ h1]) |
148 | if (local == t->parms.iph.saddr && | 154 | if (local == t->parms.iph.saddr && |
149 | remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP)) | 155 | remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP)) |
150 | return t; | 156 | return t; |
151 | } | 157 | |
152 | for (t = ipn->tunnels_r[h0]; t; t = t->next) { | 158 | for_each_ip_tunnel_rcu(ipn->tunnels_r[h0]) |
153 | if (remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP)) | 159 | if (remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP)) |
154 | return t; | 160 | return t; |
155 | } | 161 | |
156 | for (t = ipn->tunnels_l[h1]; t; t = t->next) { | 162 | for_each_ip_tunnel_rcu(ipn->tunnels_l[h1]) |
157 | if (local == t->parms.iph.saddr && (t->dev->flags&IFF_UP)) | 163 | if (local == t->parms.iph.saddr && (t->dev->flags&IFF_UP)) |
158 | return t; | 164 | return t; |
159 | } | 165 | |
160 | if ((t = ipn->tunnels_wc[0]) != NULL && (t->dev->flags&IFF_UP)) | 166 | t = rcu_dereference(ipn->tunnels_wc[0]); |
167 | if (t && (t->dev->flags&IFF_UP)) | ||
161 | return t; | 168 | return t; |
162 | return NULL; | 169 | return NULL; |
163 | } | 170 | } |
@@ -193,9 +200,9 @@ static void ipip_tunnel_unlink(struct ipip_net *ipn, struct ip_tunnel *t) | |||
193 | 200 | ||
194 | for (tp = ipip_bucket(ipn, t); *tp; tp = &(*tp)->next) { | 201 | for (tp = ipip_bucket(ipn, t); *tp; tp = &(*tp)->next) { |
195 | if (t == *tp) { | 202 | if (t == *tp) { |
196 | write_lock_bh(&ipip_lock); | 203 | spin_lock_bh(&ipip_lock); |
197 | *tp = t->next; | 204 | *tp = t->next; |
198 | write_unlock_bh(&ipip_lock); | 205 | spin_unlock_bh(&ipip_lock); |
199 | break; | 206 | break; |
200 | } | 207 | } |
201 | } | 208 | } |
@@ -205,10 +212,10 @@ static void ipip_tunnel_link(struct ipip_net *ipn, struct ip_tunnel *t) | |||
205 | { | 212 | { |
206 | struct ip_tunnel **tp = ipip_bucket(ipn, t); | 213 | struct ip_tunnel **tp = ipip_bucket(ipn, t); |
207 | 214 | ||
215 | spin_lock_bh(&ipip_lock); | ||
208 | t->next = *tp; | 216 | t->next = *tp; |
209 | write_lock_bh(&ipip_lock); | 217 | rcu_assign_pointer(*tp, t); |
210 | *tp = t; | 218 | spin_unlock_bh(&ipip_lock); |
211 | write_unlock_bh(&ipip_lock); | ||
212 | } | 219 | } |
213 | 220 | ||
214 | static struct ip_tunnel * ipip_tunnel_locate(struct net *net, | 221 | static struct ip_tunnel * ipip_tunnel_locate(struct net *net, |
@@ -267,9 +274,9 @@ static void ipip_tunnel_uninit(struct net_device *dev) | |||
267 | struct ipip_net *ipn = net_generic(net, ipip_net_id); | 274 | struct ipip_net *ipn = net_generic(net, ipip_net_id); |
268 | 275 | ||
269 | if (dev == ipn->fb_tunnel_dev) { | 276 | if (dev == ipn->fb_tunnel_dev) { |
270 | write_lock_bh(&ipip_lock); | 277 | spin_lock_bh(&ipip_lock); |
271 | ipn->tunnels_wc[0] = NULL; | 278 | ipn->tunnels_wc[0] = NULL; |
272 | write_unlock_bh(&ipip_lock); | 279 | spin_unlock_bh(&ipip_lock); |
273 | } else | 280 | } else |
274 | ipip_tunnel_unlink(ipn, netdev_priv(dev)); | 281 | ipip_tunnel_unlink(ipn, netdev_priv(dev)); |
275 | dev_put(dev); | 282 | dev_put(dev); |
@@ -318,7 +325,7 @@ static int ipip_err(struct sk_buff *skb, u32 info) | |||
318 | 325 | ||
319 | err = -ENOENT; | 326 | err = -ENOENT; |
320 | 327 | ||
321 | read_lock(&ipip_lock); | 328 | rcu_read_lock(); |
322 | t = ipip_tunnel_lookup(dev_net(skb->dev), iph->daddr, iph->saddr); | 329 | t = ipip_tunnel_lookup(dev_net(skb->dev), iph->daddr, iph->saddr); |
323 | if (t == NULL || t->parms.iph.daddr == 0) | 330 | if (t == NULL || t->parms.iph.daddr == 0) |
324 | goto out; | 331 | goto out; |
@@ -333,7 +340,7 @@ static int ipip_err(struct sk_buff *skb, u32 info) | |||
333 | t->err_count = 1; | 340 | t->err_count = 1; |
334 | t->err_time = jiffies; | 341 | t->err_time = jiffies; |
335 | out: | 342 | out: |
336 | read_unlock(&ipip_lock); | 343 | rcu_read_unlock(); |
337 | return err; | 344 | return err; |
338 | } | 345 | } |
339 | 346 | ||
@@ -351,11 +358,11 @@ static int ipip_rcv(struct sk_buff *skb) | |||
351 | struct ip_tunnel *tunnel; | 358 | struct ip_tunnel *tunnel; |
352 | const struct iphdr *iph = ip_hdr(skb); | 359 | const struct iphdr *iph = ip_hdr(skb); |
353 | 360 | ||
354 | read_lock(&ipip_lock); | 361 | rcu_read_lock(); |
355 | if ((tunnel = ipip_tunnel_lookup(dev_net(skb->dev), | 362 | if ((tunnel = ipip_tunnel_lookup(dev_net(skb->dev), |
356 | iph->saddr, iph->daddr)) != NULL) { | 363 | iph->saddr, iph->daddr)) != NULL) { |
357 | if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { | 364 | if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { |
358 | read_unlock(&ipip_lock); | 365 | rcu_read_unlock(); |
359 | kfree_skb(skb); | 366 | kfree_skb(skb); |
360 | return 0; | 367 | return 0; |
361 | } | 368 | } |
@@ -374,10 +381,10 @@ static int ipip_rcv(struct sk_buff *skb) | |||
374 | nf_reset(skb); | 381 | nf_reset(skb); |
375 | ipip_ecn_decapsulate(iph, skb); | 382 | ipip_ecn_decapsulate(iph, skb); |
376 | netif_rx(skb); | 383 | netif_rx(skb); |
377 | read_unlock(&ipip_lock); | 384 | rcu_read_unlock(); |
378 | return 0; | 385 | return 0; |
379 | } | 386 | } |
380 | read_unlock(&ipip_lock); | 387 | rcu_read_unlock(); |
381 | 388 | ||
382 | return -1; | 389 | return -1; |
383 | } | 390 | } |
@@ -390,7 +397,8 @@ static int ipip_rcv(struct sk_buff *skb) | |||
390 | static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | 397 | static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) |
391 | { | 398 | { |
392 | struct ip_tunnel *tunnel = netdev_priv(dev); | 399 | struct ip_tunnel *tunnel = netdev_priv(dev); |
393 | struct net_device_stats *stats = &tunnel->dev->stats; | 400 | struct net_device_stats *stats = &dev->stats; |
401 | struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); | ||
394 | struct iphdr *tiph = &tunnel->parms.iph; | 402 | struct iphdr *tiph = &tunnel->parms.iph; |
395 | u8 tos = tunnel->parms.iph.tos; | 403 | u8 tos = tunnel->parms.iph.tos; |
396 | __be16 df = tiph->frag_off; | 404 | __be16 df = tiph->frag_off; |
@@ -480,7 +488,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
480 | struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom); | 488 | struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom); |
481 | if (!new_skb) { | 489 | if (!new_skb) { |
482 | ip_rt_put(rt); | 490 | ip_rt_put(rt); |
483 | stats->tx_dropped++; | 491 | txq->tx_dropped++; |
484 | dev_kfree_skb(skb); | 492 | dev_kfree_skb(skb); |
485 | return NETDEV_TX_OK; | 493 | return NETDEV_TX_OK; |
486 | } | 494 | } |
@@ -748,16 +756,19 @@ static struct xfrm_tunnel ipip_handler = { | |||
748 | static const char banner[] __initconst = | 756 | static const char banner[] __initconst = |
749 | KERN_INFO "IPv4 over IPv4 tunneling driver\n"; | 757 | KERN_INFO "IPv4 over IPv4 tunneling driver\n"; |
750 | 758 | ||
751 | static void ipip_destroy_tunnels(struct ipip_net *ipn) | 759 | static void ipip_destroy_tunnels(struct ipip_net *ipn, struct list_head *head) |
752 | { | 760 | { |
753 | int prio; | 761 | int prio; |
754 | 762 | ||
755 | for (prio = 1; prio < 4; prio++) { | 763 | for (prio = 1; prio < 4; prio++) { |
756 | int h; | 764 | int h; |
757 | for (h = 0; h < HASH_SIZE; h++) { | 765 | for (h = 0; h < HASH_SIZE; h++) { |
758 | struct ip_tunnel *t; | 766 | struct ip_tunnel *t = ipn->tunnels[prio][h]; |
759 | while ((t = ipn->tunnels[prio][h]) != NULL) | 767 | |
760 | unregister_netdevice(t->dev); | 768 | while (t != NULL) { |
769 | unregister_netdevice_queue(t->dev, head); | ||
770 | t = t->next; | ||
771 | } | ||
761 | } | 772 | } |
762 | } | 773 | } |
763 | } | 774 | } |
@@ -810,11 +821,13 @@ err_alloc: | |||
810 | static void ipip_exit_net(struct net *net) | 821 | static void ipip_exit_net(struct net *net) |
811 | { | 822 | { |
812 | struct ipip_net *ipn; | 823 | struct ipip_net *ipn; |
824 | LIST_HEAD(list); | ||
813 | 825 | ||
814 | ipn = net_generic(net, ipip_net_id); | 826 | ipn = net_generic(net, ipip_net_id); |
815 | rtnl_lock(); | 827 | rtnl_lock(); |
816 | ipip_destroy_tunnels(ipn); | 828 | ipip_destroy_tunnels(ipn, &list); |
817 | unregister_netdevice(ipn->fb_tunnel_dev); | 829 | unregister_netdevice_queue(ipn->fb_tunnel_dev, &list); |
830 | unregister_netdevice_many(&list); | ||
818 | rtnl_unlock(); | 831 | rtnl_unlock(); |
819 | kfree(ipn); | 832 | kfree(ipn); |
820 | } | 833 | } |
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 99508d66a642..54596f73eff5 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c | |||
@@ -275,7 +275,8 @@ failure: | |||
275 | * @notify: Set to 1, if the caller is a notifier_call | 275 | * @notify: Set to 1, if the caller is a notifier_call |
276 | */ | 276 | */ |
277 | 277 | ||
278 | static int vif_delete(struct net *net, int vifi, int notify) | 278 | static int vif_delete(struct net *net, int vifi, int notify, |
279 | struct list_head *head) | ||
279 | { | 280 | { |
280 | struct vif_device *v; | 281 | struct vif_device *v; |
281 | struct net_device *dev; | 282 | struct net_device *dev; |
@@ -319,7 +320,7 @@ static int vif_delete(struct net *net, int vifi, int notify) | |||
319 | } | 320 | } |
320 | 321 | ||
321 | if (v->flags&(VIFF_TUNNEL|VIFF_REGISTER) && !notify) | 322 | if (v->flags&(VIFF_TUNNEL|VIFF_REGISTER) && !notify) |
322 | unregister_netdevice(dev); | 323 | unregister_netdevice_queue(dev, head); |
323 | 324 | ||
324 | dev_put(dev); | 325 | dev_put(dev); |
325 | return 0; | 326 | return 0; |
@@ -469,8 +470,18 @@ static int vif_add(struct net *net, struct vifctl *vifc, int mrtsock) | |||
469 | return err; | 470 | return err; |
470 | } | 471 | } |
471 | break; | 472 | break; |
473 | |||
474 | case VIFF_USE_IFINDEX: | ||
472 | case 0: | 475 | case 0: |
473 | dev = ip_dev_find(net, vifc->vifc_lcl_addr.s_addr); | 476 | if (vifc->vifc_flags == VIFF_USE_IFINDEX) { |
477 | dev = dev_get_by_index(net, vifc->vifc_lcl_ifindex); | ||
478 | if (dev && dev->ip_ptr == NULL) { | ||
479 | dev_put(dev); | ||
480 | return -EADDRNOTAVAIL; | ||
481 | } | ||
482 | } else | ||
483 | dev = ip_dev_find(net, vifc->vifc_lcl_addr.s_addr); | ||
484 | |||
474 | if (!dev) | 485 | if (!dev) |
475 | return -EADDRNOTAVAIL; | 486 | return -EADDRNOTAVAIL; |
476 | err = dev_set_allmulti(dev, 1); | 487 | err = dev_set_allmulti(dev, 1); |
@@ -862,14 +873,16 @@ static int ipmr_mfc_add(struct net *net, struct mfcctl *mfc, int mrtsock) | |||
862 | static void mroute_clean_tables(struct net *net) | 873 | static void mroute_clean_tables(struct net *net) |
863 | { | 874 | { |
864 | int i; | 875 | int i; |
876 | LIST_HEAD(list); | ||
865 | 877 | ||
866 | /* | 878 | /* |
867 | * Shut down all active vif entries | 879 | * Shut down all active vif entries |
868 | */ | 880 | */ |
869 | for (i = 0; i < net->ipv4.maxvif; i++) { | 881 | for (i = 0; i < net->ipv4.maxvif; i++) { |
870 | if (!(net->ipv4.vif_table[i].flags&VIFF_STATIC)) | 882 | if (!(net->ipv4.vif_table[i].flags&VIFF_STATIC)) |
871 | vif_delete(net, i, 0); | 883 | vif_delete(net, i, 0, &list); |
872 | } | 884 | } |
885 | unregister_netdevice_many(&list); | ||
873 | 886 | ||
874 | /* | 887 | /* |
875 | * Wipe the cache | 888 | * Wipe the cache |
@@ -948,7 +961,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi | |||
948 | switch (optname) { | 961 | switch (optname) { |
949 | case MRT_INIT: | 962 | case MRT_INIT: |
950 | if (sk->sk_type != SOCK_RAW || | 963 | if (sk->sk_type != SOCK_RAW || |
951 | inet_sk(sk)->num != IPPROTO_IGMP) | 964 | inet_sk(sk)->inet_num != IPPROTO_IGMP) |
952 | return -EOPNOTSUPP; | 965 | return -EOPNOTSUPP; |
953 | if (optlen != sizeof(int)) | 966 | if (optlen != sizeof(int)) |
954 | return -ENOPROTOOPT; | 967 | return -ENOPROTOOPT; |
@@ -985,7 +998,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi | |||
985 | if (optname == MRT_ADD_VIF) { | 998 | if (optname == MRT_ADD_VIF) { |
986 | ret = vif_add(net, &vif, sk == net->ipv4.mroute_sk); | 999 | ret = vif_add(net, &vif, sk == net->ipv4.mroute_sk); |
987 | } else { | 1000 | } else { |
988 | ret = vif_delete(net, vif.vifc_vifi, 0); | 1001 | ret = vif_delete(net, vif.vifc_vifi, 0, NULL); |
989 | } | 1002 | } |
990 | rtnl_unlock(); | 1003 | rtnl_unlock(); |
991 | return ret; | 1004 | return ret; |
@@ -1148,6 +1161,7 @@ static int ipmr_device_event(struct notifier_block *this, unsigned long event, v | |||
1148 | struct net *net = dev_net(dev); | 1161 | struct net *net = dev_net(dev); |
1149 | struct vif_device *v; | 1162 | struct vif_device *v; |
1150 | int ct; | 1163 | int ct; |
1164 | LIST_HEAD(list); | ||
1151 | 1165 | ||
1152 | if (!net_eq(dev_net(dev), net)) | 1166 | if (!net_eq(dev_net(dev), net)) |
1153 | return NOTIFY_DONE; | 1167 | return NOTIFY_DONE; |
@@ -1157,8 +1171,9 @@ static int ipmr_device_event(struct notifier_block *this, unsigned long event, v | |||
1157 | v = &net->ipv4.vif_table[0]; | 1171 | v = &net->ipv4.vif_table[0]; |
1158 | for (ct = 0; ct < net->ipv4.maxvif; ct++, v++) { | 1172 | for (ct = 0; ct < net->ipv4.maxvif; ct++, v++) { |
1159 | if (v->dev == dev) | 1173 | if (v->dev == dev) |
1160 | vif_delete(net, ct, 1); | 1174 | vif_delete(net, ct, 1, &list); |
1161 | } | 1175 | } |
1176 | unregister_netdevice_many(&list); | ||
1162 | return NOTIFY_DONE; | 1177 | return NOTIFY_DONE; |
1163 | } | 1178 | } |
1164 | 1179 | ||
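Besides switching vif_delete() to the same queued unregister, the ipmr hunks add a VIFF_USE_IFINDEX path so a virtual interface can be selected by interface index rather than by local address. A hedged userspace sketch of using that flag with MRT_ADD_VIF, assuming mroute_fd is a raw IGMP socket on which MRT_INIT already succeeded and that the installed headers export the new vifc_lcl_ifindex field; error handling is omitted:

#include <net/if.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <linux/mroute.h>

/* Hypothetical: add VIF 0 bound to an interface chosen by ifindex. */
static int add_vif_by_ifindex(int mroute_fd, const char *ifname)
{
	struct vifctl vc = {
		.vifc_vifi        = 0,
		.vifc_flags       = VIFF_USE_IFINDEX,	/* new flag in this series */
		.vifc_threshold   = 1,
		.vifc_lcl_ifindex = (int)if_nametoindex(ifname),
	};

	return setsockopt(mroute_fd, IPPROTO_IP, MRT_ADD_VIF, &vc, sizeof(vc));
}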
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c index aa95bb82ee6c..9cd423ffafa8 100644 --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c | |||
@@ -255,10 +255,10 @@ getorigdst(struct sock *sk, int optval, void __user *user, int *len) | |||
255 | struct nf_conntrack_tuple tuple; | 255 | struct nf_conntrack_tuple tuple; |
256 | 256 | ||
257 | memset(&tuple, 0, sizeof(tuple)); | 257 | memset(&tuple, 0, sizeof(tuple)); |
258 | tuple.src.u3.ip = inet->rcv_saddr; | 258 | tuple.src.u3.ip = inet->inet_rcv_saddr; |
259 | tuple.src.u.tcp.port = inet->sport; | 259 | tuple.src.u.tcp.port = inet->inet_sport; |
260 | tuple.dst.u3.ip = inet->daddr; | 260 | tuple.dst.u3.ip = inet->inet_daddr; |
261 | tuple.dst.u.tcp.port = inet->dport; | 261 | tuple.dst.u.tcp.port = inet->inet_dport; |
262 | tuple.src.l3num = PF_INET; | 262 | tuple.src.l3num = PF_INET; |
263 | tuple.dst.protonum = sk->sk_protocol; | 263 | tuple.dst.protonum = sk->sk_protocol; |
264 | 264 | ||
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index ab996f9c0fe0..ce154b47f1da 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c | |||
@@ -87,7 +87,7 @@ void raw_hash_sk(struct sock *sk) | |||
87 | struct raw_hashinfo *h = sk->sk_prot->h.raw_hash; | 87 | struct raw_hashinfo *h = sk->sk_prot->h.raw_hash; |
88 | struct hlist_head *head; | 88 | struct hlist_head *head; |
89 | 89 | ||
90 | head = &h->ht[inet_sk(sk)->num & (RAW_HTABLE_SIZE - 1)]; | 90 | head = &h->ht[inet_sk(sk)->inet_num & (RAW_HTABLE_SIZE - 1)]; |
91 | 91 | ||
92 | write_lock_bh(&h->lock); | 92 | write_lock_bh(&h->lock); |
93 | sk_add_node(sk, head); | 93 | sk_add_node(sk, head); |
@@ -115,9 +115,9 @@ static struct sock *__raw_v4_lookup(struct net *net, struct sock *sk, | |||
115 | sk_for_each_from(sk, node) { | 115 | sk_for_each_from(sk, node) { |
116 | struct inet_sock *inet = inet_sk(sk); | 116 | struct inet_sock *inet = inet_sk(sk); |
117 | 117 | ||
118 | if (net_eq(sock_net(sk), net) && inet->num == num && | 118 | if (net_eq(sock_net(sk), net) && inet->inet_num == num && |
119 | !(inet->daddr && inet->daddr != raddr) && | 119 | !(inet->inet_daddr && inet->inet_daddr != raddr) && |
120 | !(inet->rcv_saddr && inet->rcv_saddr != laddr) && | 120 | !(inet->inet_rcv_saddr && inet->inet_rcv_saddr != laddr) && |
121 | !(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)) | 121 | !(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)) |
122 | goto found; /* gotcha */ | 122 | goto found; /* gotcha */ |
123 | } | 123 | } |
@@ -292,7 +292,6 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb) | |||
292 | /* Charge it to the socket. */ | 292 | /* Charge it to the socket. */ |
293 | 293 | ||
294 | if (sock_queue_rcv_skb(sk, skb) < 0) { | 294 | if (sock_queue_rcv_skb(sk, skb) < 0) { |
295 | atomic_inc(&sk->sk_drops); | ||
296 | kfree_skb(skb); | 295 | kfree_skb(skb); |
297 | return NET_RX_DROP; | 296 | return NET_RX_DROP; |
298 | } | 297 | } |
@@ -327,7 +326,7 @@ static int raw_send_hdrinc(struct sock *sk, void *from, size_t length, | |||
327 | int err; | 326 | int err; |
328 | 327 | ||
329 | if (length > rt->u.dst.dev->mtu) { | 328 | if (length > rt->u.dst.dev->mtu) { |
330 | ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, | 329 | ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->inet_dport, |
331 | rt->u.dst.dev->mtu); | 330 | rt->u.dst.dev->mtu); |
332 | return -EMSGSIZE; | 331 | return -EMSGSIZE; |
333 | } | 332 | } |
@@ -500,10 +499,10 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
500 | err = -EDESTADDRREQ; | 499 | err = -EDESTADDRREQ; |
501 | if (sk->sk_state != TCP_ESTABLISHED) | 500 | if (sk->sk_state != TCP_ESTABLISHED) |
502 | goto out; | 501 | goto out; |
503 | daddr = inet->daddr; | 502 | daddr = inet->inet_daddr; |
504 | } | 503 | } |
505 | 504 | ||
506 | ipc.addr = inet->saddr; | 505 | ipc.addr = inet->inet_saddr; |
507 | ipc.opt = NULL; | 506 | ipc.opt = NULL; |
508 | ipc.shtx.flags = 0; | 507 | ipc.shtx.flags = 0; |
509 | ipc.oif = sk->sk_bound_dev_if; | 508 | ipc.oif = sk->sk_bound_dev_if; |
@@ -645,9 +644,9 @@ static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
645 | if (addr->sin_addr.s_addr && chk_addr_ret != RTN_LOCAL && | 644 | if (addr->sin_addr.s_addr && chk_addr_ret != RTN_LOCAL && |
646 | chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST) | 645 | chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST) |
647 | goto out; | 646 | goto out; |
648 | inet->rcv_saddr = inet->saddr = addr->sin_addr.s_addr; | 647 | inet->inet_rcv_saddr = inet->inet_saddr = addr->sin_addr.s_addr; |
649 | if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST) | 648 | if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST) |
650 | inet->saddr = 0; /* Use device */ | 649 | inet->inet_saddr = 0; /* Use device */ |
651 | sk_dst_reset(sk); | 650 | sk_dst_reset(sk); |
652 | ret = 0; | 651 | ret = 0; |
653 | out: return ret; | 652 | out: return ret; |
@@ -692,7 +691,7 @@ static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
692 | if (err) | 691 | if (err) |
693 | goto done; | 692 | goto done; |
694 | 693 | ||
695 | sock_recv_timestamp(msg, sk, skb); | 694 | sock_recv_ts_and_drops(msg, sk, skb); |
696 | 695 | ||
697 | /* Copy the address. */ | 696 | /* Copy the address. */ |
698 | if (sin) { | 697 | if (sin) { |
@@ -717,7 +716,7 @@ static int raw_init(struct sock *sk) | |||
717 | { | 716 | { |
718 | struct raw_sock *rp = raw_sk(sk); | 717 | struct raw_sock *rp = raw_sk(sk); |
719 | 718 | ||
720 | if (inet_sk(sk)->num == IPPROTO_ICMP) | 719 | if (inet_sk(sk)->inet_num == IPPROTO_ICMP) |
721 | memset(&rp->filter, 0, sizeof(rp->filter)); | 720 | memset(&rp->filter, 0, sizeof(rp->filter)); |
722 | return 0; | 721 | return 0; |
723 | } | 722 | } |
@@ -754,7 +753,7 @@ static int do_raw_setsockopt(struct sock *sk, int level, int optname, | |||
754 | char __user *optval, unsigned int optlen) | 753 | char __user *optval, unsigned int optlen) |
755 | { | 754 | { |
756 | if (optname == ICMP_FILTER) { | 755 | if (optname == ICMP_FILTER) { |
757 | if (inet_sk(sk)->num != IPPROTO_ICMP) | 756 | if (inet_sk(sk)->inet_num != IPPROTO_ICMP) |
758 | return -EOPNOTSUPP; | 757 | return -EOPNOTSUPP; |
759 | else | 758 | else |
760 | return raw_seticmpfilter(sk, optval, optlen); | 759 | return raw_seticmpfilter(sk, optval, optlen); |
@@ -784,7 +783,7 @@ static int do_raw_getsockopt(struct sock *sk, int level, int optname, | |||
784 | char __user *optval, int __user *optlen) | 783 | char __user *optval, int __user *optlen) |
785 | { | 784 | { |
786 | if (optname == ICMP_FILTER) { | 785 | if (optname == ICMP_FILTER) { |
787 | if (inet_sk(sk)->num != IPPROTO_ICMP) | 786 | if (inet_sk(sk)->inet_num != IPPROTO_ICMP) |
788 | return -EOPNOTSUPP; | 787 | return -EOPNOTSUPP; |
789 | else | 788 | else |
790 | return raw_geticmpfilter(sk, optval, optlen); | 789 | return raw_geticmpfilter(sk, optval, optlen); |
@@ -943,10 +942,10 @@ EXPORT_SYMBOL_GPL(raw_seq_stop); | |||
943 | static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i) | 942 | static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i) |
944 | { | 943 | { |
945 | struct inet_sock *inet = inet_sk(sp); | 944 | struct inet_sock *inet = inet_sk(sp); |
946 | __be32 dest = inet->daddr, | 945 | __be32 dest = inet->inet_daddr, |
947 | src = inet->rcv_saddr; | 946 | src = inet->inet_rcv_saddr; |
948 | __u16 destp = 0, | 947 | __u16 destp = 0, |
949 | srcp = inet->num; | 948 | srcp = inet->inet_num; |
950 | 949 | ||
951 | seq_printf(seq, "%4d: %08X:%04X %08X:%04X" | 950 | seq_printf(seq, "%4d: %08X:%04X %08X:%04X" |
952 | " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d\n", | 951 | " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d\n", |
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 5b1050a5d874..4284ceef7945 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -1628,9 +1628,6 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph, | |||
1628 | __be32 daddr = iph->daddr; | 1628 | __be32 daddr = iph->daddr; |
1629 | unsigned short est_mtu = 0; | 1629 | unsigned short est_mtu = 0; |
1630 | 1630 | ||
1631 | if (ipv4_config.no_pmtu_disc) | ||
1632 | return 0; | ||
1633 | |||
1634 | for (k = 0; k < 2; k++) { | 1631 | for (k = 0; k < 2; k++) { |
1635 | for (i = 0; i < 2; i++) { | 1632 | for (i = 0; i < 2; i++) { |
1636 | unsigned hash = rt_hash(daddr, skeys[i], ikeys[k], | 1633 | unsigned hash = rt_hash(daddr, skeys[i], ikeys[k], |
@@ -2855,7 +2852,7 @@ static int rt_fill_info(struct net *net, | |||
2855 | error = rt->u.dst.error; | 2852 | error = rt->u.dst.error; |
2856 | expires = rt->u.dst.expires ? rt->u.dst.expires - jiffies : 0; | 2853 | expires = rt->u.dst.expires ? rt->u.dst.expires - jiffies : 0; |
2857 | if (rt->peer) { | 2854 | if (rt->peer) { |
2858 | id = rt->peer->ip_id_count; | 2855 | id = atomic_read(&rt->peer->ip_id_count) & 0xffff; |
2859 | if (rt->peer->tcp_ts_stamp) { | 2856 | if (rt->peer->tcp_ts_stamp) { |
2860 | ts = rt->peer->tcp_ts; | 2857 | ts = rt->peer->tcp_ts; |
2861 | tsage = get_seconds() - rt->peer->tcp_ts_stamp; | 2858 | tsage = get_seconds() - rt->peer->tcp_ts_stamp; |
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index a6e0e077ac33..3146cc401748 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c | |||
@@ -276,13 +276,6 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, | |||
276 | 276 | ||
277 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV); | 277 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV); |
278 | 278 | ||
279 | /* check for timestamp cookie support */ | ||
280 | memset(&tcp_opt, 0, sizeof(tcp_opt)); | ||
281 | tcp_parse_options(skb, &tcp_opt, 0); | ||
282 | |||
283 | if (tcp_opt.saw_tstamp) | ||
284 | cookie_check_timestamp(&tcp_opt); | ||
285 | |||
286 | ret = NULL; | 279 | ret = NULL; |
287 | req = inet_reqsk_alloc(&tcp_request_sock_ops); /* for safety */ | 280 | req = inet_reqsk_alloc(&tcp_request_sock_ops); /* for safety */ |
288 | if (!req) | 281 | if (!req) |
@@ -298,12 +291,6 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, | |||
298 | ireq->loc_addr = ip_hdr(skb)->daddr; | 291 | ireq->loc_addr = ip_hdr(skb)->daddr; |
299 | ireq->rmt_addr = ip_hdr(skb)->saddr; | 292 | ireq->rmt_addr = ip_hdr(skb)->saddr; |
300 | ireq->ecn_ok = 0; | 293 | ireq->ecn_ok = 0; |
301 | ireq->snd_wscale = tcp_opt.snd_wscale; | ||
302 | ireq->rcv_wscale = tcp_opt.rcv_wscale; | ||
303 | ireq->sack_ok = tcp_opt.sack_ok; | ||
304 | ireq->wscale_ok = tcp_opt.wscale_ok; | ||
305 | ireq->tstamp_ok = tcp_opt.saw_tstamp; | ||
306 | req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0; | ||
307 | 294 | ||
308 | /* We throwed the options of the initial SYN away, so we hope | 295 | /* We throwed the options of the initial SYN away, so we hope |
309 | * the ACK carries the same options again (see RFC1122 4.2.3.8) | 296 | * the ACK carries the same options again (see RFC1122 4.2.3.8) |
@@ -333,7 +320,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, | |||
333 | * no easy way to do this. | 320 | * no easy way to do this. |
334 | */ | 321 | */ |
335 | { | 322 | { |
336 | struct flowi fl = { .nl_u = { .ip4_u = | 323 | struct flowi fl = { .mark = sk->sk_mark, |
324 | .nl_u = { .ip4_u = | ||
337 | { .daddr = ((opt && opt->srr) ? | 325 | { .daddr = ((opt && opt->srr) ? |
338 | opt->faddr : | 326 | opt->faddr : |
339 | ireq->rmt_addr), | 327 | ireq->rmt_addr), |
@@ -351,6 +339,20 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, | |||
351 | } | 339 | } |
352 | } | 340 | } |
353 | 341 | ||
342 | /* check for timestamp cookie support */ | ||
343 | memset(&tcp_opt, 0, sizeof(tcp_opt)); | ||
344 | tcp_parse_options(skb, &tcp_opt, 0, &rt->u.dst); | ||
345 | |||
346 | if (tcp_opt.saw_tstamp) | ||
347 | cookie_check_timestamp(&tcp_opt); | ||
348 | |||
349 | ireq->snd_wscale = tcp_opt.snd_wscale; | ||
350 | ireq->rcv_wscale = tcp_opt.rcv_wscale; | ||
351 | ireq->sack_ok = tcp_opt.sack_ok; | ||
352 | ireq->wscale_ok = tcp_opt.wscale_ok; | ||
353 | ireq->tstamp_ok = tcp_opt.saw_tstamp; | ||
354 | req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0; | ||
355 | |||
354 | /* Try to redo what tcp_v4_send_synack did. */ | 356 | /* Try to redo what tcp_v4_send_synack did. */ |
355 | req->window_clamp = tp->window_clamp ? :dst_metric(&rt->u.dst, RTAX_WINDOW); | 357 | req->window_clamp = tp->window_clamp ? :dst_metric(&rt->u.dst, RTAX_WINDOW); |
356 | 358 | ||
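The syncookie hunks move option parsing until after the route lookup (so tcp_parse_options() can consult the dst) and add .mark = sk->sk_mark to the flowi, so the SYN-ACK route honors the listener's fwmark. A hedged userspace sketch of setting that mark (SO_MARK requires CAP_NET_ADMIN); listen_fd and the mark value are illustrative:

#include <sys/socket.h>

/* Hypothetical: mark a listening socket so policy routes keyed on fwmark
 * (e.g. "ip rule ... fwmark 0x1") also apply to syncookie SYN-ACKs. */
static int mark_listener(int listen_fd)
{
	unsigned int mark = 0x1;	/* requires CAP_NET_ADMIN */

	return setsockopt(listen_fd, SOL_SOCKET, SO_MARK, &mark, sizeof(mark));
}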
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index f1813bc71088..524f9760193b 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -2042,7 +2042,7 @@ int tcp_disconnect(struct sock *sk, int flags) | |||
2042 | __skb_queue_purge(&sk->sk_async_wait_queue); | 2042 | __skb_queue_purge(&sk->sk_async_wait_queue); |
2043 | #endif | 2043 | #endif |
2044 | 2044 | ||
2045 | inet->dport = 0; | 2045 | inet->inet_dport = 0; |
2046 | 2046 | ||
2047 | if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) | 2047 | if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) |
2048 | inet_reset_saddr(sk); | 2048 | inet_reset_saddr(sk); |
@@ -2066,7 +2066,7 @@ int tcp_disconnect(struct sock *sk, int flags) | |||
2066 | memset(&tp->rx_opt, 0, sizeof(tp->rx_opt)); | 2066 | memset(&tp->rx_opt, 0, sizeof(tp->rx_opt)); |
2067 | __sk_dst_reset(sk); | 2067 | __sk_dst_reset(sk); |
2068 | 2068 | ||
2069 | WARN_ON(inet->num && !icsk->icsk_bind_hash); | 2069 | WARN_ON(inet->inet_num && !icsk->icsk_bind_hash); |
2070 | 2070 | ||
2071 | sk->sk_error_report(sk); | 2071 | sk->sk_error_report(sk); |
2072 | return err; | 2072 | return err; |
@@ -2903,11 +2903,10 @@ void __init tcp_init(void) | |||
2903 | (totalram_pages >= 128 * 1024) ? | 2903 | (totalram_pages >= 128 * 1024) ? |
2904 | 13 : 15, | 2904 | 13 : 15, |
2905 | 0, | 2905 | 0, |
2906 | &tcp_hashinfo.ehash_size, | ||
2907 | NULL, | 2906 | NULL, |
2907 | &tcp_hashinfo.ehash_mask, | ||
2908 | thash_entries ? 0 : 512 * 1024); | 2908 | thash_entries ? 0 : 512 * 1024); |
2909 | tcp_hashinfo.ehash_size = 1 << tcp_hashinfo.ehash_size; | 2909 | for (i = 0; i <= tcp_hashinfo.ehash_mask; i++) { |
2910 | for (i = 0; i < tcp_hashinfo.ehash_size; i++) { | ||
2911 | INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i); | 2910 | INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i); |
2912 | INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].twchain, i); | 2911 | INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].twchain, i); |
2913 | } | 2912 | } |
@@ -2916,7 +2915,7 @@ void __init tcp_init(void) | |||
2916 | tcp_hashinfo.bhash = | 2915 | tcp_hashinfo.bhash = |
2917 | alloc_large_system_hash("TCP bind", | 2916 | alloc_large_system_hash("TCP bind", |
2918 | sizeof(struct inet_bind_hashbucket), | 2917 | sizeof(struct inet_bind_hashbucket), |
2919 | tcp_hashinfo.ehash_size, | 2918 | tcp_hashinfo.ehash_mask + 1, |
2920 | (totalram_pages >= 128 * 1024) ? | 2919 | (totalram_pages >= 128 * 1024) ? |
2921 | 13 : 15, | 2920 | 13 : 15, |
2922 | 0, | 2921 | 0, |
@@ -2971,8 +2970,8 @@ void __init tcp_init(void) | |||
2971 | sysctl_tcp_rmem[2] = max(87380, max_share); | 2970 | sysctl_tcp_rmem[2] = max(87380, max_share); |
2972 | 2971 | ||
2973 | printk(KERN_INFO "TCP: Hash tables configured " | 2972 | printk(KERN_INFO "TCP: Hash tables configured " |
2974 | "(established %d bind %d)\n", | 2973 | "(established %u bind %u)\n", |
2975 | tcp_hashinfo.ehash_size, tcp_hashinfo.bhash_size); | 2974 | tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size); |
2976 | 2975 | ||
2977 | tcp_register_congestion_control(&tcp_reno); | 2976 | tcp_register_congestion_control(&tcp_reno); |
2978 | } | 2977 | } |
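The tcp_init() hunks stop tracking an entry count (ehash_size) and keep only the power-of-two mask (ehash_mask), so bucket loops run from 0 to mask inclusive and hashing becomes a plain AND. A standalone illustration of the size-versus-mask arithmetic; the values are made up:

#include <stdio.h>

int main(void)
{
	unsigned int mask = (1u << 13) - 1;	/* table with 8192 buckets */
	unsigned int hash = 123456789u;
	unsigned int i, buckets = 0;

	printf("bucket for hash: %u\n", hash & mask);	/* index, no modulo */

	for (i = 0; i <= mask; i++)	/* note: <=, mask is the last valid index */
		buckets++;

	printf("entries = mask + 1 = %u\n", buckets);
	return 0;
}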
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index d86784be7ab3..cc306ac6eb51 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -140,7 +140,7 @@ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb) | |||
140 | * "len" is invariant segment length, including TCP header. | 140 | * "len" is invariant segment length, including TCP header. |
141 | */ | 141 | */ |
142 | len += skb->data - skb_transport_header(skb); | 142 | len += skb->data - skb_transport_header(skb); |
143 | if (len >= TCP_MIN_RCVMSS + sizeof(struct tcphdr) || | 143 | if (len >= TCP_MSS_DEFAULT + sizeof(struct tcphdr) || |
144 | /* If PSH is not set, packet should be | 144 | /* If PSH is not set, packet should be |
145 | * full sized, provided peer TCP is not badly broken. | 145 | * full sized, provided peer TCP is not badly broken. |
146 | * This observation (if it is correct 8)) allows | 146 | * This observation (if it is correct 8)) allows |
@@ -411,7 +411,7 @@ void tcp_initialize_rcv_mss(struct sock *sk) | |||
411 | unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache); | 411 | unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache); |
412 | 412 | ||
413 | hint = min(hint, tp->rcv_wnd / 2); | 413 | hint = min(hint, tp->rcv_wnd / 2); |
414 | hint = min(hint, TCP_MIN_RCVMSS); | 414 | hint = min(hint, TCP_MSS_DEFAULT); |
415 | hint = max(hint, TCP_MIN_MSS); | 415 | hint = max(hint, TCP_MIN_MSS); |
416 | 416 | ||
417 | inet_csk(sk)->icsk_ack.rcv_mss = hint; | 417 | inet_csk(sk)->icsk_ack.rcv_mss = hint; |
@@ -2300,7 +2300,7 @@ static inline int tcp_fackets_out(struct tcp_sock *tp) | |||
2300 | * they differ. Since neither occurs due to loss, TCP should really | 2300 | * they differ. Since neither occurs due to loss, TCP should really |
2301 | * ignore them. | 2301 | * ignore them. |
2302 | */ | 2302 | */ |
2303 | static inline int tcp_dupack_heurestics(struct tcp_sock *tp) | 2303 | static inline int tcp_dupack_heuristics(struct tcp_sock *tp) |
2304 | { | 2304 | { |
2305 | return tcp_is_fack(tp) ? tp->fackets_out : tp->sacked_out + 1; | 2305 | return tcp_is_fack(tp) ? tp->fackets_out : tp->sacked_out + 1; |
2306 | } | 2306 | } |
@@ -2425,7 +2425,7 @@ static int tcp_time_to_recover(struct sock *sk) | |||
2425 | return 1; | 2425 | return 1; |
2426 | 2426 | ||
2427 | /* Not-A-Trick#2 : Classic rule... */ | 2427 | /* Not-A-Trick#2 : Classic rule... */ |
2428 | if (tcp_dupack_heurestics(tp) > tp->reordering) | 2428 | if (tcp_dupack_heuristics(tp) > tp->reordering) |
2429 | return 1; | 2429 | return 1; |
2430 | 2430 | ||
2431 | /* Trick#3 : when we use RFC2988 timer restart, fast | 2431 | /* Trick#3 : when we use RFC2988 timer restart, fast |
@@ -3698,7 +3698,7 @@ old_ack: | |||
3698 | * the fast version below fails. | 3698 | * the fast version below fails. |
3699 | */ | 3699 | */ |
3700 | void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx, | 3700 | void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx, |
3701 | int estab) | 3701 | int estab, struct dst_entry *dst) |
3702 | { | 3702 | { |
3703 | unsigned char *ptr; | 3703 | unsigned char *ptr; |
3704 | struct tcphdr *th = tcp_hdr(skb); | 3704 | struct tcphdr *th = tcp_hdr(skb); |
@@ -3737,7 +3737,8 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx, | |||
3737 | break; | 3737 | break; |
3738 | case TCPOPT_WINDOW: | 3738 | case TCPOPT_WINDOW: |
3739 | if (opsize == TCPOLEN_WINDOW && th->syn && | 3739 | if (opsize == TCPOLEN_WINDOW && th->syn && |
3740 | !estab && sysctl_tcp_window_scaling) { | 3740 | !estab && sysctl_tcp_window_scaling && |
3741 | !dst_feature(dst, RTAX_FEATURE_NO_WSCALE)) { | ||
3741 | __u8 snd_wscale = *(__u8 *)ptr; | 3742 | __u8 snd_wscale = *(__u8 *)ptr; |
3742 | opt_rx->wscale_ok = 1; | 3743 | opt_rx->wscale_ok = 1; |
3743 | if (snd_wscale > 14) { | 3744 | if (snd_wscale > 14) { |
@@ -3753,7 +3754,8 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx, | |||
3753 | case TCPOPT_TIMESTAMP: | 3754 | case TCPOPT_TIMESTAMP: |
3754 | if ((opsize == TCPOLEN_TIMESTAMP) && | 3755 | if ((opsize == TCPOLEN_TIMESTAMP) && |
3755 | ((estab && opt_rx->tstamp_ok) || | 3756 | ((estab && opt_rx->tstamp_ok) || |
3756 | (!estab && sysctl_tcp_timestamps))) { | 3757 | (!estab && sysctl_tcp_timestamps && |
3758 | !dst_feature(dst, RTAX_FEATURE_NO_TSTAMP)))) { | ||
3757 | opt_rx->saw_tstamp = 1; | 3759 | opt_rx->saw_tstamp = 1; |
3758 | opt_rx->rcv_tsval = get_unaligned_be32(ptr); | 3760 | opt_rx->rcv_tsval = get_unaligned_be32(ptr); |
3759 | opt_rx->rcv_tsecr = get_unaligned_be32(ptr + 4); | 3761 | opt_rx->rcv_tsecr = get_unaligned_be32(ptr + 4); |
@@ -3761,7 +3763,8 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx, | |||
3761 | break; | 3763 | break; |
3762 | case TCPOPT_SACK_PERM: | 3764 | case TCPOPT_SACK_PERM: |
3763 | if (opsize == TCPOLEN_SACK_PERM && th->syn && | 3765 | if (opsize == TCPOLEN_SACK_PERM && th->syn && |
3764 | !estab && sysctl_tcp_sack) { | 3766 | !estab && sysctl_tcp_sack && |
3767 | !dst_feature(dst, RTAX_FEATURE_NO_SACK)) { | ||
3765 | opt_rx->sack_ok = 1; | 3768 | opt_rx->sack_ok = 1; |
3766 | tcp_sack_reset(opt_rx); | 3769 | tcp_sack_reset(opt_rx); |
3767 | } | 3770 | } |
@@ -3820,7 +3823,7 @@ static int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th, | |||
3820 | if (tcp_parse_aligned_timestamp(tp, th)) | 3823 | if (tcp_parse_aligned_timestamp(tp, th)) |
3821 | return 1; | 3824 | return 1; |
3822 | } | 3825 | } |
3823 | tcp_parse_options(skb, &tp->rx_opt, 1); | 3826 | tcp_parse_options(skb, &tp->rx_opt, 1, NULL); |
3824 | return 1; | 3827 | return 1; |
3825 | } | 3828 | } |
3826 | 3829 | ||
@@ -4075,8 +4078,10 @@ static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq, | |||
4075 | static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq) | 4078 | static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq) |
4076 | { | 4079 | { |
4077 | struct tcp_sock *tp = tcp_sk(sk); | 4080 | struct tcp_sock *tp = tcp_sk(sk); |
4081 | struct dst_entry *dst = __sk_dst_get(sk); | ||
4078 | 4082 | ||
4079 | if (tcp_is_sack(tp) && sysctl_tcp_dsack) { | 4083 | if (tcp_is_sack(tp) && sysctl_tcp_dsack && |
4084 | !dst_feature(dst, RTAX_FEATURE_NO_DSACK)) { | ||
4080 | int mib_idx; | 4085 | int mib_idx; |
4081 | 4086 | ||
4082 | if (before(seq, tp->rcv_nxt)) | 4087 | if (before(seq, tp->rcv_nxt)) |
@@ -4105,13 +4110,15 @@ static void tcp_dsack_extend(struct sock *sk, u32 seq, u32 end_seq) | |||
4105 | static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb) | 4110 | static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb) |
4106 | { | 4111 | { |
4107 | struct tcp_sock *tp = tcp_sk(sk); | 4112 | struct tcp_sock *tp = tcp_sk(sk); |
4113 | struct dst_entry *dst = __sk_dst_get(sk); | ||
4108 | 4114 | ||
4109 | if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && | 4115 | if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && |
4110 | before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { | 4116 | before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { |
4111 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); | 4117 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); |
4112 | tcp_enter_quickack_mode(sk); | 4118 | tcp_enter_quickack_mode(sk); |
4113 | 4119 | ||
4114 | if (tcp_is_sack(tp) && sysctl_tcp_dsack) { | 4120 | if (tcp_is_sack(tp) && sysctl_tcp_dsack && |
4121 | !dst_feature(dst, RTAX_FEATURE_NO_DSACK)) { | ||
4115 | u32 end_seq = TCP_SKB_CB(skb)->end_seq; | 4122 | u32 end_seq = TCP_SKB_CB(skb)->end_seq; |
4116 | 4123 | ||
4117 | if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) | 4124 | if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) |
@@ -5364,8 +5371,9 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, | |||
5364 | struct tcp_sock *tp = tcp_sk(sk); | 5371 | struct tcp_sock *tp = tcp_sk(sk); |
5365 | struct inet_connection_sock *icsk = inet_csk(sk); | 5372 | struct inet_connection_sock *icsk = inet_csk(sk); |
5366 | int saved_clamp = tp->rx_opt.mss_clamp; | 5373 | int saved_clamp = tp->rx_opt.mss_clamp; |
5374 | struct dst_entry *dst = __sk_dst_get(sk); | ||
5367 | 5375 | ||
5368 | tcp_parse_options(skb, &tp->rx_opt, 0); | 5376 | tcp_parse_options(skb, &tp->rx_opt, 0, dst); |
5369 | 5377 | ||
5370 | if (th->ack) { | 5378 | if (th->ack) { |
5371 | /* rfc793: | 5379 | /* rfc793: |
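Throughout tcp_input.c the global option sysctls are now ANDed with per-route feature bits, so window scaling, timestamps, SACK or D-SACK can be disabled for a single destination through route metrics instead of system-wide. A kernel-style sketch of the gating pattern, assuming a valid dst_entry; tcp_sack_allowed() is an illustrative helper name, not part of the patch:

#include <net/dst.h>
#include <net/tcp.h>

/* Illustrative helper: the option is advertised only if the sysctl allows
 * it AND the route does not carry the matching RTAX_FEATURE_NO_* bit. */
static inline int tcp_sack_allowed(const struct dst_entry *dst)
{
	return sysctl_tcp_sack && !dst_feature(dst, RTAX_FEATURE_NO_SACK);
}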
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 7cda24b53f61..df18ce04f41e 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -165,10 +165,10 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
165 | nexthop = inet->opt->faddr; | 165 | nexthop = inet->opt->faddr; |
166 | } | 166 | } |
167 | 167 | ||
168 | tmp = ip_route_connect(&rt, nexthop, inet->saddr, | 168 | tmp = ip_route_connect(&rt, nexthop, inet->inet_saddr, |
169 | RT_CONN_FLAGS(sk), sk->sk_bound_dev_if, | 169 | RT_CONN_FLAGS(sk), sk->sk_bound_dev_if, |
170 | IPPROTO_TCP, | 170 | IPPROTO_TCP, |
171 | inet->sport, usin->sin_port, sk, 1); | 171 | inet->inet_sport, usin->sin_port, sk, 1); |
172 | if (tmp < 0) { | 172 | if (tmp < 0) { |
173 | if (tmp == -ENETUNREACH) | 173 | if (tmp == -ENETUNREACH) |
174 | IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES); | 174 | IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES); |
@@ -183,11 +183,11 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
183 | if (!inet->opt || !inet->opt->srr) | 183 | if (!inet->opt || !inet->opt->srr) |
184 | daddr = rt->rt_dst; | 184 | daddr = rt->rt_dst; |
185 | 185 | ||
186 | if (!inet->saddr) | 186 | if (!inet->inet_saddr) |
187 | inet->saddr = rt->rt_src; | 187 | inet->inet_saddr = rt->rt_src; |
188 | inet->rcv_saddr = inet->saddr; | 188 | inet->inet_rcv_saddr = inet->inet_saddr; |
189 | 189 | ||
190 | if (tp->rx_opt.ts_recent_stamp && inet->daddr != daddr) { | 190 | if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) { |
191 | /* Reset inherited state */ | 191 | /* Reset inherited state */ |
192 | tp->rx_opt.ts_recent = 0; | 192 | tp->rx_opt.ts_recent = 0; |
193 | tp->rx_opt.ts_recent_stamp = 0; | 193 | tp->rx_opt.ts_recent_stamp = 0; |
@@ -204,20 +204,20 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
204 | * when trying new connection. | 204 | * when trying new connection. |
205 | */ | 205 | */ |
206 | if (peer != NULL && | 206 | if (peer != NULL && |
207 | peer->tcp_ts_stamp + TCP_PAWS_MSL >= get_seconds()) { | 207 | (u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) { |
208 | tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp; | 208 | tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp; |
209 | tp->rx_opt.ts_recent = peer->tcp_ts; | 209 | tp->rx_opt.ts_recent = peer->tcp_ts; |
210 | } | 210 | } |
211 | } | 211 | } |
212 | 212 | ||
213 | inet->dport = usin->sin_port; | 213 | inet->inet_dport = usin->sin_port; |
214 | inet->daddr = daddr; | 214 | inet->inet_daddr = daddr; |
215 | 215 | ||
216 | inet_csk(sk)->icsk_ext_hdr_len = 0; | 216 | inet_csk(sk)->icsk_ext_hdr_len = 0; |
217 | if (inet->opt) | 217 | if (inet->opt) |
218 | inet_csk(sk)->icsk_ext_hdr_len = inet->opt->optlen; | 218 | inet_csk(sk)->icsk_ext_hdr_len = inet->opt->optlen; |
219 | 219 | ||
220 | tp->rx_opt.mss_clamp = 536; | 220 | tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT; |
221 | 221 | ||
222 | /* Socket identity is still unknown (sport may be zero). | 222 | /* Socket identity is still unknown (sport may be zero). |
223 | * However we set state to SYN-SENT and not releasing socket | 223 | * However we set state to SYN-SENT and not releasing socket |
@@ -230,7 +230,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
230 | goto failure; | 230 | goto failure; |
231 | 231 | ||
232 | err = ip_route_newports(&rt, IPPROTO_TCP, | 232 | err = ip_route_newports(&rt, IPPROTO_TCP, |
233 | inet->sport, inet->dport, sk); | 233 | inet->inet_sport, inet->inet_dport, sk); |
234 | if (err) | 234 | if (err) |
235 | goto failure; | 235 | goto failure; |
236 | 236 | ||
@@ -239,12 +239,12 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
239 | sk_setup_caps(sk, &rt->u.dst); | 239 | sk_setup_caps(sk, &rt->u.dst); |
240 | 240 | ||
241 | if (!tp->write_seq) | 241 | if (!tp->write_seq) |
242 | tp->write_seq = secure_tcp_sequence_number(inet->saddr, | 242 | tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr, |
243 | inet->daddr, | 243 | inet->inet_daddr, |
244 | inet->sport, | 244 | inet->inet_sport, |
245 | usin->sin_port); | 245 | usin->sin_port); |
246 | 246 | ||
247 | inet->id = tp->write_seq ^ jiffies; | 247 | inet->inet_id = tp->write_seq ^ jiffies; |
248 | 248 | ||
249 | err = tcp_connect(sk); | 249 | err = tcp_connect(sk); |
250 | rt = NULL; | 250 | rt = NULL; |
@@ -261,7 +261,7 @@ failure: | |||
261 | tcp_set_state(sk, TCP_CLOSE); | 261 | tcp_set_state(sk, TCP_CLOSE); |
262 | ip_rt_put(rt); | 262 | ip_rt_put(rt); |
263 | sk->sk_route_caps = 0; | 263 | sk->sk_route_caps = 0; |
264 | inet->dport = 0; | 264 | inet->inet_dport = 0; |
265 | return err; | 265 | return err; |
266 | } | 266 | } |
267 | 267 | ||
@@ -520,12 +520,13 @@ void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb) | |||
520 | struct tcphdr *th = tcp_hdr(skb); | 520 | struct tcphdr *th = tcp_hdr(skb); |
521 | 521 | ||
522 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 522 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
523 | th->check = ~tcp_v4_check(len, inet->saddr, | 523 | th->check = ~tcp_v4_check(len, inet->inet_saddr, |
524 | inet->daddr, 0); | 524 | inet->inet_daddr, 0); |
525 | skb->csum_start = skb_transport_header(skb) - skb->head; | 525 | skb->csum_start = skb_transport_header(skb) - skb->head; |
526 | skb->csum_offset = offsetof(struct tcphdr, check); | 526 | skb->csum_offset = offsetof(struct tcphdr, check); |
527 | } else { | 527 | } else { |
528 | th->check = tcp_v4_check(len, inet->saddr, inet->daddr, | 528 | th->check = tcp_v4_check(len, inet->inet_saddr, |
529 | inet->inet_daddr, | ||
529 | csum_partial(th, | 530 | csum_partial(th, |
530 | th->doff << 2, | 531 | th->doff << 2, |
531 | skb->csum)); | 532 | skb->csum)); |
@@ -848,7 +849,7 @@ static struct tcp_md5sig_key * | |||
848 | struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk, | 849 | struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk, |
849 | struct sock *addr_sk) | 850 | struct sock *addr_sk) |
850 | { | 851 | { |
851 | return tcp_v4_md5_do_lookup(sk, inet_sk(addr_sk)->daddr); | 852 | return tcp_v4_md5_do_lookup(sk, inet_sk(addr_sk)->inet_daddr); |
852 | } | 853 | } |
853 | 854 | ||
854 | EXPORT_SYMBOL(tcp_v4_md5_lookup); | 855 | EXPORT_SYMBOL(tcp_v4_md5_lookup); |
@@ -923,7 +924,7 @@ EXPORT_SYMBOL(tcp_v4_md5_do_add); | |||
923 | static int tcp_v4_md5_add_func(struct sock *sk, struct sock *addr_sk, | 924 | static int tcp_v4_md5_add_func(struct sock *sk, struct sock *addr_sk, |
924 | u8 *newkey, u8 newkeylen) | 925 | u8 *newkey, u8 newkeylen) |
925 | { | 926 | { |
926 | return tcp_v4_md5_do_add(sk, inet_sk(addr_sk)->daddr, | 927 | return tcp_v4_md5_do_add(sk, inet_sk(addr_sk)->inet_daddr, |
927 | newkey, newkeylen); | 928 | newkey, newkeylen); |
928 | } | 929 | } |
929 | 930 | ||
@@ -1089,8 +1090,8 @@ int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key, | |||
1089 | __be32 saddr, daddr; | 1090 | __be32 saddr, daddr; |
1090 | 1091 | ||
1091 | if (sk) { | 1092 | if (sk) { |
1092 | saddr = inet_sk(sk)->saddr; | 1093 | saddr = inet_sk(sk)->inet_saddr; |
1093 | daddr = inet_sk(sk)->daddr; | 1094 | daddr = inet_sk(sk)->inet_daddr; |
1094 | } else if (req) { | 1095 | } else if (req) { |
1095 | saddr = inet_rsk(req)->loc_addr; | 1096 | saddr = inet_rsk(req)->loc_addr; |
1096 | daddr = inet_rsk(req)->rmt_addr; | 1097 | daddr = inet_rsk(req)->rmt_addr; |
@@ -1256,11 +1257,21 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) | |||
1256 | tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops; | 1257 | tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops; |
1257 | #endif | 1258 | #endif |
1258 | 1259 | ||
1260 | ireq = inet_rsk(req); | ||
1261 | ireq->loc_addr = daddr; | ||
1262 | ireq->rmt_addr = saddr; | ||
1263 | ireq->no_srccheck = inet_sk(sk)->transparent; | ||
1264 | ireq->opt = tcp_v4_save_options(sk, skb); | ||
1265 | |||
1266 | dst = inet_csk_route_req(sk, req); | ||
1267 | if(!dst) | ||
1268 | goto drop_and_free; | ||
1269 | |||
1259 | tcp_clear_options(&tmp_opt); | 1270 | tcp_clear_options(&tmp_opt); |
1260 | tmp_opt.mss_clamp = 536; | 1271 | tmp_opt.mss_clamp = TCP_MSS_DEFAULT; |
1261 | tmp_opt.user_mss = tcp_sk(sk)->rx_opt.user_mss; | 1272 | tmp_opt.user_mss = tcp_sk(sk)->rx_opt.user_mss; |
1262 | 1273 | ||
1263 | tcp_parse_options(skb, &tmp_opt, 0); | 1274 | tcp_parse_options(skb, &tmp_opt, 0, dst); |
1264 | 1275 | ||
1265 | if (want_cookie && !tmp_opt.saw_tstamp) | 1276 | if (want_cookie && !tmp_opt.saw_tstamp) |
1266 | tcp_clear_options(&tmp_opt); | 1277 | tcp_clear_options(&tmp_opt); |
@@ -1269,14 +1280,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) | |||
1269 | 1280 | ||
1270 | tcp_openreq_init(req, &tmp_opt, skb); | 1281 | tcp_openreq_init(req, &tmp_opt, skb); |
1271 | 1282 | ||
1272 | ireq = inet_rsk(req); | ||
1273 | ireq->loc_addr = daddr; | ||
1274 | ireq->rmt_addr = saddr; | ||
1275 | ireq->no_srccheck = inet_sk(sk)->transparent; | ||
1276 | ireq->opt = tcp_v4_save_options(sk, skb); | ||
1277 | |||
1278 | if (security_inet_conn_request(sk, skb, req)) | 1283 | if (security_inet_conn_request(sk, skb, req)) |
1279 | goto drop_and_free; | 1284 | goto drop_and_release; |
1280 | 1285 | ||
1281 | if (!want_cookie) | 1286 | if (!want_cookie) |
1282 | TCP_ECN_create_request(req, tcp_hdr(skb)); | 1287 | TCP_ECN_create_request(req, tcp_hdr(skb)); |
@@ -1301,10 +1306,9 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) | |||
1301 | */ | 1306 | */ |
1302 | if (tmp_opt.saw_tstamp && | 1307 | if (tmp_opt.saw_tstamp && |
1303 | tcp_death_row.sysctl_tw_recycle && | 1308 | tcp_death_row.sysctl_tw_recycle && |
1304 | (dst = inet_csk_route_req(sk, req)) != NULL && | ||
1305 | (peer = rt_get_peer((struct rtable *)dst)) != NULL && | 1309 | (peer = rt_get_peer((struct rtable *)dst)) != NULL && |
1306 | peer->v4daddr == saddr) { | 1310 | peer->v4daddr == saddr) { |
1307 | if (get_seconds() < peer->tcp_ts_stamp + TCP_PAWS_MSL && | 1311 | if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL && |
1308 | (s32)(peer->tcp_ts - req->ts_recent) > | 1312 | (s32)(peer->tcp_ts - req->ts_recent) > |
1309 | TCP_PAWS_WINDOW) { | 1313 | TCP_PAWS_WINDOW) { |
1310 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED); | 1314 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED); |
@@ -1380,9 +1384,9 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb, | |||
1380 | newtp = tcp_sk(newsk); | 1384 | newtp = tcp_sk(newsk); |
1381 | newinet = inet_sk(newsk); | 1385 | newinet = inet_sk(newsk); |
1382 | ireq = inet_rsk(req); | 1386 | ireq = inet_rsk(req); |
1383 | newinet->daddr = ireq->rmt_addr; | 1387 | newinet->inet_daddr = ireq->rmt_addr; |
1384 | newinet->rcv_saddr = ireq->loc_addr; | 1388 | newinet->inet_rcv_saddr = ireq->loc_addr; |
1385 | newinet->saddr = ireq->loc_addr; | 1389 | newinet->inet_saddr = ireq->loc_addr; |
1386 | newinet->opt = ireq->opt; | 1390 | newinet->opt = ireq->opt; |
1387 | ireq->opt = NULL; | 1391 | ireq->opt = NULL; |
1388 | newinet->mc_index = inet_iif(skb); | 1392 | newinet->mc_index = inet_iif(skb); |
@@ -1390,7 +1394,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb, | |||
1390 | inet_csk(newsk)->icsk_ext_hdr_len = 0; | 1394 | inet_csk(newsk)->icsk_ext_hdr_len = 0; |
1391 | if (newinet->opt) | 1395 | if (newinet->opt) |
1392 | inet_csk(newsk)->icsk_ext_hdr_len = newinet->opt->optlen; | 1396 | inet_csk(newsk)->icsk_ext_hdr_len = newinet->opt->optlen; |
1393 | newinet->id = newtp->write_seq ^ jiffies; | 1397 | newinet->inet_id = newtp->write_seq ^ jiffies; |
1394 | 1398 | ||
1395 | tcp_mtup_init(newsk); | 1399 | tcp_mtup_init(newsk); |
1396 | tcp_sync_mss(newsk, dst_mtu(dst)); | 1400 | tcp_sync_mss(newsk, dst_mtu(dst)); |
@@ -1403,7 +1407,8 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb, | |||
1403 | 1407 | ||
1404 | #ifdef CONFIG_TCP_MD5SIG | 1408 | #ifdef CONFIG_TCP_MD5SIG |
1405 | /* Copy over the MD5 key from the original socket */ | 1409 | /* Copy over the MD5 key from the original socket */ |
1406 | if ((key = tcp_v4_md5_do_lookup(sk, newinet->daddr)) != NULL) { | 1410 | key = tcp_v4_md5_do_lookup(sk, newinet->inet_daddr); |
1411 | if (key != NULL) { | ||
1407 | /* | 1412 | /* |
1408 | * We're using one, so create a matching key | 1413 | * We're using one, so create a matching key |
1409 | * on the newsk structure. If we fail to get | 1414 | * on the newsk structure. If we fail to get |
@@ -1412,7 +1417,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb, | |||
1412 | */ | 1417 | */ |
1413 | char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC); | 1418 | char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC); |
1414 | if (newkey != NULL) | 1419 | if (newkey != NULL) |
1415 | tcp_v4_md5_do_add(newsk, newinet->daddr, | 1420 | tcp_v4_md5_do_add(newsk, newinet->inet_daddr, |
1416 | newkey, key->keylen); | 1421 | newkey, key->keylen); |
1417 | newsk->sk_route_caps &= ~NETIF_F_GSO_MASK; | 1422 | newsk->sk_route_caps &= ~NETIF_F_GSO_MASK; |
1418 | } | 1423 | } |
@@ -1711,8 +1716,8 @@ int tcp_v4_remember_stamp(struct sock *sk) | |||
1711 | struct inet_peer *peer = NULL; | 1716 | struct inet_peer *peer = NULL; |
1712 | int release_it = 0; | 1717 | int release_it = 0; |
1713 | 1718 | ||
1714 | if (!rt || rt->rt_dst != inet->daddr) { | 1719 | if (!rt || rt->rt_dst != inet->inet_daddr) { |
1715 | peer = inet_getpeer(inet->daddr, 1); | 1720 | peer = inet_getpeer(inet->inet_daddr, 1); |
1716 | release_it = 1; | 1721 | release_it = 1; |
1717 | } else { | 1722 | } else { |
1718 | if (!rt->peer) | 1723 | if (!rt->peer) |
@@ -1722,9 +1727,9 @@ int tcp_v4_remember_stamp(struct sock *sk) | |||
1722 | 1727 | ||
1723 | if (peer) { | 1728 | if (peer) { |
1724 | if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 || | 1729 | if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 || |
1725 | (peer->tcp_ts_stamp + TCP_PAWS_MSL < get_seconds() && | 1730 | ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL && |
1726 | peer->tcp_ts_stamp <= tp->rx_opt.ts_recent_stamp)) { | 1731 | peer->tcp_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) { |
1727 | peer->tcp_ts_stamp = tp->rx_opt.ts_recent_stamp; | 1732 | peer->tcp_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp; |
1728 | peer->tcp_ts = tp->rx_opt.ts_recent; | 1733 | peer->tcp_ts = tp->rx_opt.ts_recent; |
1729 | } | 1734 | } |
1730 | if (release_it) | 1735 | if (release_it) |
@@ -1743,9 +1748,9 @@ int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw) | |||
1743 | const struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw); | 1748 | const struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw); |
1744 | 1749 | ||
1745 | if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 || | 1750 | if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 || |
1746 | (peer->tcp_ts_stamp + TCP_PAWS_MSL < get_seconds() && | 1751 | ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL && |
1747 | peer->tcp_ts_stamp <= tcptw->tw_ts_recent_stamp)) { | 1752 | peer->tcp_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) { |
1748 | peer->tcp_ts_stamp = tcptw->tw_ts_recent_stamp; | 1753 | peer->tcp_ts_stamp = (u32)tcptw->tw_ts_recent_stamp; |
1749 | peer->tcp_ts = tcptw->tw_ts_recent; | 1754 | peer->tcp_ts = tcptw->tw_ts_recent; |
1750 | } | 1755 | } |
1751 | inet_putpeer(peer); | 1756 | inet_putpeer(peer); |
@@ -1810,7 +1815,7 @@ static int tcp_v4_init_sock(struct sock *sk) | |||
1810 | */ | 1815 | */ |
1811 | tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; | 1816 | tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; |
1812 | tp->snd_cwnd_clamp = ~0; | 1817 | tp->snd_cwnd_clamp = ~0; |
1813 | tp->mss_cache = 536; | 1818 | tp->mss_cache = TCP_MSS_DEFAULT; |
1814 | 1819 | ||
1815 | tp->reordering = sysctl_tcp_reordering; | 1820 | tp->reordering = sysctl_tcp_reordering; |
1816 | icsk->icsk_ca_ops = &tcp_init_congestion_ops; | 1821 | icsk->icsk_ca_ops = &tcp_init_congestion_ops; |
@@ -2000,7 +2005,7 @@ static void *established_get_first(struct seq_file *seq) | |||
2000 | struct net *net = seq_file_net(seq); | 2005 | struct net *net = seq_file_net(seq); |
2001 | void *rc = NULL; | 2006 | void *rc = NULL; |
2002 | 2007 | ||
2003 | for (st->bucket = 0; st->bucket < tcp_hashinfo.ehash_size; ++st->bucket) { | 2008 | for (st->bucket = 0; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) { |
2004 | struct sock *sk; | 2009 | struct sock *sk; |
2005 | struct hlist_nulls_node *node; | 2010 | struct hlist_nulls_node *node; |
2006 | struct inet_timewait_sock *tw; | 2011 | struct inet_timewait_sock *tw; |
@@ -2061,10 +2066,10 @@ get_tw: | |||
2061 | st->state = TCP_SEQ_STATE_ESTABLISHED; | 2066 | st->state = TCP_SEQ_STATE_ESTABLISHED; |
2062 | 2067 | ||
2063 | /* Look for next non empty bucket */ | 2068 | /* Look for next non empty bucket */ |
2064 | while (++st->bucket < tcp_hashinfo.ehash_size && | 2069 | while (++st->bucket <= tcp_hashinfo.ehash_mask && |
2065 | empty_bucket(st)) | 2070 | empty_bucket(st)) |
2066 | ; | 2071 | ; |
2067 | if (st->bucket >= tcp_hashinfo.ehash_size) | 2072 | if (st->bucket > tcp_hashinfo.ehash_mask) |
2068 | return NULL; | 2073 | return NULL; |
2069 | 2074 | ||
2070 | spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket)); | 2075 | spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket)); |
@@ -2225,7 +2230,7 @@ static void get_openreq4(struct sock *sk, struct request_sock *req, | |||
2225 | " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p%n", | 2230 | " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p%n", |
2226 | i, | 2231 | i, |
2227 | ireq->loc_addr, | 2232 | ireq->loc_addr, |
2228 | ntohs(inet_sk(sk)->sport), | 2233 | ntohs(inet_sk(sk)->inet_sport), |
2229 | ireq->rmt_addr, | 2234 | ireq->rmt_addr, |
2230 | ntohs(ireq->rmt_port), | 2235 | ntohs(ireq->rmt_port), |
2231 | TCP_SYN_RECV, | 2236 | TCP_SYN_RECV, |
@@ -2248,10 +2253,10 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len) | |||
2248 | struct tcp_sock *tp = tcp_sk(sk); | 2253 | struct tcp_sock *tp = tcp_sk(sk); |
2249 | const struct inet_connection_sock *icsk = inet_csk(sk); | 2254 | const struct inet_connection_sock *icsk = inet_csk(sk); |
2250 | struct inet_sock *inet = inet_sk(sk); | 2255 | struct inet_sock *inet = inet_sk(sk); |
2251 | __be32 dest = inet->daddr; | 2256 | __be32 dest = inet->inet_daddr; |
2252 | __be32 src = inet->rcv_saddr; | 2257 | __be32 src = inet->inet_rcv_saddr; |
2253 | __u16 destp = ntohs(inet->dport); | 2258 | __u16 destp = ntohs(inet->inet_dport); |
2254 | __u16 srcp = ntohs(inet->sport); | 2259 | __u16 srcp = ntohs(inet->inet_sport); |
2255 | 2260 | ||
2256 | if (icsk->icsk_pending == ICSK_TIME_RETRANS) { | 2261 | if (icsk->icsk_pending == ICSK_TIME_RETRANS) { |
2257 | timer_active = 1; | 2262 | timer_active = 1; |
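Several tcp_ipv4.c hunks rewrite "peer->tcp_ts_stamp + TCP_PAWS_MSL >= get_seconds()" as "(u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL"; the subtraction form stays correct when the 32-bit seconds counter wraps, the addition form does not. A standalone illustration of the two comparisons around a wrap point, with made-up values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t stamp = 0xffffff00u;	/* last update, just before the wrap */
	uint32_t now   = 0x00000010u;	/* current time, 272 seconds later */
	uint32_t msl   = 60;

	/* Wrap-safe: unsigned subtraction yields the true elapsed seconds. */
	printf("elapsed=%u fresh=%d\n", now - stamp, (now - stamp) <= msl);

	/* Wrap-prone: "now" wrapped to a tiny value, so the stale stamp
	 * still looks fresh when compared the old way. */
	printf("naive fresh=%d (wrongly true)\n", stamp + msl >= now);

	return 0;
}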
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 4c03598ed924..4be22280e6b3 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c | |||
@@ -100,9 +100,9 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb, | |||
100 | struct tcp_options_received tmp_opt; | 100 | struct tcp_options_received tmp_opt; |
101 | int paws_reject = 0; | 101 | int paws_reject = 0; |
102 | 102 | ||
103 | tmp_opt.saw_tstamp = 0; | ||
104 | if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) { | 103 | if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) { |
105 | tcp_parse_options(skb, &tmp_opt, 0); | 104 | tmp_opt.tstamp_ok = 1; |
105 | tcp_parse_options(skb, &tmp_opt, 1, NULL); | ||
106 | 106 | ||
107 | if (tmp_opt.saw_tstamp) { | 107 | if (tmp_opt.saw_tstamp) { |
108 | tmp_opt.ts_recent = tcptw->tw_ts_recent; | 108 | tmp_opt.ts_recent = tcptw->tw_ts_recent; |
@@ -476,7 +476,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, | |||
476 | if (newtp->af_specific->md5_lookup(sk, newsk)) | 476 | if (newtp->af_specific->md5_lookup(sk, newsk)) |
477 | newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED; | 477 | newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED; |
478 | #endif | 478 | #endif |
479 | if (skb->len >= TCP_MIN_RCVMSS+newtp->tcp_header_len) | 479 | if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len) |
480 | newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len; | 480 | newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len; |
481 | newtp->rx_opt.mss_clamp = req->mss; | 481 | newtp->rx_opt.mss_clamp = req->mss; |
482 | TCP_ECN_openreq_child(newtp, req); | 482 | TCP_ECN_openreq_child(newtp, req); |
@@ -501,9 +501,9 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, | |||
501 | struct tcp_options_received tmp_opt; | 501 | struct tcp_options_received tmp_opt; |
502 | struct sock *child; | 502 | struct sock *child; |
503 | 503 | ||
504 | tmp_opt.saw_tstamp = 0; | 504 | if ((th->doff > (sizeof(struct tcphdr)>>2)) && (req->ts_recent)) { |
505 | if (th->doff > (sizeof(struct tcphdr)>>2)) { | 505 | tmp_opt.tstamp_ok = 1; |
506 | tcp_parse_options(skb, &tmp_opt, 0); | 506 | tcp_parse_options(skb, &tmp_opt, 1, NULL); |
507 | 507 | ||
508 | if (tmp_opt.saw_tstamp) { | 508 | if (tmp_opt.saw_tstamp) { |
509 | tmp_opt.ts_recent = req->ts_recent; | 509 | tmp_opt.ts_recent = req->ts_recent; |
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index fcd278a7080e..616c686ca253 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -464,6 +464,7 @@ static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb, | |||
464 | struct tcp_md5sig_key **md5) { | 464 | struct tcp_md5sig_key **md5) { |
465 | struct tcp_sock *tp = tcp_sk(sk); | 465 | struct tcp_sock *tp = tcp_sk(sk); |
466 | unsigned size = 0; | 466 | unsigned size = 0; |
467 | struct dst_entry *dst = __sk_dst_get(sk); | ||
467 | 468 | ||
468 | #ifdef CONFIG_TCP_MD5SIG | 469 | #ifdef CONFIG_TCP_MD5SIG |
469 | *md5 = tp->af_specific->md5_lookup(sk, sk); | 470 | *md5 = tp->af_specific->md5_lookup(sk, sk); |
@@ -487,18 +488,22 @@ static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb, | |||
487 | opts->mss = tcp_advertise_mss(sk); | 488 | opts->mss = tcp_advertise_mss(sk); |
488 | size += TCPOLEN_MSS_ALIGNED; | 489 | size += TCPOLEN_MSS_ALIGNED; |
489 | 490 | ||
490 | if (likely(sysctl_tcp_timestamps && *md5 == NULL)) { | 491 | if (likely(sysctl_tcp_timestamps && |
492 | !dst_feature(dst, RTAX_FEATURE_NO_TSTAMP) && | ||
493 | *md5 == NULL)) { | ||
491 | opts->options |= OPTION_TS; | 494 | opts->options |= OPTION_TS; |
492 | opts->tsval = TCP_SKB_CB(skb)->when; | 495 | opts->tsval = TCP_SKB_CB(skb)->when; |
493 | opts->tsecr = tp->rx_opt.ts_recent; | 496 | opts->tsecr = tp->rx_opt.ts_recent; |
494 | size += TCPOLEN_TSTAMP_ALIGNED; | 497 | size += TCPOLEN_TSTAMP_ALIGNED; |
495 | } | 498 | } |
496 | if (likely(sysctl_tcp_window_scaling)) { | 499 | if (likely(sysctl_tcp_window_scaling && |
500 | !dst_feature(dst, RTAX_FEATURE_NO_WSCALE))) { | ||
497 | opts->ws = tp->rx_opt.rcv_wscale; | 501 | opts->ws = tp->rx_opt.rcv_wscale; |
498 | opts->options |= OPTION_WSCALE; | 502 | opts->options |= OPTION_WSCALE; |
499 | size += TCPOLEN_WSCALE_ALIGNED; | 503 | size += TCPOLEN_WSCALE_ALIGNED; |
500 | } | 504 | } |
501 | if (likely(sysctl_tcp_sack)) { | 505 | if (likely(sysctl_tcp_sack && |
506 | !dst_feature(dst, RTAX_FEATURE_NO_SACK))) { | ||
502 | opts->options |= OPTION_SACK_ADVERTISE; | 507 | opts->options |= OPTION_SACK_ADVERTISE; |
503 | if (unlikely(!(OPTION_TS & opts->options))) | 508 | if (unlikely(!(OPTION_TS & opts->options))) |
504 | size += TCPOLEN_SACKPERM_ALIGNED; | 509 | size += TCPOLEN_SACKPERM_ALIGNED; |
@@ -661,8 +666,8 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, | |||
661 | 666 | ||
662 | /* Build TCP header and checksum it. */ | 667 | /* Build TCP header and checksum it. */ |
663 | th = tcp_hdr(skb); | 668 | th = tcp_hdr(skb); |
664 | th->source = inet->sport; | 669 | th->source = inet->inet_sport; |
665 | th->dest = inet->dport; | 670 | th->dest = inet->inet_dport; |
666 | th->seq = htonl(tcb->seq); | 671 | th->seq = htonl(tcb->seq); |
667 | th->ack_seq = htonl(tp->rcv_nxt); | 672 | th->ack_seq = htonl(tp->rcv_nxt); |
668 | *(((__be16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) | | 673 | *(((__be16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) | |
@@ -2315,7 +2320,9 @@ static void tcp_connect_init(struct sock *sk) | |||
2315 | * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT. | 2320 | * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT. |
2316 | */ | 2321 | */ |
2317 | tp->tcp_header_len = sizeof(struct tcphdr) + | 2322 | tp->tcp_header_len = sizeof(struct tcphdr) + |
2318 | (sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0); | 2323 | (sysctl_tcp_timestamps && |
2324 | (!dst_feature(dst, RTAX_FEATURE_NO_TSTAMP) ? | ||
2325 | TCPOLEN_TSTAMP_ALIGNED : 0)); | ||
2319 | 2326 | ||
2320 | #ifdef CONFIG_TCP_MD5SIG | 2327 | #ifdef CONFIG_TCP_MD5SIG |
2321 | if (tp->af_specific->md5_lookup(sk, sk) != NULL) | 2328 | if (tp->af_specific->md5_lookup(sk, sk) != NULL) |
@@ -2341,7 +2348,8 @@ static void tcp_connect_init(struct sock *sk) | |||
2341 | tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0), | 2348 | tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0), |
2342 | &tp->rcv_wnd, | 2349 | &tp->rcv_wnd, |
2343 | &tp->window_clamp, | 2350 | &tp->window_clamp, |
2344 | sysctl_tcp_window_scaling, | 2351 | (sysctl_tcp_window_scaling && |
2352 | !dst_feature(dst, RTAX_FEATURE_NO_WSCALE)), | ||
2345 | &rcv_wscale); | 2353 | &rcv_wscale); |
2346 | 2354 | ||
2347 | tp->rx_opt.rcv_wscale = rcv_wscale; | 2355 | tp->rx_opt.rcv_wscale = rcv_wscale; |
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c index 59f5b5e7c566..7a3cc2ffad84 100644 --- a/net/ipv4/tcp_probe.c +++ b/net/ipv4/tcp_probe.c | |||
@@ -94,7 +94,8 @@ static int jtcp_rcv_established(struct sock *sk, struct sk_buff *skb, | |||
94 | const struct inet_sock *inet = inet_sk(sk); | 94 | const struct inet_sock *inet = inet_sk(sk); |
95 | 95 | ||
96 | /* Only update if port matches */ | 96 | /* Only update if port matches */ |
97 | if ((port == 0 || ntohs(inet->dport) == port || ntohs(inet->sport) == port) | 97 | if ((port == 0 || ntohs(inet->inet_dport) == port || |
98 | ntohs(inet->inet_sport) == port) | ||
98 | && (full || tp->snd_cwnd != tcp_probe.lastcwnd)) { | 99 | && (full || tp->snd_cwnd != tcp_probe.lastcwnd)) { |
99 | 100 | ||
100 | spin_lock(&tcp_probe.lock); | 101 | spin_lock(&tcp_probe.lock); |
@@ -103,10 +104,10 @@ static int jtcp_rcv_established(struct sock *sk, struct sk_buff *skb, | |||
103 | struct tcp_log *p = tcp_probe.log + tcp_probe.head; | 104 | struct tcp_log *p = tcp_probe.log + tcp_probe.head; |
104 | 105 | ||
105 | p->tstamp = ktime_get(); | 106 | p->tstamp = ktime_get(); |
106 | p->saddr = inet->saddr; | 107 | p->saddr = inet->inet_saddr; |
107 | p->sport = inet->sport; | 108 | p->sport = inet->inet_sport; |
108 | p->daddr = inet->daddr; | 109 | p->daddr = inet->inet_daddr; |
109 | p->dport = inet->dport; | 110 | p->dport = inet->inet_dport; |
110 | p->length = skb->len; | 111 | p->length = skb->len; |
111 | p->snd_nxt = tp->snd_nxt; | 112 | p->snd_nxt = tp->snd_nxt; |
112 | p->snd_una = tp->snd_una; | 113 | p->snd_una = tp->snd_una; |
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index cdb2ca7684d4..8353a538cd4c 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c | |||
@@ -141,14 +141,14 @@ static int tcp_write_timeout(struct sock *sk) | |||
141 | 141 | ||
142 | if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { | 142 | if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { |
143 | if (icsk->icsk_retransmits) | 143 | if (icsk->icsk_retransmits) |
144 | dst_negative_advice(&sk->sk_dst_cache); | 144 | dst_negative_advice(&sk->sk_dst_cache, sk); |
145 | retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries; | 145 | retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries; |
146 | } else { | 146 | } else { |
147 | if (retransmits_timed_out(sk, sysctl_tcp_retries1)) { | 147 | if (retransmits_timed_out(sk, sysctl_tcp_retries1)) { |
148 | /* Black hole detection */ | 148 | /* Black hole detection */ |
149 | tcp_mtu_probing(icsk, sk); | 149 | tcp_mtu_probing(icsk, sk); |
150 | 150 | ||
151 | dst_negative_advice(&sk->sk_dst_cache); | 151 | dst_negative_advice(&sk->sk_dst_cache, sk); |
152 | } | 152 | } |
153 | 153 | ||
154 | retry_until = sysctl_tcp_retries2; | 154 | retry_until = sysctl_tcp_retries2; |
@@ -303,15 +303,15 @@ void tcp_retransmit_timer(struct sock *sk) | |||
303 | struct inet_sock *inet = inet_sk(sk); | 303 | struct inet_sock *inet = inet_sk(sk); |
304 | if (sk->sk_family == AF_INET) { | 304 | if (sk->sk_family == AF_INET) { |
305 | LIMIT_NETDEBUG(KERN_DEBUG "TCP: Peer %pI4:%u/%u unexpectedly shrunk window %u:%u (repaired)\n", | 305 | LIMIT_NETDEBUG(KERN_DEBUG "TCP: Peer %pI4:%u/%u unexpectedly shrunk window %u:%u (repaired)\n", |
306 | &inet->daddr, ntohs(inet->dport), | 306 | &inet->inet_daddr, ntohs(inet->inet_dport), |
307 | inet->num, tp->snd_una, tp->snd_nxt); | 307 | inet->inet_num, tp->snd_una, tp->snd_nxt); |
308 | } | 308 | } |
309 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | 309 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) |
310 | else if (sk->sk_family == AF_INET6) { | 310 | else if (sk->sk_family == AF_INET6) { |
311 | struct ipv6_pinfo *np = inet6_sk(sk); | 311 | struct ipv6_pinfo *np = inet6_sk(sk); |
312 | LIMIT_NETDEBUG(KERN_DEBUG "TCP: Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n", | 312 | LIMIT_NETDEBUG(KERN_DEBUG "TCP: Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n", |
313 | &np->daddr, ntohs(inet->dport), | 313 | &np->daddr, ntohs(inet->inet_dport), |
314 | inet->num, tp->snd_una, tp->snd_nxt); | 314 | inet->inet_num, tp->snd_una, tp->snd_nxt); |
315 | } | 315 | } |
316 | #endif | 316 | #endif |
317 | #endif | 317 | #endif |
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 0fa9f70e4b19..1eaf57567ebf 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
@@ -106,7 +106,7 @@ | |||
106 | #include <net/xfrm.h> | 106 | #include <net/xfrm.h> |
107 | #include "udp_impl.h" | 107 | #include "udp_impl.h" |
108 | 108 | ||
109 | struct udp_table udp_table; | 109 | struct udp_table udp_table __read_mostly; |
110 | EXPORT_SYMBOL(udp_table); | 110 | EXPORT_SYMBOL(udp_table); |
111 | 111 | ||
112 | int sysctl_udp_mem[3] __read_mostly; | 112 | int sysctl_udp_mem[3] __read_mostly; |
@@ -121,14 +121,16 @@ EXPORT_SYMBOL(sysctl_udp_wmem_min); | |||
121 | atomic_t udp_memory_allocated; | 121 | atomic_t udp_memory_allocated; |
122 | EXPORT_SYMBOL(udp_memory_allocated); | 122 | EXPORT_SYMBOL(udp_memory_allocated); |
123 | 123 | ||
124 | #define PORTS_PER_CHAIN (65536 / UDP_HTABLE_SIZE) | 124 | #define MAX_UDP_PORTS 65536 |
125 | #define PORTS_PER_CHAIN (MAX_UDP_PORTS / UDP_HTABLE_SIZE_MIN) | ||
125 | 126 | ||
126 | static int udp_lib_lport_inuse(struct net *net, __u16 num, | 127 | static int udp_lib_lport_inuse(struct net *net, __u16 num, |
127 | const struct udp_hslot *hslot, | 128 | const struct udp_hslot *hslot, |
128 | unsigned long *bitmap, | 129 | unsigned long *bitmap, |
129 | struct sock *sk, | 130 | struct sock *sk, |
130 | int (*saddr_comp)(const struct sock *sk1, | 131 | int (*saddr_comp)(const struct sock *sk1, |
131 | const struct sock *sk2)) | 132 | const struct sock *sk2), |
133 | unsigned int log) | ||
132 | { | 134 | { |
133 | struct sock *sk2; | 135 | struct sock *sk2; |
134 | struct hlist_nulls_node *node; | 136 | struct hlist_nulls_node *node; |
@@ -136,13 +138,13 @@ static int udp_lib_lport_inuse(struct net *net, __u16 num, | |||
136 | sk_nulls_for_each(sk2, node, &hslot->head) | 138 | sk_nulls_for_each(sk2, node, &hslot->head) |
137 | if (net_eq(sock_net(sk2), net) && | 139 | if (net_eq(sock_net(sk2), net) && |
138 | sk2 != sk && | 140 | sk2 != sk && |
139 | (bitmap || sk2->sk_hash == num) && | 141 | (bitmap || udp_sk(sk2)->udp_port_hash == num) && |
140 | (!sk2->sk_reuse || !sk->sk_reuse) && | 142 | (!sk2->sk_reuse || !sk->sk_reuse) && |
141 | (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if | 143 | (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if |
142 | || sk2->sk_bound_dev_if == sk->sk_bound_dev_if) && | 144 | || sk2->sk_bound_dev_if == sk->sk_bound_dev_if) && |
143 | (*saddr_comp)(sk, sk2)) { | 145 | (*saddr_comp)(sk, sk2)) { |
144 | if (bitmap) | 146 | if (bitmap) |
145 | __set_bit(sk2->sk_hash / UDP_HTABLE_SIZE, | 147 | __set_bit(udp_sk(sk2)->udp_port_hash >> log, |
146 | bitmap); | 148 | bitmap); |
147 | else | 149 | else |
148 | return 1; | 150 | return 1; |
@@ -150,18 +152,51 @@ static int udp_lib_lport_inuse(struct net *net, __u16 num, | |||
150 | return 0; | 152 | return 0; |
151 | } | 153 | } |
152 | 154 | ||
155 | /* | ||
156 | * Note: we still hold spinlock of primary hash chain, so no other writer | ||
157 | * can insert/delete a socket with local_port == num | ||
158 | */ | ||
159 | static int udp_lib_lport_inuse2(struct net *net, __u16 num, | ||
160 | struct udp_hslot *hslot2, | ||
161 | struct sock *sk, | ||
162 | int (*saddr_comp)(const struct sock *sk1, | ||
163 | const struct sock *sk2)) | ||
164 | { | ||
165 | struct sock *sk2; | ||
166 | struct hlist_nulls_node *node; | ||
167 | int res = 0; | ||
168 | |||
169 | spin_lock(&hslot2->lock); | ||
170 | udp_portaddr_for_each_entry(sk2, node, &hslot2->head) | ||
171 | if (net_eq(sock_net(sk2), net) && | ||
172 | sk2 != sk && | ||
173 | (udp_sk(sk2)->udp_port_hash == num) && | ||
174 | (!sk2->sk_reuse || !sk->sk_reuse) && | ||
175 | (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if | ||
176 | || sk2->sk_bound_dev_if == sk->sk_bound_dev_if) && | ||
177 | (*saddr_comp)(sk, sk2)) { | ||
178 | res = 1; | ||
179 | break; | ||
180 | } | ||
181 | spin_unlock(&hslot2->lock); | ||
182 | return res; | ||
183 | } | ||
184 | |||
153 | /** | 185 | /** |
154 | * udp_lib_get_port - UDP/-Lite port lookup for IPv4 and IPv6 | 186 | * udp_lib_get_port - UDP/-Lite port lookup for IPv4 and IPv6 |
155 | * | 187 | * |
156 | * @sk: socket struct in question | 188 | * @sk: socket struct in question |
157 | * @snum: port number to look up | 189 | * @snum: port number to look up |
158 | * @saddr_comp: AF-dependent comparison of bound local IP addresses | 190 | * @saddr_comp: AF-dependent comparison of bound local IP addresses |
191 | * @hash2_nulladdr: AF-dependent hash value in secondary hash chains, | ||
192 | * with NULL address | ||
159 | */ | 193 | */ |
160 | int udp_lib_get_port(struct sock *sk, unsigned short snum, | 194 | int udp_lib_get_port(struct sock *sk, unsigned short snum, |
161 | int (*saddr_comp)(const struct sock *sk1, | 195 | int (*saddr_comp)(const struct sock *sk1, |
162 | const struct sock *sk2)) | 196 | const struct sock *sk2), |
197 | unsigned int hash2_nulladdr) | ||
163 | { | 198 | { |
164 | struct udp_hslot *hslot; | 199 | struct udp_hslot *hslot, *hslot2; |
165 | struct udp_table *udptable = sk->sk_prot->h.udp_table; | 200 | struct udp_table *udptable = sk->sk_prot->h.udp_table; |
166 | int error = 1; | 201 | int error = 1; |
167 | struct net *net = sock_net(sk); | 202 | struct net *net = sock_net(sk); |
@@ -180,13 +215,15 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum, | |||
180 | /* | 215 | /* |
181 | * force rand to be an odd multiple of UDP_HTABLE_SIZE | 216 | * force rand to be an odd multiple of UDP_HTABLE_SIZE |
182 | */ | 217 | */ |
183 | rand = (rand | 1) * UDP_HTABLE_SIZE; | 218 | rand = (rand | 1) * (udptable->mask + 1); |
184 | for (last = first + UDP_HTABLE_SIZE; first != last; first++) { | 219 | for (last = first + udptable->mask + 1; |
185 | hslot = &udptable->hash[udp_hashfn(net, first)]; | 220 | first != last; |
221 | first++) { | ||
222 | hslot = udp_hashslot(udptable, net, first); | ||
186 | bitmap_zero(bitmap, PORTS_PER_CHAIN); | 223 | bitmap_zero(bitmap, PORTS_PER_CHAIN); |
187 | spin_lock_bh(&hslot->lock); | 224 | spin_lock_bh(&hslot->lock); |
188 | udp_lib_lport_inuse(net, snum, hslot, bitmap, sk, | 225 | udp_lib_lport_inuse(net, snum, hslot, bitmap, sk, |
189 | saddr_comp); | 226 | saddr_comp, udptable->log); |
190 | 227 | ||
191 | snum = first; | 228 | snum = first; |
192 | /* | 229 | /* |
@@ -196,7 +233,7 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum, | |||
196 | */ | 233 | */ |
197 | do { | 234 | do { |
198 | if (low <= snum && snum <= high && | 235 | if (low <= snum && snum <= high && |
199 | !test_bit(snum / UDP_HTABLE_SIZE, bitmap)) | 236 | !test_bit(snum >> udptable->log, bitmap)) |
200 | goto found; | 237 | goto found; |
201 | snum += rand; | 238 | snum += rand; |
202 | } while (snum != first); | 239 | } while (snum != first); |
@@ -204,17 +241,51 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum, | |||
204 | } | 241 | } |
205 | goto fail; | 242 | goto fail; |
206 | } else { | 243 | } else { |
207 | hslot = &udptable->hash[udp_hashfn(net, snum)]; | 244 | hslot = udp_hashslot(udptable, net, snum); |
208 | spin_lock_bh(&hslot->lock); | 245 | spin_lock_bh(&hslot->lock); |
209 | if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk, saddr_comp)) | 246 | if (hslot->count > 10) { |
247 | int exist; | ||
248 | unsigned int slot2 = udp_sk(sk)->udp_portaddr_hash ^ snum; | ||
249 | |||
250 | slot2 &= udptable->mask; | ||
251 | hash2_nulladdr &= udptable->mask; | ||
252 | |||
253 | hslot2 = udp_hashslot2(udptable, slot2); | ||
254 | if (hslot->count < hslot2->count) | ||
255 | goto scan_primary_hash; | ||
256 | |||
257 | exist = udp_lib_lport_inuse2(net, snum, hslot2, | ||
258 | sk, saddr_comp); | ||
259 | if (!exist && (hash2_nulladdr != slot2)) { | ||
260 | hslot2 = udp_hashslot2(udptable, hash2_nulladdr); | ||
261 | exist = udp_lib_lport_inuse2(net, snum, hslot2, | ||
262 | sk, saddr_comp); | ||
263 | } | ||
264 | if (exist) | ||
265 | goto fail_unlock; | ||
266 | else | ||
267 | goto found; | ||
268 | } | ||
269 | scan_primary_hash: | ||
270 | if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk, | ||
271 | saddr_comp, 0)) | ||
210 | goto fail_unlock; | 272 | goto fail_unlock; |
211 | } | 273 | } |
212 | found: | 274 | found: |
213 | inet_sk(sk)->num = snum; | 275 | inet_sk(sk)->inet_num = snum; |
214 | sk->sk_hash = snum; | 276 | udp_sk(sk)->udp_port_hash = snum; |
277 | udp_sk(sk)->udp_portaddr_hash ^= snum; | ||
215 | if (sk_unhashed(sk)) { | 278 | if (sk_unhashed(sk)) { |
216 | sk_nulls_add_node_rcu(sk, &hslot->head); | 279 | sk_nulls_add_node_rcu(sk, &hslot->head); |
280 | hslot->count++; | ||
217 | sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); | 281 | sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); |
282 | |||
283 | hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash); | ||
284 | spin_lock(&hslot2->lock); | ||
285 | hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node, | ||
286 | &hslot2->head); | ||
287 | hslot2->count++; | ||
288 | spin_unlock(&hslot2->lock); | ||
218 | } | 289 | } |
219 | error = 0; | 290 | error = 0; |
220 | fail_unlock: | 291 | fail_unlock: |
@@ -229,13 +300,26 @@ static int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2) | |||
229 | struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2); | 300 | struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2); |
230 | 301 | ||
231 | return (!ipv6_only_sock(sk2) && | 302 | return (!ipv6_only_sock(sk2) && |
232 | (!inet1->rcv_saddr || !inet2->rcv_saddr || | 303 | (!inet1->inet_rcv_saddr || !inet2->inet_rcv_saddr || |
233 | inet1->rcv_saddr == inet2->rcv_saddr)); | 304 | inet1->inet_rcv_saddr == inet2->inet_rcv_saddr)); |
305 | } | ||
306 | |||
307 | static unsigned int udp4_portaddr_hash(struct net *net, __be32 saddr, | ||
308 | unsigned int port) | ||
309 | { | ||
310 | return jhash_1word(saddr, net_hash_mix(net)) ^ port; | ||
234 | } | 311 | } |
235 | 312 | ||
236 | int udp_v4_get_port(struct sock *sk, unsigned short snum) | 313 | int udp_v4_get_port(struct sock *sk, unsigned short snum) |
237 | { | 314 | { |
238 | return udp_lib_get_port(sk, snum, ipv4_rcv_saddr_equal); | 315 | unsigned int hash2_nulladdr = |
316 | udp4_portaddr_hash(sock_net(sk), INADDR_ANY, snum); | ||
317 | unsigned int hash2_partial = | ||
318 | udp4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0); | ||
319 | |||
320 | /* precompute partial secondary hash */ | ||
321 | udp_sk(sk)->udp_portaddr_hash = hash2_partial; | ||
322 | return udp_lib_get_port(sk, snum, ipv4_rcv_saddr_equal, hash2_nulladdr); | ||
239 | } | 323 | } |
240 | 324 | ||
241 | static inline int compute_score(struct sock *sk, struct net *net, __be32 saddr, | 325 | static inline int compute_score(struct sock *sk, struct net *net, __be32 saddr, |
@@ -244,23 +328,23 @@ static inline int compute_score(struct sock *sk, struct net *net, __be32 saddr, | |||
244 | { | 328 | { |
245 | int score = -1; | 329 | int score = -1; |
246 | 330 | ||
247 | if (net_eq(sock_net(sk), net) && sk->sk_hash == hnum && | 331 | if (net_eq(sock_net(sk), net) && udp_sk(sk)->udp_port_hash == hnum && |
248 | !ipv6_only_sock(sk)) { | 332 | !ipv6_only_sock(sk)) { |
249 | struct inet_sock *inet = inet_sk(sk); | 333 | struct inet_sock *inet = inet_sk(sk); |
250 | 334 | ||
251 | score = (sk->sk_family == PF_INET ? 1 : 0); | 335 | score = (sk->sk_family == PF_INET ? 1 : 0); |
252 | if (inet->rcv_saddr) { | 336 | if (inet->inet_rcv_saddr) { |
253 | if (inet->rcv_saddr != daddr) | 337 | if (inet->inet_rcv_saddr != daddr) |
254 | return -1; | 338 | return -1; |
255 | score += 2; | 339 | score += 2; |
256 | } | 340 | } |
257 | if (inet->daddr) { | 341 | if (inet->inet_daddr) { |
258 | if (inet->daddr != saddr) | 342 | if (inet->inet_daddr != saddr) |
259 | return -1; | 343 | return -1; |
260 | score += 2; | 344 | score += 2; |
261 | } | 345 | } |
262 | if (inet->dport) { | 346 | if (inet->inet_dport) { |
263 | if (inet->dport != sport) | 347 | if (inet->inet_dport != sport) |
264 | return -1; | 348 | return -1; |
265 | score += 2; | 349 | score += 2; |
266 | } | 350 | } |
@@ -273,6 +357,89 @@ static inline int compute_score(struct sock *sk, struct net *net, __be32 saddr, | |||
273 | return score; | 357 | return score; |
274 | } | 358 | } |
275 | 359 | ||
360 | /* | ||
361 | * In this second variant, we check (daddr, dport) matches (inet_rcv_saddr, inet_num) | ||
362 | */ | ||
363 | #define SCORE2_MAX (1 + 2 + 2 + 2) | ||
364 | static inline int compute_score2(struct sock *sk, struct net *net, | ||
365 | __be32 saddr, __be16 sport, | ||
366 | __be32 daddr, unsigned int hnum, int dif) | ||
367 | { | ||
368 | int score = -1; | ||
369 | |||
370 | if (net_eq(sock_net(sk), net) && !ipv6_only_sock(sk)) { | ||
371 | struct inet_sock *inet = inet_sk(sk); | ||
372 | |||
373 | if (inet->inet_rcv_saddr != daddr) | ||
374 | return -1; | ||
375 | if (inet->inet_num != hnum) | ||
376 | return -1; | ||
377 | |||
378 | score = (sk->sk_family == PF_INET ? 1 : 0); | ||
379 | if (inet->inet_daddr) { | ||
380 | if (inet->inet_daddr != saddr) | ||
381 | return -1; | ||
382 | score += 2; | ||
383 | } | ||
384 | if (inet->inet_dport) { | ||
385 | if (inet->inet_dport != sport) | ||
386 | return -1; | ||
387 | score += 2; | ||
388 | } | ||
389 | if (sk->sk_bound_dev_if) { | ||
390 | if (sk->sk_bound_dev_if != dif) | ||
391 | return -1; | ||
392 | score += 2; | ||
393 | } | ||
394 | } | ||
395 | return score; | ||
396 | } | ||
397 | |||
398 | |||
399 | /* called with rcu_read_lock() */ | ||
400 | static struct sock *udp4_lib_lookup2(struct net *net, | ||
401 | __be32 saddr, __be16 sport, | ||
402 | __be32 daddr, unsigned int hnum, int dif, | ||
403 | struct udp_hslot *hslot2, unsigned int slot2) | ||
404 | { | ||
405 | struct sock *sk, *result; | ||
406 | struct hlist_nulls_node *node; | ||
407 | int score, badness; | ||
408 | |||
409 | begin: | ||
410 | result = NULL; | ||
411 | badness = -1; | ||
412 | udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) { | ||
413 | score = compute_score2(sk, net, saddr, sport, | ||
414 | daddr, hnum, dif); | ||
415 | if (score > badness) { | ||
416 | result = sk; | ||
417 | badness = score; | ||
418 | if (score == SCORE2_MAX) | ||
419 | goto exact_match; | ||
420 | } | ||
421 | } | ||
422 | /* | ||
423 | * if the nulls value we got at the end of this lookup is | ||
424 | * not the expected one, we must restart lookup. | ||
425 | * We probably met an item that was moved to another chain. | ||
426 | */ | ||
427 | if (get_nulls_value(node) != slot2) | ||
428 | goto begin; | ||
429 | |||
430 | if (result) { | ||
431 | exact_match: | ||
432 | if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt))) | ||
433 | result = NULL; | ||
434 | else if (unlikely(compute_score2(result, net, saddr, sport, | ||
435 | daddr, hnum, dif) < badness)) { | ||
436 | sock_put(result); | ||
437 | goto begin; | ||
438 | } | ||
439 | } | ||
440 | return result; | ||
441 | } | ||
442 | |||
276 | /* UDP is nearly always wildcards out the wazoo, it makes no sense to try | 443 | /* UDP is nearly always wildcards out the wazoo, it makes no sense to try |
277 | * harder than this. -DaveM | 444 | * harder than this. -DaveM |
278 | */ | 445 | */ |
@@ -283,11 +450,35 @@ static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, | |||
283 | struct sock *sk, *result; | 450 | struct sock *sk, *result; |
284 | struct hlist_nulls_node *node; | 451 | struct hlist_nulls_node *node; |
285 | unsigned short hnum = ntohs(dport); | 452 | unsigned short hnum = ntohs(dport); |
286 | unsigned int hash = udp_hashfn(net, hnum); | 453 | unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask); |
287 | struct udp_hslot *hslot = &udptable->hash[hash]; | 454 | struct udp_hslot *hslot2, *hslot = &udptable->hash[slot]; |
288 | int score, badness; | 455 | int score, badness; |
289 | 456 | ||
290 | rcu_read_lock(); | 457 | rcu_read_lock(); |
458 | if (hslot->count > 10) { | ||
459 | hash2 = udp4_portaddr_hash(net, daddr, hnum); | ||
460 | slot2 = hash2 & udptable->mask; | ||
461 | hslot2 = &udptable->hash2[slot2]; | ||
462 | if (hslot->count < hslot2->count) | ||
463 | goto begin; | ||
464 | |||
465 | result = udp4_lib_lookup2(net, saddr, sport, | ||
466 | daddr, hnum, dif, | ||
467 | hslot2, slot2); | ||
468 | if (!result) { | ||
469 | hash2 = udp4_portaddr_hash(net, INADDR_ANY, hnum); | ||
470 | slot2 = hash2 & udptable->mask; | ||
471 | hslot2 = &udptable->hash2[slot2]; | ||
472 | if (hslot->count < hslot2->count) | ||
473 | goto begin; | ||
474 | |||
475 | result = udp4_lib_lookup2(net, INADDR_ANY, sport, | ||
476 | daddr, hnum, dif, | ||
477 | hslot2, slot2); | ||
478 | } | ||
479 | rcu_read_unlock(); | ||
480 | return result; | ||
481 | } | ||
291 | begin: | 482 | begin: |
292 | result = NULL; | 483 | result = NULL; |
293 | badness = -1; | 484 | badness = -1; |
@@ -304,7 +495,7 @@ begin: | |||
304 | * not the expected one, we must restart lookup. | 495 | * not the expected one, we must restart lookup. |
305 | * We probably met an item that was moved to another chain. | 496 | * We probably met an item that was moved to another chain. |
306 | */ | 497 | */ |
307 | if (get_nulls_value(node) != hash) | 498 | if (get_nulls_value(node) != slot) |
308 | goto begin; | 499 | goto begin; |
309 | 500 | ||
310 | if (result) { | 501 | if (result) { |
@@ -355,10 +546,11 @@ static inline struct sock *udp_v4_mcast_next(struct net *net, struct sock *sk, | |||
355 | struct inet_sock *inet = inet_sk(s); | 546 | struct inet_sock *inet = inet_sk(s); |
356 | 547 | ||
357 | if (!net_eq(sock_net(s), net) || | 548 | if (!net_eq(sock_net(s), net) || |
358 | s->sk_hash != hnum || | 549 | udp_sk(s)->udp_port_hash != hnum || |
359 | (inet->daddr && inet->daddr != rmt_addr) || | 550 | (inet->inet_daddr && inet->inet_daddr != rmt_addr) || |
360 | (inet->dport != rmt_port && inet->dport) || | 551 | (inet->inet_dport != rmt_port && inet->inet_dport) || |
361 | (inet->rcv_saddr && inet->rcv_saddr != loc_addr) || | 552 | (inet->inet_rcv_saddr && |
553 | inet->inet_rcv_saddr != loc_addr) || | ||
362 | ipv6_only_sock(s) || | 554 | ipv6_only_sock(s) || |
363 | (s->sk_bound_dev_if && s->sk_bound_dev_if != dif)) | 555 | (s->sk_bound_dev_if && s->sk_bound_dev_if != dif)) |
364 | continue; | 556 | continue; |
@@ -642,14 +834,14 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
642 | } else { | 834 | } else { |
643 | if (sk->sk_state != TCP_ESTABLISHED) | 835 | if (sk->sk_state != TCP_ESTABLISHED) |
644 | return -EDESTADDRREQ; | 836 | return -EDESTADDRREQ; |
645 | daddr = inet->daddr; | 837 | daddr = inet->inet_daddr; |
646 | dport = inet->dport; | 838 | dport = inet->inet_dport; |
647 | /* Open fast path for connected socket. | 839 | /* Open fast path for connected socket. |
648 | Route will not be used, if at least one option is set. | 840 | Route will not be used, if at least one option is set. |
649 | */ | 841 | */ |
650 | connected = 1; | 842 | connected = 1; |
651 | } | 843 | } |
652 | ipc.addr = inet->saddr; | 844 | ipc.addr = inet->inet_saddr; |
653 | 845 | ||
654 | ipc.oif = sk->sk_bound_dev_if; | 846 | ipc.oif = sk->sk_bound_dev_if; |
655 | err = sock_tx_timestamp(msg, sk, &ipc.shtx); | 847 | err = sock_tx_timestamp(msg, sk, &ipc.shtx); |
@@ -704,7 +896,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
704 | .proto = sk->sk_protocol, | 896 | .proto = sk->sk_protocol, |
705 | .flags = inet_sk_flowi_flags(sk), | 897 | .flags = inet_sk_flowi_flags(sk), |
706 | .uli_u = { .ports = | 898 | .uli_u = { .ports = |
707 | { .sport = inet->sport, | 899 | { .sport = inet->inet_sport, |
708 | .dport = dport } } }; | 900 | .dport = dport } } }; |
709 | struct net *net = sock_net(sk); | 901 | struct net *net = sock_net(sk); |
710 | 902 | ||
@@ -748,7 +940,7 @@ back_from_confirm: | |||
748 | inet->cork.fl.fl4_dst = daddr; | 940 | inet->cork.fl.fl4_dst = daddr; |
749 | inet->cork.fl.fl_ip_dport = dport; | 941 | inet->cork.fl.fl_ip_dport = dport; |
750 | inet->cork.fl.fl4_src = saddr; | 942 | inet->cork.fl.fl4_src = saddr; |
751 | inet->cork.fl.fl_ip_sport = inet->sport; | 943 | inet->cork.fl.fl_ip_sport = inet->inet_sport; |
752 | up->pending = AF_INET; | 944 | up->pending = AF_INET; |
753 | 945 | ||
754 | do_append_data: | 946 | do_append_data: |
@@ -862,6 +1054,7 @@ static unsigned int first_packet_length(struct sock *sk) | |||
862 | udp_lib_checksum_complete(skb)) { | 1054 | udp_lib_checksum_complete(skb)) { |
863 | UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, | 1055 | UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, |
864 | IS_UDPLITE(sk)); | 1056 | IS_UDPLITE(sk)); |
1057 | atomic_inc(&sk->sk_drops); | ||
865 | __skb_unlink(skb, rcvq); | 1058 | __skb_unlink(skb, rcvq); |
866 | __skb_queue_tail(&list_kill, skb); | 1059 | __skb_queue_tail(&list_kill, skb); |
867 | } | 1060 | } |
@@ -982,7 +1175,7 @@ try_again: | |||
982 | UDP_INC_STATS_USER(sock_net(sk), | 1175 | UDP_INC_STATS_USER(sock_net(sk), |
983 | UDP_MIB_INDATAGRAMS, is_udplite); | 1176 | UDP_MIB_INDATAGRAMS, is_udplite); |
984 | 1177 | ||
985 | sock_recv_timestamp(msg, sk, skb); | 1178 | sock_recv_ts_and_drops(msg, sk, skb); |
986 | 1179 | ||
987 | /* Copy the address. */ | 1180 | /* Copy the address. */ |
988 | if (sin) { | 1181 | if (sin) { |
@@ -1023,15 +1216,15 @@ int udp_disconnect(struct sock *sk, int flags) | |||
1023 | */ | 1216 | */ |
1024 | 1217 | ||
1025 | sk->sk_state = TCP_CLOSE; | 1218 | sk->sk_state = TCP_CLOSE; |
1026 | inet->daddr = 0; | 1219 | inet->inet_daddr = 0; |
1027 | inet->dport = 0; | 1220 | inet->inet_dport = 0; |
1028 | sk->sk_bound_dev_if = 0; | 1221 | sk->sk_bound_dev_if = 0; |
1029 | if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) | 1222 | if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) |
1030 | inet_reset_saddr(sk); | 1223 | inet_reset_saddr(sk); |
1031 | 1224 | ||
1032 | if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) { | 1225 | if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) { |
1033 | sk->sk_prot->unhash(sk); | 1226 | sk->sk_prot->unhash(sk); |
1034 | inet->sport = 0; | 1227 | inet->inet_sport = 0; |
1035 | } | 1228 | } |
1036 | sk_dst_reset(sk); | 1229 | sk_dst_reset(sk); |
1037 | return 0; | 1230 | return 0; |
@@ -1042,13 +1235,22 @@ void udp_lib_unhash(struct sock *sk) | |||
1042 | { | 1235 | { |
1043 | if (sk_hashed(sk)) { | 1236 | if (sk_hashed(sk)) { |
1044 | struct udp_table *udptable = sk->sk_prot->h.udp_table; | 1237 | struct udp_table *udptable = sk->sk_prot->h.udp_table; |
1045 | unsigned int hash = udp_hashfn(sock_net(sk), sk->sk_hash); | 1238 | struct udp_hslot *hslot, *hslot2; |
1046 | struct udp_hslot *hslot = &udptable->hash[hash]; | 1239 | |
1240 | hslot = udp_hashslot(udptable, sock_net(sk), | ||
1241 | udp_sk(sk)->udp_port_hash); | ||
1242 | hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash); | ||
1047 | 1243 | ||
1048 | spin_lock_bh(&hslot->lock); | 1244 | spin_lock_bh(&hslot->lock); |
1049 | if (sk_nulls_del_node_init_rcu(sk)) { | 1245 | if (sk_nulls_del_node_init_rcu(sk)) { |
1050 | inet_sk(sk)->num = 0; | 1246 | hslot->count--; |
1247 | inet_sk(sk)->inet_num = 0; | ||
1051 | sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); | 1248 | sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); |
1249 | |||
1250 | spin_lock(&hslot2->lock); | ||
1251 | hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_portaddr_node); | ||
1252 | hslot2->count--; | ||
1253 | spin_unlock(&hslot2->lock); | ||
1052 | } | 1254 | } |
1053 | spin_unlock_bh(&hslot->lock); | 1255 | spin_unlock_bh(&hslot->lock); |
1054 | } | 1256 | } |
@@ -1057,25 +1259,22 @@ EXPORT_SYMBOL(udp_lib_unhash); | |||
1057 | 1259 | ||
1058 | static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) | 1260 | static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) |
1059 | { | 1261 | { |
1060 | int is_udplite = IS_UDPLITE(sk); | 1262 | int rc = sock_queue_rcv_skb(sk, skb); |
1061 | int rc; | 1263 | |
1264 | if (rc < 0) { | ||
1265 | int is_udplite = IS_UDPLITE(sk); | ||
1062 | 1266 | ||
1063 | if ((rc = sock_queue_rcv_skb(sk, skb)) < 0) { | ||
1064 | /* Note that an ENOMEM error is charged twice */ | 1267 | /* Note that an ENOMEM error is charged twice */ |
1065 | if (rc == -ENOMEM) { | 1268 | if (rc == -ENOMEM) |
1066 | UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS, | 1269 | UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS, |
1067 | is_udplite); | 1270 | is_udplite); |
1068 | atomic_inc(&sk->sk_drops); | 1271 | UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); |
1069 | } | 1272 | kfree_skb(skb); |
1070 | goto drop; | 1273 | return -1; |
1071 | } | 1274 | } |
1072 | 1275 | ||
1073 | return 0; | 1276 | return 0; |
1074 | 1277 | ||
1075 | drop: | ||
1076 | UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); | ||
1077 | kfree_skb(skb); | ||
1078 | return -1; | ||
1079 | } | 1278 | } |
1080 | 1279 | ||
1081 | /* returns: | 1280 | /* returns: |
@@ -1182,53 +1381,88 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) | |||
1182 | 1381 | ||
1183 | drop: | 1382 | drop: |
1184 | UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); | 1383 | UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); |
1384 | atomic_inc(&sk->sk_drops); | ||
1185 | kfree_skb(skb); | 1385 | kfree_skb(skb); |
1186 | return -1; | 1386 | return -1; |
1187 | } | 1387 | } |
1188 | 1388 | ||
1389 | |||
1390 | static void flush_stack(struct sock **stack, unsigned int count, | ||
1391 | struct sk_buff *skb, unsigned int final) | ||
1392 | { | ||
1393 | unsigned int i; | ||
1394 | struct sk_buff *skb1 = NULL; | ||
1395 | struct sock *sk; | ||
1396 | |||
1397 | for (i = 0; i < count; i++) { | ||
1398 | sk = stack[i]; | ||
1399 | if (likely(skb1 == NULL)) | ||
1400 | skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC); | ||
1401 | |||
1402 | if (!skb1) { | ||
1403 | atomic_inc(&sk->sk_drops); | ||
1404 | UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS, | ||
1405 | IS_UDPLITE(sk)); | ||
1406 | UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, | ||
1407 | IS_UDPLITE(sk)); | ||
1408 | } | ||
1409 | |||
1410 | if (skb1 && udp_queue_rcv_skb(sk, skb1) <= 0) | ||
1411 | skb1 = NULL; | ||
1412 | } | ||
1413 | if (unlikely(skb1)) | ||
1414 | kfree_skb(skb1); | ||
1415 | } | ||
1416 | |||
1189 | /* | 1417 | /* |
1190 | * Multicasts and broadcasts go to each listener. | 1418 | * Multicasts and broadcasts go to each listener. |
1191 | * | 1419 | * |
1192 | * Note: called only from the BH handler context, | 1420 | * Note: called only from the BH handler context. |
1193 | * so we don't need to lock the hashes. | ||
1194 | */ | 1421 | */ |
1195 | static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb, | 1422 | static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb, |
1196 | struct udphdr *uh, | 1423 | struct udphdr *uh, |
1197 | __be32 saddr, __be32 daddr, | 1424 | __be32 saddr, __be32 daddr, |
1198 | struct udp_table *udptable) | 1425 | struct udp_table *udptable) |
1199 | { | 1426 | { |
1200 | struct sock *sk; | 1427 | struct sock *sk, *stack[256 / sizeof(struct sock *)]; |
1201 | struct udp_hslot *hslot = &udptable->hash[udp_hashfn(net, ntohs(uh->dest))]; | 1428 | struct udp_hslot *hslot = udp_hashslot(udptable, net, ntohs(uh->dest)); |
1202 | int dif; | 1429 | int dif; |
1430 | unsigned int i, count = 0; | ||
1203 | 1431 | ||
1204 | spin_lock(&hslot->lock); | 1432 | spin_lock(&hslot->lock); |
1205 | sk = sk_nulls_head(&hslot->head); | 1433 | sk = sk_nulls_head(&hslot->head); |
1206 | dif = skb->dev->ifindex; | 1434 | dif = skb->dev->ifindex; |
1207 | sk = udp_v4_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif); | 1435 | sk = udp_v4_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif); |
1208 | if (sk) { | 1436 | while (sk) { |
1209 | struct sock *sknext = NULL; | 1437 | stack[count++] = sk; |
1210 | 1438 | sk = udp_v4_mcast_next(net, sk_nulls_next(sk), uh->dest, | |
1211 | do { | 1439 | daddr, uh->source, saddr, dif); |
1212 | struct sk_buff *skb1 = skb; | 1440 | if (unlikely(count == ARRAY_SIZE(stack))) { |
1213 | 1441 | if (!sk) | |
1214 | sknext = udp_v4_mcast_next(net, sk_nulls_next(sk), uh->dest, | 1442 | break; |
1215 | daddr, uh->source, saddr, | 1443 | flush_stack(stack, count, skb, ~0); |
1216 | dif); | 1444 | count = 0; |
1217 | if (sknext) | 1445 | } |
1218 | skb1 = skb_clone(skb, GFP_ATOMIC); | 1446 | } |
1219 | 1447 | /* | |
1220 | if (skb1) { | 1448 | * before releasing chain lock, we must take a reference on sockets |
1221 | int ret = udp_queue_rcv_skb(sk, skb1); | 1449 | */ |
1222 | if (ret > 0) | 1450 | for (i = 0; i < count; i++) |
1223 | /* we should probably re-process instead | 1451 | sock_hold(stack[i]); |
1224 | * of dropping packets here. */ | 1452 | |
1225 | kfree_skb(skb1); | ||
1226 | } | ||
1227 | sk = sknext; | ||
1228 | } while (sknext); | ||
1229 | } else | ||
1230 | consume_skb(skb); | ||
1231 | spin_unlock(&hslot->lock); | 1453 | spin_unlock(&hslot->lock); |
1454 | |||
1455 | /* | ||
1456 | * do the slow work with no lock held | ||
1457 | */ | ||
1458 | if (count) { | ||
1459 | flush_stack(stack, count, skb, count - 1); | ||
1460 | |||
1461 | for (i = 0; i < count; i++) | ||
1462 | sock_put(stack[i]); | ||
1463 | } else { | ||
1464 | kfree_skb(skb); | ||
1465 | } | ||
1232 | return 0; | 1466 | return 0; |
1233 | } | 1467 | } |
1234 | 1468 | ||
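The multicast-delivery rewrite in the hunk above batches its work: matching sockets are collected into a bounded on-stack array while the chain lock is held, a reference is taken on each, and only then is the lock dropped and the skb cloned and queued, with the last receiver consuming the original skb instead of a clone. A simplified user-space sketch of that batching shape follows (toy types, no locking or reference counting; the kernel additionally makes sure the final batch never loses the original skb when the array fills exactly):

    #include <stdio.h>

    #define STACK_SLOTS 8

    /* Deliver msg to receivers 0..count-1; the receiver at index 'final'
     * gets the original buffer, everyone else a copy (cf. flush_stack()). */
    static void flush(const int *rx, unsigned int count, const char *msg,
                      unsigned int final)
    {
        unsigned int i;

        for (i = 0; i < count; i++)
            printf("rx %d gets %s of \"%s\"\n", rx[i],
                   i == final ? "the original" : "a copy", msg);
    }

    int main(void)
    {
        int stack[STACK_SLOTS];
        unsigned int count = 0;
        int rx;

        /* "Scan the chain": pretend receivers 0..9 all match. */
        for (rx = 0; rx < 10; rx++) {
            stack[count++] = rx;
            if (count == STACK_SLOTS) {
                /* array full: flush early, everything is a copy (final = ~0) */
                flush(stack, count, "hello", ~0u);
                count = 0;
            }
        }
        if (count)      /* final batch: last receiver takes the original */
            flush(stack, count, "hello", count - 1);
        return 0;
    }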
@@ -1620,9 +1854,14 @@ static struct sock *udp_get_first(struct seq_file *seq, int start) | |||
1620 | struct udp_iter_state *state = seq->private; | 1854 | struct udp_iter_state *state = seq->private; |
1621 | struct net *net = seq_file_net(seq); | 1855 | struct net *net = seq_file_net(seq); |
1622 | 1856 | ||
1623 | for (state->bucket = start; state->bucket < UDP_HTABLE_SIZE; ++state->bucket) { | 1857 | for (state->bucket = start; state->bucket <= state->udp_table->mask; |
1858 | ++state->bucket) { | ||
1624 | struct hlist_nulls_node *node; | 1859 | struct hlist_nulls_node *node; |
1625 | struct udp_hslot *hslot = &state->udp_table->hash[state->bucket]; | 1860 | struct udp_hslot *hslot = &state->udp_table->hash[state->bucket]; |
1861 | |||
1862 | if (hlist_nulls_empty(&hslot->head)) | ||
1863 | continue; | ||
1864 | |||
1626 | spin_lock_bh(&hslot->lock); | 1865 | spin_lock_bh(&hslot->lock); |
1627 | sk_nulls_for_each(sk, node, &hslot->head) { | 1866 | sk_nulls_for_each(sk, node, &hslot->head) { |
1628 | if (!net_eq(sock_net(sk), net)) | 1867 | if (!net_eq(sock_net(sk), net)) |
@@ -1647,7 +1886,7 @@ static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk) | |||
1647 | } while (sk && (!net_eq(sock_net(sk), net) || sk->sk_family != state->family)); | 1886 | } while (sk && (!net_eq(sock_net(sk), net) || sk->sk_family != state->family)); |
1648 | 1887 | ||
1649 | if (!sk) { | 1888 | if (!sk) { |
1650 | if (state->bucket < UDP_HTABLE_SIZE) | 1889 | if (state->bucket <= state->udp_table->mask) |
1651 | spin_unlock_bh(&state->udp_table->hash[state->bucket].lock); | 1890 | spin_unlock_bh(&state->udp_table->hash[state->bucket].lock); |
1652 | return udp_get_first(seq, state->bucket + 1); | 1891 | return udp_get_first(seq, state->bucket + 1); |
1653 | } | 1892 | } |
@@ -1667,7 +1906,7 @@ static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos) | |||
1667 | static void *udp_seq_start(struct seq_file *seq, loff_t *pos) | 1906 | static void *udp_seq_start(struct seq_file *seq, loff_t *pos) |
1668 | { | 1907 | { |
1669 | struct udp_iter_state *state = seq->private; | 1908 | struct udp_iter_state *state = seq->private; |
1670 | state->bucket = UDP_HTABLE_SIZE; | 1909 | state->bucket = MAX_UDP_PORTS; |
1671 | 1910 | ||
1672 | return *pos ? udp_get_idx(seq, *pos-1) : SEQ_START_TOKEN; | 1911 | return *pos ? udp_get_idx(seq, *pos-1) : SEQ_START_TOKEN; |
1673 | } | 1912 | } |
@@ -1689,7 +1928,7 @@ static void udp_seq_stop(struct seq_file *seq, void *v) | |||
1689 | { | 1928 | { |
1690 | struct udp_iter_state *state = seq->private; | 1929 | struct udp_iter_state *state = seq->private; |
1691 | 1930 | ||
1692 | if (state->bucket < UDP_HTABLE_SIZE) | 1931 | if (state->bucket <= state->udp_table->mask) |
1693 | spin_unlock_bh(&state->udp_table->hash[state->bucket].lock); | 1932 | spin_unlock_bh(&state->udp_table->hash[state->bucket].lock); |
1694 | } | 1933 | } |
1695 | 1934 | ||
@@ -1744,12 +1983,12 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f, | |||
1744 | int bucket, int *len) | 1983 | int bucket, int *len) |
1745 | { | 1984 | { |
1746 | struct inet_sock *inet = inet_sk(sp); | 1985 | struct inet_sock *inet = inet_sk(sp); |
1747 | __be32 dest = inet->daddr; | 1986 | __be32 dest = inet->inet_daddr; |
1748 | __be32 src = inet->rcv_saddr; | 1987 | __be32 src = inet->inet_rcv_saddr; |
1749 | __u16 destp = ntohs(inet->dport); | 1988 | __u16 destp = ntohs(inet->inet_dport); |
1750 | __u16 srcp = ntohs(inet->sport); | 1989 | __u16 srcp = ntohs(inet->inet_sport); |
1751 | 1990 | ||
1752 | seq_printf(f, "%4d: %08X:%04X %08X:%04X" | 1991 | seq_printf(f, "%5d: %08X:%04X %08X:%04X" |
1753 | " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d%n", | 1992 | " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d%n", |
1754 | bucket, src, srcp, dest, destp, sp->sk_state, | 1993 | bucket, src, srcp, dest, destp, sp->sk_state, |
1755 | sk_wmem_alloc_get(sp), | 1994 | sk_wmem_alloc_get(sp), |
@@ -1815,21 +2054,60 @@ void udp4_proc_exit(void) | |||
1815 | } | 2054 | } |
1816 | #endif /* CONFIG_PROC_FS */ | 2055 | #endif /* CONFIG_PROC_FS */ |
1817 | 2056 | ||
1818 | void __init udp_table_init(struct udp_table *table) | 2057 | static __initdata unsigned long uhash_entries; |
2058 | static int __init set_uhash_entries(char *str) | ||
1819 | { | 2059 | { |
1820 | int i; | 2060 | if (!str) |
2061 | return 0; | ||
2062 | uhash_entries = simple_strtoul(str, &str, 0); | ||
2063 | if (uhash_entries && uhash_entries < UDP_HTABLE_SIZE_MIN) | ||
2064 | uhash_entries = UDP_HTABLE_SIZE_MIN; | ||
2065 | return 1; | ||
2066 | } | ||
2067 | __setup("uhash_entries=", set_uhash_entries); | ||
1821 | 2068 | ||
1822 | for (i = 0; i < UDP_HTABLE_SIZE; i++) { | 2069 | void __init udp_table_init(struct udp_table *table, const char *name) |
2070 | { | ||
2071 | unsigned int i; | ||
2072 | |||
2073 | if (!CONFIG_BASE_SMALL) | ||
2074 | table->hash = alloc_large_system_hash(name, | ||
2075 | 2 * sizeof(struct udp_hslot), | ||
2076 | uhash_entries, | ||
2077 | 21, /* one slot per 2 MB */ | ||
2078 | 0, | ||
2079 | &table->log, | ||
2080 | &table->mask, | ||
2081 | 64 * 1024); | ||
2082 | /* | ||
2083 | * Make sure hash table has the minimum size | ||
2084 | */ | ||
2085 | if (CONFIG_BASE_SMALL || table->mask < UDP_HTABLE_SIZE_MIN - 1) { | ||
2086 | table->hash = kmalloc(UDP_HTABLE_SIZE_MIN * | ||
2087 | 2 * sizeof(struct udp_hslot), GFP_KERNEL); | ||
2088 | if (!table->hash) | ||
2089 | panic(name); | ||
2090 | table->log = ilog2(UDP_HTABLE_SIZE_MIN); | ||
2091 | table->mask = UDP_HTABLE_SIZE_MIN - 1; | ||
2092 | } | ||
2093 | table->hash2 = table->hash + (table->mask + 1); | ||
2094 | for (i = 0; i <= table->mask; i++) { | ||
1823 | INIT_HLIST_NULLS_HEAD(&table->hash[i].head, i); | 2095 | INIT_HLIST_NULLS_HEAD(&table->hash[i].head, i); |
2096 | table->hash[i].count = 0; | ||
1824 | spin_lock_init(&table->hash[i].lock); | 2097 | spin_lock_init(&table->hash[i].lock); |
1825 | } | 2098 | } |
2099 | for (i = 0; i <= table->mask; i++) { | ||
2100 | INIT_HLIST_NULLS_HEAD(&table->hash2[i].head, i); | ||
2101 | table->hash2[i].count = 0; | ||
2102 | spin_lock_init(&table->hash2[i].lock); | ||
2103 | } | ||
1826 | } | 2104 | } |
1827 | 2105 | ||
1828 | void __init udp_init(void) | 2106 | void __init udp_init(void) |
1829 | { | 2107 | { |
1830 | unsigned long nr_pages, limit; | 2108 | unsigned long nr_pages, limit; |
1831 | 2109 | ||
1832 | udp_table_init(&udp_table); | 2110 | udp_table_init(&udp_table, "UDP"); |
1833 | /* Set the pressure threshold up by the same strategy of TCP. It is a | 2111 | /* Set the pressure threshold up by the same strategy of TCP. It is a |
1834 | * fraction of global memory that is up to 1/2 at 256 MB, decreasing | 2112 | * fraction of global memory that is up to 1/2 at 256 MB, decreasing |
1835 | * toward zero with the amount of memory, with a floor of 128 pages. | 2113 | * toward zero with the amount of memory, with a floor of 128 pages. |
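Taken together, the udp.c changes above replace the single, port-keyed hash with two tables sized at boot (uhash_entries=, with UDP_HTABLE_SIZE_MIN as the floor): a primary table indexed by local port and a secondary table indexed by a hash of (local address, local port). Both bind and lookup switch to the secondary chains once a primary slot holds more than ten sockets, preferring whichever chain is shorter. A self-contained user-space sketch of the slot selection is below; mix32() is a stand-in for the kernel's jhash_1word(), and the seed and table size are arbitrary.

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for jhash_1word(saddr, net_hash_mix(net)); not the real jhash. */
    static uint32_t mix32(uint32_t v, uint32_t seed)
    {
        v ^= seed;
        v *= 0x9e3779b1u;
        return v ^ (v >> 16);
    }

    /* Primary slot: local port only, as udp_hashslot() does. */
    static unsigned int slot1(uint16_t port, uint32_t mask)
    {
        return port & mask;
    }

    /* Secondary slot: (local address, port), as udp4_portaddr_hash() does.
     * The port is XORed in last, which is what lets udp_lib_get_port()
     * precompute the address part before the port is chosen. */
    static unsigned int slot2(uint32_t saddr, uint16_t port,
                              uint32_t net_seed, uint32_t mask)
    {
        return (mix32(saddr, net_seed) ^ port) & mask;
    }

    int main(void)
    {
        const uint32_t mask = 255;              /* 256-slot tables */
        const uint32_t addr = 0xc0a80001;       /* 192.168.0.1 */

        printf("primary slot for port 53:          %u\n", slot1(53, mask));
        printf("secondary slot for 192.168.0.1:53: %u\n",
               slot2(addr, 53, 0x1234abcdu, mask));
        return 0;
    }

The same fallback appears twice in the diff: __udp4_lib_lookup() tries the (daddr, port) chain and then the (INADDR_ANY, port) chain, and udp_lib_get_port() checks both the bound-address chain and the hash2_nulladdr chain before declaring a port free.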
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c index 95248d7f75ec..66f79513f4a5 100644 --- a/net/ipv4/udplite.c +++ b/net/ipv4/udplite.c | |||
@@ -12,7 +12,7 @@ | |||
12 | */ | 12 | */ |
13 | #include "udp_impl.h" | 13 | #include "udp_impl.h" |
14 | 14 | ||
15 | struct udp_table udplite_table; | 15 | struct udp_table udplite_table __read_mostly; |
16 | EXPORT_SYMBOL(udplite_table); | 16 | EXPORT_SYMBOL(udplite_table); |
17 | 17 | ||
18 | static int udplite_rcv(struct sk_buff *skb) | 18 | static int udplite_rcv(struct sk_buff *skb) |
@@ -64,7 +64,6 @@ static struct inet_protosw udplite4_protosw = { | |||
64 | .protocol = IPPROTO_UDPLITE, | 64 | .protocol = IPPROTO_UDPLITE, |
65 | .prot = &udplite_prot, | 65 | .prot = &udplite_prot, |
66 | .ops = &inet_dgram_ops, | 66 | .ops = &inet_dgram_ops, |
67 | .capability = -1, | ||
68 | .no_check = 0, /* must checksum (RFC 3828) */ | 67 | .no_check = 0, /* must checksum (RFC 3828) */ |
69 | .flags = INET_PROTOSW_PERMANENT, | 68 | .flags = INET_PROTOSW_PERMANENT, |
70 | }; | 69 | }; |
@@ -110,7 +109,7 @@ static inline int udplite4_proc_init(void) | |||
110 | 109 | ||
111 | void __init udplite4_register(void) | 110 | void __init udplite4_register(void) |
112 | { | 111 | { |
113 | udp_table_init(&udplite_table); | 112 | udp_table_init(&udplite_table, "UDP-Lite"); |
114 | if (proto_register(&udplite_prot, 1)) | 113 | if (proto_register(&udplite_prot, 1)) |
115 | goto out_register_err; | 114 | goto out_register_err; |
116 | 115 | ||
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig index ead6c7a42f44..a578096152ab 100644 --- a/net/ipv6/Kconfig +++ b/net/ipv6/Kconfig | |||
@@ -170,6 +170,25 @@ config IPV6_SIT | |||
170 | 170 | ||
171 | Saying M here will produce a module called sit. If unsure, say Y. | 171 | Saying M here will produce a module called sit. If unsure, say Y. |
172 | 172 | ||
173 | config IPV6_SIT_6RD | ||
174 | bool "IPv6: IPv6 Rapid Deployment (6RD) (EXPERIMENTAL)" | ||
175 | depends on IPV6_SIT && EXPERIMENTAL | ||
176 | default n | ||
177 | ---help--- | ||
178 | IPv6 Rapid Deployment (6rd; draft-ietf-softwire-ipv6-6rd) builds upon | ||
179 | mechanisms of 6to4 (RFC3056) to enable a service provider to rapidly | ||
180 | deploy IPv6 unicast service to IPv4 sites to which it provides | ||
181 | customer premise equipment. Like 6to4, it utilizes stateless IPv6 in | ||
182 | IPv4 encapsulation in order to transit IPv4-only network | ||
183 | infrastructure. Unlike 6to4, a 6rd service provider uses an IPv6 | ||
184 | prefix of its own in place of the fixed 6to4 prefix. | ||
185 | |||
186 | With this option enabled, the SIT driver offers 6rd functionality by | ||
187 | providing an additional ioctl API to configure the IPv6 prefix used | ||
188 | instead of the static 2002::/16 prefix used by 6to4. | ||
189 | |||
190 | If unsure, say N. | ||
191 | |||
173 | config IPV6_NDISC_NODETYPE | 192 | config IPV6_NDISC_NODETYPE |
174 | bool | 193 | bool |
175 | 194 | ||
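To make the 6rd help text above concrete: 6rd forms a site's delegated prefix by appending (part of) the site's IPv4 address to the provider's own prefix, where 6to4 always appends it to the fixed 2002::/16. A small illustrative program for the simplest case, a provider /32 with the full 32-bit IPv4 address embedded (real deployments may instead strip IPv4 bits that are common to the whole domain):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const uint16_t sp_prefix[2] = { 0x2001, 0x0db8 };  /* 2001:db8::/32 */
        const uint8_t v4[4] = { 192, 0, 2, 1 };            /* CE IPv4 address */

        /* 32-bit provider prefix + 32-bit IPv4 address = /64 delegated prefix */
        printf("6rd delegated prefix: %x:%x:%02x%02x:%02x%02x::/64\n",
               sp_prefix[0], sp_prefix[1], v4[0], v4[1], v4[2], v4[3]);
        /* 6to4 would embed the same address after the fixed 2002::/16 */
        printf("6to4 equivalent:      2002:%02x%02x:%02x%02x::/48\n",
               v4[0], v4[1], v4[2], v4[3]);
        return 0;
    }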
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 1fd0a3d775d2..522bdc77206c 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -481,9 +481,8 @@ static void addrconf_forward_change(struct net *net, __s32 newf) | |||
481 | struct net_device *dev; | 481 | struct net_device *dev; |
482 | struct inet6_dev *idev; | 482 | struct inet6_dev *idev; |
483 | 483 | ||
484 | read_lock(&dev_base_lock); | 484 | rcu_read_lock(); |
485 | for_each_netdev(net, dev) { | 485 | for_each_netdev_rcu(net, dev) { |
486 | rcu_read_lock(); | ||
487 | idev = __in6_dev_get(dev); | 486 | idev = __in6_dev_get(dev); |
488 | if (idev) { | 487 | if (idev) { |
489 | int changed = (!idev->cnf.forwarding) ^ (!newf); | 488 | int changed = (!idev->cnf.forwarding) ^ (!newf); |
@@ -491,9 +490,8 @@ static void addrconf_forward_change(struct net *net, __s32 newf) | |||
491 | if (changed) | 490 | if (changed) |
492 | dev_forward_change(idev); | 491 | dev_forward_change(idev); |
493 | } | 492 | } |
494 | rcu_read_unlock(); | ||
495 | } | 493 | } |
496 | read_unlock(&dev_base_lock); | 494 | rcu_read_unlock(); |
497 | } | 495 | } |
498 | 496 | ||
499 | static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old) | 497 | static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old) |
@@ -1137,10 +1135,9 @@ int ipv6_dev_get_saddr(struct net *net, struct net_device *dst_dev, | |||
1137 | hiscore->rule = -1; | 1135 | hiscore->rule = -1; |
1138 | hiscore->ifa = NULL; | 1136 | hiscore->ifa = NULL; |
1139 | 1137 | ||
1140 | read_lock(&dev_base_lock); | ||
1141 | rcu_read_lock(); | 1138 | rcu_read_lock(); |
1142 | 1139 | ||
1143 | for_each_netdev(net, dev) { | 1140 | for_each_netdev_rcu(net, dev) { |
1144 | struct inet6_dev *idev; | 1141 | struct inet6_dev *idev; |
1145 | 1142 | ||
1146 | /* Candidate Source Address (section 4) | 1143 | /* Candidate Source Address (section 4) |
@@ -1235,7 +1232,6 @@ try_nextdev: | |||
1235 | read_unlock_bh(&idev->lock); | 1232 | read_unlock_bh(&idev->lock); |
1236 | } | 1233 | } |
1237 | rcu_read_unlock(); | 1234 | rcu_read_unlock(); |
1238 | read_unlock(&dev_base_lock); | ||
1239 | 1235 | ||
1240 | if (!hiscore->ifa) | 1236 | if (!hiscore->ifa) |
1241 | return -EADDRNOTAVAIL; | 1237 | return -EADDRNOTAVAIL; |
@@ -3485,85 +3481,114 @@ enum addr_type_t | |||
3485 | ANYCAST_ADDR, | 3481 | ANYCAST_ADDR, |
3486 | }; | 3482 | }; |
3487 | 3483 | ||
3484 | /* called with rcu_read_lock() */ | ||
3485 | static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb, | ||
3486 | struct netlink_callback *cb, enum addr_type_t type, | ||
3487 | int s_ip_idx, int *p_ip_idx) | ||
3488 | { | ||
3489 | struct inet6_ifaddr *ifa; | ||
3490 | struct ifmcaddr6 *ifmca; | ||
3491 | struct ifacaddr6 *ifaca; | ||
3492 | int err = 1; | ||
3493 | int ip_idx = *p_ip_idx; | ||
3494 | |||
3495 | read_lock_bh(&idev->lock); | ||
3496 | switch (type) { | ||
3497 | case UNICAST_ADDR: | ||
3498 | /* unicast address incl. temp addr */ | ||
3499 | for (ifa = idev->addr_list; ifa; | ||
3500 | ifa = ifa->if_next, ip_idx++) { | ||
3501 | if (ip_idx < s_ip_idx) | ||
3502 | continue; | ||
3503 | err = inet6_fill_ifaddr(skb, ifa, | ||
3504 | NETLINK_CB(cb->skb).pid, | ||
3505 | cb->nlh->nlmsg_seq, | ||
3506 | RTM_NEWADDR, | ||
3507 | NLM_F_MULTI); | ||
3508 | if (err <= 0) | ||
3509 | break; | ||
3510 | } | ||
3511 | break; | ||
3512 | case MULTICAST_ADDR: | ||
3513 | /* multicast address */ | ||
3514 | for (ifmca = idev->mc_list; ifmca; | ||
3515 | ifmca = ifmca->next, ip_idx++) { | ||
3516 | if (ip_idx < s_ip_idx) | ||
3517 | continue; | ||
3518 | err = inet6_fill_ifmcaddr(skb, ifmca, | ||
3519 | NETLINK_CB(cb->skb).pid, | ||
3520 | cb->nlh->nlmsg_seq, | ||
3521 | RTM_GETMULTICAST, | ||
3522 | NLM_F_MULTI); | ||
3523 | if (err <= 0) | ||
3524 | break; | ||
3525 | } | ||
3526 | break; | ||
3527 | case ANYCAST_ADDR: | ||
3528 | /* anycast address */ | ||
3529 | for (ifaca = idev->ac_list; ifaca; | ||
3530 | ifaca = ifaca->aca_next, ip_idx++) { | ||
3531 | if (ip_idx < s_ip_idx) | ||
3532 | continue; | ||
3533 | err = inet6_fill_ifacaddr(skb, ifaca, | ||
3534 | NETLINK_CB(cb->skb).pid, | ||
3535 | cb->nlh->nlmsg_seq, | ||
3536 | RTM_GETANYCAST, | ||
3537 | NLM_F_MULTI); | ||
3538 | if (err <= 0) | ||
3539 | break; | ||
3540 | } | ||
3541 | break; | ||
3542 | default: | ||
3543 | break; | ||
3544 | } | ||
3545 | read_unlock_bh(&idev->lock); | ||
3546 | *p_ip_idx = ip_idx; | ||
3547 | return err; | ||
3548 | } | ||
3549 | |||
3488 | static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb, | 3550 | static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb, |
3489 | enum addr_type_t type) | 3551 | enum addr_type_t type) |
3490 | { | 3552 | { |
3553 | struct net *net = sock_net(skb->sk); | ||
3554 | int h, s_h; | ||
3491 | int idx, ip_idx; | 3555 | int idx, ip_idx; |
3492 | int s_idx, s_ip_idx; | 3556 | int s_idx, s_ip_idx; |
3493 | int err = 1; | ||
3494 | struct net_device *dev; | 3557 | struct net_device *dev; |
3495 | struct inet6_dev *idev = NULL; | 3558 | struct inet6_dev *idev; |
3496 | struct inet6_ifaddr *ifa; | 3559 | struct hlist_head *head; |
3497 | struct ifmcaddr6 *ifmca; | 3560 | struct hlist_node *node; |
3498 | struct ifacaddr6 *ifaca; | ||
3499 | struct net *net = sock_net(skb->sk); | ||
3500 | |||
3501 | s_idx = cb->args[0]; | ||
3502 | s_ip_idx = ip_idx = cb->args[1]; | ||
3503 | 3561 | ||
3504 | idx = 0; | 3562 | s_h = cb->args[0]; |
3505 | for_each_netdev(net, dev) { | 3563 | s_idx = idx = cb->args[1]; |
3506 | if (idx < s_idx) | 3564 | s_ip_idx = ip_idx = cb->args[2]; |
3507 | goto cont; | ||
3508 | if (idx > s_idx) | ||
3509 | s_ip_idx = 0; | ||
3510 | ip_idx = 0; | ||
3511 | if ((idev = in6_dev_get(dev)) == NULL) | ||
3512 | goto cont; | ||
3513 | read_lock_bh(&idev->lock); | ||
3514 | switch (type) { | ||
3515 | case UNICAST_ADDR: | ||
3516 | /* unicast address incl. temp addr */ | ||
3517 | for (ifa = idev->addr_list; ifa; | ||
3518 | ifa = ifa->if_next, ip_idx++) { | ||
3519 | if (ip_idx < s_ip_idx) | ||
3520 | continue; | ||
3521 | err = inet6_fill_ifaddr(skb, ifa, | ||
3522 | NETLINK_CB(cb->skb).pid, | ||
3523 | cb->nlh->nlmsg_seq, | ||
3524 | RTM_NEWADDR, | ||
3525 | NLM_F_MULTI); | ||
3526 | } | ||
3527 | break; | ||
3528 | case MULTICAST_ADDR: | ||
3529 | /* multicast address */ | ||
3530 | for (ifmca = idev->mc_list; ifmca; | ||
3531 | ifmca = ifmca->next, ip_idx++) { | ||
3532 | if (ip_idx < s_ip_idx) | ||
3533 | continue; | ||
3534 | err = inet6_fill_ifmcaddr(skb, ifmca, | ||
3535 | NETLINK_CB(cb->skb).pid, | ||
3536 | cb->nlh->nlmsg_seq, | ||
3537 | RTM_GETMULTICAST, | ||
3538 | NLM_F_MULTI); | ||
3539 | } | ||
3540 | break; | ||
3541 | case ANYCAST_ADDR: | ||
3542 | /* anycast address */ | ||
3543 | for (ifaca = idev->ac_list; ifaca; | ||
3544 | ifaca = ifaca->aca_next, ip_idx++) { | ||
3545 | if (ip_idx < s_ip_idx) | ||
3546 | continue; | ||
3547 | err = inet6_fill_ifacaddr(skb, ifaca, | ||
3548 | NETLINK_CB(cb->skb).pid, | ||
3549 | cb->nlh->nlmsg_seq, | ||
3550 | RTM_GETANYCAST, | ||
3551 | NLM_F_MULTI); | ||
3552 | } | ||
3553 | break; | ||
3554 | default: | ||
3555 | break; | ||
3556 | } | ||
3557 | read_unlock_bh(&idev->lock); | ||
3558 | in6_dev_put(idev); | ||
3559 | 3565 | ||
3560 | if (err <= 0) | 3566 | rcu_read_lock(); |
3561 | break; | 3567 | for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { |
3568 | idx = 0; | ||
3569 | head = &net->dev_index_head[h]; | ||
3570 | hlist_for_each_entry_rcu(dev, node, head, index_hlist) { | ||
3571 | if (idx < s_idx) | ||
3572 | goto cont; | ||
3573 | if (idx > s_idx) | ||
3574 | s_ip_idx = 0; | ||
3575 | ip_idx = 0; | ||
3576 | if ((idev = __in6_dev_get(dev)) == NULL) | ||
3577 | goto cont; | ||
3578 | |||
3579 | if (in6_dump_addrs(idev, skb, cb, type, | ||
3580 | s_ip_idx, &ip_idx) <= 0) | ||
3581 | goto done; | ||
3562 | cont: | 3582 | cont: |
3563 | idx++; | 3583 | idx++; |
3584 | } | ||
3564 | } | 3585 | } |
3565 | cb->args[0] = idx; | 3586 | done: |
3566 | cb->args[1] = ip_idx; | 3587 | rcu_read_unlock(); |
3588 | cb->args[0] = h; | ||
3589 | cb->args[1] = idx; | ||
3590 | cb->args[2] = ip_idx; | ||
3591 | |||
3567 | return skb->len; | 3592 | return skb->len; |
3568 | } | 3593 | } |
3569 | 3594 | ||
@@ -3708,6 +3733,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf, | |||
3708 | #endif | 3733 | #endif |
3709 | array[DEVCONF_DISABLE_IPV6] = cnf->disable_ipv6; | 3734 | array[DEVCONF_DISABLE_IPV6] = cnf->disable_ipv6; |
3710 | array[DEVCONF_ACCEPT_DAD] = cnf->accept_dad; | 3735 | array[DEVCONF_ACCEPT_DAD] = cnf->accept_dad; |
3736 | array[DEVCONF_FORCE_TLLAO] = cnf->force_tllao; | ||
3711 | } | 3737 | } |
3712 | 3738 | ||
3713 | static inline size_t inet6_if_nlmsg_size(void) | 3739 | static inline size_t inet6_if_nlmsg_size(void) |
@@ -3826,28 +3852,39 @@ nla_put_failure: | |||
3826 | static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) | 3852 | static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) |
3827 | { | 3853 | { |
3828 | struct net *net = sock_net(skb->sk); | 3854 | struct net *net = sock_net(skb->sk); |
3829 | int idx, err; | 3855 | int h, s_h; |
3830 | int s_idx = cb->args[0]; | 3856 | int idx = 0, s_idx; |
3831 | struct net_device *dev; | 3857 | struct net_device *dev; |
3832 | struct inet6_dev *idev; | 3858 | struct inet6_dev *idev; |
3859 | struct hlist_head *head; | ||
3860 | struct hlist_node *node; | ||
3833 | 3861 | ||
3834 | read_lock(&dev_base_lock); | 3862 | s_h = cb->args[0]; |
3835 | idx = 0; | 3863 | s_idx = cb->args[1]; |
3836 | for_each_netdev(net, dev) { | 3864 | |
3837 | if (idx < s_idx) | 3865 | rcu_read_lock(); |
3838 | goto cont; | 3866 | for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { |
3839 | if ((idev = in6_dev_get(dev)) == NULL) | 3867 | idx = 0; |
3840 | goto cont; | 3868 | head = &net->dev_index_head[h]; |
3841 | err = inet6_fill_ifinfo(skb, idev, NETLINK_CB(cb->skb).pid, | 3869 | hlist_for_each_entry_rcu(dev, node, head, index_hlist) { |
3842 | cb->nlh->nlmsg_seq, RTM_NEWLINK, NLM_F_MULTI); | 3870 | if (idx < s_idx) |
3843 | in6_dev_put(idev); | 3871 | goto cont; |
3844 | if (err <= 0) | 3872 | idev = __in6_dev_get(dev); |
3845 | break; | 3873 | if (!idev) |
3874 | goto cont; | ||
3875 | if (inet6_fill_ifinfo(skb, idev, | ||
3876 | NETLINK_CB(cb->skb).pid, | ||
3877 | cb->nlh->nlmsg_seq, | ||
3878 | RTM_NEWLINK, NLM_F_MULTI) <= 0) | ||
3879 | goto out; | ||
3846 | cont: | 3880 | cont: |
3847 | idx++; | 3881 | idx++; |
3882 | } | ||
3848 | } | 3883 | } |
3849 | read_unlock(&dev_base_lock); | 3884 | out: |
3850 | cb->args[0] = idx; | 3885 | rcu_read_unlock(); |
3886 | cb->args[1] = idx; | ||
3887 | cb->args[0] = h; | ||
3851 | 3888 | ||
3852 | return skb->len; | 3889 | return skb->len; |
3853 | } | 3890 | } |
@@ -4051,9 +4088,8 @@ static void addrconf_disable_change(struct net *net, __s32 newf) | |||
4051 | struct net_device *dev; | 4088 | struct net_device *dev; |
4052 | struct inet6_dev *idev; | 4089 | struct inet6_dev *idev; |
4053 | 4090 | ||
4054 | read_lock(&dev_base_lock); | 4091 | rcu_read_lock(); |
4055 | for_each_netdev(net, dev) { | 4092 | for_each_netdev_rcu(net, dev) { |
4056 | rcu_read_lock(); | ||
4057 | idev = __in6_dev_get(dev); | 4093 | idev = __in6_dev_get(dev); |
4058 | if (idev) { | 4094 | if (idev) { |
4059 | int changed = (!idev->cnf.disable_ipv6) ^ (!newf); | 4095 | int changed = (!idev->cnf.disable_ipv6) ^ (!newf); |
@@ -4061,9 +4097,8 @@ static void addrconf_disable_change(struct net *net, __s32 newf) | |||
4061 | if (changed) | 4097 | if (changed) |
4062 | dev_disable_change(idev); | 4098 | dev_disable_change(idev); |
4063 | } | 4099 | } |
4064 | rcu_read_unlock(); | ||
4065 | } | 4100 | } |
4066 | read_unlock(&dev_base_lock); | 4101 | rcu_read_unlock(); |
4067 | } | 4102 | } |
4068 | 4103 | ||
4069 | static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int old) | 4104 | static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int old) |
@@ -4353,6 +4388,14 @@ static struct addrconf_sysctl_table | |||
4353 | .proc_handler = proc_dointvec, | 4388 | .proc_handler = proc_dointvec, |
4354 | }, | 4389 | }, |
4355 | { | 4390 | { |
4391 | .ctl_name = CTL_UNNUMBERED, | ||
4392 | .procname = "force_tllao", | ||
4393 | .data = &ipv6_devconf.force_tllao, | ||
4394 | .maxlen = sizeof(int), | ||
4395 | .mode = 0644, | ||
4396 | .proc_handler = proc_dointvec | ||
4397 | }, | ||
4398 | { | ||
4356 | .ctl_name = 0, /* sentinel */ | 4399 | .ctl_name = 0, /* sentinel */ |
4357 | } | 4400 | } |
4358 | }, | 4401 | }, |
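The inet6_dump_addr() and inet6_dump_ifinfo() rewrites above stop walking the flat device list under dev_base_lock and instead walk the per-net device index hash under RCU, so the netlink dump now keeps one more cursor in cb->args[]: the hash bucket, the device index within that bucket, and (for addresses) the address index. A self-contained sketch of that resumable, multi-level cursor pattern, with toy buckets in place of net->dev_index_head[] and a print in place of the netlink fill functions:

    #include <stdio.h>

    #define BUCKETS 4
    #define PER_BUCKET 3

    /* Emit up to 'budget' items per call, resuming from the saved cursors,
     * the way a netlink dump callback resumes from cb->args[]. */
    static int dump(int args[2], int budget)
    {
        int emitted = 0;
        int h, idx;

        for (h = args[0]; h < BUCKETS; h++, args[1] = 0) {
            for (idx = args[1]; idx < PER_BUCKET; idx++) {
                if (emitted == budget) {
                    args[0] = h;        /* remember where we stopped */
                    args[1] = idx;
                    return emitted;
                }
                printf("bucket %d item %d\n", h, idx);
                emitted++;
            }
        }
        args[0] = BUCKETS;              /* walked everything */
        args[1] = 0;
        return emitted;
    }

    int main(void)
    {
        int args[2] = { 0, 0 };

        while (dump(args, 5) > 0)       /* pretend each skb holds 5 entries */
            printf("-- next dump call --\n");
        return 0;
    }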
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index e127a32f9540..12e69d364dd5 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c | |||
@@ -95,7 +95,8 @@ static __inline__ struct ipv6_pinfo *inet6_sk_generic(struct sock *sk) | |||
95 | return (struct ipv6_pinfo *)(((u8 *)sk) + offset); | 95 | return (struct ipv6_pinfo *)(((u8 *)sk) + offset); |
96 | } | 96 | } |
97 | 97 | ||
98 | static int inet6_create(struct net *net, struct socket *sock, int protocol) | 98 | static int inet6_create(struct net *net, struct socket *sock, int protocol, |
99 | int kern) | ||
99 | { | 100 | { |
100 | struct inet_sock *inet; | 101 | struct inet_sock *inet; |
101 | struct ipv6_pinfo *np; | 102 | struct ipv6_pinfo *np; |
@@ -158,7 +159,7 @@ lookup_protocol: | |||
158 | } | 159 | } |
159 | 160 | ||
160 | err = -EPERM; | 161 | err = -EPERM; |
161 | if (answer->capability > 0 && !capable(answer->capability)) | 162 | if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW)) |
162 | goto out_rcu_unlock; | 163 | goto out_rcu_unlock; |
163 | 164 | ||
164 | sock->ops = answer->ops; | 165 | sock->ops = answer->ops; |
@@ -185,7 +186,7 @@ lookup_protocol: | |||
185 | inet->is_icsk = (INET_PROTOSW_ICSK & answer_flags) != 0; | 186 | inet->is_icsk = (INET_PROTOSW_ICSK & answer_flags) != 0; |
186 | 187 | ||
187 | if (SOCK_RAW == sock->type) { | 188 | if (SOCK_RAW == sock->type) { |
188 | inet->num = protocol; | 189 | inet->inet_num = protocol; |
189 | if (IPPROTO_RAW == protocol) | 190 | if (IPPROTO_RAW == protocol) |
190 | inet->hdrincl = 1; | 191 | inet->hdrincl = 1; |
191 | } | 192 | } |
@@ -228,12 +229,12 @@ lookup_protocol: | |||
228 | */ | 229 | */ |
229 | sk_refcnt_debug_inc(sk); | 230 | sk_refcnt_debug_inc(sk); |
230 | 231 | ||
231 | if (inet->num) { | 232 | if (inet->inet_num) { |
232 | /* It assumes that any protocol which allows | 233 | /* It assumes that any protocol which allows |
233 | * the user to assign a number at socket | 234 | * the user to assign a number at socket |
234 | * creation time automatically shares. | 235 | * creation time automatically shares. |
235 | */ | 236 | */ |
236 | inet->sport = htons(inet->num); | 237 | inet->inet_sport = htons(inet->inet_num); |
237 | sk->sk_prot->hash(sk); | 238 | sk->sk_prot->hash(sk); |
238 | } | 239 | } |
239 | if (sk->sk_prot->init) { | 240 | if (sk->sk_prot->init) { |
@@ -281,7 +282,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |||
281 | lock_sock(sk); | 282 | lock_sock(sk); |
282 | 283 | ||
283 | /* Check these errors (active socket, double bind). */ | 284 | /* Check these errors (active socket, double bind). */ |
284 | if (sk->sk_state != TCP_CLOSE || inet->num) { | 285 | if (sk->sk_state != TCP_CLOSE || inet->inet_num) { |
285 | err = -EINVAL; | 286 | err = -EINVAL; |
286 | goto out; | 287 | goto out; |
287 | } | 288 | } |
@@ -314,6 +315,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |||
314 | if (addr_type != IPV6_ADDR_ANY) { | 315 | if (addr_type != IPV6_ADDR_ANY) { |
315 | struct net_device *dev = NULL; | 316 | struct net_device *dev = NULL; |
316 | 317 | ||
318 | rcu_read_lock(); | ||
317 | if (addr_type & IPV6_ADDR_LINKLOCAL) { | 319 | if (addr_type & IPV6_ADDR_LINKLOCAL) { |
318 | if (addr_len >= sizeof(struct sockaddr_in6) && | 320 | if (addr_len >= sizeof(struct sockaddr_in6) && |
319 | addr->sin6_scope_id) { | 321 | addr->sin6_scope_id) { |
@@ -326,12 +328,12 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |||
326 | /* Binding to link-local address requires an interface */ | 328 | /* Binding to link-local address requires an interface */ |
327 | if (!sk->sk_bound_dev_if) { | 329 | if (!sk->sk_bound_dev_if) { |
328 | err = -EINVAL; | 330 | err = -EINVAL; |
329 | goto out; | 331 | goto out_unlock; |
330 | } | 332 | } |
331 | dev = dev_get_by_index(net, sk->sk_bound_dev_if); | 333 | dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if); |
332 | if (!dev) { | 334 | if (!dev) { |
333 | err = -ENODEV; | 335 | err = -ENODEV; |
334 | goto out; | 336 | goto out_unlock; |
335 | } | 337 | } |
336 | } | 338 | } |
337 | 339 | ||
@@ -342,19 +344,16 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |||
342 | if (!(addr_type & IPV6_ADDR_MULTICAST)) { | 344 | if (!(addr_type & IPV6_ADDR_MULTICAST)) { |
343 | if (!ipv6_chk_addr(net, &addr->sin6_addr, | 345 | if (!ipv6_chk_addr(net, &addr->sin6_addr, |
344 | dev, 0)) { | 346 | dev, 0)) { |
345 | if (dev) | ||
346 | dev_put(dev); | ||
347 | err = -EADDRNOTAVAIL; | 347 | err = -EADDRNOTAVAIL; |
348 | goto out; | 348 | goto out_unlock; |
349 | } | 349 | } |
350 | } | 350 | } |
351 | if (dev) | 351 | rcu_read_unlock(); |
352 | dev_put(dev); | ||
353 | } | 352 | } |
354 | } | 353 | } |
355 | 354 | ||
356 | inet->rcv_saddr = v4addr; | 355 | inet->inet_rcv_saddr = v4addr; |
357 | inet->saddr = v4addr; | 356 | inet->inet_saddr = v4addr; |
358 | 357 | ||
359 | ipv6_addr_copy(&np->rcv_saddr, &addr->sin6_addr); | 358 | ipv6_addr_copy(&np->rcv_saddr, &addr->sin6_addr); |
360 | 359 | ||
@@ -375,12 +374,15 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |||
375 | } | 374 | } |
376 | if (snum) | 375 | if (snum) |
377 | sk->sk_userlocks |= SOCK_BINDPORT_LOCK; | 376 | sk->sk_userlocks |= SOCK_BINDPORT_LOCK; |
378 | inet->sport = htons(inet->num); | 377 | inet->inet_sport = htons(inet->inet_num); |
379 | inet->dport = 0; | 378 | inet->inet_dport = 0; |
380 | inet->daddr = 0; | 379 | inet->inet_daddr = 0; |
381 | out: | 380 | out: |
382 | release_sock(sk); | 381 | release_sock(sk); |
383 | return err; | 382 | return err; |
383 | out_unlock: | ||
384 | rcu_read_unlock(); | ||
385 | goto out; | ||
384 | } | 386 | } |
385 | 387 | ||
386 | EXPORT_SYMBOL(inet6_bind); | 388 | EXPORT_SYMBOL(inet6_bind); |
@@ -441,12 +443,12 @@ int inet6_getname(struct socket *sock, struct sockaddr *uaddr, | |||
441 | sin->sin6_flowinfo = 0; | 443 | sin->sin6_flowinfo = 0; |
442 | sin->sin6_scope_id = 0; | 444 | sin->sin6_scope_id = 0; |
443 | if (peer) { | 445 | if (peer) { |
444 | if (!inet->dport) | 446 | if (!inet->inet_dport) |
445 | return -ENOTCONN; | 447 | return -ENOTCONN; |
446 | if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)) && | 448 | if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)) && |
447 | peer == 1) | 449 | peer == 1) |
448 | return -ENOTCONN; | 450 | return -ENOTCONN; |
449 | sin->sin6_port = inet->dport; | 451 | sin->sin6_port = inet->inet_dport; |
450 | ipv6_addr_copy(&sin->sin6_addr, &np->daddr); | 452 | ipv6_addr_copy(&sin->sin6_addr, &np->daddr); |
451 | if (np->sndflow) | 453 | if (np->sndflow) |
452 | sin->sin6_flowinfo = np->flow_label; | 454 | sin->sin6_flowinfo = np->flow_label; |
@@ -456,7 +458,7 @@ int inet6_getname(struct socket *sock, struct sockaddr *uaddr, | |||
456 | else | 458 | else |
457 | ipv6_addr_copy(&sin->sin6_addr, &np->rcv_saddr); | 459 | ipv6_addr_copy(&sin->sin6_addr, &np->rcv_saddr); |
458 | 460 | ||
459 | sin->sin6_port = inet->sport; | 461 | sin->sin6_port = inet->inet_sport; |
460 | } | 462 | } |
461 | if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL) | 463 | if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL) |
462 | sin->sin6_scope_id = sk->sk_bound_dev_if; | 464 | sin->sin6_scope_id = sk->sk_bound_dev_if; |
@@ -552,7 +554,7 @@ const struct proto_ops inet6_dgram_ops = { | |||
552 | #endif | 554 | #endif |
553 | }; | 555 | }; |
554 | 556 | ||
555 | static struct net_proto_family inet6_family_ops = { | 557 | static const struct net_proto_family inet6_family_ops = { |
556 | .family = PF_INET6, | 558 | .family = PF_INET6, |
557 | .create = inet6_create, | 559 | .create = inet6_create, |
558 | .owner = THIS_MODULE, | 560 | .owner = THIS_MODULE, |
@@ -654,8 +656,9 @@ int inet6_sk_rebuild_header(struct sock *sk) | |||
654 | ipv6_addr_copy(&fl.fl6_src, &np->saddr); | 656 | ipv6_addr_copy(&fl.fl6_src, &np->saddr); |
655 | fl.fl6_flowlabel = np->flow_label; | 657 | fl.fl6_flowlabel = np->flow_label; |
656 | fl.oif = sk->sk_bound_dev_if; | 658 | fl.oif = sk->sk_bound_dev_if; |
657 | fl.fl_ip_dport = inet->dport; | 659 | fl.mark = sk->sk_mark; |
658 | fl.fl_ip_sport = inet->sport; | 660 | fl.fl_ip_dport = inet->inet_dport; |
661 | fl.fl_ip_sport = inet->inet_sport; | ||
659 | security_sk_classify_flow(sk, &fl); | 662 | security_sk_classify_flow(sk, &fl); |
660 | 663 | ||
661 | if (np->opt && np->opt->srcrt) { | 664 | if (np->opt && np->opt->srcrt) { |
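
The af_inet6.c hunks above do three things: rename the inet_sock fields (num/sport/dport/saddr become inet_num/inet_sport/inet_dport/inet_saddr), copy sk->sk_mark into the flow key before rebuilding a route, and convert the bound-device lookup in inet6_bind() from dev_get_by_index()/dev_put() to an RCU lookup that takes no reference. A minimal sketch of that last pattern, assuming a placeholder caller (lookup_and_check() is illustrative, not from the patch):

#include <linux/netdevice.h>
#include <linux/rcupdate.h>
#include <linux/errno.h>

/* Sketch: resolve an ifindex without taking a device reference. */
static int lookup_and_check(struct net *net, int ifindex)
{
	struct net_device *dev;
	int err = 0;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev)
		err = -ENODEV;
	/* dev may only be used inside this RCU section; there is no
	 * refcount to drop on the error paths, which is why the patch
	 * adds an out_unlock label to inet6_bind() instead of dev_put(). */
	rcu_read_unlock();
	return err;
}
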
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c index c1589e2f1dc9..0f526f8ea518 100644 --- a/net/ipv6/ah6.c +++ b/net/ipv6/ah6.c | |||
@@ -24,18 +24,92 @@ | |||
24 | * This file is derived from net/ipv4/ah.c. | 24 | * This file is derived from net/ipv4/ah.c. |
25 | */ | 25 | */ |
26 | 26 | ||
27 | #include <crypto/hash.h> | ||
27 | #include <linux/module.h> | 28 | #include <linux/module.h> |
28 | #include <net/ip.h> | 29 | #include <net/ip.h> |
29 | #include <net/ah.h> | 30 | #include <net/ah.h> |
30 | #include <linux/crypto.h> | 31 | #include <linux/crypto.h> |
31 | #include <linux/pfkeyv2.h> | 32 | #include <linux/pfkeyv2.h> |
32 | #include <linux/spinlock.h> | ||
33 | #include <linux/string.h> | 33 | #include <linux/string.h> |
34 | #include <linux/scatterlist.h> | ||
34 | #include <net/icmp.h> | 35 | #include <net/icmp.h> |
35 | #include <net/ipv6.h> | 36 | #include <net/ipv6.h> |
36 | #include <net/protocol.h> | 37 | #include <net/protocol.h> |
37 | #include <net/xfrm.h> | 38 | #include <net/xfrm.h> |
38 | 39 | ||
40 | #define IPV6HDR_BASELEN 8 | ||
41 | |||
42 | struct tmp_ext { | ||
43 | #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) | ||
44 | struct in6_addr saddr; | ||
45 | #endif | ||
46 | struct in6_addr daddr; | ||
47 | char hdrs[0]; | ||
48 | }; | ||
49 | |||
50 | struct ah_skb_cb { | ||
51 | struct xfrm_skb_cb xfrm; | ||
52 | void *tmp; | ||
53 | }; | ||
54 | |||
55 | #define AH_SKB_CB(__skb) ((struct ah_skb_cb *)&((__skb)->cb[0])) | ||
56 | |||
57 | static void *ah_alloc_tmp(struct crypto_ahash *ahash, int nfrags, | ||
58 | unsigned int size) | ||
59 | { | ||
60 | unsigned int len; | ||
61 | |||
62 | len = size + crypto_ahash_digestsize(ahash) + | ||
63 | (crypto_ahash_alignmask(ahash) & | ||
64 | ~(crypto_tfm_ctx_alignment() - 1)); | ||
65 | |||
66 | len = ALIGN(len, crypto_tfm_ctx_alignment()); | ||
67 | |||
68 | len += sizeof(struct ahash_request) + crypto_ahash_reqsize(ahash); | ||
69 | len = ALIGN(len, __alignof__(struct scatterlist)); | ||
70 | |||
71 | len += sizeof(struct scatterlist) * nfrags; | ||
72 | |||
73 | return kmalloc(len, GFP_ATOMIC); | ||
74 | } | ||
75 | |||
76 | static inline struct tmp_ext *ah_tmp_ext(void *base) | ||
77 | { | ||
78 | return base + IPV6HDR_BASELEN; | ||
79 | } | ||
80 | |||
81 | static inline u8 *ah_tmp_auth(u8 *tmp, unsigned int offset) | ||
82 | { | ||
83 | return tmp + offset; | ||
84 | } | ||
85 | |||
86 | static inline u8 *ah_tmp_icv(struct crypto_ahash *ahash, void *tmp, | ||
87 | unsigned int offset) | ||
88 | { | ||
89 | return PTR_ALIGN((u8 *)tmp + offset, crypto_ahash_alignmask(ahash) + 1); | ||
90 | } | ||
91 | |||
92 | static inline struct ahash_request *ah_tmp_req(struct crypto_ahash *ahash, | ||
93 | u8 *icv) | ||
94 | { | ||
95 | struct ahash_request *req; | ||
96 | |||
97 | req = (void *)PTR_ALIGN(icv + crypto_ahash_digestsize(ahash), | ||
98 | crypto_tfm_ctx_alignment()); | ||
99 | |||
100 | ahash_request_set_tfm(req, ahash); | ||
101 | |||
102 | return req; | ||
103 | } | ||
104 | |||
105 | static inline struct scatterlist *ah_req_sg(struct crypto_ahash *ahash, | ||
106 | struct ahash_request *req) | ||
107 | { | ||
108 | return (void *)ALIGN((unsigned long)(req + 1) + | ||
109 | crypto_ahash_reqsize(ahash), | ||
110 | __alignof__(struct scatterlist)); | ||
111 | } | ||
112 | |||
39 | static int zero_out_mutable_opts(struct ipv6_opt_hdr *opthdr) | 113 | static int zero_out_mutable_opts(struct ipv6_opt_hdr *opthdr) |
40 | { | 114 | { |
41 | u8 *opt = (u8 *)opthdr; | 115 | u8 *opt = (u8 *)opthdr; |
@@ -218,24 +292,85 @@ static int ipv6_clear_mutable_options(struct ipv6hdr *iph, int len, int dir) | |||
218 | return 0; | 292 | return 0; |
219 | } | 293 | } |
220 | 294 | ||
295 | static void ah6_output_done(struct crypto_async_request *base, int err) | ||
296 | { | ||
297 | int extlen; | ||
298 | u8 *iph_base; | ||
299 | u8 *icv; | ||
300 | struct sk_buff *skb = base->data; | ||
301 | struct xfrm_state *x = skb_dst(skb)->xfrm; | ||
302 | struct ah_data *ahp = x->data; | ||
303 | struct ipv6hdr *top_iph = ipv6_hdr(skb); | ||
304 | struct ip_auth_hdr *ah = ip_auth_hdr(skb); | ||
305 | struct tmp_ext *iph_ext; | ||
306 | |||
307 | extlen = skb_network_header_len(skb) - sizeof(struct ipv6hdr); | ||
308 | if (extlen) | ||
309 | extlen += sizeof(*iph_ext); | ||
310 | |||
311 | iph_base = AH_SKB_CB(skb)->tmp; | ||
312 | iph_ext = ah_tmp_ext(iph_base); | ||
313 | icv = ah_tmp_icv(ahp->ahash, iph_ext, extlen); | ||
314 | |||
315 | memcpy(ah->auth_data, icv, ahp->icv_trunc_len); | ||
316 | memcpy(top_iph, iph_base, IPV6HDR_BASELEN); | ||
317 | |||
318 | if (extlen) { | ||
319 | #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) | ||
320 | memcpy(&top_iph->saddr, iph_ext, extlen); | ||
321 | #else | ||
322 | memcpy(&top_iph->daddr, iph_ext, extlen); | ||
323 | #endif | ||
324 | } | ||
325 | |||
326 | err = ah->nexthdr; | ||
327 | |||
328 | kfree(AH_SKB_CB(skb)->tmp); | ||
329 | xfrm_output_resume(skb, err); | ||
330 | } | ||
331 | |||
221 | static int ah6_output(struct xfrm_state *x, struct sk_buff *skb) | 332 | static int ah6_output(struct xfrm_state *x, struct sk_buff *skb) |
222 | { | 333 | { |
223 | int err; | 334 | int err; |
335 | int nfrags; | ||
224 | int extlen; | 336 | int extlen; |
337 | u8 *iph_base; | ||
338 | u8 *icv; | ||
339 | u8 nexthdr; | ||
340 | struct sk_buff *trailer; | ||
341 | struct crypto_ahash *ahash; | ||
342 | struct ahash_request *req; | ||
343 | struct scatterlist *sg; | ||
225 | struct ipv6hdr *top_iph; | 344 | struct ipv6hdr *top_iph; |
226 | struct ip_auth_hdr *ah; | 345 | struct ip_auth_hdr *ah; |
227 | struct ah_data *ahp; | 346 | struct ah_data *ahp; |
228 | u8 nexthdr; | 347 | struct tmp_ext *iph_ext; |
229 | char tmp_base[8]; | 348 | |
230 | struct { | 349 | ahp = x->data; |
231 | #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) | 350 | ahash = ahp->ahash; |
232 | struct in6_addr saddr; | 351 | |
233 | #endif | 352 | if ((err = skb_cow_data(skb, 0, &trailer)) < 0) |
234 | struct in6_addr daddr; | 353 | goto out; |
235 | char hdrs[0]; | 354 | nfrags = err; |
236 | } *tmp_ext; | ||
237 | 355 | ||
238 | skb_push(skb, -skb_network_offset(skb)); | 356 | skb_push(skb, -skb_network_offset(skb)); |
357 | extlen = skb_network_header_len(skb) - sizeof(struct ipv6hdr); | ||
358 | if (extlen) | ||
359 | extlen += sizeof(*iph_ext); | ||
360 | |||
361 | err = -ENOMEM; | ||
362 | iph_base = ah_alloc_tmp(ahash, nfrags, IPV6HDR_BASELEN + extlen); | ||
363 | if (!iph_base) | ||
364 | goto out; | ||
365 | |||
366 | iph_ext = ah_tmp_ext(iph_base); | ||
367 | icv = ah_tmp_icv(ahash, iph_ext, extlen); | ||
368 | req = ah_tmp_req(ahash, icv); | ||
369 | sg = ah_req_sg(ahash, req); | ||
370 | |||
371 | ah = ip_auth_hdr(skb); | ||
372 | memset(ah->auth_data, 0, ahp->icv_trunc_len); | ||
373 | |||
239 | top_iph = ipv6_hdr(skb); | 374 | top_iph = ipv6_hdr(skb); |
240 | top_iph->payload_len = htons(skb->len - sizeof(*top_iph)); | 375 | top_iph->payload_len = htons(skb->len - sizeof(*top_iph)); |
241 | 376 | ||
@@ -245,31 +380,22 @@ static int ah6_output(struct xfrm_state *x, struct sk_buff *skb) | |||
245 | /* When there are no extension headers, we only need to save the first | 380 | /* When there are no extension headers, we only need to save the first |
246 | * 8 bytes of the base IP header. | 381 | * 8 bytes of the base IP header. |
247 | */ | 382 | */ |
248 | memcpy(tmp_base, top_iph, sizeof(tmp_base)); | 383 | memcpy(iph_base, top_iph, IPV6HDR_BASELEN); |
249 | 384 | ||
250 | tmp_ext = NULL; | ||
251 | extlen = skb_transport_offset(skb) - sizeof(struct ipv6hdr); | ||
252 | if (extlen) { | 385 | if (extlen) { |
253 | extlen += sizeof(*tmp_ext); | ||
254 | tmp_ext = kmalloc(extlen, GFP_ATOMIC); | ||
255 | if (!tmp_ext) { | ||
256 | err = -ENOMEM; | ||
257 | goto error; | ||
258 | } | ||
259 | #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) | 386 | #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) |
260 | memcpy(tmp_ext, &top_iph->saddr, extlen); | 387 | memcpy(iph_ext, &top_iph->saddr, extlen); |
261 | #else | 388 | #else |
262 | memcpy(tmp_ext, &top_iph->daddr, extlen); | 389 | memcpy(iph_ext, &top_iph->daddr, extlen); |
263 | #endif | 390 | #endif |
264 | err = ipv6_clear_mutable_options(top_iph, | 391 | err = ipv6_clear_mutable_options(top_iph, |
265 | extlen - sizeof(*tmp_ext) + | 392 | extlen - sizeof(*iph_ext) + |
266 | sizeof(*top_iph), | 393 | sizeof(*top_iph), |
267 | XFRM_POLICY_OUT); | 394 | XFRM_POLICY_OUT); |
268 | if (err) | 395 | if (err) |
269 | goto error_free_iph; | 396 | goto out_free; |
270 | } | 397 | } |
271 | 398 | ||
272 | ah = ip_auth_hdr(skb); | ||
273 | ah->nexthdr = nexthdr; | 399 | ah->nexthdr = nexthdr; |
274 | 400 | ||
275 | top_iph->priority = 0; | 401 | top_iph->priority = 0; |
@@ -278,36 +404,80 @@ static int ah6_output(struct xfrm_state *x, struct sk_buff *skb) | |||
278 | top_iph->flow_lbl[2] = 0; | 404 | top_iph->flow_lbl[2] = 0; |
279 | top_iph->hop_limit = 0; | 405 | top_iph->hop_limit = 0; |
280 | 406 | ||
281 | ahp = x->data; | ||
282 | ah->hdrlen = (XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2; | 407 | ah->hdrlen = (XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2; |
283 | 408 | ||
284 | ah->reserved = 0; | 409 | ah->reserved = 0; |
285 | ah->spi = x->id.spi; | 410 | ah->spi = x->id.spi; |
286 | ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output); | 411 | ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output); |
287 | 412 | ||
288 | spin_lock_bh(&x->lock); | 413 | sg_init_table(sg, nfrags); |
289 | err = ah_mac_digest(ahp, skb, ah->auth_data); | 414 | skb_to_sgvec(skb, sg, 0, skb->len); |
290 | memcpy(ah->auth_data, ahp->work_icv, ahp->icv_trunc_len); | ||
291 | spin_unlock_bh(&x->lock); | ||
292 | 415 | ||
293 | if (err) | 416 | ahash_request_set_crypt(req, sg, icv, skb->len); |
294 | goto error_free_iph; | 417 | ahash_request_set_callback(req, 0, ah6_output_done, skb); |
418 | |||
419 | AH_SKB_CB(skb)->tmp = iph_base; | ||
295 | 420 | ||
296 | memcpy(top_iph, tmp_base, sizeof(tmp_base)); | 421 | err = crypto_ahash_digest(req); |
297 | if (tmp_ext) { | 422 | if (err) { |
423 | if (err == -EINPROGRESS) | ||
424 | goto out; | ||
425 | |||
426 | if (err == -EBUSY) | ||
427 | err = NET_XMIT_DROP; | ||
428 | goto out_free; | ||
429 | } | ||
430 | |||
431 | memcpy(ah->auth_data, icv, ahp->icv_trunc_len); | ||
432 | memcpy(top_iph, iph_base, IPV6HDR_BASELEN); | ||
433 | |||
434 | if (extlen) { | ||
298 | #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) | 435 | #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) |
299 | memcpy(&top_iph->saddr, tmp_ext, extlen); | 436 | memcpy(&top_iph->saddr, iph_ext, extlen); |
300 | #else | 437 | #else |
301 | memcpy(&top_iph->daddr, tmp_ext, extlen); | 438 | memcpy(&top_iph->daddr, iph_ext, extlen); |
302 | #endif | 439 | #endif |
303 | error_free_iph: | ||
304 | kfree(tmp_ext); | ||
305 | } | 440 | } |
306 | 441 | ||
307 | error: | 442 | out_free: |
443 | kfree(iph_base); | ||
444 | out: | ||
308 | return err; | 445 | return err; |
309 | } | 446 | } |
310 | 447 | ||
448 | static void ah6_input_done(struct crypto_async_request *base, int err) | ||
449 | { | ||
450 | u8 *auth_data; | ||
451 | u8 *icv; | ||
452 | u8 *work_iph; | ||
453 | struct sk_buff *skb = base->data; | ||
454 | struct xfrm_state *x = xfrm_input_state(skb); | ||
455 | struct ah_data *ahp = x->data; | ||
456 | struct ip_auth_hdr *ah = ip_auth_hdr(skb); | ||
457 | int hdr_len = skb_network_header_len(skb); | ||
458 | int ah_hlen = (ah->hdrlen + 2) << 2; | ||
459 | |||
460 | work_iph = AH_SKB_CB(skb)->tmp; | ||
461 | auth_data = ah_tmp_auth(work_iph, hdr_len); | ||
462 | icv = ah_tmp_icv(ahp->ahash, auth_data, ahp->icv_trunc_len); | ||
463 | |||
464 | err = memcmp(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG: 0; | ||
465 | if (err) | ||
466 | goto out; | ||
467 | |||
468 | skb->network_header += ah_hlen; | ||
469 | memcpy(skb_network_header(skb), work_iph, hdr_len); | ||
470 | __skb_pull(skb, ah_hlen + hdr_len); | ||
471 | skb_set_transport_header(skb, -hdr_len); | ||
472 | |||
473 | err = ah->nexthdr; | ||
474 | out: | ||
475 | kfree(AH_SKB_CB(skb)->tmp); | ||
476 | xfrm_input_resume(skb, err); | ||
477 | } | ||
478 | |||
479 | |||
480 | |||
311 | static int ah6_input(struct xfrm_state *x, struct sk_buff *skb) | 481 | static int ah6_input(struct xfrm_state *x, struct sk_buff *skb) |
312 | { | 482 | { |
313 | /* | 483 | /* |
@@ -325,14 +495,21 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb) | |||
325 | * There is offset of AH before IPv6 header after the process. | 495 | * There is offset of AH before IPv6 header after the process. |
326 | */ | 496 | */ |
327 | 497 | ||
498 | u8 *auth_data; | ||
499 | u8 *icv; | ||
500 | u8 *work_iph; | ||
501 | struct sk_buff *trailer; | ||
502 | struct crypto_ahash *ahash; | ||
503 | struct ahash_request *req; | ||
504 | struct scatterlist *sg; | ||
328 | struct ip_auth_hdr *ah; | 505 | struct ip_auth_hdr *ah; |
329 | struct ipv6hdr *ip6h; | 506 | struct ipv6hdr *ip6h; |
330 | struct ah_data *ahp; | 507 | struct ah_data *ahp; |
331 | unsigned char *tmp_hdr = NULL; | ||
332 | u16 hdr_len; | 508 | u16 hdr_len; |
333 | u16 ah_hlen; | 509 | u16 ah_hlen; |
334 | int nexthdr; | 510 | int nexthdr; |
335 | int err = -EINVAL; | 511 | int nfrags; |
512 | int err = -ENOMEM; | ||
336 | 513 | ||
337 | if (!pskb_may_pull(skb, sizeof(struct ip_auth_hdr))) | 514 | if (!pskb_may_pull(skb, sizeof(struct ip_auth_hdr))) |
338 | goto out; | 515 | goto out; |
@@ -345,9 +522,11 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb) | |||
345 | 522 | ||
346 | skb->ip_summed = CHECKSUM_NONE; | 523 | skb->ip_summed = CHECKSUM_NONE; |
347 | 524 | ||
348 | hdr_len = skb->data - skb_network_header(skb); | 525 | hdr_len = skb_network_header_len(skb); |
349 | ah = (struct ip_auth_hdr *)skb->data; | 526 | ah = (struct ip_auth_hdr *)skb->data; |
350 | ahp = x->data; | 527 | ahp = x->data; |
528 | ahash = ahp->ahash; | ||
529 | |||
351 | nexthdr = ah->nexthdr; | 530 | nexthdr = ah->nexthdr; |
352 | ah_hlen = (ah->hdrlen + 2) << 2; | 531 | ah_hlen = (ah->hdrlen + 2) << 2; |
353 | 532 | ||
@@ -358,48 +537,67 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb) | |||
358 | if (!pskb_may_pull(skb, ah_hlen)) | 537 | if (!pskb_may_pull(skb, ah_hlen)) |
359 | goto out; | 538 | goto out; |
360 | 539 | ||
361 | tmp_hdr = kmemdup(skb_network_header(skb), hdr_len, GFP_ATOMIC); | ||
362 | if (!tmp_hdr) | ||
363 | goto out; | ||
364 | ip6h = ipv6_hdr(skb); | 540 | ip6h = ipv6_hdr(skb); |
541 | |||
542 | skb_push(skb, hdr_len); | ||
543 | |||
544 | if ((err = skb_cow_data(skb, 0, &trailer)) < 0) | ||
545 | goto out; | ||
546 | nfrags = err; | ||
547 | |||
548 | work_iph = ah_alloc_tmp(ahash, nfrags, hdr_len + ahp->icv_trunc_len); | ||
549 | if (!work_iph) | ||
550 | goto out; | ||
551 | |||
552 | auth_data = ah_tmp_auth(work_iph, hdr_len); | ||
553 | icv = ah_tmp_icv(ahash, auth_data, ahp->icv_trunc_len); | ||
554 | req = ah_tmp_req(ahash, icv); | ||
555 | sg = ah_req_sg(ahash, req); | ||
556 | |||
557 | memcpy(work_iph, ip6h, hdr_len); | ||
558 | memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len); | ||
559 | memset(ah->auth_data, 0, ahp->icv_trunc_len); | ||
560 | |||
365 | if (ipv6_clear_mutable_options(ip6h, hdr_len, XFRM_POLICY_IN)) | 561 | if (ipv6_clear_mutable_options(ip6h, hdr_len, XFRM_POLICY_IN)) |
366 | goto free_out; | 562 | goto out_free; |
563 | |||
367 | ip6h->priority = 0; | 564 | ip6h->priority = 0; |
368 | ip6h->flow_lbl[0] = 0; | 565 | ip6h->flow_lbl[0] = 0; |
369 | ip6h->flow_lbl[1] = 0; | 566 | ip6h->flow_lbl[1] = 0; |
370 | ip6h->flow_lbl[2] = 0; | 567 | ip6h->flow_lbl[2] = 0; |
371 | ip6h->hop_limit = 0; | 568 | ip6h->hop_limit = 0; |
372 | 569 | ||
373 | spin_lock(&x->lock); | 570 | sg_init_table(sg, nfrags); |
374 | { | 571 | skb_to_sgvec(skb, sg, 0, skb->len); |
375 | u8 auth_data[MAX_AH_AUTH_LEN]; | ||
376 | 572 | ||
377 | memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len); | 573 | ahash_request_set_crypt(req, sg, icv, skb->len); |
378 | memset(ah->auth_data, 0, ahp->icv_trunc_len); | 574 | ahash_request_set_callback(req, 0, ah6_input_done, skb); |
379 | skb_push(skb, hdr_len); | 575 | |
380 | err = ah_mac_digest(ahp, skb, ah->auth_data); | 576 | AH_SKB_CB(skb)->tmp = work_iph; |
381 | if (err) | 577 | |
382 | goto unlock; | 578 | err = crypto_ahash_digest(req); |
383 | if (memcmp(ahp->work_icv, auth_data, ahp->icv_trunc_len)) | 579 | if (err) { |
384 | err = -EBADMSG; | 580 | if (err == -EINPROGRESS) |
581 | goto out; | ||
582 | |||
583 | if (err == -EBUSY) | ||
584 | err = NET_XMIT_DROP; | ||
585 | goto out_free; | ||
385 | } | 586 | } |
386 | unlock: | ||
387 | spin_unlock(&x->lock); | ||
388 | 587 | ||
588 | err = memcmp(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG: 0; | ||
389 | if (err) | 589 | if (err) |
390 | goto free_out; | 590 | goto out_free; |
391 | 591 | ||
392 | skb->network_header += ah_hlen; | 592 | skb->network_header += ah_hlen; |
393 | memcpy(skb_network_header(skb), tmp_hdr, hdr_len); | 593 | memcpy(skb_network_header(skb), work_iph, hdr_len); |
394 | skb->transport_header = skb->network_header; | 594 | skb->transport_header = skb->network_header; |
395 | __skb_pull(skb, ah_hlen + hdr_len); | 595 | __skb_pull(skb, ah_hlen + hdr_len); |
396 | 596 | ||
397 | kfree(tmp_hdr); | 597 | err = nexthdr; |
398 | 598 | ||
399 | return nexthdr; | 599 | out_free: |
400 | 600 | kfree(work_iph); | |
401 | free_out: | ||
402 | kfree(tmp_hdr); | ||
403 | out: | 601 | out: |
404 | return err; | 602 | return err; |
405 | } | 603 | } |
@@ -430,7 +628,7 @@ static int ah6_init_state(struct xfrm_state *x) | |||
430 | { | 628 | { |
431 | struct ah_data *ahp = NULL; | 629 | struct ah_data *ahp = NULL; |
432 | struct xfrm_algo_desc *aalg_desc; | 630 | struct xfrm_algo_desc *aalg_desc; |
433 | struct crypto_hash *tfm; | 631 | struct crypto_ahash *ahash; |
434 | 632 | ||
435 | if (!x->aalg) | 633 | if (!x->aalg) |
436 | goto error; | 634 | goto error; |
@@ -442,12 +640,12 @@ static int ah6_init_state(struct xfrm_state *x) | |||
442 | if (ahp == NULL) | 640 | if (ahp == NULL) |
443 | return -ENOMEM; | 641 | return -ENOMEM; |
444 | 642 | ||
445 | tfm = crypto_alloc_hash(x->aalg->alg_name, 0, CRYPTO_ALG_ASYNC); | 643 | ahash = crypto_alloc_ahash(x->aalg->alg_name, 0, 0); |
446 | if (IS_ERR(tfm)) | 644 | if (IS_ERR(ahash)) |
447 | goto error; | 645 | goto error; |
448 | 646 | ||
449 | ahp->tfm = tfm; | 647 | ahp->ahash = ahash; |
450 | if (crypto_hash_setkey(tfm, x->aalg->alg_key, | 648 | if (crypto_ahash_setkey(ahash, x->aalg->alg_key, |
451 | (x->aalg->alg_key_len + 7) / 8)) | 649 | (x->aalg->alg_key_len + 7) / 8)) |
452 | goto error; | 650 | goto error; |
453 | 651 | ||
@@ -461,9 +659,9 @@ static int ah6_init_state(struct xfrm_state *x) | |||
461 | BUG_ON(!aalg_desc); | 659 | BUG_ON(!aalg_desc); |
462 | 660 | ||
463 | if (aalg_desc->uinfo.auth.icv_fullbits/8 != | 661 | if (aalg_desc->uinfo.auth.icv_fullbits/8 != |
464 | crypto_hash_digestsize(tfm)) { | 662 | crypto_ahash_digestsize(ahash)) { |
465 | printk(KERN_INFO "AH: %s digestsize %u != %hu\n", | 663 | printk(KERN_INFO "AH: %s digestsize %u != %hu\n", |
466 | x->aalg->alg_name, crypto_hash_digestsize(tfm), | 664 | x->aalg->alg_name, crypto_ahash_digestsize(ahash), |
467 | aalg_desc->uinfo.auth.icv_fullbits/8); | 665 | aalg_desc->uinfo.auth.icv_fullbits/8); |
468 | goto error; | 666 | goto error; |
469 | } | 667 | } |
@@ -473,10 +671,6 @@ static int ah6_init_state(struct xfrm_state *x) | |||
473 | 671 | ||
474 | BUG_ON(ahp->icv_trunc_len > MAX_AH_AUTH_LEN); | 672 | BUG_ON(ahp->icv_trunc_len > MAX_AH_AUTH_LEN); |
475 | 673 | ||
476 | ahp->work_icv = kmalloc(ahp->icv_full_len, GFP_KERNEL); | ||
477 | if (!ahp->work_icv) | ||
478 | goto error; | ||
479 | |||
480 | x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) + | 674 | x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) + |
481 | ahp->icv_trunc_len); | 675 | ahp->icv_trunc_len); |
482 | switch (x->props.mode) { | 676 | switch (x->props.mode) { |
@@ -495,8 +689,7 @@ static int ah6_init_state(struct xfrm_state *x) | |||
495 | 689 | ||
496 | error: | 690 | error: |
497 | if (ahp) { | 691 | if (ahp) { |
498 | kfree(ahp->work_icv); | 692 | crypto_free_ahash(ahp->ahash); |
499 | crypto_free_hash(ahp->tfm); | ||
500 | kfree(ahp); | 693 | kfree(ahp); |
501 | } | 694 | } |
502 | return -EINVAL; | 695 | return -EINVAL; |
@@ -509,8 +702,7 @@ static void ah6_destroy(struct xfrm_state *x) | |||
509 | if (!ahp) | 702 | if (!ahp) |
510 | return; | 703 | return; |
511 | 704 | ||
512 | kfree(ahp->work_icv); | 705 | crypto_free_ahash(ahp->ahash); |
513 | crypto_free_hash(ahp->tfm); | ||
514 | kfree(ahp); | 706 | kfree(ahp); |
515 | } | 707 | } |
516 | 708 | ||
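
The ah6.c rewrite above drops the synchronous crypto_hash calls done under x->lock and moves AH to the asynchronous ahash interface: the saved IPv6 header, ICV, ahash_request and scatterlist are carved out of one kmalloc'd buffer kept in skb->cb, and when crypto_ahash_digest() returns -EINPROGRESS the remaining work happens in ah6_output_done()/ah6_input_done(). A reduced sketch of the submit/complete flow, with placeholder names (digest_skb(), digest_done()) and a request allocated via ahash_request_alloc() instead of the hand-carved buffer the patch uses:

#include <crypto/hash.h>
#include <linux/scatterlist.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/kernel.h>

static void digest_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct ahash_request *req = ahash_request_cast(base);

	/* Async driver finished: the ICV is in the result buffer given to
	 * ahash_request_set_crypt(); free the request and resume packet
	 * processing (the patch resumes via xfrm_*_resume()). */
	pr_debug("digest done for skb %p, err %d\n", skb, err);
	ahash_request_free(req);
}

static int digest_skb(struct crypto_ahash *ahash, struct scatterlist *sg,
		      u8 *icv, unsigned int len, struct sk_buff *skb)
{
	struct ahash_request *req;
	int err;

	req = ahash_request_alloc(ahash, GFP_ATOMIC);
	if (!req)
		return -ENOMEM;

	ahash_request_set_callback(req, 0, digest_done, skb);
	ahash_request_set_crypt(req, sg, icv, len);

	err = crypto_ahash_digest(req);
	if (err == -EINPROGRESS)
		return 0;		/* digest_done() runs later */

	/* Synchronous completion or hard error: icv already holds the
	 * result (or err is set), so clean up here. */
	ahash_request_free(req);
	return err;
}
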
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c index 1ae58bec1de0..f1c74c8ef9de 100644 --- a/net/ipv6/anycast.c +++ b/net/ipv6/anycast.c | |||
@@ -404,13 +404,13 @@ int ipv6_chk_acast_addr(struct net *net, struct net_device *dev, | |||
404 | 404 | ||
405 | if (dev) | 405 | if (dev) |
406 | return ipv6_chk_acast_dev(dev, addr); | 406 | return ipv6_chk_acast_dev(dev, addr); |
407 | read_lock(&dev_base_lock); | 407 | rcu_read_lock(); |
408 | for_each_netdev(net, dev) | 408 | for_each_netdev_rcu(net, dev) |
409 | if (ipv6_chk_acast_dev(dev, addr)) { | 409 | if (ipv6_chk_acast_dev(dev, addr)) { |
410 | found = 1; | 410 | found = 1; |
411 | break; | 411 | break; |
412 | } | 412 | } |
413 | read_unlock(&dev_base_lock); | 413 | rcu_read_unlock(); |
414 | return found; | 414 | return found; |
415 | } | 415 | } |
416 | 416 | ||
@@ -431,9 +431,9 @@ static inline struct ifacaddr6 *ac6_get_first(struct seq_file *seq) | |||
431 | struct net *net = seq_file_net(seq); | 431 | struct net *net = seq_file_net(seq); |
432 | 432 | ||
433 | state->idev = NULL; | 433 | state->idev = NULL; |
434 | for_each_netdev(net, state->dev) { | 434 | for_each_netdev_rcu(net, state->dev) { |
435 | struct inet6_dev *idev; | 435 | struct inet6_dev *idev; |
436 | idev = in6_dev_get(state->dev); | 436 | idev = __in6_dev_get(state->dev); |
437 | if (!idev) | 437 | if (!idev) |
438 | continue; | 438 | continue; |
439 | read_lock_bh(&idev->lock); | 439 | read_lock_bh(&idev->lock); |
@@ -443,7 +443,6 @@ static inline struct ifacaddr6 *ac6_get_first(struct seq_file *seq) | |||
443 | break; | 443 | break; |
444 | } | 444 | } |
445 | read_unlock_bh(&idev->lock); | 445 | read_unlock_bh(&idev->lock); |
446 | in6_dev_put(idev); | ||
447 | } | 446 | } |
448 | return im; | 447 | return im; |
449 | } | 448 | } |
@@ -454,16 +453,15 @@ static struct ifacaddr6 *ac6_get_next(struct seq_file *seq, struct ifacaddr6 *im | |||
454 | 453 | ||
455 | im = im->aca_next; | 454 | im = im->aca_next; |
456 | while (!im) { | 455 | while (!im) { |
457 | if (likely(state->idev != NULL)) { | 456 | if (likely(state->idev != NULL)) |
458 | read_unlock_bh(&state->idev->lock); | 457 | read_unlock_bh(&state->idev->lock); |
459 | in6_dev_put(state->idev); | 458 | |
460 | } | 459 | state->dev = next_net_device_rcu(state->dev); |
461 | state->dev = next_net_device(state->dev); | ||
462 | if (!state->dev) { | 460 | if (!state->dev) { |
463 | state->idev = NULL; | 461 | state->idev = NULL; |
464 | break; | 462 | break; |
465 | } | 463 | } |
466 | state->idev = in6_dev_get(state->dev); | 464 | state->idev = __in6_dev_get(state->dev); |
467 | if (!state->idev) | 465 | if (!state->idev) |
468 | continue; | 466 | continue; |
469 | read_lock_bh(&state->idev->lock); | 467 | read_lock_bh(&state->idev->lock); |
@@ -482,29 +480,30 @@ static struct ifacaddr6 *ac6_get_idx(struct seq_file *seq, loff_t pos) | |||
482 | } | 480 | } |
483 | 481 | ||
484 | static void *ac6_seq_start(struct seq_file *seq, loff_t *pos) | 482 | static void *ac6_seq_start(struct seq_file *seq, loff_t *pos) |
485 | __acquires(dev_base_lock) | 483 | __acquires(RCU) |
486 | { | 484 | { |
487 | read_lock(&dev_base_lock); | 485 | rcu_read_lock(); |
488 | return ac6_get_idx(seq, *pos); | 486 | return ac6_get_idx(seq, *pos); |
489 | } | 487 | } |
490 | 488 | ||
491 | static void *ac6_seq_next(struct seq_file *seq, void *v, loff_t *pos) | 489 | static void *ac6_seq_next(struct seq_file *seq, void *v, loff_t *pos) |
492 | { | 490 | { |
493 | struct ifacaddr6 *im; | 491 | struct ifacaddr6 *im = ac6_get_next(seq, v); |
494 | im = ac6_get_next(seq, v); | 492 | |
495 | ++*pos; | 493 | ++*pos; |
496 | return im; | 494 | return im; |
497 | } | 495 | } |
498 | 496 | ||
499 | static void ac6_seq_stop(struct seq_file *seq, void *v) | 497 | static void ac6_seq_stop(struct seq_file *seq, void *v) |
500 | __releases(dev_base_lock) | 498 | __releases(RCU) |
501 | { | 499 | { |
502 | struct ac6_iter_state *state = ac6_seq_private(seq); | 500 | struct ac6_iter_state *state = ac6_seq_private(seq); |
501 | |||
503 | if (likely(state->idev != NULL)) { | 502 | if (likely(state->idev != NULL)) { |
504 | read_unlock_bh(&state->idev->lock); | 503 | read_unlock_bh(&state->idev->lock); |
505 | in6_dev_put(state->idev); | 504 | state->idev = NULL; |
506 | } | 505 | } |
507 | read_unlock(&dev_base_lock); | 506 | rcu_read_unlock(); |
508 | } | 507 | } |
509 | 508 | ||
510 | static int ac6_seq_show(struct seq_file *seq, void *v) | 509 | static int ac6_seq_show(struct seq_file *seq, void *v) |
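
The anycast.c changes swap the dev_base_lock walk in the /proc iterator for an RCU walk and switch from in6_dev_get()/in6_dev_put() to __in6_dev_get(), which takes no reference and is valid only inside the RCU section. A sketch of that iteration pattern, with a simple placeholder walker instead of the seq_file state machine the patch actually uses:

#include <linux/netdevice.h>
#include <net/addrconf.h>

/* Sketch: visit every inet6_dev in a namespace under RCU. */
static void walk_inet6_devs(struct net *net)
{
	struct net_device *dev;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		struct inet6_dev *idev = __in6_dev_get(dev);

		if (!idev)
			continue;
		read_lock_bh(&idev->lock);
		/* ... inspect idev->ac_list (anycast) or idev->mc_list ... */
		read_unlock_bh(&idev->lock);
		/* no in6_dev_put(): __in6_dev_get() took no reference */
	}
	rcu_read_unlock();
}
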
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index e2bdc6d83a43..e6f9cdf780fe 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c | |||
@@ -98,17 +98,15 @@ ipv4_connected: | |||
98 | if (err) | 98 | if (err) |
99 | goto out; | 99 | goto out; |
100 | 100 | ||
101 | ipv6_addr_set(&np->daddr, 0, 0, htonl(0x0000ffff), inet->daddr); | 101 | ipv6_addr_set_v4mapped(inet->inet_daddr, &np->daddr); |
102 | 102 | ||
103 | if (ipv6_addr_any(&np->saddr)) { | 103 | if (ipv6_addr_any(&np->saddr)) |
104 | ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000ffff), | 104 | ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr); |
105 | inet->saddr); | 105 | |
106 | } | 106 | if (ipv6_addr_any(&np->rcv_saddr)) |
107 | ipv6_addr_set_v4mapped(inet->inet_rcv_saddr, | ||
108 | &np->rcv_saddr); | ||
107 | 109 | ||
108 | if (ipv6_addr_any(&np->rcv_saddr)) { | ||
109 | ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000ffff), | ||
110 | inet->rcv_saddr); | ||
111 | } | ||
112 | goto out; | 110 | goto out; |
113 | } | 111 | } |
114 | 112 | ||
@@ -136,7 +134,7 @@ ipv4_connected: | |||
136 | ipv6_addr_copy(&np->daddr, daddr); | 134 | ipv6_addr_copy(&np->daddr, daddr); |
137 | np->flow_label = fl.fl6_flowlabel; | 135 | np->flow_label = fl.fl6_flowlabel; |
138 | 136 | ||
139 | inet->dport = usin->sin6_port; | 137 | inet->inet_dport = usin->sin6_port; |
140 | 138 | ||
141 | /* | 139 | /* |
142 | * Check for a route to destination and obtain the | 140 | * Check for a route to destination and obtain the |
@@ -147,8 +145,9 @@ ipv4_connected: | |||
147 | ipv6_addr_copy(&fl.fl6_dst, &np->daddr); | 145 | ipv6_addr_copy(&fl.fl6_dst, &np->daddr); |
148 | ipv6_addr_copy(&fl.fl6_src, &np->saddr); | 146 | ipv6_addr_copy(&fl.fl6_src, &np->saddr); |
149 | fl.oif = sk->sk_bound_dev_if; | 147 | fl.oif = sk->sk_bound_dev_if; |
150 | fl.fl_ip_dport = inet->dport; | 148 | fl.mark = sk->sk_mark; |
151 | fl.fl_ip_sport = inet->sport; | 149 | fl.fl_ip_dport = inet->inet_dport; |
150 | fl.fl_ip_sport = inet->inet_sport; | ||
152 | 151 | ||
153 | if (!fl.oif && (addr_type&IPV6_ADDR_MULTICAST)) | 152 | if (!fl.oif && (addr_type&IPV6_ADDR_MULTICAST)) |
154 | fl.oif = np->mcast_oif; | 153 | fl.oif = np->mcast_oif; |
@@ -190,7 +189,7 @@ ipv4_connected: | |||
190 | 189 | ||
191 | if (ipv6_addr_any(&np->rcv_saddr)) { | 190 | if (ipv6_addr_any(&np->rcv_saddr)) { |
192 | ipv6_addr_copy(&np->rcv_saddr, &fl.fl6_src); | 191 | ipv6_addr_copy(&np->rcv_saddr, &fl.fl6_src); |
193 | inet->rcv_saddr = LOOPBACK4_IPV6; | 192 | inet->inet_rcv_saddr = LOOPBACK4_IPV6; |
194 | } | 193 | } |
195 | 194 | ||
196 | ip6_dst_store(sk, dst, | 195 | ip6_dst_store(sk, dst, |
@@ -329,9 +328,8 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len) | |||
329 | if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL) | 328 | if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL) |
330 | sin->sin6_scope_id = IP6CB(skb)->iif; | 329 | sin->sin6_scope_id = IP6CB(skb)->iif; |
331 | } else { | 330 | } else { |
332 | ipv6_addr_set(&sin->sin6_addr, 0, 0, | 331 | ipv6_addr_set_v4mapped(*(__be32 *)(nh + serr->addr_offset), |
333 | htonl(0xffff), | 332 | &sin->sin6_addr); |
334 | *(__be32 *)(nh + serr->addr_offset)); | ||
335 | } | 333 | } |
336 | } | 334 | } |
337 | 335 | ||
@@ -351,8 +349,8 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len) | |||
351 | } else { | 349 | } else { |
352 | struct inet_sock *inet = inet_sk(sk); | 350 | struct inet_sock *inet = inet_sk(sk); |
353 | 351 | ||
354 | ipv6_addr_set(&sin->sin6_addr, 0, 0, | 352 | ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr, |
355 | htonl(0xffff), ip_hdr(skb)->saddr); | 353 | &sin->sin6_addr); |
356 | if (inet->cmsg_flags) | 354 | if (inet->cmsg_flags) |
357 | ip_cmsg_recv(msg, skb); | 355 | ip_cmsg_recv(msg, skb); |
358 | } | 356 | } |
@@ -539,12 +537,17 @@ int datagram_send_ctl(struct net *net, | |||
539 | 537 | ||
540 | addr_type = __ipv6_addr_type(&src_info->ipi6_addr); | 538 | addr_type = __ipv6_addr_type(&src_info->ipi6_addr); |
541 | 539 | ||
540 | rcu_read_lock(); | ||
542 | if (fl->oif) { | 541 | if (fl->oif) { |
543 | dev = dev_get_by_index(net, fl->oif); | 542 | dev = dev_get_by_index_rcu(net, fl->oif); |
544 | if (!dev) | 543 | if (!dev) { |
544 | rcu_read_unlock(); | ||
545 | return -ENODEV; | 545 | return -ENODEV; |
546 | } else if (addr_type & IPV6_ADDR_LINKLOCAL) | 546 | } |
547 | } else if (addr_type & IPV6_ADDR_LINKLOCAL) { | ||
548 | rcu_read_unlock(); | ||
547 | return -EINVAL; | 549 | return -EINVAL; |
550 | } | ||
548 | 551 | ||
549 | if (addr_type != IPV6_ADDR_ANY) { | 552 | if (addr_type != IPV6_ADDR_ANY) { |
550 | int strict = __ipv6_addr_src_scope(addr_type) <= IPV6_ADDR_SCOPE_LINKLOCAL; | 553 | int strict = __ipv6_addr_src_scope(addr_type) <= IPV6_ADDR_SCOPE_LINKLOCAL; |
@@ -555,8 +558,7 @@ int datagram_send_ctl(struct net *net, | |||
555 | ipv6_addr_copy(&fl->fl6_src, &src_info->ipi6_addr); | 558 | ipv6_addr_copy(&fl->fl6_src, &src_info->ipi6_addr); |
556 | } | 559 | } |
557 | 560 | ||
558 | if (dev) | 561 | rcu_read_unlock(); |
559 | dev_put(dev); | ||
560 | 562 | ||
561 | if (err) | 563 | if (err) |
562 | goto exit_f; | 564 | goto exit_f; |
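
Besides adding fl.mark and the RCU device lookup in datagram_send_ctl(), the datagram.c hunks replace the open-coded ::ffff:0:0/96 mapping with the ipv6_addr_set_v4mapped() helper. A tiny sketch of the equivalence the cleanup relies on (map_v4() is a placeholder wrapper):

#include <net/ipv6.h>

/* Sketch: both forms produce the IPv4-mapped address ::ffff:<v4>. */
static void map_v4(__be32 v4, struct in6_addr *v6)
{
	/* old, open-coded form removed by the patch:
	 *	ipv6_addr_set(v6, 0, 0, htonl(0x0000ffff), v4);
	 * new helper: */
	ipv6_addr_set_v4mapped(v4, v6);
}
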
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c index cc4797dd8325..3516e6fe2e56 100644 --- a/net/ipv6/inet6_connection_sock.c +++ b/net/ipv6/inet6_connection_sock.c | |||
@@ -132,7 +132,7 @@ void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr * uaddr) | |||
132 | 132 | ||
133 | sin6->sin6_family = AF_INET6; | 133 | sin6->sin6_family = AF_INET6; |
134 | ipv6_addr_copy(&sin6->sin6_addr, &np->daddr); | 134 | ipv6_addr_copy(&sin6->sin6_addr, &np->daddr); |
135 | sin6->sin6_port = inet_sk(sk)->dport; | 135 | sin6->sin6_port = inet_sk(sk)->inet_dport; |
136 | /* We do not store received flowlabel for TCP */ | 136 | /* We do not store received flowlabel for TCP */ |
137 | sin6->sin6_flowinfo = 0; | 137 | sin6->sin6_flowinfo = 0; |
138 | sin6->sin6_scope_id = 0; | 138 | sin6->sin6_scope_id = 0; |
@@ -168,8 +168,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie) | |||
168 | if (dst) { | 168 | if (dst) { |
169 | struct rt6_info *rt = (struct rt6_info *)dst; | 169 | struct rt6_info *rt = (struct rt6_info *)dst; |
170 | if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) { | 170 | if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) { |
171 | sk->sk_dst_cache = NULL; | 171 | __sk_dst_reset(sk); |
172 | dst_release(dst); | ||
173 | dst = NULL; | 172 | dst = NULL; |
174 | } | 173 | } |
175 | } | 174 | } |
@@ -194,8 +193,9 @@ int inet6_csk_xmit(struct sk_buff *skb, int ipfragok) | |||
194 | fl.fl6_flowlabel = np->flow_label; | 193 | fl.fl6_flowlabel = np->flow_label; |
195 | IP6_ECN_flow_xmit(sk, fl.fl6_flowlabel); | 194 | IP6_ECN_flow_xmit(sk, fl.fl6_flowlabel); |
196 | fl.oif = sk->sk_bound_dev_if; | 195 | fl.oif = sk->sk_bound_dev_if; |
197 | fl.fl_ip_sport = inet->sport; | 196 | fl.mark = sk->sk_mark; |
198 | fl.fl_ip_dport = inet->dport; | 197 | fl.fl_ip_sport = inet->inet_sport; |
198 | fl.fl_ip_dport = inet->inet_dport; | ||
199 | security_sk_classify_flow(sk, &fl); | 199 | security_sk_classify_flow(sk, &fl); |
200 | 200 | ||
201 | if (np->opt && np->opt->srcrt) { | 201 | if (np->opt && np->opt->srcrt) { |
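
Two small changes recur in this hunk and elsewhere in the series: the cached dst is dropped with __sk_dst_reset() instead of the open-coded NULL-and-release pair, and sk->sk_mark is copied into the flow key so fwmark-based policy routing sees the socket mark when the route is rebuilt. A sketch of the flow-key setup, using the 2.6.32-era struct flowi field names visible in the diff (fill_flow() itself is a placeholder):

#include <linux/string.h>
#include <net/flow.h>
#include <net/sock.h>
#include <net/inet_sock.h>

static void fill_flow(struct sock *sk, struct flowi *fl)
{
	struct inet_sock *inet = inet_sk(sk);

	memset(fl, 0, sizeof(*fl));
	fl->proto = sk->sk_protocol;
	fl->oif = sk->sk_bound_dev_if;
	fl->mark = sk->sk_mark;			/* new in this series */
	fl->fl_ip_sport = inet->inet_sport;	/* renamed inet_sock fields */
	fl->fl_ip_dport = inet->inet_dport;
}
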
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c index 1bcc3431859e..00c6a3e6cddf 100644 --- a/net/ipv6/inet6_hashtables.c +++ b/net/ipv6/inet6_hashtables.c | |||
@@ -73,7 +73,7 @@ struct sock *__inet6_lookup_established(struct net *net, | |||
73 | * have wildcards anyways. | 73 | * have wildcards anyways. |
74 | */ | 74 | */ |
75 | unsigned int hash = inet6_ehashfn(net, daddr, hnum, saddr, sport); | 75 | unsigned int hash = inet6_ehashfn(net, daddr, hnum, saddr, sport); |
76 | unsigned int slot = hash & (hashinfo->ehash_size - 1); | 76 | unsigned int slot = hash & hashinfo->ehash_mask; |
77 | struct inet_ehash_bucket *head = &hashinfo->ehash[slot]; | 77 | struct inet_ehash_bucket *head = &hashinfo->ehash[slot]; |
78 | 78 | ||
79 | 79 | ||
@@ -125,7 +125,7 @@ static int inline compute_score(struct sock *sk, struct net *net, | |||
125 | { | 125 | { |
126 | int score = -1; | 126 | int score = -1; |
127 | 127 | ||
128 | if (net_eq(sock_net(sk), net) && inet_sk(sk)->num == hnum && | 128 | if (net_eq(sock_net(sk), net) && inet_sk(sk)->inet_num == hnum && |
129 | sk->sk_family == PF_INET6) { | 129 | sk->sk_family == PF_INET6) { |
130 | const struct ipv6_pinfo *np = inet6_sk(sk); | 130 | const struct ipv6_pinfo *np = inet6_sk(sk); |
131 | 131 | ||
@@ -214,10 +214,10 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row, | |||
214 | const struct in6_addr *daddr = &np->rcv_saddr; | 214 | const struct in6_addr *daddr = &np->rcv_saddr; |
215 | const struct in6_addr *saddr = &np->daddr; | 215 | const struct in6_addr *saddr = &np->daddr; |
216 | const int dif = sk->sk_bound_dev_if; | 216 | const int dif = sk->sk_bound_dev_if; |
217 | const __portpair ports = INET_COMBINED_PORTS(inet->dport, lport); | 217 | const __portpair ports = INET_COMBINED_PORTS(inet->inet_dport, lport); |
218 | struct net *net = sock_net(sk); | 218 | struct net *net = sock_net(sk); |
219 | const unsigned int hash = inet6_ehashfn(net, daddr, lport, saddr, | 219 | const unsigned int hash = inet6_ehashfn(net, daddr, lport, saddr, |
220 | inet->dport); | 220 | inet->inet_dport); |
221 | struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash); | 221 | struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash); |
222 | spinlock_t *lock = inet_ehash_lockp(hinfo, hash); | 222 | spinlock_t *lock = inet_ehash_lockp(hinfo, hash); |
223 | struct sock *sk2; | 223 | struct sock *sk2; |
@@ -248,8 +248,8 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row, | |||
248 | unique: | 248 | unique: |
249 | /* Must record num and sport now. Otherwise we will see | 249 | /* Must record num and sport now. Otherwise we will see |
250 | * in hash table socket with a funny identity. */ | 250 | * in hash table socket with a funny identity. */ |
251 | inet->num = lport; | 251 | inet->inet_num = lport; |
252 | inet->sport = htons(lport); | 252 | inet->inet_sport = htons(lport); |
253 | WARN_ON(!sk_unhashed(sk)); | 253 | WARN_ON(!sk_unhashed(sk)); |
254 | __sk_nulls_add_node_rcu(sk, &head->chain); | 254 | __sk_nulls_add_node_rcu(sk, &head->chain); |
255 | sk->sk_hash = hash; | 255 | sk->sk_hash = hash; |
@@ -279,7 +279,7 @@ static inline u32 inet6_sk_port_offset(const struct sock *sk) | |||
279 | const struct ipv6_pinfo *np = inet6_sk(sk); | 279 | const struct ipv6_pinfo *np = inet6_sk(sk); |
280 | return secure_ipv6_port_ephemeral(np->rcv_saddr.s6_addr32, | 280 | return secure_ipv6_port_ephemeral(np->rcv_saddr.s6_addr32, |
281 | np->daddr.s6_addr32, | 281 | np->daddr.s6_addr32, |
282 | inet->dport); | 282 | inet->inet_dport); |
283 | } | 283 | } |
284 | 284 | ||
285 | int inet6_hash_connect(struct inet_timewait_death_row *death_row, | 285 | int inet6_hash_connect(struct inet_timewait_death_row *death_row, |
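
The established-hash lookup now masks with hashinfo->ehash_mask instead of recomputing ehash_size - 1; the table size is a power of two, so both expressions select the same slot. A one-line sketch of the slot computation (ehash_slot() is a placeholder name):

/* Sketch: map a hash value onto a power-of-two sized table. */
static inline unsigned int ehash_slot(unsigned int hash, unsigned int ehash_mask)
{
	return hash & ehash_mask;	/* previously: hash & (ehash_size - 1) */
}
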
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index c595bbe1ed99..e5c0f6bb8314 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c | |||
@@ -78,7 +78,7 @@ static void ip6_fb_tnl_dev_init(struct net_device *dev); | |||
78 | static void ip6_tnl_dev_init(struct net_device *dev); | 78 | static void ip6_tnl_dev_init(struct net_device *dev); |
79 | static void ip6_tnl_dev_setup(struct net_device *dev); | 79 | static void ip6_tnl_dev_setup(struct net_device *dev); |
80 | 80 | ||
81 | static int ip6_tnl_net_id; | 81 | static int ip6_tnl_net_id __read_mostly; |
82 | struct ip6_tnl_net { | 82 | struct ip6_tnl_net { |
83 | /* the IPv6 tunnel fallback device */ | 83 | /* the IPv6 tunnel fallback device */ |
84 | struct net_device *fb_tnl_dev; | 84 | struct net_device *fb_tnl_dev; |
@@ -88,8 +88,10 @@ struct ip6_tnl_net { | |||
88 | struct ip6_tnl **tnls[2]; | 88 | struct ip6_tnl **tnls[2]; |
89 | }; | 89 | }; |
90 | 90 | ||
91 | /* lock for the tunnel lists */ | 91 | /* |
92 | static DEFINE_RWLOCK(ip6_tnl_lock); | 92 | * Locking : hash tables are protected by RCU and a spinlock |
93 | */ | ||
94 | static DEFINE_SPINLOCK(ip6_tnl_lock); | ||
93 | 95 | ||
94 | static inline struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t) | 96 | static inline struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t) |
95 | { | 97 | { |
@@ -130,6 +132,9 @@ static inline void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst) | |||
130 | * else %NULL | 132 | * else %NULL |
131 | **/ | 133 | **/ |
132 | 134 | ||
135 | #define for_each_ip6_tunnel_rcu(start) \ | ||
136 | for (t = rcu_dereference(start); t; t = rcu_dereference(t->next)) | ||
137 | |||
133 | static struct ip6_tnl * | 138 | static struct ip6_tnl * |
134 | ip6_tnl_lookup(struct net *net, struct in6_addr *remote, struct in6_addr *local) | 139 | ip6_tnl_lookup(struct net *net, struct in6_addr *remote, struct in6_addr *local) |
135 | { | 140 | { |
@@ -138,13 +143,14 @@ ip6_tnl_lookup(struct net *net, struct in6_addr *remote, struct in6_addr *local) | |||
138 | struct ip6_tnl *t; | 143 | struct ip6_tnl *t; |
139 | struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); | 144 | struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); |
140 | 145 | ||
141 | for (t = ip6n->tnls_r_l[h0 ^ h1]; t; t = t->next) { | 146 | for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[h0 ^ h1]) { |
142 | if (ipv6_addr_equal(local, &t->parms.laddr) && | 147 | if (ipv6_addr_equal(local, &t->parms.laddr) && |
143 | ipv6_addr_equal(remote, &t->parms.raddr) && | 148 | ipv6_addr_equal(remote, &t->parms.raddr) && |
144 | (t->dev->flags & IFF_UP)) | 149 | (t->dev->flags & IFF_UP)) |
145 | return t; | 150 | return t; |
146 | } | 151 | } |
147 | if ((t = ip6n->tnls_wc[0]) != NULL && (t->dev->flags & IFF_UP)) | 152 | t = rcu_dereference(ip6n->tnls_wc[0]); |
153 | if (t && (t->dev->flags & IFF_UP)) | ||
148 | return t; | 154 | return t; |
149 | 155 | ||
150 | return NULL; | 156 | return NULL; |
@@ -186,10 +192,10 @@ ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t) | |||
186 | { | 192 | { |
187 | struct ip6_tnl **tp = ip6_tnl_bucket(ip6n, &t->parms); | 193 | struct ip6_tnl **tp = ip6_tnl_bucket(ip6n, &t->parms); |
188 | 194 | ||
195 | spin_lock_bh(&ip6_tnl_lock); | ||
189 | t->next = *tp; | 196 | t->next = *tp; |
190 | write_lock_bh(&ip6_tnl_lock); | 197 | rcu_assign_pointer(*tp, t); |
191 | *tp = t; | 198 | spin_unlock_bh(&ip6_tnl_lock); |
192 | write_unlock_bh(&ip6_tnl_lock); | ||
193 | } | 199 | } |
194 | 200 | ||
195 | /** | 201 | /** |
@@ -204,9 +210,9 @@ ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t) | |||
204 | 210 | ||
205 | for (tp = ip6_tnl_bucket(ip6n, &t->parms); *tp; tp = &(*tp)->next) { | 211 | for (tp = ip6_tnl_bucket(ip6n, &t->parms); *tp; tp = &(*tp)->next) { |
206 | if (t == *tp) { | 212 | if (t == *tp) { |
207 | write_lock_bh(&ip6_tnl_lock); | 213 | spin_lock_bh(&ip6_tnl_lock); |
208 | *tp = t->next; | 214 | *tp = t->next; |
209 | write_unlock_bh(&ip6_tnl_lock); | 215 | spin_unlock_bh(&ip6_tnl_lock); |
210 | break; | 216 | break; |
211 | } | 217 | } |
212 | } | 218 | } |
@@ -313,9 +319,9 @@ ip6_tnl_dev_uninit(struct net_device *dev) | |||
313 | struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); | 319 | struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); |
314 | 320 | ||
315 | if (dev == ip6n->fb_tnl_dev) { | 321 | if (dev == ip6n->fb_tnl_dev) { |
316 | write_lock_bh(&ip6_tnl_lock); | 322 | spin_lock_bh(&ip6_tnl_lock); |
317 | ip6n->tnls_wc[0] = NULL; | 323 | ip6n->tnls_wc[0] = NULL; |
318 | write_unlock_bh(&ip6_tnl_lock); | 324 | spin_unlock_bh(&ip6_tnl_lock); |
319 | } else { | 325 | } else { |
320 | ip6_tnl_unlink(ip6n, t); | 326 | ip6_tnl_unlink(ip6n, t); |
321 | } | 327 | } |
@@ -409,7 +415,7 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt, | |||
409 | in trouble since we might need the source address for further | 415 | in trouble since we might need the source address for further |
410 | processing of the error. */ | 416 | processing of the error. */ |
411 | 417 | ||
412 | read_lock(&ip6_tnl_lock); | 418 | rcu_read_lock(); |
413 | if ((t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->daddr, | 419 | if ((t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->daddr, |
414 | &ipv6h->saddr)) == NULL) | 420 | &ipv6h->saddr)) == NULL) |
415 | goto out; | 421 | goto out; |
@@ -482,7 +488,7 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt, | |||
482 | *msg = rel_msg; | 488 | *msg = rel_msg; |
483 | 489 | ||
484 | out: | 490 | out: |
485 | read_unlock(&ip6_tnl_lock); | 491 | rcu_read_unlock(); |
486 | return err; | 492 | return err; |
487 | } | 493 | } |
488 | 494 | ||
@@ -652,6 +658,7 @@ static void ip6ip6_dscp_ecn_decapsulate(struct ip6_tnl *t, | |||
652 | IP6_ECN_set_ce(ipv6_hdr(skb)); | 658 | IP6_ECN_set_ce(ipv6_hdr(skb)); |
653 | } | 659 | } |
654 | 660 | ||
661 | /* called with rcu_read_lock() */ | ||
655 | static inline int ip6_tnl_rcv_ctl(struct ip6_tnl *t) | 662 | static inline int ip6_tnl_rcv_ctl(struct ip6_tnl *t) |
656 | { | 663 | { |
657 | struct ip6_tnl_parm *p = &t->parms; | 664 | struct ip6_tnl_parm *p = &t->parms; |
@@ -662,15 +669,13 @@ static inline int ip6_tnl_rcv_ctl(struct ip6_tnl *t) | |||
662 | struct net_device *ldev = NULL; | 669 | struct net_device *ldev = NULL; |
663 | 670 | ||
664 | if (p->link) | 671 | if (p->link) |
665 | ldev = dev_get_by_index(net, p->link); | 672 | ldev = dev_get_by_index_rcu(net, p->link); |
666 | 673 | ||
667 | if ((ipv6_addr_is_multicast(&p->laddr) || | 674 | if ((ipv6_addr_is_multicast(&p->laddr) || |
668 | likely(ipv6_chk_addr(net, &p->laddr, ldev, 0))) && | 675 | likely(ipv6_chk_addr(net, &p->laddr, ldev, 0))) && |
669 | likely(!ipv6_chk_addr(net, &p->raddr, NULL, 0))) | 676 | likely(!ipv6_chk_addr(net, &p->raddr, NULL, 0))) |
670 | ret = 1; | 677 | ret = 1; |
671 | 678 | ||
672 | if (ldev) | ||
673 | dev_put(ldev); | ||
674 | } | 679 | } |
675 | return ret; | 680 | return ret; |
676 | } | 681 | } |
@@ -693,23 +698,23 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol, | |||
693 | struct ip6_tnl *t; | 698 | struct ip6_tnl *t; |
694 | struct ipv6hdr *ipv6h = ipv6_hdr(skb); | 699 | struct ipv6hdr *ipv6h = ipv6_hdr(skb); |
695 | 700 | ||
696 | read_lock(&ip6_tnl_lock); | 701 | rcu_read_lock(); |
697 | 702 | ||
698 | if ((t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr, | 703 | if ((t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr, |
699 | &ipv6h->daddr)) != NULL) { | 704 | &ipv6h->daddr)) != NULL) { |
700 | if (t->parms.proto != ipproto && t->parms.proto != 0) { | 705 | if (t->parms.proto != ipproto && t->parms.proto != 0) { |
701 | read_unlock(&ip6_tnl_lock); | 706 | rcu_read_unlock(); |
702 | goto discard; | 707 | goto discard; |
703 | } | 708 | } |
704 | 709 | ||
705 | if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) { | 710 | if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) { |
706 | read_unlock(&ip6_tnl_lock); | 711 | rcu_read_unlock(); |
707 | goto discard; | 712 | goto discard; |
708 | } | 713 | } |
709 | 714 | ||
710 | if (!ip6_tnl_rcv_ctl(t)) { | 715 | if (!ip6_tnl_rcv_ctl(t)) { |
711 | t->dev->stats.rx_dropped++; | 716 | t->dev->stats.rx_dropped++; |
712 | read_unlock(&ip6_tnl_lock); | 717 | rcu_read_unlock(); |
713 | goto discard; | 718 | goto discard; |
714 | } | 719 | } |
715 | secpath_reset(skb); | 720 | secpath_reset(skb); |
@@ -727,10 +732,10 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol, | |||
727 | t->dev->stats.rx_packets++; | 732 | t->dev->stats.rx_packets++; |
728 | t->dev->stats.rx_bytes += skb->len; | 733 | t->dev->stats.rx_bytes += skb->len; |
729 | netif_rx(skb); | 734 | netif_rx(skb); |
730 | read_unlock(&ip6_tnl_lock); | 735 | rcu_read_unlock(); |
731 | return 0; | 736 | return 0; |
732 | } | 737 | } |
733 | read_unlock(&ip6_tnl_lock); | 738 | rcu_read_unlock(); |
734 | return 1; | 739 | return 1; |
735 | 740 | ||
736 | discard: | 741 | discard: |
@@ -798,8 +803,9 @@ static inline int ip6_tnl_xmit_ctl(struct ip6_tnl *t) | |||
798 | if (p->flags & IP6_TNL_F_CAP_XMIT) { | 803 | if (p->flags & IP6_TNL_F_CAP_XMIT) { |
799 | struct net_device *ldev = NULL; | 804 | struct net_device *ldev = NULL; |
800 | 805 | ||
806 | rcu_read_lock(); | ||
801 | if (p->link) | 807 | if (p->link) |
802 | ldev = dev_get_by_index(net, p->link); | 808 | ldev = dev_get_by_index_rcu(net, p->link); |
803 | 809 | ||
804 | if (unlikely(!ipv6_chk_addr(net, &p->laddr, ldev, 0))) | 810 | if (unlikely(!ipv6_chk_addr(net, &p->laddr, ldev, 0))) |
805 | printk(KERN_WARNING | 811 | printk(KERN_WARNING |
@@ -813,8 +819,7 @@ static inline int ip6_tnl_xmit_ctl(struct ip6_tnl *t) | |||
813 | p->name); | 819 | p->name); |
814 | else | 820 | else |
815 | ret = 1; | 821 | ret = 1; |
816 | if (ldev) | 822 | rcu_read_unlock(); |
817 | dev_put(ldev); | ||
818 | } | 823 | } |
819 | return ret; | 824 | return ret; |
820 | } | 825 | } |
@@ -1387,14 +1392,19 @@ static void ip6_tnl_destroy_tunnels(struct ip6_tnl_net *ip6n) | |||
1387 | { | 1392 | { |
1388 | int h; | 1393 | int h; |
1389 | struct ip6_tnl *t; | 1394 | struct ip6_tnl *t; |
1395 | LIST_HEAD(list); | ||
1390 | 1396 | ||
1391 | for (h = 0; h < HASH_SIZE; h++) { | 1397 | for (h = 0; h < HASH_SIZE; h++) { |
1392 | while ((t = ip6n->tnls_r_l[h]) != NULL) | 1398 | t = ip6n->tnls_r_l[h]; |
1393 | unregister_netdevice(t->dev); | 1399 | while (t != NULL) { |
1400 | unregister_netdevice_queue(t->dev, &list); | ||
1401 | t = t->next; | ||
1402 | } | ||
1394 | } | 1403 | } |
1395 | 1404 | ||
1396 | t = ip6n->tnls_wc[0]; | 1405 | t = ip6n->tnls_wc[0]; |
1397 | unregister_netdevice(t->dev); | 1406 | unregister_netdevice_queue(t->dev, &list); |
1407 | unregister_netdevice_many(&list); | ||
1398 | } | 1408 | } |
1399 | 1409 | ||
1400 | static int ip6_tnl_init_net(struct net *net) | 1410 | static int ip6_tnl_init_net(struct net *net) |
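
ip6_tunnel.c converts its hash chains from a reader/writer lock to RCU: lookups traverse with rcu_dereference() under rcu_read_lock(), link/unlink serialize on a spinlock and publish with rcu_assign_pointer(), and namespace teardown batches the device unregistrations. A sketch of the reader/writer split under those assumptions (tnl_match() and the chain head are placeholders, not from the patch):

#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/netdevice.h>
#include <linux/types.h>
#include <net/ip6_tunnel.h>

static DEFINE_SPINLOCK(tnl_lock);	/* writers only */

/* placeholder match predicate */
static bool tnl_match(struct ip6_tnl *t)
{
	return t->dev->flags & IFF_UP;
}

/* reader: caller holds rcu_read_lock() */
static struct ip6_tnl *tnl_lookup(struct ip6_tnl **head)
{
	struct ip6_tnl *t;

	for (t = rcu_dereference(*head); t; t = rcu_dereference(t->next))
		if (tnl_match(t))
			return t;
	return NULL;
}

/* writer: publish a fully initialised tunnel at the head of the chain */
static void tnl_link(struct ip6_tnl **head, struct ip6_tnl *t)
{
	spin_lock_bh(&tnl_lock);
	t->next = *head;
	rcu_assign_pointer(*head, t);
	spin_unlock_bh(&tnl_lock);
}
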
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 716153941fc4..52e0f74fdfe0 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c | |||
@@ -477,7 +477,7 @@ failure: | |||
477 | * Delete a VIF entry | 477 | * Delete a VIF entry |
478 | */ | 478 | */ |
479 | 479 | ||
480 | static int mif6_delete(struct net *net, int vifi) | 480 | static int mif6_delete(struct net *net, int vifi, struct list_head *head) |
481 | { | 481 | { |
482 | struct mif_device *v; | 482 | struct mif_device *v; |
483 | struct net_device *dev; | 483 | struct net_device *dev; |
@@ -519,7 +519,7 @@ static int mif6_delete(struct net *net, int vifi) | |||
519 | in6_dev->cnf.mc_forwarding--; | 519 | in6_dev->cnf.mc_forwarding--; |
520 | 520 | ||
521 | if (v->flags & MIFF_REGISTER) | 521 | if (v->flags & MIFF_REGISTER) |
522 | unregister_netdevice(dev); | 522 | unregister_netdevice_queue(dev, head); |
523 | 523 | ||
524 | dev_put(dev); | 524 | dev_put(dev); |
525 | return 0; | 525 | return 0; |
@@ -976,6 +976,7 @@ static int ip6mr_device_event(struct notifier_block *this, | |||
976 | struct net *net = dev_net(dev); | 976 | struct net *net = dev_net(dev); |
977 | struct mif_device *v; | 977 | struct mif_device *v; |
978 | int ct; | 978 | int ct; |
979 | LIST_HEAD(list); | ||
979 | 980 | ||
980 | if (event != NETDEV_UNREGISTER) | 981 | if (event != NETDEV_UNREGISTER) |
981 | return NOTIFY_DONE; | 982 | return NOTIFY_DONE; |
@@ -983,8 +984,10 @@ static int ip6mr_device_event(struct notifier_block *this, | |||
983 | v = &net->ipv6.vif6_table[0]; | 984 | v = &net->ipv6.vif6_table[0]; |
984 | for (ct = 0; ct < net->ipv6.maxvif; ct++, v++) { | 985 | for (ct = 0; ct < net->ipv6.maxvif; ct++, v++) { |
985 | if (v->dev == dev) | 986 | if (v->dev == dev) |
986 | mif6_delete(net, ct); | 987 | mif6_delete(net, ct, &list); |
987 | } | 988 | } |
989 | unregister_netdevice_many(&list); | ||
990 | |||
988 | return NOTIFY_DONE; | 991 | return NOTIFY_DONE; |
989 | } | 992 | } |
990 | 993 | ||
@@ -1188,14 +1191,16 @@ static int ip6mr_mfc_add(struct net *net, struct mf6cctl *mfc, int mrtsock) | |||
1188 | static void mroute_clean_tables(struct net *net) | 1191 | static void mroute_clean_tables(struct net *net) |
1189 | { | 1192 | { |
1190 | int i; | 1193 | int i; |
1194 | LIST_HEAD(list); | ||
1191 | 1195 | ||
1192 | /* | 1196 | /* |
1193 | * Shut down all active vif entries | 1197 | * Shut down all active vif entries |
1194 | */ | 1198 | */ |
1195 | for (i = 0; i < net->ipv6.maxvif; i++) { | 1199 | for (i = 0; i < net->ipv6.maxvif; i++) { |
1196 | if (!(net->ipv6.vif6_table[i].flags & VIFF_STATIC)) | 1200 | if (!(net->ipv6.vif6_table[i].flags & VIFF_STATIC)) |
1197 | mif6_delete(net, i); | 1201 | mif6_delete(net, i, &list); |
1198 | } | 1202 | } |
1203 | unregister_netdevice_many(&list); | ||
1199 | 1204 | ||
1200 | /* | 1205 | /* |
1201 | * Wipe the cache | 1206 | * Wipe the cache |
@@ -1297,7 +1302,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns | |||
1297 | switch (optname) { | 1302 | switch (optname) { |
1298 | case MRT6_INIT: | 1303 | case MRT6_INIT: |
1299 | if (sk->sk_type != SOCK_RAW || | 1304 | if (sk->sk_type != SOCK_RAW || |
1300 | inet_sk(sk)->num != IPPROTO_ICMPV6) | 1305 | inet_sk(sk)->inet_num != IPPROTO_ICMPV6) |
1301 | return -EOPNOTSUPP; | 1306 | return -EOPNOTSUPP; |
1302 | if (optlen < sizeof(int)) | 1307 | if (optlen < sizeof(int)) |
1303 | return -EINVAL; | 1308 | return -EINVAL; |
@@ -1325,7 +1330,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns | |||
1325 | if (copy_from_user(&mifi, optval, sizeof(mifi_t))) | 1330 | if (copy_from_user(&mifi, optval, sizeof(mifi_t))) |
1326 | return -EFAULT; | 1331 | return -EFAULT; |
1327 | rtnl_lock(); | 1332 | rtnl_lock(); |
1328 | ret = mif6_delete(net, mifi); | 1333 | ret = mif6_delete(net, mifi, NULL); |
1329 | rtnl_unlock(); | 1334 | rtnl_unlock(); |
1330 | return ret; | 1335 | return ret; |
1331 | 1336 | ||
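
mif6_delete() now takes an optional list_head: when a whole table or an unregistering device tears down several VIFs, each register-device is queued with unregister_netdevice_queue() and the list is flushed once with unregister_netdevice_many(), so the expensive parts of unregistration are shared across the batch. A sketch of that pattern (teardown_devs() is a placeholder; the caller is assumed to hold the RTNL lock):

#include <linux/netdevice.h>
#include <linux/list.h>

/* Sketch: batch several device unregistrations into one flush. */
static void teardown_devs(struct net_device *devs[], int n)
{
	LIST_HEAD(list);
	int i;

	for (i = 0; i < n; i++)
		unregister_netdevice_queue(devs[i], &list);	/* only queues */

	unregister_netdevice_many(&list);	/* single batched unregister */
}
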
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 4f7aaf6996a3..430454ee5ead 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c | |||
@@ -64,7 +64,7 @@ int ip6_ra_control(struct sock *sk, int sel) | |||
64 | struct ip6_ra_chain *ra, *new_ra, **rap; | 64 | struct ip6_ra_chain *ra, *new_ra, **rap; |
65 | 65 | ||
66 | /* RA packet may be delivered ONLY to IPPROTO_RAW socket */ | 66 | /* RA packet may be delivered ONLY to IPPROTO_RAW socket */ |
67 | if (sk->sk_type != SOCK_RAW || inet_sk(sk)->num != IPPROTO_RAW) | 67 | if (sk->sk_type != SOCK_RAW || inet_sk(sk)->inet_num != IPPROTO_RAW) |
68 | return -ENOPROTOOPT; | 68 | return -ENOPROTOOPT; |
69 | 69 | ||
70 | new_ra = (sel>=0) ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL; | 70 | new_ra = (sel>=0) ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL; |
@@ -106,7 +106,7 @@ struct ipv6_txoptions *ipv6_update_options(struct sock *sk, | |||
106 | if (inet_sk(sk)->is_icsk) { | 106 | if (inet_sk(sk)->is_icsk) { |
107 | if (opt && | 107 | if (opt && |
108 | !((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) && | 108 | !((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) && |
109 | inet_sk(sk)->daddr != LOOPBACK4_IPV6) { | 109 | inet_sk(sk)->inet_daddr != LOOPBACK4_IPV6) { |
110 | struct inet_connection_sock *icsk = inet_csk(sk); | 110 | struct inet_connection_sock *icsk = inet_csk(sk); |
111 | icsk->icsk_ext_hdr_len = opt->opt_flen + opt->opt_nflen; | 111 | icsk->icsk_ext_hdr_len = opt->opt_flen + opt->opt_nflen; |
112 | icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie); | 112 | icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie); |
@@ -234,7 +234,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, | |||
234 | 234 | ||
235 | case IPV6_V6ONLY: | 235 | case IPV6_V6ONLY: |
236 | if (optlen < sizeof(int) || | 236 | if (optlen < sizeof(int) || |
237 | inet_sk(sk)->num) | 237 | inet_sk(sk)->inet_num) |
238 | goto e_inval; | 238 | goto e_inval; |
239 | np->ipv6only = valbool; | 239 | np->ipv6only = valbool; |
240 | retv = 0; | 240 | retv = 0; |
@@ -424,6 +424,7 @@ sticky_done: | |||
424 | 424 | ||
425 | fl.fl6_flowlabel = 0; | 425 | fl.fl6_flowlabel = 0; |
426 | fl.oif = sk->sk_bound_dev_if; | 426 | fl.oif = sk->sk_bound_dev_if; |
427 | fl.mark = sk->sk_mark; | ||
427 | 428 | ||
428 | if (optlen == 0) | 429 | if (optlen == 0) |
429 | goto update; | 430 | goto update; |
@@ -665,7 +666,7 @@ done: | |||
665 | case IPV6_MTU_DISCOVER: | 666 | case IPV6_MTU_DISCOVER: |
666 | if (optlen < sizeof(int)) | 667 | if (optlen < sizeof(int)) |
667 | goto e_inval; | 668 | goto e_inval; |
668 | if (val<0 || val>3) | 669 | if (val < IP_PMTUDISC_DONT || val > IP_PMTUDISC_PROBE) |
669 | goto e_inval; | 670 | goto e_inval; |
670 | np->pmtudisc = val; | 671 | np->pmtudisc = val; |
671 | retv = 0; | 672 | retv = 0; |
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index f9fcf690bd5d..1f9c44442e65 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c | |||
@@ -2375,9 +2375,9 @@ static inline struct ifmcaddr6 *igmp6_mc_get_first(struct seq_file *seq) | |||
2375 | struct net *net = seq_file_net(seq); | 2375 | struct net *net = seq_file_net(seq); |
2376 | 2376 | ||
2377 | state->idev = NULL; | 2377 | state->idev = NULL; |
2378 | for_each_netdev(net, state->dev) { | 2378 | for_each_netdev_rcu(net, state->dev) { |
2379 | struct inet6_dev *idev; | 2379 | struct inet6_dev *idev; |
2380 | idev = in6_dev_get(state->dev); | 2380 | idev = __in6_dev_get(state->dev); |
2381 | if (!idev) | 2381 | if (!idev) |
2382 | continue; | 2382 | continue; |
2383 | read_lock_bh(&idev->lock); | 2383 | read_lock_bh(&idev->lock); |
@@ -2387,7 +2387,6 @@ static inline struct ifmcaddr6 *igmp6_mc_get_first(struct seq_file *seq) | |||
2387 | break; | 2387 | break; |
2388 | } | 2388 | } |
2389 | read_unlock_bh(&idev->lock); | 2389 | read_unlock_bh(&idev->lock); |
2390 | in6_dev_put(idev); | ||
2391 | } | 2390 | } |
2392 | return im; | 2391 | return im; |
2393 | } | 2392 | } |
@@ -2398,16 +2397,15 @@ static struct ifmcaddr6 *igmp6_mc_get_next(struct seq_file *seq, struct ifmcaddr | |||
2398 | 2397 | ||
2399 | im = im->next; | 2398 | im = im->next; |
2400 | while (!im) { | 2399 | while (!im) { |
2401 | if (likely(state->idev != NULL)) { | 2400 | if (likely(state->idev != NULL)) |
2402 | read_unlock_bh(&state->idev->lock); | 2401 | read_unlock_bh(&state->idev->lock); |
2403 | in6_dev_put(state->idev); | 2402 | |
2404 | } | 2403 | state->dev = next_net_device_rcu(state->dev); |
2405 | state->dev = next_net_device(state->dev); | ||
2406 | if (!state->dev) { | 2404 | if (!state->dev) { |
2407 | state->idev = NULL; | 2405 | state->idev = NULL; |
2408 | break; | 2406 | break; |
2409 | } | 2407 | } |
2410 | state->idev = in6_dev_get(state->dev); | 2408 | state->idev = __in6_dev_get(state->dev); |
2411 | if (!state->idev) | 2409 | if (!state->idev) |
2412 | continue; | 2410 | continue; |
2413 | read_lock_bh(&state->idev->lock); | 2411 | read_lock_bh(&state->idev->lock); |
@@ -2426,31 +2424,31 @@ static struct ifmcaddr6 *igmp6_mc_get_idx(struct seq_file *seq, loff_t pos) | |||
2426 | } | 2424 | } |
2427 | 2425 | ||
2428 | static void *igmp6_mc_seq_start(struct seq_file *seq, loff_t *pos) | 2426 | static void *igmp6_mc_seq_start(struct seq_file *seq, loff_t *pos) |
2429 | __acquires(dev_base_lock) | 2427 | __acquires(RCU) |
2430 | { | 2428 | { |
2431 | read_lock(&dev_base_lock); | 2429 | rcu_read_lock(); |
2432 | return igmp6_mc_get_idx(seq, *pos); | 2430 | return igmp6_mc_get_idx(seq, *pos); |
2433 | } | 2431 | } |
2434 | 2432 | ||
2435 | static void *igmp6_mc_seq_next(struct seq_file *seq, void *v, loff_t *pos) | 2433 | static void *igmp6_mc_seq_next(struct seq_file *seq, void *v, loff_t *pos) |
2436 | { | 2434 | { |
2437 | struct ifmcaddr6 *im; | 2435 | struct ifmcaddr6 *im = igmp6_mc_get_next(seq, v); |
2438 | im = igmp6_mc_get_next(seq, v); | 2436 | |
2439 | ++*pos; | 2437 | ++*pos; |
2440 | return im; | 2438 | return im; |
2441 | } | 2439 | } |
2442 | 2440 | ||
2443 | static void igmp6_mc_seq_stop(struct seq_file *seq, void *v) | 2441 | static void igmp6_mc_seq_stop(struct seq_file *seq, void *v) |
2444 | __releases(dev_base_lock) | 2442 | __releases(RCU) |
2445 | { | 2443 | { |
2446 | struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq); | 2444 | struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq); |
2445 | |||
2447 | if (likely(state->idev != NULL)) { | 2446 | if (likely(state->idev != NULL)) { |
2448 | read_unlock_bh(&state->idev->lock); | 2447 | read_unlock_bh(&state->idev->lock); |
2449 | in6_dev_put(state->idev); | ||
2450 | state->idev = NULL; | 2448 | state->idev = NULL; |
2451 | } | 2449 | } |
2452 | state->dev = NULL; | 2450 | state->dev = NULL; |
2453 | read_unlock(&dev_base_lock); | 2451 | rcu_read_unlock(); |
2454 | } | 2452 | } |
2455 | 2453 | ||
2456 | static int igmp6_mc_seq_show(struct seq_file *seq, void *v) | 2454 | static int igmp6_mc_seq_show(struct seq_file *seq, void *v) |
@@ -2507,9 +2505,9 @@ static inline struct ip6_sf_list *igmp6_mcf_get_first(struct seq_file *seq) | |||
2507 | 2505 | ||
2508 | state->idev = NULL; | 2506 | state->idev = NULL; |
2509 | state->im = NULL; | 2507 | state->im = NULL; |
2510 | for_each_netdev(net, state->dev) { | 2508 | for_each_netdev_rcu(net, state->dev) { |
2511 | struct inet6_dev *idev; | 2509 | struct inet6_dev *idev; |
2512 | idev = in6_dev_get(state->dev); | 2510 | idev = __in6_dev_get(state->dev); |
2513 | if (unlikely(idev == NULL)) | 2511 | if (unlikely(idev == NULL)) |
2514 | continue; | 2512 | continue; |
2515 | read_lock_bh(&idev->lock); | 2513 | read_lock_bh(&idev->lock); |
@@ -2525,7 +2523,6 @@ static inline struct ip6_sf_list *igmp6_mcf_get_first(struct seq_file *seq) | |||
2525 | spin_unlock_bh(&im->mca_lock); | 2523 | spin_unlock_bh(&im->mca_lock); |
2526 | } | 2524 | } |
2527 | read_unlock_bh(&idev->lock); | 2525 | read_unlock_bh(&idev->lock); |
2528 | in6_dev_put(idev); | ||
2529 | } | 2526 | } |
2530 | return psf; | 2527 | return psf; |
2531 | } | 2528 | } |
@@ -2539,16 +2536,15 @@ static struct ip6_sf_list *igmp6_mcf_get_next(struct seq_file *seq, struct ip6_s | |||
2539 | spin_unlock_bh(&state->im->mca_lock); | 2536 | spin_unlock_bh(&state->im->mca_lock); |
2540 | state->im = state->im->next; | 2537 | state->im = state->im->next; |
2541 | while (!state->im) { | 2538 | while (!state->im) { |
2542 | if (likely(state->idev != NULL)) { | 2539 | if (likely(state->idev != NULL)) |
2543 | read_unlock_bh(&state->idev->lock); | 2540 | read_unlock_bh(&state->idev->lock); |
2544 | in6_dev_put(state->idev); | 2541 | |
2545 | } | 2542 | state->dev = next_net_device_rcu(state->dev); |
2546 | state->dev = next_net_device(state->dev); | ||
2547 | if (!state->dev) { | 2543 | if (!state->dev) { |
2548 | state->idev = NULL; | 2544 | state->idev = NULL; |
2549 | goto out; | 2545 | goto out; |
2550 | } | 2546 | } |
2551 | state->idev = in6_dev_get(state->dev); | 2547 | state->idev = __in6_dev_get(state->dev); |
2552 | if (!state->idev) | 2548 | if (!state->idev) |
2553 | continue; | 2549 | continue; |
2554 | read_lock_bh(&state->idev->lock); | 2550 | read_lock_bh(&state->idev->lock); |
@@ -2573,9 +2569,9 @@ static struct ip6_sf_list *igmp6_mcf_get_idx(struct seq_file *seq, loff_t pos) | |||
2573 | } | 2569 | } |
2574 | 2570 | ||
2575 | static void *igmp6_mcf_seq_start(struct seq_file *seq, loff_t *pos) | 2571 | static void *igmp6_mcf_seq_start(struct seq_file *seq, loff_t *pos) |
2576 | __acquires(dev_base_lock) | 2572 | __acquires(RCU) |
2577 | { | 2573 | { |
2578 | read_lock(&dev_base_lock); | 2574 | rcu_read_lock(); |
2579 | return *pos ? igmp6_mcf_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; | 2575 | return *pos ? igmp6_mcf_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; |
2580 | } | 2576 | } |
2581 | 2577 | ||
@@ -2591,7 +2587,7 @@ static void *igmp6_mcf_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
2591 | } | 2587 | } |
2592 | 2588 | ||
2593 | static void igmp6_mcf_seq_stop(struct seq_file *seq, void *v) | 2589 | static void igmp6_mcf_seq_stop(struct seq_file *seq, void *v) |
2594 | __releases(dev_base_lock) | 2590 | __releases(RCU) |
2595 | { | 2591 | { |
2596 | struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq); | 2592 | struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq); |
2597 | if (likely(state->im != NULL)) { | 2593 | if (likely(state->im != NULL)) { |
@@ -2600,11 +2596,10 @@ static void igmp6_mcf_seq_stop(struct seq_file *seq, void *v) | |||
2600 | } | 2596 | } |
2601 | if (likely(state->idev != NULL)) { | 2597 | if (likely(state->idev != NULL)) { |
2602 | read_unlock_bh(&state->idev->lock); | 2598 | read_unlock_bh(&state->idev->lock); |
2603 | in6_dev_put(state->idev); | ||
2604 | state->idev = NULL; | 2599 | state->idev = NULL; |
2605 | } | 2600 | } |
2606 | state->dev = NULL; | 2601 | state->dev = NULL; |
2607 | read_unlock(&dev_base_lock); | 2602 | rcu_read_unlock(); |
2608 | } | 2603 | } |
2609 | 2604 | ||
2610 | static int igmp6_mcf_seq_show(struct seq_file *seq, void *v) | 2605 | static int igmp6_mcf_seq_show(struct seq_file *seq, void *v) |
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index f74e4e2cdd06..3507cfe1e7a2 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c | |||
@@ -598,6 +598,7 @@ static void ndisc_send_na(struct net_device *dev, struct neighbour *neigh, | |||
598 | icmp6h.icmp6_solicited = solicited; | 598 | icmp6h.icmp6_solicited = solicited; |
599 | icmp6h.icmp6_override = override; | 599 | icmp6h.icmp6_override = override; |
600 | 600 | ||
601 | inc_opt |= ifp->idev->cnf.force_tllao; | ||
601 | __ndisc_send(dev, neigh, daddr, src_addr, | 602 | __ndisc_send(dev, neigh, daddr, src_addr, |
602 | &icmp6h, solicited_addr, | 603 | &icmp6h, solicited_addr, |
603 | inc_opt ? ND_OPT_TARGET_LL_ADDR : 0); | 604 | inc_opt ? ND_OPT_TARGET_LL_ADDR : 0); |
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 4f24570b0869..926ce8eeffaf 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c | |||
@@ -72,7 +72,7 @@ static struct sock *__raw_v6_lookup(struct net *net, struct sock *sk, | |||
72 | int is_multicast = ipv6_addr_is_multicast(loc_addr); | 72 | int is_multicast = ipv6_addr_is_multicast(loc_addr); |
73 | 73 | ||
74 | sk_for_each_from(sk, node) | 74 | sk_for_each_from(sk, node) |
75 | if (inet_sk(sk)->num == num) { | 75 | if (inet_sk(sk)->inet_num == num) { |
76 | struct ipv6_pinfo *np = inet6_sk(sk); | 76 | struct ipv6_pinfo *np = inet6_sk(sk); |
77 | 77 | ||
78 | if (!net_eq(sock_net(sk), net)) | 78 | if (!net_eq(sock_net(sk), net)) |
@@ -249,7 +249,7 @@ static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
249 | 249 | ||
250 | /* Raw sockets are IPv6 only */ | 250 | /* Raw sockets are IPv6 only */ |
251 | if (addr_type == IPV6_ADDR_MAPPED) | 251 | if (addr_type == IPV6_ADDR_MAPPED) |
252 | return(-EADDRNOTAVAIL); | 252 | return -EADDRNOTAVAIL; |
253 | 253 | ||
254 | lock_sock(sk); | 254 | lock_sock(sk); |
255 | 255 | ||
@@ -257,6 +257,7 @@ static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
257 | if (sk->sk_state != TCP_CLOSE) | 257 | if (sk->sk_state != TCP_CLOSE) |
258 | goto out; | 258 | goto out; |
259 | 259 | ||
260 | rcu_read_lock(); | ||
260 | /* Check if the address belongs to the host. */ | 261 | /* Check if the address belongs to the host. */ |
261 | if (addr_type != IPV6_ADDR_ANY) { | 262 | if (addr_type != IPV6_ADDR_ANY) { |
262 | struct net_device *dev = NULL; | 263 | struct net_device *dev = NULL; |
@@ -272,13 +273,13 @@ static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
272 | 273 | ||
273 | /* Binding to link-local address requires an interface */ | 274 | /* Binding to link-local address requires an interface */ |
274 | if (!sk->sk_bound_dev_if) | 275 | if (!sk->sk_bound_dev_if) |
275 | goto out; | 276 | goto out_unlock; |
276 | 277 | ||
277 | dev = dev_get_by_index(sock_net(sk), sk->sk_bound_dev_if); | 278 | err = -ENODEV; |
278 | if (!dev) { | 279 | dev = dev_get_by_index_rcu(sock_net(sk), |
279 | err = -ENODEV; | 280 | sk->sk_bound_dev_if); |
280 | goto out; | 281 | if (!dev) |
281 | } | 282 | goto out_unlock; |
282 | } | 283 | } |
283 | 284 | ||
284 | /* ipv4 addr of the socket is invalid. Only the | 285 | /* ipv4 addr of the socket is invalid. Only the |
@@ -289,20 +290,18 @@ static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
289 | err = -EADDRNOTAVAIL; | 290 | err = -EADDRNOTAVAIL; |
290 | if (!ipv6_chk_addr(sock_net(sk), &addr->sin6_addr, | 291 | if (!ipv6_chk_addr(sock_net(sk), &addr->sin6_addr, |
291 | dev, 0)) { | 292 | dev, 0)) { |
292 | if (dev) | 293 | goto out_unlock; |
293 | dev_put(dev); | ||
294 | goto out; | ||
295 | } | 294 | } |
296 | } | 295 | } |
297 | if (dev) | ||
298 | dev_put(dev); | ||
299 | } | 296 | } |
300 | 297 | ||
301 | inet->rcv_saddr = inet->saddr = v4addr; | 298 | inet->inet_rcv_saddr = inet->inet_saddr = v4addr; |
302 | ipv6_addr_copy(&np->rcv_saddr, &addr->sin6_addr); | 299 | ipv6_addr_copy(&np->rcv_saddr, &addr->sin6_addr); |
303 | if (!(addr_type & IPV6_ADDR_MULTICAST)) | 300 | if (!(addr_type & IPV6_ADDR_MULTICAST)) |
304 | ipv6_addr_copy(&np->saddr, &addr->sin6_addr); | 301 | ipv6_addr_copy(&np->saddr, &addr->sin6_addr); |
305 | err = 0; | 302 | err = 0; |
303 | out_unlock: | ||
304 | rcu_read_unlock(); | ||
306 | out: | 305 | out: |
307 | release_sock(sk); | 306 | release_sock(sk); |
308 | return err; | 307 | return err; |
@@ -381,8 +380,7 @@ static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb) | |||
381 | } | 380 | } |
382 | 381 | ||
383 | /* Charge it to the socket. */ | 382 | /* Charge it to the socket. */ |
384 | if (sock_queue_rcv_skb(sk,skb)<0) { | 383 | if (sock_queue_rcv_skb(sk, skb) < 0) { |
385 | atomic_inc(&sk->sk_drops); | ||
386 | kfree_skb(skb); | 384 | kfree_skb(skb); |
387 | return NET_RX_DROP; | 385 | return NET_RX_DROP; |
388 | } | 386 | } |
@@ -416,14 +414,14 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb) | |||
416 | skb_network_header_len(skb)); | 414 | skb_network_header_len(skb)); |
417 | if (!csum_ipv6_magic(&ipv6_hdr(skb)->saddr, | 415 | if (!csum_ipv6_magic(&ipv6_hdr(skb)->saddr, |
418 | &ipv6_hdr(skb)->daddr, | 416 | &ipv6_hdr(skb)->daddr, |
419 | skb->len, inet->num, skb->csum)) | 417 | skb->len, inet->inet_num, skb->csum)) |
420 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 418 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
421 | } | 419 | } |
422 | if (!skb_csum_unnecessary(skb)) | 420 | if (!skb_csum_unnecessary(skb)) |
423 | skb->csum = ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr, | 421 | skb->csum = ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr, |
424 | &ipv6_hdr(skb)->daddr, | 422 | &ipv6_hdr(skb)->daddr, |
425 | skb->len, | 423 | skb->len, |
426 | inet->num, 0)); | 424 | inet->inet_num, 0)); |
427 | 425 | ||
428 | if (inet->hdrincl) { | 426 | if (inet->hdrincl) { |
429 | if (skb_checksum_complete(skb)) { | 427 | if (skb_checksum_complete(skb)) { |
@@ -497,7 +495,7 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk, | |||
497 | sin6->sin6_scope_id = IP6CB(skb)->iif; | 495 | sin6->sin6_scope_id = IP6CB(skb)->iif; |
498 | } | 496 | } |
499 | 497 | ||
500 | sock_recv_timestamp(msg, sk, skb); | 498 | sock_recv_ts_and_drops(msg, sk, skb); |
501 | 499 | ||
502 | if (np->rxopt.all) | 500 | if (np->rxopt.all) |
503 | datagram_recv_ctl(sk, msg, skb); | 501 | datagram_recv_ctl(sk, msg, skb); |
@@ -518,7 +516,6 @@ csum_copy_err: | |||
518 | as some normal condition. | 516 | as some normal condition. |
519 | */ | 517 | */ |
520 | err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH; | 518 | err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH; |
521 | atomic_inc(&sk->sk_drops); | ||
522 | goto out; | 519 | goto out; |
523 | } | 520 | } |
524 | 521 | ||
@@ -766,8 +763,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk, | |||
766 | proto = ntohs(sin6->sin6_port); | 763 | proto = ntohs(sin6->sin6_port); |
767 | 764 | ||
768 | if (!proto) | 765 | if (!proto) |
769 | proto = inet->num; | 766 | proto = inet->inet_num; |
770 | else if (proto != inet->num) | 767 | else if (proto != inet->inet_num) |
771 | return(-EINVAL); | 768 | return(-EINVAL); |
772 | 769 | ||
773 | if (proto > 255) | 770 | if (proto > 255) |
@@ -800,7 +797,7 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk, | |||
800 | if (sk->sk_state != TCP_ESTABLISHED) | 797 | if (sk->sk_state != TCP_ESTABLISHED) |
801 | return -EDESTADDRREQ; | 798 | return -EDESTADDRREQ; |
802 | 799 | ||
803 | proto = inet->num; | 800 | proto = inet->inet_num; |
804 | daddr = &np->daddr; | 801 | daddr = &np->daddr; |
805 | fl.fl6_flowlabel = np->flow_label; | 802 | fl.fl6_flowlabel = np->flow_label; |
806 | } | 803 | } |
@@ -967,7 +964,7 @@ static int do_rawv6_setsockopt(struct sock *sk, int level, int optname, | |||
967 | 964 | ||
968 | switch (optname) { | 965 | switch (optname) { |
969 | case IPV6_CHECKSUM: | 966 | case IPV6_CHECKSUM: |
970 | if (inet_sk(sk)->num == IPPROTO_ICMPV6 && | 967 | if (inet_sk(sk)->inet_num == IPPROTO_ICMPV6 && |
971 | level == IPPROTO_IPV6) { | 968 | level == IPPROTO_IPV6) { |
972 | /* | 969 | /* |
973 | * RFC3542 tells that IPV6_CHECKSUM socket | 970 | * RFC3542 tells that IPV6_CHECKSUM socket |
@@ -1007,7 +1004,7 @@ static int rawv6_setsockopt(struct sock *sk, int level, int optname, | |||
1007 | break; | 1004 | break; |
1008 | 1005 | ||
1009 | case SOL_ICMPV6: | 1006 | case SOL_ICMPV6: |
1010 | if (inet_sk(sk)->num != IPPROTO_ICMPV6) | 1007 | if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6) |
1011 | return -EOPNOTSUPP; | 1008 | return -EOPNOTSUPP; |
1012 | return rawv6_seticmpfilter(sk, level, optname, optval, | 1009 | return rawv6_seticmpfilter(sk, level, optname, optval, |
1013 | optlen); | 1010 | optlen); |
@@ -1030,7 +1027,7 @@ static int compat_rawv6_setsockopt(struct sock *sk, int level, int optname, | |||
1030 | case SOL_RAW: | 1027 | case SOL_RAW: |
1031 | break; | 1028 | break; |
1032 | case SOL_ICMPV6: | 1029 | case SOL_ICMPV6: |
1033 | if (inet_sk(sk)->num != IPPROTO_ICMPV6) | 1030 | if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6) |
1034 | return -EOPNOTSUPP; | 1031 | return -EOPNOTSUPP; |
1035 | return rawv6_seticmpfilter(sk, level, optname, optval, optlen); | 1032 | return rawv6_seticmpfilter(sk, level, optname, optval, optlen); |
1036 | case SOL_IPV6: | 1033 | case SOL_IPV6: |
@@ -1087,7 +1084,7 @@ static int rawv6_getsockopt(struct sock *sk, int level, int optname, | |||
1087 | break; | 1084 | break; |
1088 | 1085 | ||
1089 | case SOL_ICMPV6: | 1086 | case SOL_ICMPV6: |
1090 | if (inet_sk(sk)->num != IPPROTO_ICMPV6) | 1087 | if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6) |
1091 | return -EOPNOTSUPP; | 1088 | return -EOPNOTSUPP; |
1092 | return rawv6_geticmpfilter(sk, level, optname, optval, | 1089 | return rawv6_geticmpfilter(sk, level, optname, optval, |
1093 | optlen); | 1090 | optlen); |
@@ -1110,7 +1107,7 @@ static int compat_rawv6_getsockopt(struct sock *sk, int level, int optname, | |||
1110 | case SOL_RAW: | 1107 | case SOL_RAW: |
1111 | break; | 1108 | break; |
1112 | case SOL_ICMPV6: | 1109 | case SOL_ICMPV6: |
1113 | if (inet_sk(sk)->num != IPPROTO_ICMPV6) | 1110 | if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6) |
1114 | return -EOPNOTSUPP; | 1111 | return -EOPNOTSUPP; |
1115 | return rawv6_geticmpfilter(sk, level, optname, optval, optlen); | 1112 | return rawv6_geticmpfilter(sk, level, optname, optval, optlen); |
1116 | case SOL_IPV6: | 1113 | case SOL_IPV6: |
@@ -1157,7 +1154,7 @@ static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg) | |||
1157 | 1154 | ||
1158 | static void rawv6_close(struct sock *sk, long timeout) | 1155 | static void rawv6_close(struct sock *sk, long timeout) |
1159 | { | 1156 | { |
1160 | if (inet_sk(sk)->num == IPPROTO_RAW) | 1157 | if (inet_sk(sk)->inet_num == IPPROTO_RAW) |
1161 | ip6_ra_control(sk, -1); | 1158 | ip6_ra_control(sk, -1); |
1162 | ip6mr_sk_done(sk); | 1159 | ip6mr_sk_done(sk); |
1163 | sk_common_release(sk); | 1160 | sk_common_release(sk); |
@@ -1176,7 +1173,7 @@ static int rawv6_init_sk(struct sock *sk) | |||
1176 | { | 1173 | { |
1177 | struct raw6_sock *rp = raw6_sk(sk); | 1174 | struct raw6_sock *rp = raw6_sk(sk); |
1178 | 1175 | ||
1179 | switch (inet_sk(sk)->num) { | 1176 | switch (inet_sk(sk)->inet_num) { |
1180 | case IPPROTO_ICMPV6: | 1177 | case IPPROTO_ICMPV6: |
1181 | rp->checksum = 1; | 1178 | rp->checksum = 1; |
1182 | rp->offset = 2; | 1179 | rp->offset = 2; |
@@ -1226,7 +1223,7 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i) | |||
1226 | dest = &np->daddr; | 1223 | dest = &np->daddr; |
1227 | src = &np->rcv_saddr; | 1224 | src = &np->rcv_saddr; |
1228 | destp = 0; | 1225 | destp = 0; |
1229 | srcp = inet_sk(sp)->num; | 1226 | srcp = inet_sk(sp)->inet_num; |
1230 | seq_printf(seq, | 1227 | seq_printf(seq, |
1231 | "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " | 1228 | "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " |
1232 | "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d\n", | 1229 | "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d\n", |
@@ -1338,7 +1335,6 @@ static struct inet_protosw rawv6_protosw = { | |||
1338 | .protocol = IPPROTO_IP, /* wild card */ | 1335 | .protocol = IPPROTO_IP, /* wild card */ |
1339 | .prot = &rawv6_prot, | 1336 | .prot = &rawv6_prot, |
1340 | .ops = &inet6_sockraw_ops, | 1337 | .ops = &inet6_sockraw_ops, |
1341 | .capability = CAP_NET_RAW, | ||
1342 | .no_check = UDP_CSUM_DEFAULT, | 1338 | .no_check = UDP_CSUM_DEFAULT, |
1343 | .flags = INET_PROTOSW_REUSE, | 1339 | .flags = INET_PROTOSW_REUSE, |
1344 | }; | 1340 | }; |
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index da5bd0ed83df..dce699fb2672 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c | |||
@@ -208,18 +208,17 @@ static void ip6_frag_expire(unsigned long data) | |||
208 | fq_kill(fq); | 208 | fq_kill(fq); |
209 | 209 | ||
210 | net = container_of(fq->q.net, struct net, ipv6.frags); | 210 | net = container_of(fq->q.net, struct net, ipv6.frags); |
211 | dev = dev_get_by_index(net, fq->iif); | 211 | rcu_read_lock(); |
212 | dev = dev_get_by_index_rcu(net, fq->iif); | ||
212 | if (!dev) | 213 | if (!dev) |
213 | goto out; | 214 | goto out_rcu_unlock; |
214 | 215 | ||
215 | rcu_read_lock(); | ||
216 | IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT); | 216 | IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT); |
217 | IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS); | 217 | IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS); |
218 | rcu_read_unlock(); | ||
219 | 218 | ||
220 | /* Don't send error if the first segment did not arrive. */ | 219 | /* Don't send error if the first segment did not arrive. */ |
221 | if (!(fq->q.last_in & INET_FRAG_FIRST_IN) || !fq->q.fragments) | 220 | if (!(fq->q.last_in & INET_FRAG_FIRST_IN) || !fq->q.fragments) |
222 | goto out; | 221 | goto out_rcu_unlock; |
223 | 222 | ||
224 | /* | 223 | /* |
225 | But use as source device on which LAST ARRIVED | 224 | But use as source device on which LAST ARRIVED |
@@ -228,9 +227,9 @@ static void ip6_frag_expire(unsigned long data) | |||
228 | */ | 227 | */ |
229 | fq->q.fragments->dev = dev; | 228 | fq->q.fragments->dev = dev; |
230 | icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0, dev); | 229 | icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0, dev); |
230 | out_rcu_unlock: | ||
231 | rcu_read_unlock(); | ||
231 | out: | 232 | out: |
232 | if (dev) | ||
233 | dev_put(dev); | ||
234 | spin_unlock(&fq->q.lock); | 233 | spin_unlock(&fq->q.lock); |
235 | fq_put(fq); | 234 | fq_put(fq); |
236 | } | 235 | } |
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index d6fe7646a8ff..df9432a46ffc 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -1471,9 +1471,10 @@ static struct rt6_info *ip6_route_redirect(struct in6_addr *dest, | |||
1471 | }, | 1471 | }, |
1472 | }, | 1472 | }, |
1473 | }, | 1473 | }, |
1474 | .gateway = *gateway, | ||
1475 | }; | 1474 | }; |
1476 | 1475 | ||
1476 | ipv6_addr_copy(&rdfl.gateway, gateway); | ||
1477 | |||
1477 | if (rt6_need_strict(dest)) | 1478 | if (rt6_need_strict(dest)) |
1478 | flags |= RT6_LOOKUP_F_IFACE; | 1479 | flags |= RT6_LOOKUP_F_IFACE; |
1479 | 1480 | ||
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index dbd19a78ca73..d9deaa7753ef 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c | |||
@@ -66,7 +66,7 @@ static void ipip6_fb_tunnel_init(struct net_device *dev); | |||
66 | static void ipip6_tunnel_init(struct net_device *dev); | 66 | static void ipip6_tunnel_init(struct net_device *dev); |
67 | static void ipip6_tunnel_setup(struct net_device *dev); | 67 | static void ipip6_tunnel_setup(struct net_device *dev); |
68 | 68 | ||
69 | static int sit_net_id; | 69 | static int sit_net_id __read_mostly; |
70 | struct sit_net { | 70 | struct sit_net { |
71 | struct ip_tunnel *tunnels_r_l[HASH_SIZE]; | 71 | struct ip_tunnel *tunnels_r_l[HASH_SIZE]; |
72 | struct ip_tunnel *tunnels_r[HASH_SIZE]; | 72 | struct ip_tunnel *tunnels_r[HASH_SIZE]; |
@@ -77,8 +77,17 @@ struct sit_net { | |||
77 | struct net_device *fb_tunnel_dev; | 77 | struct net_device *fb_tunnel_dev; |
78 | }; | 78 | }; |
79 | 79 | ||
80 | static DEFINE_RWLOCK(ipip6_lock); | 80 | /* |
81 | * Locking : hash tables are protected by RCU and a spinlock | ||
82 | */ | ||
83 | static DEFINE_SPINLOCK(ipip6_lock); | ||
84 | |||
85 | #define for_each_ip_tunnel_rcu(start) \ | ||
86 | for (t = rcu_dereference(start); t; t = rcu_dereference(t->next)) | ||
81 | 87 | ||
88 | /* | ||
89 | * Must be invoked with rcu_read_lock | ||
90 | */ | ||
82 | static struct ip_tunnel * ipip6_tunnel_lookup(struct net *net, | 91 | static struct ip_tunnel * ipip6_tunnel_lookup(struct net *net, |
83 | struct net_device *dev, __be32 remote, __be32 local) | 92 | struct net_device *dev, __be32 remote, __be32 local) |
84 | { | 93 | { |
@@ -87,26 +96,26 @@ static struct ip_tunnel * ipip6_tunnel_lookup(struct net *net, | |||
87 | struct ip_tunnel *t; | 96 | struct ip_tunnel *t; |
88 | struct sit_net *sitn = net_generic(net, sit_net_id); | 97 | struct sit_net *sitn = net_generic(net, sit_net_id); |
89 | 98 | ||
90 | for (t = sitn->tunnels_r_l[h0^h1]; t; t = t->next) { | 99 | for_each_ip_tunnel_rcu(sitn->tunnels_r_l[h0 ^ h1]) { |
91 | if (local == t->parms.iph.saddr && | 100 | if (local == t->parms.iph.saddr && |
92 | remote == t->parms.iph.daddr && | 101 | remote == t->parms.iph.daddr && |
93 | (!dev || !t->parms.link || dev->iflink == t->parms.link) && | 102 | (!dev || !t->parms.link || dev->iflink == t->parms.link) && |
94 | (t->dev->flags & IFF_UP)) | 103 | (t->dev->flags & IFF_UP)) |
95 | return t; | 104 | return t; |
96 | } | 105 | } |
97 | for (t = sitn->tunnels_r[h0]; t; t = t->next) { | 106 | for_each_ip_tunnel_rcu(sitn->tunnels_r[h0]) { |
98 | if (remote == t->parms.iph.daddr && | 107 | if (remote == t->parms.iph.daddr && |
99 | (!dev || !t->parms.link || dev->iflink == t->parms.link) && | 108 | (!dev || !t->parms.link || dev->iflink == t->parms.link) && |
100 | (t->dev->flags & IFF_UP)) | 109 | (t->dev->flags & IFF_UP)) |
101 | return t; | 110 | return t; |
102 | } | 111 | } |
103 | for (t = sitn->tunnels_l[h1]; t; t = t->next) { | 112 | for_each_ip_tunnel_rcu(sitn->tunnels_l[h1]) { |
104 | if (local == t->parms.iph.saddr && | 113 | if (local == t->parms.iph.saddr && |
105 | (!dev || !t->parms.link || dev->iflink == t->parms.link) && | 114 | (!dev || !t->parms.link || dev->iflink == t->parms.link) && |
106 | (t->dev->flags & IFF_UP)) | 115 | (t->dev->flags & IFF_UP)) |
107 | return t; | 116 | return t; |
108 | } | 117 | } |
109 | t = sitn->tunnels_wc[0]; | 118 | t = rcu_dereference(sitn->tunnels_wc[0]); |
110 | if ((t != NULL) && (t->dev->flags & IFF_UP)) | 119 | if ((t != NULL) && (t->dev->flags & IFF_UP)) |
111 | return t; | 120 | return t; |
112 | return NULL; | 121 | return NULL; |
@@ -143,9 +152,9 @@ static void ipip6_tunnel_unlink(struct sit_net *sitn, struct ip_tunnel *t) | |||
143 | 152 | ||
144 | for (tp = ipip6_bucket(sitn, t); *tp; tp = &(*tp)->next) { | 153 | for (tp = ipip6_bucket(sitn, t); *tp; tp = &(*tp)->next) { |
145 | if (t == *tp) { | 154 | if (t == *tp) { |
146 | write_lock_bh(&ipip6_lock); | 155 | spin_lock_bh(&ipip6_lock); |
147 | *tp = t->next; | 156 | *tp = t->next; |
148 | write_unlock_bh(&ipip6_lock); | 157 | spin_unlock_bh(&ipip6_lock); |
149 | break; | 158 | break; |
150 | } | 159 | } |
151 | } | 160 | } |
@@ -155,10 +164,27 @@ static void ipip6_tunnel_link(struct sit_net *sitn, struct ip_tunnel *t) | |||
155 | { | 164 | { |
156 | struct ip_tunnel **tp = ipip6_bucket(sitn, t); | 165 | struct ip_tunnel **tp = ipip6_bucket(sitn, t); |
157 | 166 | ||
167 | spin_lock_bh(&ipip6_lock); | ||
158 | t->next = *tp; | 168 | t->next = *tp; |
159 | write_lock_bh(&ipip6_lock); | 169 | rcu_assign_pointer(*tp, t); |
160 | *tp = t; | 170 | spin_unlock_bh(&ipip6_lock); |
161 | write_unlock_bh(&ipip6_lock); | 171 | } |
172 | |||
173 | static void ipip6_tunnel_clone_6rd(struct net_device *dev, struct sit_net *sitn) | ||
174 | { | ||
175 | #ifdef CONFIG_IPV6_SIT_6RD | ||
176 | struct ip_tunnel *t = netdev_priv(dev); | ||
177 | |||
178 | if (t->dev == sitn->fb_tunnel_dev) { | ||
179 | ipv6_addr_set(&t->ip6rd.prefix, htonl(0x20020000), 0, 0, 0); | ||
180 | t->ip6rd.relay_prefix = 0; | ||
181 | t->ip6rd.prefixlen = 16; | ||
182 | t->ip6rd.relay_prefixlen = 0; | ||
183 | } else { | ||
184 | struct ip_tunnel *t0 = netdev_priv(sitn->fb_tunnel_dev); | ||
185 | memcpy(&t->ip6rd, &t0->ip6rd, sizeof(t->ip6rd)); | ||
186 | } | ||
187 | #endif | ||
162 | } | 188 | } |
163 | 189 | ||
164 | static struct ip_tunnel * ipip6_tunnel_locate(struct net *net, | 190 | static struct ip_tunnel * ipip6_tunnel_locate(struct net *net, |
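The new sit.c locking scheme above follows the usual RCU pattern: lookups walk a hash bucket under rcu_read_lock() via the for_each_ip_tunnel_rcu() helper, while ipip6_tunnel_link()/ipip6_tunnel_unlink() serialize writers on a spinlock and publish entries with rcu_assign_pointer(). Below is a minimal userspace sketch of that discipline built on liburcu; the structure, the names and the pthread mutex standing in for the kernel spinlock are our own illustration, not part of the patch. Build against liburcu and link with -lurcu (memb flavour).

/*
 * Minimal sketch of the RCU read side / locked write side pattern
 * used for the sit.c tunnel hash (illustrative, userspace, liburcu).
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <urcu.h>

struct tunnel {
	int id;
	struct tunnel *next;
};

static struct tunnel *bucket;                 /* one hash bucket head */
static pthread_mutex_t bucket_lock = PTHREAD_MUTEX_INITIALIZER;

/* writer: serialize on the lock, publish with rcu_assign_pointer() */
static void tunnel_link(struct tunnel *t)
{
	pthread_mutex_lock(&bucket_lock);
	t->next = bucket;
	rcu_assign_pointer(bucket, t);
	pthread_mutex_unlock(&bucket_lock);
}

/* reader: caller must hold rcu_read_lock(), like ipip6_tunnel_lookup() */
static struct tunnel *tunnel_lookup(int id)
{
	struct tunnel *t;

	for (t = rcu_dereference(bucket); t; t = rcu_dereference(t->next))
		if (t->id == id)
			return t;
	return NULL;
}

int main(void)
{
	struct tunnel *t = calloc(1, sizeof(*t));

	rcu_register_thread();
	t->id = 42;
	tunnel_link(t);

	rcu_read_lock();
	printf("lookup(42) %s\n", tunnel_lookup(42) ? "found" : "missing");
	rcu_read_unlock();

	rcu_unregister_thread();
	return 0;
}

Deleting an entry would mirror ipip6_tunnel_unlink()/ipip6_tunnel_del_prl(): unlink under the writer lock, then defer the kfree() with call_rcu() (or wait with synchronize_rcu()) so readers still traversing the bucket never see freed memory.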
@@ -204,6 +230,7 @@ static struct ip_tunnel * ipip6_tunnel_locate(struct net *net, | |||
204 | 230 | ||
205 | nt->parms = *parms; | 231 | nt->parms = *parms; |
206 | ipip6_tunnel_init(dev); | 232 | ipip6_tunnel_init(dev); |
233 | ipip6_tunnel_clone_6rd(dev, sitn); | ||
207 | 234 | ||
208 | if (parms->i_flags & SIT_ISATAP) | 235 | if (parms->i_flags & SIT_ISATAP) |
209 | dev->priv_flags |= IFF_ISATAP; | 236 | dev->priv_flags |= IFF_ISATAP; |
@@ -222,15 +249,22 @@ failed: | |||
222 | return NULL; | 249 | return NULL; |
223 | } | 250 | } |
224 | 251 | ||
252 | static DEFINE_SPINLOCK(ipip6_prl_lock); | ||
253 | |||
254 | #define for_each_prl_rcu(start) \ | ||
255 | for (prl = rcu_dereference(start); \ | ||
256 | prl; \ | ||
257 | prl = rcu_dereference(prl->next)) | ||
258 | |||
225 | static struct ip_tunnel_prl_entry * | 259 | static struct ip_tunnel_prl_entry * |
226 | __ipip6_tunnel_locate_prl(struct ip_tunnel *t, __be32 addr) | 260 | __ipip6_tunnel_locate_prl(struct ip_tunnel *t, __be32 addr) |
227 | { | 261 | { |
228 | struct ip_tunnel_prl_entry *p = (struct ip_tunnel_prl_entry *)NULL; | 262 | struct ip_tunnel_prl_entry *prl; |
229 | 263 | ||
230 | for (p = t->prl; p; p = p->next) | 264 | for_each_prl_rcu(t->prl) |
231 | if (p->addr == addr) | 265 | if (prl->addr == addr) |
232 | break; | 266 | break; |
233 | return p; | 267 | return prl; |
234 | 268 | ||
235 | } | 269 | } |
236 | 270 | ||
@@ -255,7 +289,7 @@ static int ipip6_tunnel_get_prl(struct ip_tunnel *t, | |||
255 | kcalloc(cmax, sizeof(*kp), GFP_KERNEL) : | 289 | kcalloc(cmax, sizeof(*kp), GFP_KERNEL) : |
256 | NULL; | 290 | NULL; |
257 | 291 | ||
258 | read_lock(&ipip6_lock); | 292 | rcu_read_lock(); |
259 | 293 | ||
260 | ca = t->prl_count < cmax ? t->prl_count : cmax; | 294 | ca = t->prl_count < cmax ? t->prl_count : cmax; |
261 | 295 | ||
@@ -273,7 +307,7 @@ static int ipip6_tunnel_get_prl(struct ip_tunnel *t, | |||
273 | } | 307 | } |
274 | 308 | ||
275 | c = 0; | 309 | c = 0; |
276 | for (prl = t->prl; prl; prl = prl->next) { | 310 | for_each_prl_rcu(t->prl) { |
277 | if (c >= cmax) | 311 | if (c >= cmax) |
278 | break; | 312 | break; |
279 | if (kprl.addr != htonl(INADDR_ANY) && prl->addr != kprl.addr) | 313 | if (kprl.addr != htonl(INADDR_ANY) && prl->addr != kprl.addr) |
@@ -285,7 +319,7 @@ static int ipip6_tunnel_get_prl(struct ip_tunnel *t, | |||
285 | break; | 319 | break; |
286 | } | 320 | } |
287 | out: | 321 | out: |
288 | read_unlock(&ipip6_lock); | 322 | rcu_read_unlock(); |
289 | 323 | ||
290 | len = sizeof(*kp) * c; | 324 | len = sizeof(*kp) * c; |
291 | ret = 0; | 325 | ret = 0; |
@@ -306,12 +340,14 @@ ipip6_tunnel_add_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a, int chg) | |||
306 | if (a->addr == htonl(INADDR_ANY)) | 340 | if (a->addr == htonl(INADDR_ANY)) |
307 | return -EINVAL; | 341 | return -EINVAL; |
308 | 342 | ||
309 | write_lock(&ipip6_lock); | 343 | spin_lock(&ipip6_prl_lock); |
310 | 344 | ||
311 | for (p = t->prl; p; p = p->next) { | 345 | for (p = t->prl; p; p = p->next) { |
312 | if (p->addr == a->addr) { | 346 | if (p->addr == a->addr) { |
313 | if (chg) | 347 | if (chg) { |
314 | goto update; | 348 | p->flags = a->flags; |
349 | goto out; | ||
350 | } | ||
315 | err = -EEXIST; | 351 | err = -EEXIST; |
316 | goto out; | 352 | goto out; |
317 | } | 353 | } |
@@ -328,46 +364,63 @@ ipip6_tunnel_add_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a, int chg) | |||
328 | goto out; | 364 | goto out; |
329 | } | 365 | } |
330 | 366 | ||
367 | INIT_RCU_HEAD(&p->rcu_head); | ||
331 | p->next = t->prl; | 368 | p->next = t->prl; |
332 | t->prl = p; | ||
333 | t->prl_count++; | ||
334 | update: | ||
335 | p->addr = a->addr; | 369 | p->addr = a->addr; |
336 | p->flags = a->flags; | 370 | p->flags = a->flags; |
371 | t->prl_count++; | ||
372 | rcu_assign_pointer(t->prl, p); | ||
337 | out: | 373 | out: |
338 | write_unlock(&ipip6_lock); | 374 | spin_unlock(&ipip6_prl_lock); |
339 | return err; | 375 | return err; |
340 | } | 376 | } |
341 | 377 | ||
378 | static void prl_entry_destroy_rcu(struct rcu_head *head) | ||
379 | { | ||
380 | kfree(container_of(head, struct ip_tunnel_prl_entry, rcu_head)); | ||
381 | } | ||
382 | |||
383 | static void prl_list_destroy_rcu(struct rcu_head *head) | ||
384 | { | ||
385 | struct ip_tunnel_prl_entry *p, *n; | ||
386 | |||
387 | p = container_of(head, struct ip_tunnel_prl_entry, rcu_head); | ||
388 | do { | ||
389 | n = p->next; | ||
390 | kfree(p); | ||
391 | p = n; | ||
392 | } while (p); | ||
393 | } | ||
394 | |||
342 | static int | 395 | static int |
343 | ipip6_tunnel_del_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a) | 396 | ipip6_tunnel_del_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a) |
344 | { | 397 | { |
345 | struct ip_tunnel_prl_entry *x, **p; | 398 | struct ip_tunnel_prl_entry *x, **p; |
346 | int err = 0; | 399 | int err = 0; |
347 | 400 | ||
348 | write_lock(&ipip6_lock); | 401 | spin_lock(&ipip6_prl_lock); |
349 | 402 | ||
350 | if (a && a->addr != htonl(INADDR_ANY)) { | 403 | if (a && a->addr != htonl(INADDR_ANY)) { |
351 | for (p = &t->prl; *p; p = &(*p)->next) { | 404 | for (p = &t->prl; *p; p = &(*p)->next) { |
352 | if ((*p)->addr == a->addr) { | 405 | if ((*p)->addr == a->addr) { |
353 | x = *p; | 406 | x = *p; |
354 | *p = x->next; | 407 | *p = x->next; |
355 | kfree(x); | 408 | call_rcu(&x->rcu_head, prl_entry_destroy_rcu); |
356 | t->prl_count--; | 409 | t->prl_count--; |
357 | goto out; | 410 | goto out; |
358 | } | 411 | } |
359 | } | 412 | } |
360 | err = -ENXIO; | 413 | err = -ENXIO; |
361 | } else { | 414 | } else { |
362 | while (t->prl) { | 415 | if (t->prl) { |
416 | t->prl_count = 0; | ||
363 | x = t->prl; | 417 | x = t->prl; |
364 | t->prl = t->prl->next; | 418 | call_rcu(&x->rcu_head, prl_list_destroy_rcu); |
365 | kfree(x); | 419 | t->prl = NULL; |
366 | t->prl_count--; | ||
367 | } | 420 | } |
368 | } | 421 | } |
369 | out: | 422 | out: |
370 | write_unlock(&ipip6_lock); | 423 | spin_unlock(&ipip6_prl_lock); |
371 | return err; | 424 | return err; |
372 | } | 425 | } |
373 | 426 | ||
@@ -377,7 +430,7 @@ isatap_chksrc(struct sk_buff *skb, struct iphdr *iph, struct ip_tunnel *t) | |||
377 | struct ip_tunnel_prl_entry *p; | 430 | struct ip_tunnel_prl_entry *p; |
378 | int ok = 1; | 431 | int ok = 1; |
379 | 432 | ||
380 | read_lock(&ipip6_lock); | 433 | rcu_read_lock(); |
381 | p = __ipip6_tunnel_locate_prl(t, iph->saddr); | 434 | p = __ipip6_tunnel_locate_prl(t, iph->saddr); |
382 | if (p) { | 435 | if (p) { |
383 | if (p->flags & PRL_DEFAULT) | 436 | if (p->flags & PRL_DEFAULT) |
@@ -393,7 +446,7 @@ isatap_chksrc(struct sk_buff *skb, struct iphdr *iph, struct ip_tunnel *t) | |||
393 | else | 446 | else |
394 | ok = 0; | 447 | ok = 0; |
395 | } | 448 | } |
396 | read_unlock(&ipip6_lock); | 449 | rcu_read_unlock(); |
397 | return ok; | 450 | return ok; |
398 | } | 451 | } |
399 | 452 | ||
@@ -403,9 +456,9 @@ static void ipip6_tunnel_uninit(struct net_device *dev) | |||
403 | struct sit_net *sitn = net_generic(net, sit_net_id); | 456 | struct sit_net *sitn = net_generic(net, sit_net_id); |
404 | 457 | ||
405 | if (dev == sitn->fb_tunnel_dev) { | 458 | if (dev == sitn->fb_tunnel_dev) { |
406 | write_lock_bh(&ipip6_lock); | 459 | spin_lock_bh(&ipip6_lock); |
407 | sitn->tunnels_wc[0] = NULL; | 460 | sitn->tunnels_wc[0] = NULL; |
408 | write_unlock_bh(&ipip6_lock); | 461 | spin_unlock_bh(&ipip6_lock); |
409 | dev_put(dev); | 462 | dev_put(dev); |
410 | } else { | 463 | } else { |
411 | ipip6_tunnel_unlink(sitn, netdev_priv(dev)); | 464 | ipip6_tunnel_unlink(sitn, netdev_priv(dev)); |
@@ -458,7 +511,7 @@ static int ipip6_err(struct sk_buff *skb, u32 info) | |||
458 | 511 | ||
459 | err = -ENOENT; | 512 | err = -ENOENT; |
460 | 513 | ||
461 | read_lock(&ipip6_lock); | 514 | rcu_read_lock(); |
462 | t = ipip6_tunnel_lookup(dev_net(skb->dev), | 515 | t = ipip6_tunnel_lookup(dev_net(skb->dev), |
463 | skb->dev, | 516 | skb->dev, |
464 | iph->daddr, | 517 | iph->daddr, |
@@ -476,7 +529,7 @@ static int ipip6_err(struct sk_buff *skb, u32 info) | |||
476 | t->err_count = 1; | 529 | t->err_count = 1; |
477 | t->err_time = jiffies; | 530 | t->err_time = jiffies; |
478 | out: | 531 | out: |
479 | read_unlock(&ipip6_lock); | 532 | rcu_read_unlock(); |
480 | return err; | 533 | return err; |
481 | } | 534 | } |
482 | 535 | ||
@@ -496,7 +549,7 @@ static int ipip6_rcv(struct sk_buff *skb) | |||
496 | 549 | ||
497 | iph = ip_hdr(skb); | 550 | iph = ip_hdr(skb); |
498 | 551 | ||
499 | read_lock(&ipip6_lock); | 552 | rcu_read_lock(); |
500 | tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev, | 553 | tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev, |
501 | iph->saddr, iph->daddr); | 554 | iph->saddr, iph->daddr); |
502 | if (tunnel != NULL) { | 555 | if (tunnel != NULL) { |
@@ -510,7 +563,7 @@ static int ipip6_rcv(struct sk_buff *skb) | |||
510 | if ((tunnel->dev->priv_flags & IFF_ISATAP) && | 563 | if ((tunnel->dev->priv_flags & IFF_ISATAP) && |
511 | !isatap_chksrc(skb, iph, tunnel)) { | 564 | !isatap_chksrc(skb, iph, tunnel)) { |
512 | tunnel->dev->stats.rx_errors++; | 565 | tunnel->dev->stats.rx_errors++; |
513 | read_unlock(&ipip6_lock); | 566 | rcu_read_unlock(); |
514 | kfree_skb(skb); | 567 | kfree_skb(skb); |
515 | return 0; | 568 | return 0; |
516 | } | 569 | } |
@@ -521,28 +574,52 @@ static int ipip6_rcv(struct sk_buff *skb) | |||
521 | nf_reset(skb); | 574 | nf_reset(skb); |
522 | ipip6_ecn_decapsulate(iph, skb); | 575 | ipip6_ecn_decapsulate(iph, skb); |
523 | netif_rx(skb); | 576 | netif_rx(skb); |
524 | read_unlock(&ipip6_lock); | 577 | rcu_read_unlock(); |
525 | return 0; | 578 | return 0; |
526 | } | 579 | } |
527 | 580 | ||
528 | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); | 581 | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); |
529 | read_unlock(&ipip6_lock); | 582 | rcu_read_unlock(); |
530 | out: | 583 | out: |
531 | kfree_skb(skb); | 584 | kfree_skb(skb); |
532 | return 0; | 585 | return 0; |
533 | } | 586 | } |
534 | 587 | ||
535 | /* Returns the embedded IPv4 address if the IPv6 address | 588 | /* |
536 | comes from 6to4 (RFC 3056) addr space */ | 589 | * Returns the embedded IPv4 address if the IPv6 address |
537 | 590 | * comes from 6rd / 6to4 (RFC 3056) addr space. | |
538 | static inline __be32 try_6to4(struct in6_addr *v6dst) | 591 | */ |
592 | static inline | ||
593 | __be32 try_6rd(struct in6_addr *v6dst, struct ip_tunnel *tunnel) | ||
539 | { | 594 | { |
540 | __be32 dst = 0; | 595 | __be32 dst = 0; |
541 | 596 | ||
597 | #ifdef CONFIG_IPV6_SIT_6RD | ||
598 | if (ipv6_prefix_equal(v6dst, &tunnel->ip6rd.prefix, | ||
599 | tunnel->ip6rd.prefixlen)) { | ||
600 | unsigned pbw0, pbi0; | ||
601 | int pbi1; | ||
602 | u32 d; | ||
603 | |||
604 | pbw0 = tunnel->ip6rd.prefixlen >> 5; | ||
605 | pbi0 = tunnel->ip6rd.prefixlen & 0x1f; | ||
606 | |||
607 | d = (ntohl(v6dst->s6_addr32[pbw0]) << pbi0) >> | ||
608 | tunnel->ip6rd.relay_prefixlen; | ||
609 | |||
610 | pbi1 = pbi0 - tunnel->ip6rd.relay_prefixlen; | ||
611 | if (pbi1 > 0) | ||
612 | d |= ntohl(v6dst->s6_addr32[pbw0 + 1]) >> | ||
613 | (32 - pbi1); | ||
614 | |||
615 | dst = tunnel->ip6rd.relay_prefix | htonl(d); | ||
616 | } | ||
617 | #else | ||
542 | if (v6dst->s6_addr16[0] == htons(0x2002)) { | 618 | if (v6dst->s6_addr16[0] == htons(0x2002)) { |
543 | /* 6to4 v6 addr has 16 bits prefix, 32 v4addr, 16 SLA, ... */ | 619 | /* 6to4 v6 addr has 16 bits prefix, 32 v4addr, 16 SLA, ... */ |
544 | memcpy(&dst, &v6dst->s6_addr16[1], 4); | 620 | memcpy(&dst, &v6dst->s6_addr16[1], 4); |
545 | } | 621 | } |
622 | #endif | ||
546 | return dst; | 623 | return dst; |
547 | } | 624 | } |
548 | 625 | ||
@@ -555,10 +632,12 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, | |||
555 | struct net_device *dev) | 632 | struct net_device *dev) |
556 | { | 633 | { |
557 | struct ip_tunnel *tunnel = netdev_priv(dev); | 634 | struct ip_tunnel *tunnel = netdev_priv(dev); |
558 | struct net_device_stats *stats = &tunnel->dev->stats; | 635 | struct net_device_stats *stats = &dev->stats; |
636 | struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); | ||
559 | struct iphdr *tiph = &tunnel->parms.iph; | 637 | struct iphdr *tiph = &tunnel->parms.iph; |
560 | struct ipv6hdr *iph6 = ipv6_hdr(skb); | 638 | struct ipv6hdr *iph6 = ipv6_hdr(skb); |
561 | u8 tos = tunnel->parms.iph.tos; | 639 | u8 tos = tunnel->parms.iph.tos; |
640 | __be16 df = tiph->frag_off; | ||
562 | struct rtable *rt; /* Route to the other host */ | 641 | struct rtable *rt; /* Route to the other host */ |
563 | struct net_device *tdev; /* Device to other host */ | 642 | struct net_device *tdev; /* Device to other host */ |
564 | struct iphdr *iph; /* Our new IP header */ | 643 | struct iphdr *iph; /* Our new IP header */ |
@@ -595,7 +674,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, | |||
595 | } | 674 | } |
596 | 675 | ||
597 | if (!dst) | 676 | if (!dst) |
598 | dst = try_6to4(&iph6->daddr); | 677 | dst = try_6rd(&iph6->daddr, tunnel); |
599 | 678 | ||
600 | if (!dst) { | 679 | if (!dst) { |
601 | struct neighbour *neigh = NULL; | 680 | struct neighbour *neigh = NULL; |
@@ -648,25 +727,28 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, | |||
648 | goto tx_error; | 727 | goto tx_error; |
649 | } | 728 | } |
650 | 729 | ||
651 | if (tiph->frag_off) | 730 | if (df) { |
652 | mtu = dst_mtu(&rt->u.dst) - sizeof(struct iphdr); | 731 | mtu = dst_mtu(&rt->u.dst) - sizeof(struct iphdr); |
653 | else | ||
654 | mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu; | ||
655 | 732 | ||
656 | if (mtu < 68) { | 733 | if (mtu < 68) { |
657 | stats->collisions++; | 734 | stats->collisions++; |
658 | ip_rt_put(rt); | 735 | ip_rt_put(rt); |
659 | goto tx_error; | 736 | goto tx_error; |
660 | } | 737 | } |
661 | if (mtu < IPV6_MIN_MTU) | ||
662 | mtu = IPV6_MIN_MTU; | ||
663 | if (tunnel->parms.iph.daddr && skb_dst(skb)) | ||
664 | skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu); | ||
665 | 738 | ||
666 | if (skb->len > mtu) { | 739 | if (mtu < IPV6_MIN_MTU) { |
667 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev); | 740 | mtu = IPV6_MIN_MTU; |
668 | ip_rt_put(rt); | 741 | df = 0; |
669 | goto tx_error; | 742 | } |
743 | |||
744 | if (tunnel->parms.iph.daddr && skb_dst(skb)) | ||
745 | skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu); | ||
746 | |||
747 | if (skb->len > mtu) { | ||
748 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev); | ||
749 | ip_rt_put(rt); | ||
750 | goto tx_error; | ||
751 | } | ||
670 | } | 752 | } |
671 | 753 | ||
672 | if (tunnel->err_count > 0) { | 754 | if (tunnel->err_count > 0) { |
@@ -688,7 +770,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, | |||
688 | struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom); | 770 | struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom); |
689 | if (!new_skb) { | 771 | if (!new_skb) { |
690 | ip_rt_put(rt); | 772 | ip_rt_put(rt); |
691 | stats->tx_dropped++; | 773 | txq->tx_dropped++; |
692 | dev_kfree_skb(skb); | 774 | dev_kfree_skb(skb); |
693 | return NETDEV_TX_OK; | 775 | return NETDEV_TX_OK; |
694 | } | 776 | } |
@@ -714,11 +796,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, | |||
714 | iph = ip_hdr(skb); | 796 | iph = ip_hdr(skb); |
715 | iph->version = 4; | 797 | iph->version = 4; |
716 | iph->ihl = sizeof(struct iphdr)>>2; | 798 | iph->ihl = sizeof(struct iphdr)>>2; |
717 | if (mtu > IPV6_MIN_MTU) | 799 | iph->frag_off = df; |
718 | iph->frag_off = tiph->frag_off; | ||
719 | else | ||
720 | iph->frag_off = 0; | ||
721 | |||
722 | iph->protocol = IPPROTO_IPV6; | 800 | iph->protocol = IPPROTO_IPV6; |
723 | iph->tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6)); | 801 | iph->tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6)); |
724 | iph->daddr = rt->rt_dst; | 802 | iph->daddr = rt->rt_dst; |
@@ -785,9 +863,15 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) | |||
785 | struct ip_tunnel *t; | 863 | struct ip_tunnel *t; |
786 | struct net *net = dev_net(dev); | 864 | struct net *net = dev_net(dev); |
787 | struct sit_net *sitn = net_generic(net, sit_net_id); | 865 | struct sit_net *sitn = net_generic(net, sit_net_id); |
866 | #ifdef CONFIG_IPV6_SIT_6RD | ||
867 | struct ip_tunnel_6rd ip6rd; | ||
868 | #endif | ||
788 | 869 | ||
789 | switch (cmd) { | 870 | switch (cmd) { |
790 | case SIOCGETTUNNEL: | 871 | case SIOCGETTUNNEL: |
872 | #ifdef CONFIG_IPV6_SIT_6RD | ||
873 | case SIOCGET6RD: | ||
874 | #endif | ||
791 | t = NULL; | 875 | t = NULL; |
792 | if (dev == sitn->fb_tunnel_dev) { | 876 | if (dev == sitn->fb_tunnel_dev) { |
793 | if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) { | 877 | if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) { |
@@ -798,9 +882,25 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) | |||
798 | } | 882 | } |
799 | if (t == NULL) | 883 | if (t == NULL) |
800 | t = netdev_priv(dev); | 884 | t = netdev_priv(dev); |
801 | memcpy(&p, &t->parms, sizeof(p)); | 885 | |
802 | if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) | 886 | err = -EFAULT; |
803 | err = -EFAULT; | 887 | if (cmd == SIOCGETTUNNEL) { |
888 | memcpy(&p, &t->parms, sizeof(p)); | ||
889 | if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, | ||
890 | sizeof(p))) | ||
891 | goto done; | ||
892 | #ifdef CONFIG_IPV6_SIT_6RD | ||
893 | } else { | ||
894 | ipv6_addr_copy(&ip6rd.prefix, &t->ip6rd.prefix); | ||
895 | ip6rd.relay_prefix = t->ip6rd.relay_prefix; | ||
896 | ip6rd.prefixlen = t->ip6rd.prefixlen; | ||
897 | ip6rd.relay_prefixlen = t->ip6rd.relay_prefixlen; | ||
898 | if (copy_to_user(ifr->ifr_ifru.ifru_data, &ip6rd, | ||
899 | sizeof(ip6rd))) | ||
900 | goto done; | ||
901 | #endif | ||
902 | } | ||
903 | err = 0; | ||
804 | break; | 904 | break; |
805 | 905 | ||
806 | case SIOCADDTUNNEL: | 906 | case SIOCADDTUNNEL: |
@@ -921,6 +1021,54 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) | |||
921 | netdev_state_change(dev); | 1021 | netdev_state_change(dev); |
922 | break; | 1022 | break; |
923 | 1023 | ||
1024 | #ifdef CONFIG_IPV6_SIT_6RD | ||
1025 | case SIOCADD6RD: | ||
1026 | case SIOCCHG6RD: | ||
1027 | case SIOCDEL6RD: | ||
1028 | err = -EPERM; | ||
1029 | if (!capable(CAP_NET_ADMIN)) | ||
1030 | goto done; | ||
1031 | |||
1032 | err = -EFAULT; | ||
1033 | if (copy_from_user(&ip6rd, ifr->ifr_ifru.ifru_data, | ||
1034 | sizeof(ip6rd))) | ||
1035 | goto done; | ||
1036 | |||
1037 | t = netdev_priv(dev); | ||
1038 | |||
1039 | if (cmd != SIOCDEL6RD) { | ||
1040 | struct in6_addr prefix; | ||
1041 | __be32 relay_prefix; | ||
1042 | |||
1043 | err = -EINVAL; | ||
1044 | if (ip6rd.relay_prefixlen > 32 || | ||
1045 | ip6rd.prefixlen + (32 - ip6rd.relay_prefixlen) > 64) | ||
1046 | goto done; | ||
1047 | |||
1048 | ipv6_addr_prefix(&prefix, &ip6rd.prefix, | ||
1049 | ip6rd.prefixlen); | ||
1050 | if (!ipv6_addr_equal(&prefix, &ip6rd.prefix)) | ||
1051 | goto done; | ||
1052 | if (ip6rd.relay_prefixlen) | ||
1053 | relay_prefix = ip6rd.relay_prefix & | ||
1054 | htonl(0xffffffffUL << | ||
1055 | (32 - ip6rd.relay_prefixlen)); | ||
1056 | else | ||
1057 | relay_prefix = 0; | ||
1058 | if (relay_prefix != ip6rd.relay_prefix) | ||
1059 | goto done; | ||
1060 | |||
1061 | ipv6_addr_copy(&t->ip6rd.prefix, &prefix); | ||
1062 | t->ip6rd.relay_prefix = relay_prefix; | ||
1063 | t->ip6rd.prefixlen = ip6rd.prefixlen; | ||
1064 | t->ip6rd.relay_prefixlen = ip6rd.relay_prefixlen; | ||
1065 | } else | ||
1066 | ipip6_tunnel_clone_6rd(dev, sitn); | ||
1067 | |||
1068 | err = 0; | ||
1069 | break; | ||
1070 | #endif | ||
1071 | |||
924 | default: | 1072 | default: |
925 | err = -EINVAL; | 1073 | err = -EINVAL; |
926 | } | 1074 | } |
@@ -997,16 +1145,19 @@ static struct xfrm_tunnel sit_handler = { | |||
997 | .priority = 1, | 1145 | .priority = 1, |
998 | }; | 1146 | }; |
999 | 1147 | ||
1000 | static void sit_destroy_tunnels(struct sit_net *sitn) | 1148 | static void sit_destroy_tunnels(struct sit_net *sitn, struct list_head *head) |
1001 | { | 1149 | { |
1002 | int prio; | 1150 | int prio; |
1003 | 1151 | ||
1004 | for (prio = 1; prio < 4; prio++) { | 1152 | for (prio = 1; prio < 4; prio++) { |
1005 | int h; | 1153 | int h; |
1006 | for (h = 0; h < HASH_SIZE; h++) { | 1154 | for (h = 0; h < HASH_SIZE; h++) { |
1007 | struct ip_tunnel *t; | 1155 | struct ip_tunnel *t = sitn->tunnels[prio][h]; |
1008 | while ((t = sitn->tunnels[prio][h]) != NULL) | 1156 | |
1009 | unregister_netdevice(t->dev); | 1157 | while (t != NULL) { |
1158 | unregister_netdevice_queue(t->dev, head); | ||
1159 | t = t->next; | ||
1160 | } | ||
1010 | } | 1161 | } |
1011 | } | 1162 | } |
1012 | } | 1163 | } |
@@ -1039,6 +1190,7 @@ static int sit_init_net(struct net *net) | |||
1039 | dev_net_set(sitn->fb_tunnel_dev, net); | 1190 | dev_net_set(sitn->fb_tunnel_dev, net); |
1040 | 1191 | ||
1041 | ipip6_fb_tunnel_init(sitn->fb_tunnel_dev); | 1192 | ipip6_fb_tunnel_init(sitn->fb_tunnel_dev); |
1193 | ipip6_tunnel_clone_6rd(sitn->fb_tunnel_dev, sitn); | ||
1042 | 1194 | ||
1043 | if ((err = register_netdev(sitn->fb_tunnel_dev))) | 1195 | if ((err = register_netdev(sitn->fb_tunnel_dev))) |
1044 | goto err_reg_dev; | 1196 | goto err_reg_dev; |
@@ -1059,11 +1211,13 @@ err_alloc: | |||
1059 | static void sit_exit_net(struct net *net) | 1211 | static void sit_exit_net(struct net *net) |
1060 | { | 1212 | { |
1061 | struct sit_net *sitn; | 1213 | struct sit_net *sitn; |
1214 | LIST_HEAD(list); | ||
1062 | 1215 | ||
1063 | sitn = net_generic(net, sit_net_id); | 1216 | sitn = net_generic(net, sit_net_id); |
1064 | rtnl_lock(); | 1217 | rtnl_lock(); |
1065 | sit_destroy_tunnels(sitn); | 1218 | sit_destroy_tunnels(sitn, &list); |
1066 | unregister_netdevice(sitn->fb_tunnel_dev); | 1219 | unregister_netdevice_queue(sitn->fb_tunnel_dev, &list); |
1220 | unregister_netdevice_many(&list); | ||
1067 | rtnl_unlock(); | 1221 | rtnl_unlock(); |
1068 | kfree(sitn); | 1222 | kfree(sitn); |
1069 | } | 1223 | } |
@@ -1078,6 +1232,7 @@ static void __exit sit_cleanup(void) | |||
1078 | xfrm4_tunnel_deregister(&sit_handler, AF_INET6); | 1232 | xfrm4_tunnel_deregister(&sit_handler, AF_INET6); |
1079 | 1233 | ||
1080 | unregister_pernet_gen_device(sit_net_id, &sit_net_ops); | 1234 | unregister_pernet_gen_device(sit_net_id, &sit_net_ops); |
1235 | rcu_barrier(); /* Wait for completion of call_rcu()'s */ | ||
1081 | } | 1236 | } |
1082 | 1237 | ||
1083 | static int __init sit_init(void) | 1238 | static int __init sit_init(void) |
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c index 6b6ae913b5d4..612fc53e0bb9 100644 --- a/net/ipv6/syncookies.c +++ b/net/ipv6/syncookies.c | |||
@@ -184,13 +184,6 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) | |||
184 | 184 | ||
185 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV); | 185 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV); |
186 | 186 | ||
187 | /* check for timestamp cookie support */ | ||
188 | memset(&tcp_opt, 0, sizeof(tcp_opt)); | ||
189 | tcp_parse_options(skb, &tcp_opt, 0); | ||
190 | |||
191 | if (tcp_opt.saw_tstamp) | ||
192 | cookie_check_timestamp(&tcp_opt); | ||
193 | |||
194 | ret = NULL; | 187 | ret = NULL; |
195 | req = inet6_reqsk_alloc(&tcp6_request_sock_ops); | 188 | req = inet6_reqsk_alloc(&tcp6_request_sock_ops); |
196 | if (!req) | 189 | if (!req) |
@@ -224,12 +217,6 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) | |||
224 | req->expires = 0UL; | 217 | req->expires = 0UL; |
225 | req->retrans = 0; | 218 | req->retrans = 0; |
226 | ireq->ecn_ok = 0; | 219 | ireq->ecn_ok = 0; |
227 | ireq->snd_wscale = tcp_opt.snd_wscale; | ||
228 | ireq->rcv_wscale = tcp_opt.rcv_wscale; | ||
229 | ireq->sack_ok = tcp_opt.sack_ok; | ||
230 | ireq->wscale_ok = tcp_opt.wscale_ok; | ||
231 | ireq->tstamp_ok = tcp_opt.saw_tstamp; | ||
232 | req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0; | ||
233 | treq->rcv_isn = ntohl(th->seq) - 1; | 220 | treq->rcv_isn = ntohl(th->seq) - 1; |
234 | treq->snt_isn = cookie; | 221 | treq->snt_isn = cookie; |
235 | 222 | ||
@@ -252,8 +239,9 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) | |||
252 | } | 239 | } |
253 | ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr); | 240 | ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr); |
254 | fl.oif = sk->sk_bound_dev_if; | 241 | fl.oif = sk->sk_bound_dev_if; |
242 | fl.mark = sk->sk_mark; | ||
255 | fl.fl_ip_dport = inet_rsk(req)->rmt_port; | 243 | fl.fl_ip_dport = inet_rsk(req)->rmt_port; |
256 | fl.fl_ip_sport = inet_sk(sk)->sport; | 244 | fl.fl_ip_sport = inet_sk(sk)->inet_sport; |
257 | security_req_classify_flow(req, &fl); | 245 | security_req_classify_flow(req, &fl); |
258 | if (ip6_dst_lookup(sk, &dst, &fl)) | 246 | if (ip6_dst_lookup(sk, &dst, &fl)) |
259 | goto out_free; | 247 | goto out_free; |
@@ -264,6 +252,21 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) | |||
264 | goto out_free; | 252 | goto out_free; |
265 | } | 253 | } |
266 | 254 | ||
255 | /* check for timestamp cookie support */ | ||
256 | memset(&tcp_opt, 0, sizeof(tcp_opt)); | ||
257 | tcp_parse_options(skb, &tcp_opt, 0, dst); | ||
258 | |||
259 | if (tcp_opt.saw_tstamp) | ||
260 | cookie_check_timestamp(&tcp_opt); | ||
261 | |||
262 | req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0; | ||
263 | |||
264 | ireq->snd_wscale = tcp_opt.snd_wscale; | ||
265 | ireq->rcv_wscale = tcp_opt.rcv_wscale; | ||
266 | ireq->sack_ok = tcp_opt.sack_ok; | ||
267 | ireq->wscale_ok = tcp_opt.wscale_ok; | ||
268 | ireq->tstamp_ok = tcp_opt.saw_tstamp; | ||
269 | |||
267 | req->window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW); | 270 | req->window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW); |
268 | tcp_select_initial_window(tcp_full_space(sk), req->mss, | 271 | tcp_select_initial_window(tcp_full_space(sk), req->mss, |
269 | &req->rcv_wnd, &req->window_clamp, | 272 | &req->rcv_wnd, &req->window_clamp, |
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 21d100b68b19..de709091b26d 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -226,10 +226,9 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, | |||
226 | #endif | 226 | #endif |
227 | goto failure; | 227 | goto failure; |
228 | } else { | 228 | } else { |
229 | ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF), | 229 | ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr); |
230 | inet->saddr); | 230 | ipv6_addr_set_v4mapped(inet->inet_rcv_saddr, |
231 | ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF), | 231 | &np->rcv_saddr); |
232 | inet->rcv_saddr); | ||
233 | } | 232 | } |
234 | 233 | ||
235 | return err; | 234 | return err; |
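The switch to ipv6_addr_set_v4mapped() above fills in the standard IPv4-mapped form ::ffff:a.b.c.d from the socket's IPv4 address. A small userspace sketch of that mapping follows; the helper below is our own stand-in for demonstration, not the kernel implementation.

/*
 * Illustrative sketch of building an IPv4-mapped IPv6 address
 * (::ffff:a.b.c.d) from a network-order IPv4 address.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

static void set_v4mapped(uint32_t v4_be, struct in6_addr *v6)
{
	memset(v6, 0, sizeof(*v6));
	v6->s6_addr[10] = 0xff;
	v6->s6_addr[11] = 0xff;
	memcpy(&v6->s6_addr[12], &v4_be, 4);   /* low 32 bits carry the IPv4 address */
}

int main(void)
{
	struct in_addr v4;
	struct in6_addr v6;
	char buf[INET6_ADDRSTRLEN];

	inet_pton(AF_INET, "192.0.2.1", &v4);
	set_v4mapped(v4.s_addr, &v6);
	inet_ntop(AF_INET6, &v6, buf, sizeof(buf));
	printf("%s\n", buf);                    /* prints ::ffff:192.0.2.1 */
	return 0;
}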
@@ -243,8 +242,9 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, | |||
243 | ipv6_addr_copy(&fl.fl6_src, | 242 | ipv6_addr_copy(&fl.fl6_src, |
244 | (saddr ? saddr : &np->saddr)); | 243 | (saddr ? saddr : &np->saddr)); |
245 | fl.oif = sk->sk_bound_dev_if; | 244 | fl.oif = sk->sk_bound_dev_if; |
245 | fl.mark = sk->sk_mark; | ||
246 | fl.fl_ip_dport = usin->sin6_port; | 246 | fl.fl_ip_dport = usin->sin6_port; |
247 | fl.fl_ip_sport = inet->sport; | 247 | fl.fl_ip_sport = inet->inet_sport; |
248 | 248 | ||
249 | if (np->opt && np->opt->srcrt) { | 249 | if (np->opt && np->opt->srcrt) { |
250 | struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt; | 250 | struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt; |
@@ -276,7 +276,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, | |||
276 | 276 | ||
277 | /* set the source address */ | 277 | /* set the source address */ |
278 | ipv6_addr_copy(&np->saddr, saddr); | 278 | ipv6_addr_copy(&np->saddr, saddr); |
279 | inet->rcv_saddr = LOOPBACK4_IPV6; | 279 | inet->inet_rcv_saddr = LOOPBACK4_IPV6; |
280 | 280 | ||
281 | sk->sk_gso_type = SKB_GSO_TCPV6; | 281 | sk->sk_gso_type = SKB_GSO_TCPV6; |
282 | __ip6_dst_store(sk, dst, NULL, NULL); | 282 | __ip6_dst_store(sk, dst, NULL, NULL); |
@@ -288,7 +288,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, | |||
288 | 288 | ||
289 | tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr); | 289 | tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr); |
290 | 290 | ||
291 | inet->dport = usin->sin6_port; | 291 | inet->inet_dport = usin->sin6_port; |
292 | 292 | ||
293 | tcp_set_state(sk, TCP_SYN_SENT); | 293 | tcp_set_state(sk, TCP_SYN_SENT); |
294 | err = inet6_hash_connect(&tcp_death_row, sk); | 294 | err = inet6_hash_connect(&tcp_death_row, sk); |
@@ -298,8 +298,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, | |||
298 | if (!tp->write_seq) | 298 | if (!tp->write_seq) |
299 | tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32, | 299 | tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32, |
300 | np->daddr.s6_addr32, | 300 | np->daddr.s6_addr32, |
301 | inet->sport, | 301 | inet->inet_sport, |
302 | inet->dport); | 302 | inet->inet_dport); |
303 | 303 | ||
304 | err = tcp_connect(sk); | 304 | err = tcp_connect(sk); |
305 | if (err) | 305 | if (err) |
@@ -311,7 +311,7 @@ late_failure: | |||
311 | tcp_set_state(sk, TCP_CLOSE); | 311 | tcp_set_state(sk, TCP_CLOSE); |
312 | __sk_dst_reset(sk); | 312 | __sk_dst_reset(sk); |
313 | failure: | 313 | failure: |
314 | inet->dport = 0; | 314 | inet->inet_dport = 0; |
315 | sk->sk_route_caps = 0; | 315 | sk->sk_route_caps = 0; |
316 | return err; | 316 | return err; |
317 | } | 317 | } |
@@ -383,8 +383,9 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
383 | ipv6_addr_copy(&fl.fl6_dst, &np->daddr); | 383 | ipv6_addr_copy(&fl.fl6_dst, &np->daddr); |
384 | ipv6_addr_copy(&fl.fl6_src, &np->saddr); | 384 | ipv6_addr_copy(&fl.fl6_src, &np->saddr); |
385 | fl.oif = sk->sk_bound_dev_if; | 385 | fl.oif = sk->sk_bound_dev_if; |
386 | fl.fl_ip_dport = inet->dport; | 386 | fl.mark = sk->sk_mark; |
387 | fl.fl_ip_sport = inet->sport; | 387 | fl.fl_ip_dport = inet->inet_dport; |
388 | fl.fl_ip_sport = inet->inet_sport; | ||
388 | security_skb_classify_flow(skb, &fl); | 389 | security_skb_classify_flow(skb, &fl); |
389 | 390 | ||
390 | if ((err = ip6_dst_lookup(sk, &dst, &fl))) { | 391 | if ((err = ip6_dst_lookup(sk, &dst, &fl))) { |
@@ -477,6 +478,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req) | |||
477 | ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr); | 478 | ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr); |
478 | fl.fl6_flowlabel = 0; | 479 | fl.fl6_flowlabel = 0; |
479 | fl.oif = treq->iif; | 480 | fl.oif = treq->iif; |
481 | fl.mark = sk->sk_mark; | ||
480 | fl.fl_ip_dport = inet_rsk(req)->rmt_port; | 482 | fl.fl_ip_dport = inet_rsk(req)->rmt_port; |
481 | fl.fl_ip_sport = inet_rsk(req)->loc_port; | 483 | fl.fl_ip_sport = inet_rsk(req)->loc_port; |
482 | security_req_classify_flow(req, &fl); | 484 | security_req_classify_flow(req, &fl); |
@@ -1165,6 +1167,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) | |||
1165 | struct tcp_sock *tp = tcp_sk(sk); | 1167 | struct tcp_sock *tp = tcp_sk(sk); |
1166 | struct request_sock *req = NULL; | 1168 | struct request_sock *req = NULL; |
1167 | __u32 isn = TCP_SKB_CB(skb)->when; | 1169 | __u32 isn = TCP_SKB_CB(skb)->when; |
1170 | struct dst_entry *dst = __sk_dst_get(sk); | ||
1168 | #ifdef CONFIG_SYN_COOKIES | 1171 | #ifdef CONFIG_SYN_COOKIES |
1169 | int want_cookie = 0; | 1172 | int want_cookie = 0; |
1170 | #else | 1173 | #else |
@@ -1203,7 +1206,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) | |||
1203 | tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr); | 1206 | tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr); |
1204 | tmp_opt.user_mss = tp->rx_opt.user_mss; | 1207 | tmp_opt.user_mss = tp->rx_opt.user_mss; |
1205 | 1208 | ||
1206 | tcp_parse_options(skb, &tmp_opt, 0); | 1209 | tcp_parse_options(skb, &tmp_opt, 0, dst); |
1207 | 1210 | ||
1208 | if (want_cookie && !tmp_opt.saw_tstamp) | 1211 | if (want_cookie && !tmp_opt.saw_tstamp) |
1209 | tcp_clear_options(&tmp_opt); | 1212 | tcp_clear_options(&tmp_opt); |
@@ -1290,11 +1293,9 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, | |||
1290 | 1293 | ||
1291 | memcpy(newnp, np, sizeof(struct ipv6_pinfo)); | 1294 | memcpy(newnp, np, sizeof(struct ipv6_pinfo)); |
1292 | 1295 | ||
1293 | ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF), | 1296 | ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr); |
1294 | newinet->daddr); | ||
1295 | 1297 | ||
1296 | ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF), | 1298 | ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr); |
1297 | newinet->saddr); | ||
1298 | 1299 | ||
1299 | ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr); | 1300 | ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr); |
1300 | 1301 | ||
@@ -1345,6 +1346,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, | |||
1345 | } | 1346 | } |
1346 | ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr); | 1347 | ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr); |
1347 | fl.oif = sk->sk_bound_dev_if; | 1348 | fl.oif = sk->sk_bound_dev_if; |
1349 | fl.mark = sk->sk_mark; | ||
1348 | fl.fl_ip_dport = inet_rsk(req)->rmt_port; | 1350 | fl.fl_ip_dport = inet_rsk(req)->rmt_port; |
1349 | fl.fl_ip_sport = inet_rsk(req)->loc_port; | 1351 | fl.fl_ip_sport = inet_rsk(req)->loc_port; |
1350 | security_req_classify_flow(req, &fl); | 1352 | security_req_classify_flow(req, &fl); |
@@ -1431,7 +1433,8 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, | |||
1431 | newtp->advmss = dst_metric(dst, RTAX_ADVMSS); | 1433 | newtp->advmss = dst_metric(dst, RTAX_ADVMSS); |
1432 | tcp_initialize_rcv_mss(newsk); | 1434 | tcp_initialize_rcv_mss(newsk); |
1433 | 1435 | ||
1434 | newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6; | 1436 | newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6; |
1437 | newinet->inet_rcv_saddr = LOOPBACK4_IPV6; | ||
1435 | 1438 | ||
1436 | #ifdef CONFIG_TCP_MD5SIG | 1439 | #ifdef CONFIG_TCP_MD5SIG |
1437 | /* Copy over the MD5 key from the original socket */ | 1440 | /* Copy over the MD5 key from the original socket */ |
@@ -1848,7 +1851,7 @@ static int tcp_v6_init_sock(struct sock *sk) | |||
1848 | */ | 1851 | */ |
1849 | tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; | 1852 | tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; |
1850 | tp->snd_cwnd_clamp = ~0; | 1853 | tp->snd_cwnd_clamp = ~0; |
1851 | tp->mss_cache = 536; | 1854 | tp->mss_cache = TCP_MSS_DEFAULT; |
1852 | 1855 | ||
1853 | tp->reordering = sysctl_tcp_reordering; | 1856 | tp->reordering = sysctl_tcp_reordering; |
1854 | 1857 | ||
@@ -1931,8 +1934,8 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i) | |||
1931 | 1934 | ||
1932 | dest = &np->daddr; | 1935 | dest = &np->daddr; |
1933 | src = &np->rcv_saddr; | 1936 | src = &np->rcv_saddr; |
1934 | destp = ntohs(inet->dport); | 1937 | destp = ntohs(inet->inet_dport); |
1935 | srcp = ntohs(inet->sport); | 1938 | srcp = ntohs(inet->inet_sport); |
1936 | 1939 | ||
1937 | if (icsk->icsk_pending == ICSK_TIME_RETRANS) { | 1940 | if (icsk->icsk_pending == ICSK_TIME_RETRANS) { |
1938 | timer_active = 1; | 1941 | timer_active = 1; |
@@ -2109,7 +2112,6 @@ static struct inet_protosw tcpv6_protosw = { | |||
2109 | .protocol = IPPROTO_TCP, | 2112 | .protocol = IPPROTO_TCP, |
2110 | .prot = &tcpv6_prot, | 2113 | .prot = &tcpv6_prot, |
2111 | .ops = &inet6_stream_ops, | 2114 | .ops = &inet6_stream_ops, |
2112 | .capability = -1, | ||
2113 | .no_check = 0, | 2115 | .no_check = 0, |
2114 | .flags = INET_PROTOSW_PERMANENT | | 2116 | .flags = INET_PROTOSW_PERMANENT | |
2115 | INET_PROTOSW_ICSK, | 2117 | INET_PROTOSW_ICSK, |
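
tcp_ipv6.c above mostly swaps the open-coded ipv6_addr_set(..., htonl(0x0000FFFF), v4addr) sequences for ipv6_addr_set_v4mapped(), renames the inet_sock port/address fields (sport becomes inet_sport and so on), and copies sk->sk_mark into the flow key. The mapped address itself is just ::ffff:a.b.c.d; a small standalone sketch of an equivalent helper using only standard socket headers (addr_set_v4mapped is a made-up name):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Build the IPv4-mapped IPv6 address ::ffff:a.b.c.d from an IPv4
 * address already in network byte order. */
static void addr_set_v4mapped(uint32_t v4_be, struct in6_addr *v6)
{
    memset(v6, 0, sizeof(*v6));
    v6->s6_addr[10] = 0xff;
    v6->s6_addr[11] = 0xff;
    memcpy(&v6->s6_addr[12], &v4_be, 4);
}

int main(void)
{
    struct in_addr v4;
    struct in6_addr v6;
    char buf[INET6_ADDRSTRLEN];

    inet_pton(AF_INET, "192.0.2.1", &v4);
    addr_set_v4mapped(v4.s_addr, &v6);
    printf("%s\n", inet_ntop(AF_INET6, &v6, buf, sizeof(buf)));
    /* prints ::ffff:192.0.2.1 */
    return 0;
}
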
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index cf538ed5ef6a..69ebdbe78c47 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
@@ -53,7 +53,7 @@ int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2) | |||
53 | { | 53 | { |
54 | const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr; | 54 | const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr; |
55 | const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2); | 55 | const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2); |
56 | __be32 sk_rcv_saddr = inet_sk(sk)->rcv_saddr; | 56 | __be32 sk1_rcv_saddr = inet_sk(sk)->inet_rcv_saddr; |
57 | __be32 sk2_rcv_saddr = inet_rcv_saddr(sk2); | 57 | __be32 sk2_rcv_saddr = inet_rcv_saddr(sk2); |
58 | int sk_ipv6only = ipv6_only_sock(sk); | 58 | int sk_ipv6only = ipv6_only_sock(sk); |
59 | int sk2_ipv6only = inet_v6_ipv6only(sk2); | 59 | int sk2_ipv6only = inet_v6_ipv6only(sk2); |
@@ -63,8 +63,8 @@ int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2) | |||
63 | /* if both are mapped, treat as IPv4 */ | 63 | /* if both are mapped, treat as IPv4 */ |
64 | if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED) | 64 | if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED) |
65 | return (!sk2_ipv6only && | 65 | return (!sk2_ipv6only && |
66 | (!sk_rcv_saddr || !sk2_rcv_saddr || | 66 | (!sk1_rcv_saddr || !sk2_rcv_saddr || |
67 | sk_rcv_saddr == sk2_rcv_saddr)); | 67 | sk1_rcv_saddr == sk2_rcv_saddr)); |
68 | 68 | ||
69 | if (addr_type2 == IPV6_ADDR_ANY && | 69 | if (addr_type2 == IPV6_ADDR_ANY && |
70 | !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED)) | 70 | !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED)) |
@@ -81,9 +81,33 @@ int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2) | |||
81 | return 0; | 81 | return 0; |
82 | } | 82 | } |
83 | 83 | ||
84 | static unsigned int udp6_portaddr_hash(struct net *net, | ||
85 | const struct in6_addr *addr6, | ||
86 | unsigned int port) | ||
87 | { | ||
88 | unsigned int hash, mix = net_hash_mix(net); | ||
89 | |||
90 | if (ipv6_addr_any(addr6)) | ||
91 | hash = jhash_1word(0, mix); | ||
92 | else if (ipv6_addr_v4mapped(addr6)) | ||
93 | hash = jhash_1word(addr6->s6_addr32[3], mix); | ||
94 | else | ||
95 | hash = jhash2(addr6->s6_addr32, 4, mix); | ||
96 | |||
97 | return hash ^ port; | ||
98 | } | ||
99 | |||
100 | |||
84 | int udp_v6_get_port(struct sock *sk, unsigned short snum) | 101 | int udp_v6_get_port(struct sock *sk, unsigned short snum) |
85 | { | 102 | { |
86 | return udp_lib_get_port(sk, snum, ipv6_rcv_saddr_equal); | 103 | unsigned int hash2_nulladdr = |
104 | udp6_portaddr_hash(sock_net(sk), &in6addr_any, snum); | ||
105 | unsigned int hash2_partial = | ||
106 | udp6_portaddr_hash(sock_net(sk), &inet6_sk(sk)->rcv_saddr, 0); | ||
107 | |||
108 | /* precompute partial secondary hash */ | ||
109 | udp_sk(sk)->udp_portaddr_hash = hash2_partial; | ||
110 | return udp_lib_get_port(sk, snum, ipv6_rcv_saddr_equal, hash2_nulladdr); | ||
87 | } | 111 | } |
88 | 112 | ||
89 | static inline int compute_score(struct sock *sk, struct net *net, | 113 | static inline int compute_score(struct sock *sk, struct net *net, |
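
udp6_portaddr_hash() introduced above keys the new secondary UDP table by local address and port, hashing :: like 0.0.0.0 and a v4-mapped address like its plain IPv4 form so dual-stack sockets land in the same slots. A toy userspace model of the same idea; mix() is a simple FNV-1a stand-in for the kernel's jhash_1word()/jhash2(), and the fixed seed replaces net_hash_mix():

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdint.h>
#include <stdio.h>

/* FNV-1a over a byte range: a stand-in for the kernel's jhash. */
static uint32_t mix(const void *data, size_t len, uint32_t seed)
{
    const unsigned char *p = data;
    uint32_t h = 2166136261u ^ seed;

    while (len--) {
        h ^= *p++;
        h *= 16777619u;
    }
    return h;
}

static uint32_t portaddr_hash(const struct in6_addr *addr, uint16_t port,
                              uint32_t seed)
{
    uint32_t zero = 0;
    uint32_t h;

    if (IN6_IS_ADDR_UNSPECIFIED(addr))      /* :: hashes like 0.0.0.0 */
        h = mix(&zero, 4, seed);
    else if (IN6_IS_ADDR_V4MAPPED(addr))    /* ::ffff:a.b.c.d like a.b.c.d */
        h = mix(&addr->s6_addr[12], 4, seed);
    else
        h = mix(addr->s6_addr, 16, seed);

    return h ^ port;                        /* port folded in last */
}

int main(void)
{
    struct in6_addr a;

    inet_pton(AF_INET6, "::ffff:192.0.2.1", &a);
    printf("slot %u\n", portaddr_hash(&a, 53, 0x1234) & 127);
    return 0;
}
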
@@ -94,14 +118,14 @@ static inline int compute_score(struct sock *sk, struct net *net, | |||
94 | { | 118 | { |
95 | int score = -1; | 119 | int score = -1; |
96 | 120 | ||
97 | if (net_eq(sock_net(sk), net) && sk->sk_hash == hnum && | 121 | if (net_eq(sock_net(sk), net) && udp_sk(sk)->udp_port_hash == hnum && |
98 | sk->sk_family == PF_INET6) { | 122 | sk->sk_family == PF_INET6) { |
99 | struct ipv6_pinfo *np = inet6_sk(sk); | 123 | struct ipv6_pinfo *np = inet6_sk(sk); |
100 | struct inet_sock *inet = inet_sk(sk); | 124 | struct inet_sock *inet = inet_sk(sk); |
101 | 125 | ||
102 | score = 0; | 126 | score = 0; |
103 | if (inet->dport) { | 127 | if (inet->inet_dport) { |
104 | if (inet->dport != sport) | 128 | if (inet->inet_dport != sport) |
105 | return -1; | 129 | return -1; |
106 | score++; | 130 | score++; |
107 | } | 131 | } |
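
compute_score2() added below is the usual best-match scoring: any hard mismatch returns -1, each optional constraint that does match (connected remote port, connected remote address, bound device) adds one, and SCORE2_MAX lets udp6_lib_lookup2() stop early once a fully specified socket matches. A generic sketch of that scoring loop over plain structs; the names are invented for illustration:

#include <stdio.h>
#include <string.h>

struct candidate {
    const char *name;
    /* optional constraints; "" or 0 means "don't care" */
    const char *remote_addr;
    int         remote_port;
    int         bound_ifindex;
};

/* -1 on any set-but-mismatching constraint, otherwise the number of
 * constraints matched exactly (more specific sockets score higher). */
static int score(const struct candidate *c, const char *saddr,
                 int sport, int ifindex)
{
    int s = 0;

    if (c->remote_port) {
        if (c->remote_port != sport)
            return -1;
        s++;
    }
    if (c->remote_addr[0]) {
        if (strcmp(c->remote_addr, saddr) != 0)
            return -1;
        s++;
    }
    if (c->bound_ifindex) {
        if (c->bound_ifindex != ifindex)
            return -1;
        s++;
    }
    return s;
}

int main(void)
{
    const struct candidate socks[] = {
        { "wildcard",  "",            0,    0 },
        { "connected", "2001:db8::2", 4000, 0 },
    };
    const struct candidate *best = NULL;
    int badness = -1;

    for (unsigned i = 0; i < sizeof(socks) / sizeof(socks[0]); i++) {
        int s = score(&socks[i], "2001:db8::2", 4000, 3);

        if (s > badness) {
            best = &socks[i];
            badness = s;
        }
    }
    printf("best match: %s (score %d)\n", best->name, badness);
    /* prints: best match: connected (score 2) */
    return 0;
}
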
@@ -124,6 +148,86 @@ static inline int compute_score(struct sock *sk, struct net *net, | |||
124 | return score; | 148 | return score; |
125 | } | 149 | } |
126 | 150 | ||
151 | #define SCORE2_MAX (1 + 1 + 1) | ||
152 | static inline int compute_score2(struct sock *sk, struct net *net, | ||
153 | const struct in6_addr *saddr, __be16 sport, | ||
154 | const struct in6_addr *daddr, unsigned short hnum, | ||
155 | int dif) | ||
156 | { | ||
157 | int score = -1; | ||
158 | |||
159 | if (net_eq(sock_net(sk), net) && udp_sk(sk)->udp_port_hash == hnum && | ||
160 | sk->sk_family == PF_INET6) { | ||
161 | struct ipv6_pinfo *np = inet6_sk(sk); | ||
162 | struct inet_sock *inet = inet_sk(sk); | ||
163 | |||
164 | if (!ipv6_addr_equal(&np->rcv_saddr, daddr)) | ||
165 | return -1; | ||
166 | score = 0; | ||
167 | if (inet->inet_dport) { | ||
168 | if (inet->inet_dport != sport) | ||
169 | return -1; | ||
170 | score++; | ||
171 | } | ||
172 | if (!ipv6_addr_any(&np->daddr)) { | ||
173 | if (!ipv6_addr_equal(&np->daddr, saddr)) | ||
174 | return -1; | ||
175 | score++; | ||
176 | } | ||
177 | if (sk->sk_bound_dev_if) { | ||
178 | if (sk->sk_bound_dev_if != dif) | ||
179 | return -1; | ||
180 | score++; | ||
181 | } | ||
182 | } | ||
183 | return score; | ||
184 | } | ||
185 | |||
186 | |||
187 | /* called with rcu_read_lock() */ | ||
188 | static struct sock *udp6_lib_lookup2(struct net *net, | ||
189 | const struct in6_addr *saddr, __be16 sport, | ||
190 | const struct in6_addr *daddr, unsigned int hnum, int dif, | ||
191 | struct udp_hslot *hslot2, unsigned int slot2) | ||
192 | { | ||
193 | struct sock *sk, *result; | ||
194 | struct hlist_nulls_node *node; | ||
195 | int score, badness; | ||
196 | |||
197 | begin: | ||
198 | result = NULL; | ||
199 | badness = -1; | ||
200 | udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) { | ||
201 | score = compute_score2(sk, net, saddr, sport, | ||
202 | daddr, hnum, dif); | ||
203 | if (score > badness) { | ||
204 | result = sk; | ||
205 | badness = score; | ||
206 | if (score == SCORE2_MAX) | ||
207 | goto exact_match; | ||
208 | } | ||
209 | } | ||
210 | /* | ||
211 | * if the nulls value we got at the end of this lookup is | ||
212 | * not the expected one, we must restart lookup. | ||
213 | * We probably met an item that was moved to another chain. | ||
214 | */ | ||
215 | if (get_nulls_value(node) != slot2) | ||
216 | goto begin; | ||
217 | |||
218 | if (result) { | ||
219 | exact_match: | ||
220 | if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt))) | ||
221 | result = NULL; | ||
222 | else if (unlikely(compute_score2(result, net, saddr, sport, | ||
223 | daddr, hnum, dif) < badness)) { | ||
224 | sock_put(result); | ||
225 | goto begin; | ||
226 | } | ||
227 | } | ||
228 | return result; | ||
229 | } | ||
230 | |||
127 | static struct sock *__udp6_lib_lookup(struct net *net, | 231 | static struct sock *__udp6_lib_lookup(struct net *net, |
128 | struct in6_addr *saddr, __be16 sport, | 232 | struct in6_addr *saddr, __be16 sport, |
129 | struct in6_addr *daddr, __be16 dport, | 233 | struct in6_addr *daddr, __be16 dport, |
@@ -132,11 +236,35 @@ static struct sock *__udp6_lib_lookup(struct net *net, | |||
132 | struct sock *sk, *result; | 236 | struct sock *sk, *result; |
133 | struct hlist_nulls_node *node; | 237 | struct hlist_nulls_node *node; |
134 | unsigned short hnum = ntohs(dport); | 238 | unsigned short hnum = ntohs(dport); |
135 | unsigned int hash = udp_hashfn(net, hnum); | 239 | unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask); |
136 | struct udp_hslot *hslot = &udptable->hash[hash]; | 240 | struct udp_hslot *hslot2, *hslot = &udptable->hash[slot]; |
137 | int score, badness; | 241 | int score, badness; |
138 | 242 | ||
139 | rcu_read_lock(); | 243 | rcu_read_lock(); |
244 | if (hslot->count > 10) { | ||
245 | hash2 = udp6_portaddr_hash(net, daddr, hnum); | ||
246 | slot2 = hash2 & udptable->mask; | ||
247 | hslot2 = &udptable->hash2[slot2]; | ||
248 | if (hslot->count < hslot2->count) | ||
249 | goto begin; | ||
250 | |||
251 | result = udp6_lib_lookup2(net, saddr, sport, | ||
252 | daddr, hnum, dif, | ||
253 | hslot2, slot2); | ||
254 | if (!result) { | ||
255 | hash2 = udp6_portaddr_hash(net, &in6addr_any, hnum); | ||
256 | slot2 = hash2 & udptable->mask; | ||
257 | hslot2 = &udptable->hash2[slot2]; | ||
258 | if (hslot->count < hslot2->count) | ||
259 | goto begin; | ||
260 | |||
261 | result = udp6_lib_lookup2(net, &in6addr_any, sport, | ||
262 | daddr, hnum, dif, | ||
263 | hslot2, slot2); | ||
264 | } | ||
265 | rcu_read_unlock(); | ||
266 | return result; | ||
267 | } | ||
140 | begin: | 268 | begin: |
141 | result = NULL; | 269 | result = NULL; |
142 | badness = -1; | 270 | badness = -1; |
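
In __udp6_lib_lookup() above, once a primary port-keyed chain holds more than ten sockets the lookup moves to the secondary table: probe the slot for the exact destination address first, and only on a miss probe the slot for the wildcard address (falling back to the primary scan whenever the secondary chain would be longer). A compressed sketch of that exact-then-wildcard control flow over ordinary arrays; slot_of() and lookup2() are hypothetical stand-ins, and the scoring and RCU details are omitted:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SLOTS 16

struct binding {                 /* stand-in for a bound UDP socket */
    char     addr[16];           /* "" means wildcard (::) */
    uint16_t port;
    struct binding *next;
};

static struct binding *table2[SLOTS];   /* secondary: keyed by addr + port */

static unsigned slot_of(const char *addr, uint16_t port)
{
    unsigned h = port, i;

    for (i = 0; addr[i]; i++)
        h = h * 31 + (unsigned char)addr[i];
    return h % SLOTS;
}

/* Walk one secondary chain looking for an address + port match. */
static struct binding *lookup2(const char *addr, uint16_t port)
{
    struct binding *b;

    for (b = table2[slot_of(addr, port)]; b; b = b->next)
        if (b->port == port && strcmp(b->addr, addr) == 0)
            return b;
    return NULL;
}

static struct binding *lookup(const char *daddr, uint16_t dport)
{
    struct binding *b = lookup2(daddr, dport); /* exact address first */

    if (!b)
        b = lookup2("", dport);                /* then the wildcard slot */
    return b;
}

int main(void)
{
    static struct binding wild = { "", 53, NULL };

    table2[slot_of("", 53)] = &wild;
    printf("%s\n", lookup("2001:db8::1", 53) ? "hit" : "miss"); /* hit */
    return 0;
}
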
@@ -152,7 +280,7 @@ begin: | |||
152 | * not the expected one, we must restart lookup. | 280 | * not the expected one, we must restart lookup. |
153 | * We probably met an item that was moved to another chain. | 281 | * We probably met an item that was moved to another chain. |
154 | */ | 282 | */ |
155 | if (get_nulls_value(node) != hash) | 283 | if (get_nulls_value(node) != slot) |
156 | goto begin; | 284 | goto begin; |
157 | 285 | ||
158 | if (result) { | 286 | if (result) { |
@@ -252,7 +380,7 @@ try_again: | |||
252 | UDP_MIB_INDATAGRAMS, is_udplite); | 380 | UDP_MIB_INDATAGRAMS, is_udplite); |
253 | } | 381 | } |
254 | 382 | ||
255 | sock_recv_timestamp(msg, sk, skb); | 383 | sock_recv_ts_and_drops(msg, sk, skb); |
256 | 384 | ||
257 | /* Copy the address. */ | 385 | /* Copy the address. */ |
258 | if (msg->msg_name) { | 386 | if (msg->msg_name) { |
@@ -265,8 +393,8 @@ try_again: | |||
265 | sin6->sin6_scope_id = 0; | 393 | sin6->sin6_scope_id = 0; |
266 | 394 | ||
267 | if (is_udp4) | 395 | if (is_udp4) |
268 | ipv6_addr_set(&sin6->sin6_addr, 0, 0, | 396 | ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr, |
269 | htonl(0xffff), ip_hdr(skb)->saddr); | 397 | &sin6->sin6_addr); |
270 | else { | 398 | else { |
271 | ipv6_addr_copy(&sin6->sin6_addr, | 399 | ipv6_addr_copy(&sin6->sin6_addr, |
272 | &ipv6_hdr(skb)->saddr); | 400 | &ipv6_hdr(skb)->saddr); |
@@ -383,18 +511,18 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb) | |||
383 | goto drop; | 511 | goto drop; |
384 | } | 512 | } |
385 | 513 | ||
386 | if ((rc = sock_queue_rcv_skb(sk,skb)) < 0) { | 514 | if ((rc = sock_queue_rcv_skb(sk, skb)) < 0) { |
387 | /* Note that an ENOMEM error is charged twice */ | 515 | /* Note that an ENOMEM error is charged twice */ |
388 | if (rc == -ENOMEM) { | 516 | if (rc == -ENOMEM) |
389 | UDP6_INC_STATS_BH(sock_net(sk), | 517 | UDP6_INC_STATS_BH(sock_net(sk), |
390 | UDP_MIB_RCVBUFERRORS, is_udplite); | 518 | UDP_MIB_RCVBUFERRORS, is_udplite); |
391 | atomic_inc(&sk->sk_drops); | 519 | goto drop_no_sk_drops_inc; |
392 | } | ||
393 | goto drop; | ||
394 | } | 520 | } |
395 | 521 | ||
396 | return 0; | 522 | return 0; |
397 | drop: | 523 | drop: |
524 | atomic_inc(&sk->sk_drops); | ||
525 | drop_no_sk_drops_inc: | ||
398 | UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); | 526 | UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); |
399 | kfree_skb(skb); | 527 | kfree_skb(skb); |
400 | return -1; | 528 | return -1; |
@@ -415,10 +543,11 @@ static struct sock *udp_v6_mcast_next(struct net *net, struct sock *sk, | |||
415 | if (!net_eq(sock_net(s), net)) | 543 | if (!net_eq(sock_net(s), net)) |
416 | continue; | 544 | continue; |
417 | 545 | ||
418 | if (s->sk_hash == num && s->sk_family == PF_INET6) { | 546 | if (udp_sk(s)->udp_port_hash == num && |
547 | s->sk_family == PF_INET6) { | ||
419 | struct ipv6_pinfo *np = inet6_sk(s); | 548 | struct ipv6_pinfo *np = inet6_sk(s); |
420 | if (inet->dport) { | 549 | if (inet->inet_dport) { |
421 | if (inet->dport != rmt_port) | 550 | if (inet->inet_dport != rmt_port) |
422 | continue; | 551 | continue; |
423 | } | 552 | } |
424 | if (!ipv6_addr_any(&np->daddr) && | 553 | if (!ipv6_addr_any(&np->daddr) && |
@@ -440,6 +569,33 @@ static struct sock *udp_v6_mcast_next(struct net *net, struct sock *sk, | |||
440 | return NULL; | 569 | return NULL; |
441 | } | 570 | } |
442 | 571 | ||
572 | static void flush_stack(struct sock **stack, unsigned int count, | ||
573 | struct sk_buff *skb, unsigned int final) | ||
574 | { | ||
575 | unsigned int i; | ||
576 | struct sock *sk; | ||
577 | struct sk_buff *skb1; | ||
578 | |||
579 | for (i = 0; i < count; i++) { | ||
580 | skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC); | ||
581 | |||
582 | sk = stack[i]; | ||
583 | if (skb1) { | ||
584 | bh_lock_sock(sk); | ||
585 | if (!sock_owned_by_user(sk)) | ||
586 | udpv6_queue_rcv_skb(sk, skb1); | ||
587 | else | ||
588 | sk_add_backlog(sk, skb1); | ||
589 | bh_unlock_sock(sk); | ||
590 | } else { | ||
591 | atomic_inc(&sk->sk_drops); | ||
592 | UDP6_INC_STATS_BH(sock_net(sk), | ||
593 | UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk)); | ||
594 | UDP6_INC_STATS_BH(sock_net(sk), | ||
595 | UDP_MIB_INERRORS, IS_UDPLITE(sk)); | ||
596 | } | ||
597 | } | ||
598 | } | ||
443 | /* | 599 | /* |
444 | * Note: called only from the BH handler context, | 600 | * Note: called only from the BH handler context, |
445 | * so we don't need to lock the hashes. | 601 | * so we don't need to lock the hashes. |
@@ -448,41 +604,43 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb, | |||
448 | struct in6_addr *saddr, struct in6_addr *daddr, | 604 | struct in6_addr *saddr, struct in6_addr *daddr, |
449 | struct udp_table *udptable) | 605 | struct udp_table *udptable) |
450 | { | 606 | { |
451 | struct sock *sk, *sk2; | 607 | struct sock *sk, *stack[256 / sizeof(struct sock *)]; |
452 | const struct udphdr *uh = udp_hdr(skb); | 608 | const struct udphdr *uh = udp_hdr(skb); |
453 | struct udp_hslot *hslot = &udptable->hash[udp_hashfn(net, ntohs(uh->dest))]; | 609 | struct udp_hslot *hslot = udp_hashslot(udptable, net, ntohs(uh->dest)); |
454 | int dif; | 610 | int dif; |
611 | unsigned int i, count = 0; | ||
455 | 612 | ||
456 | spin_lock(&hslot->lock); | 613 | spin_lock(&hslot->lock); |
457 | sk = sk_nulls_head(&hslot->head); | 614 | sk = sk_nulls_head(&hslot->head); |
458 | dif = inet6_iif(skb); | 615 | dif = inet6_iif(skb); |
459 | sk = udp_v6_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif); | 616 | sk = udp_v6_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif); |
460 | if (!sk) { | 617 | while (sk) { |
461 | kfree_skb(skb); | 618 | stack[count++] = sk; |
462 | goto out; | 619 | sk = udp_v6_mcast_next(net, sk_nulls_next(sk), uh->dest, daddr, |
463 | } | 620 | uh->source, saddr, dif); |
464 | 621 | if (unlikely(count == ARRAY_SIZE(stack))) { | |
465 | sk2 = sk; | 622 | if (!sk) |
466 | while ((sk2 = udp_v6_mcast_next(net, sk_nulls_next(sk2), uh->dest, daddr, | 623 | break; |
467 | uh->source, saddr, dif))) { | 624 | flush_stack(stack, count, skb, ~0); |
468 | struct sk_buff *buff = skb_clone(skb, GFP_ATOMIC); | 625 | count = 0; |
469 | if (buff) { | ||
470 | bh_lock_sock(sk2); | ||
471 | if (!sock_owned_by_user(sk2)) | ||
472 | udpv6_queue_rcv_skb(sk2, buff); | ||
473 | else | ||
474 | sk_add_backlog(sk2, buff); | ||
475 | bh_unlock_sock(sk2); | ||
476 | } | 626 | } |
477 | } | 627 | } |
478 | bh_lock_sock(sk); | 628 | /* |
479 | if (!sock_owned_by_user(sk)) | 629 | * before releasing the lock, we must take reference on sockets |
480 | udpv6_queue_rcv_skb(sk, skb); | 630 | */ |
481 | else | 631 | for (i = 0; i < count; i++) |
482 | sk_add_backlog(sk, skb); | 632 | sock_hold(stack[i]); |
483 | bh_unlock_sock(sk); | 633 | |
484 | out: | ||
485 | spin_unlock(&hslot->lock); | 634 | spin_unlock(&hslot->lock); |
635 | |||
636 | if (count) { | ||
637 | flush_stack(stack, count, skb, count - 1); | ||
638 | |||
639 | for (i = 0; i < count; i++) | ||
640 | sock_put(stack[i]); | ||
641 | } else { | ||
642 | kfree_skb(skb); | ||
643 | } | ||
486 | return 0; | 644 | return 0; |
487 | } | 645 | } |
488 | 646 | ||
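
The rewritten multicast path above no longer clones and queues each packet while the hash-slot lock is held: matching sockets are first collected into an on-stack array, references are taken on them, the lock is dropped, and only then does flush_stack() clone and deliver. A generic userspace sketch of that collect-under-the-lock, work-outside-it pattern, with a pthread mutex standing in for the slot lock; the refcounting that keeps the kernel's snapshot valid is skipped here because the array holds plain values:

#include <pthread.h>
#include <stdio.h>

#define NITEMS 8

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int items[NITEMS] = { 1, 2, 3, 4, 5, 6, 7, 8 };

/* Per-item work that should not run with the shared lock held. */
static void deliver(int v)
{
    printf("deliver %d\n", v);
}

static void deliver_all(void)
{
    int stack[NITEMS];
    int count = 0;

    /* Phase 1: under the lock, only snapshot what needs delivering. */
    pthread_mutex_lock(&lock);
    for (int i = 0; i < NITEMS; i++)
        if (items[i] % 2 == 0)          /* the "matching sockets" */
            stack[count++] = items[i];
    pthread_mutex_unlock(&lock);

    /* Phase 2: lock dropped, do the expensive work on the snapshot. */
    for (int i = 0; i < count; i++)
        deliver(stack[i]);
}

int main(void)
{
    deliver_all();
    return 0;
}

The payoff is shorter lock hold times on a contended slot; the cost is the bounded on-stack array, which is why the kernel version flushes and restarts when the array fills.
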
@@ -792,7 +950,7 @@ int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk, | |||
792 | if (ipv6_addr_v4mapped(daddr)) { | 950 | if (ipv6_addr_v4mapped(daddr)) { |
793 | struct sockaddr_in sin; | 951 | struct sockaddr_in sin; |
794 | sin.sin_family = AF_INET; | 952 | sin.sin_family = AF_INET; |
795 | sin.sin_port = sin6 ? sin6->sin6_port : inet->dport; | 953 | sin.sin_port = sin6 ? sin6->sin6_port : inet->inet_dport; |
796 | sin.sin_addr.s_addr = daddr->s6_addr32[3]; | 954 | sin.sin_addr.s_addr = daddr->s6_addr32[3]; |
797 | msg->msg_name = &sin; | 955 | msg->msg_name = &sin; |
798 | msg->msg_namelen = sizeof(sin); | 956 | msg->msg_namelen = sizeof(sin); |
@@ -865,7 +1023,7 @@ do_udp_sendmsg: | |||
865 | if (sk->sk_state != TCP_ESTABLISHED) | 1023 | if (sk->sk_state != TCP_ESTABLISHED) |
866 | return -EDESTADDRREQ; | 1024 | return -EDESTADDRREQ; |
867 | 1025 | ||
868 | fl.fl_ip_dport = inet->dport; | 1026 | fl.fl_ip_dport = inet->inet_dport; |
869 | daddr = &np->daddr; | 1027 | daddr = &np->daddr; |
870 | fl.fl6_flowlabel = np->flow_label; | 1028 | fl.fl6_flowlabel = np->flow_label; |
871 | connected = 1; | 1029 | connected = 1; |
@@ -877,6 +1035,8 @@ do_udp_sendmsg: | |||
877 | if (!fl.oif) | 1035 | if (!fl.oif) |
878 | fl.oif = np->sticky_pktinfo.ipi6_ifindex; | 1036 | fl.oif = np->sticky_pktinfo.ipi6_ifindex; |
879 | 1037 | ||
1038 | fl.mark = sk->sk_mark; | ||
1039 | |||
880 | if (msg->msg_controllen) { | 1040 | if (msg->msg_controllen) { |
881 | opt = &opt_space; | 1041 | opt = &opt_space; |
882 | memset(opt, 0, sizeof(struct ipv6_txoptions)); | 1042 | memset(opt, 0, sizeof(struct ipv6_txoptions)); |
@@ -909,7 +1069,7 @@ do_udp_sendmsg: | |||
909 | fl.fl6_dst.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */ | 1069 | fl.fl6_dst.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */ |
910 | if (ipv6_addr_any(&fl.fl6_src) && !ipv6_addr_any(&np->saddr)) | 1070 | if (ipv6_addr_any(&fl.fl6_src) && !ipv6_addr_any(&np->saddr)) |
911 | ipv6_addr_copy(&fl.fl6_src, &np->saddr); | 1071 | ipv6_addr_copy(&fl.fl6_src, &np->saddr); |
912 | fl.fl_ip_sport = inet->sport; | 1072 | fl.fl_ip_sport = inet->inet_sport; |
913 | 1073 | ||
914 | /* merge ip6_build_xmit from ip6_output */ | 1074 | /* merge ip6_build_xmit from ip6_output */ |
915 | if (opt && opt->srcrt) { | 1075 | if (opt && opt->srcrt) { |
@@ -1190,10 +1350,10 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket | |||
1190 | 1350 | ||
1191 | dest = &np->daddr; | 1351 | dest = &np->daddr; |
1192 | src = &np->rcv_saddr; | 1352 | src = &np->rcv_saddr; |
1193 | destp = ntohs(inet->dport); | 1353 | destp = ntohs(inet->inet_dport); |
1194 | srcp = ntohs(inet->sport); | 1354 | srcp = ntohs(inet->inet_sport); |
1195 | seq_printf(seq, | 1355 | seq_printf(seq, |
1196 | "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " | 1356 | "%5d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " |
1197 | "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d\n", | 1357 | "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d\n", |
1198 | bucket, | 1358 | bucket, |
1199 | src->s6_addr32[0], src->s6_addr32[1], | 1359 | src->s6_addr32[0], src->s6_addr32[1], |
@@ -1282,7 +1442,6 @@ static struct inet_protosw udpv6_protosw = { | |||
1282 | .protocol = IPPROTO_UDP, | 1442 | .protocol = IPPROTO_UDP, |
1283 | .prot = &udpv6_prot, | 1443 | .prot = &udpv6_prot, |
1284 | .ops = &inet6_dgram_ops, | 1444 | .ops = &inet6_dgram_ops, |
1285 | .capability =-1, | ||
1286 | .no_check = UDP_CSUM_DEFAULT, | 1445 | .no_check = UDP_CSUM_DEFAULT, |
1287 | .flags = INET_PROTOSW_PERMANENT, | 1446 | .flags = INET_PROTOSW_PERMANENT, |
1288 | }; | 1447 | }; |
diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c index d737a27ee010..6ea6938919e6 100644 --- a/net/ipv6/udplite.c +++ b/net/ipv6/udplite.c | |||
@@ -62,7 +62,6 @@ static struct inet_protosw udplite6_protosw = { | |||
62 | .protocol = IPPROTO_UDPLITE, | 62 | .protocol = IPPROTO_UDPLITE, |
63 | .prot = &udplitev6_prot, | 63 | .prot = &udplitev6_prot, |
64 | .ops = &inet6_dgram_ops, | 64 | .ops = &inet6_dgram_ops, |
65 | .capability = -1, | ||
66 | .no_check = 0, | 65 | .no_check = 0, |
67 | .flags = INET_PROTOSW_PERMANENT, | 66 | .flags = INET_PROTOSW_PERMANENT, |
68 | }; | 67 | }; |
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c index 81a95c00e503..438831d33593 100644 --- a/net/ipv6/xfrm6_tunnel.c +++ b/net/ipv6/xfrm6_tunnel.c | |||
@@ -23,7 +23,7 @@ | |||
23 | */ | 23 | */ |
24 | #include <linux/module.h> | 24 | #include <linux/module.h> |
25 | #include <linux/xfrm.h> | 25 | #include <linux/xfrm.h> |
26 | #include <linux/list.h> | 26 | #include <linux/rculist.h> |
27 | #include <net/ip.h> | 27 | #include <net/ip.h> |
28 | #include <net/xfrm.h> | 28 | #include <net/xfrm.h> |
29 | #include <net/ipv6.h> | 29 | #include <net/ipv6.h> |
@@ -36,14 +36,15 @@ | |||
36 | * per xfrm_address_t. | 36 | * per xfrm_address_t. |
37 | */ | 37 | */ |
38 | struct xfrm6_tunnel_spi { | 38 | struct xfrm6_tunnel_spi { |
39 | struct hlist_node list_byaddr; | 39 | struct hlist_node list_byaddr; |
40 | struct hlist_node list_byspi; | 40 | struct hlist_node list_byspi; |
41 | xfrm_address_t addr; | 41 | xfrm_address_t addr; |
42 | u32 spi; | 42 | u32 spi; |
43 | atomic_t refcnt; | 43 | atomic_t refcnt; |
44 | struct rcu_head rcu_head; | ||
44 | }; | 45 | }; |
45 | 46 | ||
46 | static DEFINE_RWLOCK(xfrm6_tunnel_spi_lock); | 47 | static DEFINE_SPINLOCK(xfrm6_tunnel_spi_lock); |
47 | 48 | ||
48 | static u32 xfrm6_tunnel_spi; | 49 | static u32 xfrm6_tunnel_spi; |
49 | 50 | ||
@@ -107,6 +108,7 @@ static void xfrm6_tunnel_spi_fini(void) | |||
107 | if (!hlist_empty(&xfrm6_tunnel_spi_byspi[i])) | 108 | if (!hlist_empty(&xfrm6_tunnel_spi_byspi[i])) |
108 | return; | 109 | return; |
109 | } | 110 | } |
111 | rcu_barrier(); | ||
110 | kmem_cache_destroy(xfrm6_tunnel_spi_kmem); | 112 | kmem_cache_destroy(xfrm6_tunnel_spi_kmem); |
111 | xfrm6_tunnel_spi_kmem = NULL; | 113 | xfrm6_tunnel_spi_kmem = NULL; |
112 | } | 114 | } |
@@ -116,7 +118,7 @@ static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr) | |||
116 | struct xfrm6_tunnel_spi *x6spi; | 118 | struct xfrm6_tunnel_spi *x6spi; |
117 | struct hlist_node *pos; | 119 | struct hlist_node *pos; |
118 | 120 | ||
119 | hlist_for_each_entry(x6spi, pos, | 121 | hlist_for_each_entry_rcu(x6spi, pos, |
120 | &xfrm6_tunnel_spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)], | 122 | &xfrm6_tunnel_spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)], |
121 | list_byaddr) { | 123 | list_byaddr) { |
122 | if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) | 124 | if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) |
@@ -131,10 +133,10 @@ __be32 xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr) | |||
131 | struct xfrm6_tunnel_spi *x6spi; | 133 | struct xfrm6_tunnel_spi *x6spi; |
132 | u32 spi; | 134 | u32 spi; |
133 | 135 | ||
134 | read_lock_bh(&xfrm6_tunnel_spi_lock); | 136 | rcu_read_lock_bh(); |
135 | x6spi = __xfrm6_tunnel_spi_lookup(saddr); | 137 | x6spi = __xfrm6_tunnel_spi_lookup(saddr); |
136 | spi = x6spi ? x6spi->spi : 0; | 138 | spi = x6spi ? x6spi->spi : 0; |
137 | read_unlock_bh(&xfrm6_tunnel_spi_lock); | 139 | rcu_read_unlock_bh(); |
138 | return htonl(spi); | 140 | return htonl(spi); |
139 | } | 141 | } |
140 | 142 | ||
@@ -185,14 +187,15 @@ alloc_spi: | |||
185 | if (!x6spi) | 187 | if (!x6spi) |
186 | goto out; | 188 | goto out; |
187 | 189 | ||
190 | INIT_RCU_HEAD(&x6spi->rcu_head); | ||
188 | memcpy(&x6spi->addr, saddr, sizeof(x6spi->addr)); | 191 | memcpy(&x6spi->addr, saddr, sizeof(x6spi->addr)); |
189 | x6spi->spi = spi; | 192 | x6spi->spi = spi; |
190 | atomic_set(&x6spi->refcnt, 1); | 193 | atomic_set(&x6spi->refcnt, 1); |
191 | 194 | ||
192 | hlist_add_head(&x6spi->list_byspi, &xfrm6_tunnel_spi_byspi[index]); | 195 | hlist_add_head_rcu(&x6spi->list_byspi, &xfrm6_tunnel_spi_byspi[index]); |
193 | 196 | ||
194 | index = xfrm6_tunnel_spi_hash_byaddr(saddr); | 197 | index = xfrm6_tunnel_spi_hash_byaddr(saddr); |
195 | hlist_add_head(&x6spi->list_byaddr, &xfrm6_tunnel_spi_byaddr[index]); | 198 | hlist_add_head_rcu(&x6spi->list_byaddr, &xfrm6_tunnel_spi_byaddr[index]); |
196 | out: | 199 | out: |
197 | return spi; | 200 | return spi; |
198 | } | 201 | } |
@@ -202,26 +205,32 @@ __be32 xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr) | |||
202 | struct xfrm6_tunnel_spi *x6spi; | 205 | struct xfrm6_tunnel_spi *x6spi; |
203 | u32 spi; | 206 | u32 spi; |
204 | 207 | ||
205 | write_lock_bh(&xfrm6_tunnel_spi_lock); | 208 | spin_lock_bh(&xfrm6_tunnel_spi_lock); |
206 | x6spi = __xfrm6_tunnel_spi_lookup(saddr); | 209 | x6spi = __xfrm6_tunnel_spi_lookup(saddr); |
207 | if (x6spi) { | 210 | if (x6spi) { |
208 | atomic_inc(&x6spi->refcnt); | 211 | atomic_inc(&x6spi->refcnt); |
209 | spi = x6spi->spi; | 212 | spi = x6spi->spi; |
210 | } else | 213 | } else |
211 | spi = __xfrm6_tunnel_alloc_spi(saddr); | 214 | spi = __xfrm6_tunnel_alloc_spi(saddr); |
212 | write_unlock_bh(&xfrm6_tunnel_spi_lock); | 215 | spin_unlock_bh(&xfrm6_tunnel_spi_lock); |
213 | 216 | ||
214 | return htonl(spi); | 217 | return htonl(spi); |
215 | } | 218 | } |
216 | 219 | ||
217 | EXPORT_SYMBOL(xfrm6_tunnel_alloc_spi); | 220 | EXPORT_SYMBOL(xfrm6_tunnel_alloc_spi); |
218 | 221 | ||
222 | static void x6spi_destroy_rcu(struct rcu_head *head) | ||
223 | { | ||
224 | kmem_cache_free(xfrm6_tunnel_spi_kmem, | ||
225 | container_of(head, struct xfrm6_tunnel_spi, rcu_head)); | ||
226 | } | ||
227 | |||
219 | void xfrm6_tunnel_free_spi(xfrm_address_t *saddr) | 228 | void xfrm6_tunnel_free_spi(xfrm_address_t *saddr) |
220 | { | 229 | { |
221 | struct xfrm6_tunnel_spi *x6spi; | 230 | struct xfrm6_tunnel_spi *x6spi; |
222 | struct hlist_node *pos, *n; | 231 | struct hlist_node *pos, *n; |
223 | 232 | ||
224 | write_lock_bh(&xfrm6_tunnel_spi_lock); | 233 | spin_lock_bh(&xfrm6_tunnel_spi_lock); |
225 | 234 | ||
226 | hlist_for_each_entry_safe(x6spi, pos, n, | 235 | hlist_for_each_entry_safe(x6spi, pos, n, |
227 | &xfrm6_tunnel_spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)], | 236 | &xfrm6_tunnel_spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)], |
@@ -229,14 +238,14 @@ void xfrm6_tunnel_free_spi(xfrm_address_t *saddr) | |||
229 | { | 238 | { |
230 | if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) { | 239 | if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) { |
231 | if (atomic_dec_and_test(&x6spi->refcnt)) { | 240 | if (atomic_dec_and_test(&x6spi->refcnt)) { |
232 | hlist_del(&x6spi->list_byaddr); | 241 | hlist_del_rcu(&x6spi->list_byaddr); |
233 | hlist_del(&x6spi->list_byspi); | 242 | hlist_del_rcu(&x6spi->list_byspi); |
234 | kmem_cache_free(xfrm6_tunnel_spi_kmem, x6spi); | 243 | call_rcu(&x6spi->rcu_head, x6spi_destroy_rcu); |
235 | break; | 244 | break; |
236 | } | 245 | } |
237 | } | 246 | } |
238 | } | 247 | } |
239 | write_unlock_bh(&xfrm6_tunnel_spi_lock); | 248 | spin_unlock_bh(&xfrm6_tunnel_spi_lock); |
240 | } | 249 | } |
241 | 250 | ||
242 | EXPORT_SYMBOL(xfrm6_tunnel_free_spi); | 251 | EXPORT_SYMBOL(xfrm6_tunnel_free_spi); |
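
The xfrm6_tunnel hunks are a straightforward rwlock-to-RCU conversion: lookups run under rcu_read_lock_bh() with hlist_for_each_entry_rcu(), writers serialize on a plain spinlock, removed entries are unlinked with hlist_del_rcu() and freed from a call_rcu() callback, and rcu_barrier() runs before the kmem_cache is destroyed so no pending callback can touch a dead cache. A schematic kernel-style sketch of the same shape, assuming the 2.6.32-era four-argument hlist_for_each_entry_rcu(); struct entry and the helper names are invented, and this is not a buildable module on its own:

#include <linux/kernel.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct entry {
	struct hlist_node node;
	u32 key;
	struct rcu_head rcu_head;
};

static struct hlist_head table[16];
static DEFINE_SPINLOCK(table_lock);	/* serializes writers only */

/* Reader side: caller holds rcu_read_lock_bh(), no table_lock needed. */
static struct entry *entry_lookup(u32 key)
{
	struct entry *e;
	struct hlist_node *pos;

	hlist_for_each_entry_rcu(e, pos, &table[key & 15], node)
		if (e->key == key)
			return e;
	return NULL;
}

static void entry_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct entry, rcu_head));
}

/* Writer side: unlink under the spinlock, free after a grace period. */
static void entry_remove(u32 key)
{
	struct entry *e;
	struct hlist_node *pos, *n;

	spin_lock_bh(&table_lock);
	hlist_for_each_entry_safe(e, pos, n, &table[key & 15], node) {
		if (e->key == key) {
			hlist_del_rcu(&e->node);
			call_rcu(&e->rcu_head, entry_free_rcu);
			break;
		}
	}
	spin_unlock_bh(&table_lock);
}

/* Teardown: wait for pending callbacks before destroying the allocator
 * backing the entries (a kmem_cache in the xfrm6_tunnel case). */
static void entry_table_fini(void)
{
	rcu_barrier();
}
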
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c index 66c7a20011f3..975c5a366e55 100644 --- a/net/ipx/af_ipx.c +++ b/net/ipx/af_ipx.c | |||
@@ -1298,6 +1298,7 @@ static int ipx_setsockopt(struct socket *sock, int level, int optname, | |||
1298 | int opt; | 1298 | int opt; |
1299 | int rc = -EINVAL; | 1299 | int rc = -EINVAL; |
1300 | 1300 | ||
1301 | lock_kernel(); | ||
1301 | if (optlen != sizeof(int)) | 1302 | if (optlen != sizeof(int)) |
1302 | goto out; | 1303 | goto out; |
1303 | 1304 | ||
@@ -1312,6 +1313,7 @@ static int ipx_setsockopt(struct socket *sock, int level, int optname, | |||
1312 | ipx_sk(sk)->type = opt; | 1313 | ipx_sk(sk)->type = opt; |
1313 | rc = 0; | 1314 | rc = 0; |
1314 | out: | 1315 | out: |
1316 | unlock_kernel(); | ||
1315 | return rc; | 1317 | return rc; |
1316 | } | 1318 | } |
1317 | 1319 | ||
@@ -1323,6 +1325,7 @@ static int ipx_getsockopt(struct socket *sock, int level, int optname, | |||
1323 | int len; | 1325 | int len; |
1324 | int rc = -ENOPROTOOPT; | 1326 | int rc = -ENOPROTOOPT; |
1325 | 1327 | ||
1328 | lock_kernel(); | ||
1326 | if (!(level == SOL_IPX && optname == IPX_TYPE)) | 1329 | if (!(level == SOL_IPX && optname == IPX_TYPE)) |
1327 | goto out; | 1330 | goto out; |
1328 | 1331 | ||
@@ -1343,6 +1346,7 @@ static int ipx_getsockopt(struct socket *sock, int level, int optname, | |||
1343 | 1346 | ||
1344 | rc = 0; | 1347 | rc = 0; |
1345 | out: | 1348 | out: |
1349 | unlock_kernel(); | ||
1346 | return rc; | 1350 | return rc; |
1347 | } | 1351 | } |
1348 | 1352 | ||
@@ -1352,7 +1356,8 @@ static struct proto ipx_proto = { | |||
1352 | .obj_size = sizeof(struct ipx_sock), | 1356 | .obj_size = sizeof(struct ipx_sock), |
1353 | }; | 1357 | }; |
1354 | 1358 | ||
1355 | static int ipx_create(struct net *net, struct socket *sock, int protocol) | 1359 | static int ipx_create(struct net *net, struct socket *sock, int protocol, |
1360 | int kern) | ||
1356 | { | 1361 | { |
1357 | int rc = -ESOCKTNOSUPPORT; | 1362 | int rc = -ESOCKTNOSUPPORT; |
1358 | struct sock *sk; | 1363 | struct sock *sk; |
@@ -1390,6 +1395,7 @@ static int ipx_release(struct socket *sock) | |||
1390 | if (!sk) | 1395 | if (!sk) |
1391 | goto out; | 1396 | goto out; |
1392 | 1397 | ||
1398 | lock_kernel(); | ||
1393 | if (!sock_flag(sk, SOCK_DEAD)) | 1399 | if (!sock_flag(sk, SOCK_DEAD)) |
1394 | sk->sk_state_change(sk); | 1400 | sk->sk_state_change(sk); |
1395 | 1401 | ||
@@ -1397,6 +1403,7 @@ static int ipx_release(struct socket *sock) | |||
1397 | sock->sk = NULL; | 1403 | sock->sk = NULL; |
1398 | sk_refcnt_debug_release(sk); | 1404 | sk_refcnt_debug_release(sk); |
1399 | ipx_destroy_socket(sk); | 1405 | ipx_destroy_socket(sk); |
1406 | unlock_kernel(); | ||
1400 | out: | 1407 | out: |
1401 | return 0; | 1408 | return 0; |
1402 | } | 1409 | } |
@@ -1424,7 +1431,8 @@ static __be16 ipx_first_free_socketnum(struct ipx_interface *intrfc) | |||
1424 | return htons(socketNum); | 1431 | return htons(socketNum); |
1425 | } | 1432 | } |
1426 | 1433 | ||
1427 | static int ipx_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | 1434 | static int __ipx_bind(struct socket *sock, |
1435 | struct sockaddr *uaddr, int addr_len) | ||
1428 | { | 1436 | { |
1429 | struct sock *sk = sock->sk; | 1437 | struct sock *sk = sock->sk; |
1430 | struct ipx_sock *ipxs = ipx_sk(sk); | 1438 | struct ipx_sock *ipxs = ipx_sk(sk); |
@@ -1519,6 +1527,17 @@ out: | |||
1519 | return rc; | 1527 | return rc; |
1520 | } | 1528 | } |
1521 | 1529 | ||
1530 | static int ipx_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | ||
1531 | { | ||
1532 | int rc; | ||
1533 | |||
1534 | lock_kernel(); | ||
1535 | rc = __ipx_bind(sock, uaddr, addr_len); | ||
1536 | unlock_kernel(); | ||
1537 | |||
1538 | return rc; | ||
1539 | } | ||
1540 | |||
1522 | static int ipx_connect(struct socket *sock, struct sockaddr *uaddr, | 1541 | static int ipx_connect(struct socket *sock, struct sockaddr *uaddr, |
1523 | int addr_len, int flags) | 1542 | int addr_len, int flags) |
1524 | { | 1543 | { |
@@ -1531,6 +1550,7 @@ static int ipx_connect(struct socket *sock, struct sockaddr *uaddr, | |||
1531 | sk->sk_state = TCP_CLOSE; | 1550 | sk->sk_state = TCP_CLOSE; |
1532 | sock->state = SS_UNCONNECTED; | 1551 | sock->state = SS_UNCONNECTED; |
1533 | 1552 | ||
1553 | lock_kernel(); | ||
1534 | if (addr_len != sizeof(*addr)) | 1554 | if (addr_len != sizeof(*addr)) |
1535 | goto out; | 1555 | goto out; |
1536 | addr = (struct sockaddr_ipx *)uaddr; | 1556 | addr = (struct sockaddr_ipx *)uaddr; |
@@ -1550,7 +1570,7 @@ static int ipx_connect(struct socket *sock, struct sockaddr *uaddr, | |||
1550 | IPX_NODE_LEN); | 1570 | IPX_NODE_LEN); |
1551 | #endif /* CONFIG_IPX_INTERN */ | 1571 | #endif /* CONFIG_IPX_INTERN */ |
1552 | 1572 | ||
1553 | rc = ipx_bind(sock, (struct sockaddr *)&uaddr, | 1573 | rc = __ipx_bind(sock, (struct sockaddr *)&uaddr, |
1554 | sizeof(struct sockaddr_ipx)); | 1574 | sizeof(struct sockaddr_ipx)); |
1555 | if (rc) | 1575 | if (rc) |
1556 | goto out; | 1576 | goto out; |
@@ -1577,6 +1597,7 @@ static int ipx_connect(struct socket *sock, struct sockaddr *uaddr, | |||
1577 | ipxrtr_put(rt); | 1597 | ipxrtr_put(rt); |
1578 | rc = 0; | 1598 | rc = 0; |
1579 | out: | 1599 | out: |
1600 | unlock_kernel(); | ||
1580 | return rc; | 1601 | return rc; |
1581 | } | 1602 | } |
1582 | 1603 | ||
@@ -1592,6 +1613,7 @@ static int ipx_getname(struct socket *sock, struct sockaddr *uaddr, | |||
1592 | 1613 | ||
1593 | *uaddr_len = sizeof(struct sockaddr_ipx); | 1614 | *uaddr_len = sizeof(struct sockaddr_ipx); |
1594 | 1615 | ||
1616 | lock_kernel(); | ||
1595 | if (peer) { | 1617 | if (peer) { |
1596 | rc = -ENOTCONN; | 1618 | rc = -ENOTCONN; |
1597 | if (sk->sk_state != TCP_ESTABLISHED) | 1619 | if (sk->sk_state != TCP_ESTABLISHED) |
@@ -1626,6 +1648,19 @@ static int ipx_getname(struct socket *sock, struct sockaddr *uaddr, | |||
1626 | 1648 | ||
1627 | rc = 0; | 1649 | rc = 0; |
1628 | out: | 1650 | out: |
1651 | unlock_kernel(); | ||
1652 | return rc; | ||
1653 | } | ||
1654 | |||
1655 | static unsigned int ipx_datagram_poll(struct file *file, struct socket *sock, | ||
1656 | poll_table *wait) | ||
1657 | { | ||
1658 | int rc; | ||
1659 | |||
1660 | lock_kernel(); | ||
1661 | rc = datagram_poll(file, sock, wait); | ||
1662 | unlock_kernel(); | ||
1663 | |||
1629 | return rc; | 1664 | return rc; |
1630 | } | 1665 | } |
1631 | 1666 | ||
@@ -1700,6 +1735,7 @@ static int ipx_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
1700 | int rc = -EINVAL; | 1735 | int rc = -EINVAL; |
1701 | int flags = msg->msg_flags; | 1736 | int flags = msg->msg_flags; |
1702 | 1737 | ||
1738 | lock_kernel(); | ||
1703 | /* Socket gets bound below anyway */ | 1739 | /* Socket gets bound below anyway */ |
1704 | /* if (sk->sk_zapped) | 1740 | /* if (sk->sk_zapped) |
1705 | return -EIO; */ /* Socket not bound */ | 1741 | return -EIO; */ /* Socket not bound */ |
@@ -1723,7 +1759,7 @@ static int ipx_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
1723 | memcpy(uaddr.sipx_node, ipxs->intrfc->if_node, | 1759 | memcpy(uaddr.sipx_node, ipxs->intrfc->if_node, |
1724 | IPX_NODE_LEN); | 1760 | IPX_NODE_LEN); |
1725 | #endif | 1761 | #endif |
1726 | rc = ipx_bind(sock, (struct sockaddr *)&uaddr, | 1762 | rc = __ipx_bind(sock, (struct sockaddr *)&uaddr, |
1727 | sizeof(struct sockaddr_ipx)); | 1763 | sizeof(struct sockaddr_ipx)); |
1728 | if (rc) | 1764 | if (rc) |
1729 | goto out; | 1765 | goto out; |
@@ -1751,6 +1787,7 @@ static int ipx_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
1751 | if (rc >= 0) | 1787 | if (rc >= 0) |
1752 | rc = len; | 1788 | rc = len; |
1753 | out: | 1789 | out: |
1790 | unlock_kernel(); | ||
1754 | return rc; | 1791 | return rc; |
1755 | } | 1792 | } |
1756 | 1793 | ||
@@ -1765,6 +1802,7 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1765 | struct sk_buff *skb; | 1802 | struct sk_buff *skb; |
1766 | int copied, rc; | 1803 | int copied, rc; |
1767 | 1804 | ||
1805 | lock_kernel(); | ||
1768 | /* put the autobinding in */ | 1806 | /* put the autobinding in */ |
1769 | if (!ipxs->port) { | 1807 | if (!ipxs->port) { |
1770 | struct sockaddr_ipx uaddr; | 1808 | struct sockaddr_ipx uaddr; |
@@ -1779,7 +1817,7 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1779 | memcpy(uaddr.sipx_node, ipxs->intrfc->if_node, IPX_NODE_LEN); | 1817 | memcpy(uaddr.sipx_node, ipxs->intrfc->if_node, IPX_NODE_LEN); |
1780 | #endif /* CONFIG_IPX_INTERN */ | 1818 | #endif /* CONFIG_IPX_INTERN */ |
1781 | 1819 | ||
1782 | rc = ipx_bind(sock, (struct sockaddr *)&uaddr, | 1820 | rc = __ipx_bind(sock, (struct sockaddr *)&uaddr, |
1783 | sizeof(struct sockaddr_ipx)); | 1821 | sizeof(struct sockaddr_ipx)); |
1784 | if (rc) | 1822 | if (rc) |
1785 | goto out; | 1823 | goto out; |
@@ -1823,6 +1861,7 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1823 | out_free: | 1861 | out_free: |
1824 | skb_free_datagram(sk, skb); | 1862 | skb_free_datagram(sk, skb); |
1825 | out: | 1863 | out: |
1864 | unlock_kernel(); | ||
1826 | return rc; | 1865 | return rc; |
1827 | } | 1866 | } |
1828 | 1867 | ||
@@ -1834,6 +1873,7 @@ static int ipx_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) | |||
1834 | struct sock *sk = sock->sk; | 1873 | struct sock *sk = sock->sk; |
1835 | void __user *argp = (void __user *)arg; | 1874 | void __user *argp = (void __user *)arg; |
1836 | 1875 | ||
1876 | lock_kernel(); | ||
1837 | switch (cmd) { | 1877 | switch (cmd) { |
1838 | case TIOCOUTQ: | 1878 | case TIOCOUTQ: |
1839 | amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk); | 1879 | amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk); |
@@ -1896,6 +1936,7 @@ static int ipx_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) | |||
1896 | rc = -ENOIOCTLCMD; | 1936 | rc = -ENOIOCTLCMD; |
1897 | break; | 1937 | break; |
1898 | } | 1938 | } |
1939 | unlock_kernel(); | ||
1899 | 1940 | ||
1900 | return rc; | 1941 | return rc; |
1901 | } | 1942 | } |
@@ -1927,13 +1968,13 @@ static int ipx_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long | |||
1927 | * Socket family declarations | 1968 | * Socket family declarations |
1928 | */ | 1969 | */ |
1929 | 1970 | ||
1930 | static struct net_proto_family ipx_family_ops = { | 1971 | static const struct net_proto_family ipx_family_ops = { |
1931 | .family = PF_IPX, | 1972 | .family = PF_IPX, |
1932 | .create = ipx_create, | 1973 | .create = ipx_create, |
1933 | .owner = THIS_MODULE, | 1974 | .owner = THIS_MODULE, |
1934 | }; | 1975 | }; |
1935 | 1976 | ||
1936 | static const struct proto_ops SOCKOPS_WRAPPED(ipx_dgram_ops) = { | 1977 | static const struct proto_ops ipx_dgram_ops = { |
1937 | .family = PF_IPX, | 1978 | .family = PF_IPX, |
1938 | .owner = THIS_MODULE, | 1979 | .owner = THIS_MODULE, |
1939 | .release = ipx_release, | 1980 | .release = ipx_release, |
@@ -1942,7 +1983,7 @@ static const struct proto_ops SOCKOPS_WRAPPED(ipx_dgram_ops) = { | |||
1942 | .socketpair = sock_no_socketpair, | 1983 | .socketpair = sock_no_socketpair, |
1943 | .accept = sock_no_accept, | 1984 | .accept = sock_no_accept, |
1944 | .getname = ipx_getname, | 1985 | .getname = ipx_getname, |
1945 | .poll = datagram_poll, | 1986 | .poll = ipx_datagram_poll, |
1946 | .ioctl = ipx_ioctl, | 1987 | .ioctl = ipx_ioctl, |
1947 | #ifdef CONFIG_COMPAT | 1988 | #ifdef CONFIG_COMPAT |
1948 | .compat_ioctl = ipx_compat_ioctl, | 1989 | .compat_ioctl = ipx_compat_ioctl, |
@@ -1957,8 +1998,6 @@ static const struct proto_ops SOCKOPS_WRAPPED(ipx_dgram_ops) = { | |||
1957 | .sendpage = sock_no_sendpage, | 1998 | .sendpage = sock_no_sendpage, |
1958 | }; | 1999 | }; |
1959 | 2000 | ||
1960 | SOCKOPS_WRAP(ipx_dgram, PF_IPX); | ||
1961 | |||
1962 | static struct packet_type ipx_8023_packet_type __read_mostly = { | 2001 | static struct packet_type ipx_8023_packet_type __read_mostly = { |
1963 | .type = cpu_to_be16(ETH_P_802_3), | 2002 | .type = cpu_to_be16(ETH_P_802_3), |
1964 | .func = ipx_rcv, | 2003 | .func = ipx_rcv, |
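
With SOCKOPS_WRAP gone, af_ipx now takes the big kernel lock explicitly inside each proto_ops handler, and ipx_bind() is split into a lock-taking wrapper around __ipx_bind() so that paths which already run under the lock (connect and the sendmsg/recvmsg autobind) call the bare helper instead. The same wrapper idiom in plain userspace C, with a non-recursive pthread mutex standing in for the lock; the names are illustrative only:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;
static int bound_port = -1;

/* Unlocked core: callers must already hold big_lock. */
static int __do_bind(int port)
{
    if (port < 0 || port > 65535)
        return -1;
    bound_port = port;
    return 0;
}

/* Public entry point: takes the lock, then delegates to the core. */
static int do_bind(int port)
{
    int rc;

    pthread_mutex_lock(&big_lock);
    rc = __do_bind(port);
    pthread_mutex_unlock(&big_lock);
    return rc;
}

/* Another public entry point that autobinds internally: it already
 * holds the lock, so it must call the unlocked core, not do_bind(). */
static int do_send(int port, const char *msg)
{
    int rc = 0;

    pthread_mutex_lock(&big_lock);
    if (bound_port < 0)
        rc = __do_bind(port);
    if (rc == 0)
        printf("sent '%s' from port %d\n", msg, bound_port);
    pthread_mutex_unlock(&big_lock);
    return rc;
}

int main(void)
{
    do_bind(5000);
    do_send(0, "hello");
    return 0;
}
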
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c index dd35641835f4..10093aab6173 100644 --- a/net/irda/af_irda.c +++ b/net/irda/af_irda.c | |||
@@ -61,7 +61,7 @@ | |||
61 | 61 | ||
62 | #include <net/irda/af_irda.h> | 62 | #include <net/irda/af_irda.h> |
63 | 63 | ||
64 | static int irda_create(struct net *net, struct socket *sock, int protocol); | 64 | static int irda_create(struct net *net, struct socket *sock, int protocol, int kern); |
65 | 65 | ||
66 | static const struct proto_ops irda_stream_ops; | 66 | static const struct proto_ops irda_stream_ops; |
67 | static const struct proto_ops irda_seqpacket_ops; | 67 | static const struct proto_ops irda_seqpacket_ops; |
@@ -714,11 +714,14 @@ static int irda_getname(struct socket *sock, struct sockaddr *uaddr, | |||
714 | struct sockaddr_irda saddr; | 714 | struct sockaddr_irda saddr; |
715 | struct sock *sk = sock->sk; | 715 | struct sock *sk = sock->sk; |
716 | struct irda_sock *self = irda_sk(sk); | 716 | struct irda_sock *self = irda_sk(sk); |
717 | int err; | ||
717 | 718 | ||
719 | lock_kernel(); | ||
718 | memset(&saddr, 0, sizeof(saddr)); | 720 | memset(&saddr, 0, sizeof(saddr)); |
719 | if (peer) { | 721 | if (peer) { |
722 | err = -ENOTCONN; | ||
720 | if (sk->sk_state != TCP_ESTABLISHED) | 723 | if (sk->sk_state != TCP_ESTABLISHED) |
721 | return -ENOTCONN; | 724 | goto out; |
722 | 725 | ||
723 | saddr.sir_family = AF_IRDA; | 726 | saddr.sir_family = AF_IRDA; |
724 | saddr.sir_lsap_sel = self->dtsap_sel; | 727 | saddr.sir_lsap_sel = self->dtsap_sel; |
@@ -735,8 +738,10 @@ static int irda_getname(struct socket *sock, struct sockaddr *uaddr, | |||
735 | /* uaddr_len come to us uninitialised */ | 738 | /* uaddr_len come to us uninitialised */ |
736 | *uaddr_len = sizeof (struct sockaddr_irda); | 739 | *uaddr_len = sizeof (struct sockaddr_irda); |
737 | memcpy(uaddr, &saddr, *uaddr_len); | 740 | memcpy(uaddr, &saddr, *uaddr_len); |
738 | 741 | err = 0; | |
739 | return 0; | 742 | out: |
743 | unlock_kernel(); | ||
744 | return err; | ||
740 | } | 745 | } |
741 | 746 | ||
742 | /* | 747 | /* |
@@ -748,21 +753,25 @@ static int irda_getname(struct socket *sock, struct sockaddr *uaddr, | |||
748 | static int irda_listen(struct socket *sock, int backlog) | 753 | static int irda_listen(struct socket *sock, int backlog) |
749 | { | 754 | { |
750 | struct sock *sk = sock->sk; | 755 | struct sock *sk = sock->sk; |
756 | int err = -EOPNOTSUPP; | ||
751 | 757 | ||
752 | IRDA_DEBUG(2, "%s()\n", __func__); | 758 | IRDA_DEBUG(2, "%s()\n", __func__); |
753 | 759 | ||
760 | lock_kernel(); | ||
754 | if ((sk->sk_type != SOCK_STREAM) && (sk->sk_type != SOCK_SEQPACKET) && | 761 | if ((sk->sk_type != SOCK_STREAM) && (sk->sk_type != SOCK_SEQPACKET) && |
755 | (sk->sk_type != SOCK_DGRAM)) | 762 | (sk->sk_type != SOCK_DGRAM)) |
756 | return -EOPNOTSUPP; | 763 | goto out; |
757 | 764 | ||
758 | if (sk->sk_state != TCP_LISTEN) { | 765 | if (sk->sk_state != TCP_LISTEN) { |
759 | sk->sk_max_ack_backlog = backlog; | 766 | sk->sk_max_ack_backlog = backlog; |
760 | sk->sk_state = TCP_LISTEN; | 767 | sk->sk_state = TCP_LISTEN; |
761 | 768 | ||
762 | return 0; | 769 | err = 0; |
763 | } | 770 | } |
771 | out: | ||
772 | unlock_kernel(); | ||
764 | 773 | ||
765 | return -EOPNOTSUPP; | 774 | return err; |
766 | } | 775 | } |
767 | 776 | ||
768 | /* | 777 | /* |
@@ -783,36 +792,40 @@ static int irda_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |||
783 | if (addr_len != sizeof(struct sockaddr_irda)) | 792 | if (addr_len != sizeof(struct sockaddr_irda)) |
784 | return -EINVAL; | 793 | return -EINVAL; |
785 | 794 | ||
795 | lock_kernel(); | ||
786 | #ifdef CONFIG_IRDA_ULTRA | 796 | #ifdef CONFIG_IRDA_ULTRA |
787 | /* Special care for Ultra sockets */ | 797 | /* Special care for Ultra sockets */ |
788 | if ((sk->sk_type == SOCK_DGRAM) && | 798 | if ((sk->sk_type == SOCK_DGRAM) && |
789 | (sk->sk_protocol == IRDAPROTO_ULTRA)) { | 799 | (sk->sk_protocol == IRDAPROTO_ULTRA)) { |
790 | self->pid = addr->sir_lsap_sel; | 800 | self->pid = addr->sir_lsap_sel; |
801 | err = -EOPNOTSUPP; | ||
791 | if (self->pid & 0x80) { | 802 | if (self->pid & 0x80) { |
792 | IRDA_DEBUG(0, "%s(), extension in PID not supp!\n", __func__); | 803 | IRDA_DEBUG(0, "%s(), extension in PID not supp!\n", __func__); |
793 | return -EOPNOTSUPP; | 804 | goto out; |
794 | } | 805 | } |
795 | err = irda_open_lsap(self, self->pid); | 806 | err = irda_open_lsap(self, self->pid); |
796 | if (err < 0) | 807 | if (err < 0) |
797 | return err; | 808 | goto out; |
798 | 809 | ||
799 | /* Pretend we are connected */ | 810 | /* Pretend we are connected */ |
800 | sock->state = SS_CONNECTED; | 811 | sock->state = SS_CONNECTED; |
801 | sk->sk_state = TCP_ESTABLISHED; | 812 | sk->sk_state = TCP_ESTABLISHED; |
813 | err = 0; | ||
802 | 814 | ||
803 | return 0; | 815 | goto out; |
804 | } | 816 | } |
805 | #endif /* CONFIG_IRDA_ULTRA */ | 817 | #endif /* CONFIG_IRDA_ULTRA */ |
806 | 818 | ||
807 | self->ias_obj = irias_new_object(addr->sir_name, jiffies); | 819 | self->ias_obj = irias_new_object(addr->sir_name, jiffies); |
820 | err = -ENOMEM; | ||
808 | if (self->ias_obj == NULL) | 821 | if (self->ias_obj == NULL) |
809 | return -ENOMEM; | 822 | goto out; |
810 | 823 | ||
811 | err = irda_open_tsap(self, addr->sir_lsap_sel, addr->sir_name); | 824 | err = irda_open_tsap(self, addr->sir_lsap_sel, addr->sir_name); |
812 | if (err < 0) { | 825 | if (err < 0) { |
813 | kfree(self->ias_obj->name); | 826 | kfree(self->ias_obj->name); |
814 | kfree(self->ias_obj); | 827 | kfree(self->ias_obj); |
815 | return err; | 828 | goto out; |
816 | } | 829 | } |
817 | 830 | ||
818 | /* Register with LM-IAS */ | 831 | /* Register with LM-IAS */ |
@@ -820,7 +833,10 @@ static int irda_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |||
820 | self->stsap_sel, IAS_KERNEL_ATTR); | 833 | self->stsap_sel, IAS_KERNEL_ATTR); |
821 | irias_insert_object(self->ias_obj); | 834 | irias_insert_object(self->ias_obj); |
822 | 835 | ||
823 | return 0; | 836 | err = 0; |
837 | out: | ||
838 | unlock_kernel(); | ||
839 | return err; | ||
824 | } | 840 | } |
825 | 841 | ||
826 | /* | 842 | /* |
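
The af_irda conversion above replaces each early "return err" with "err = ...; goto out;" because every exit now has to pass through unlock_kernel(). That single-exit shape is the standard way in C to guarantee a lock (or any acquired resource) is released on every path; a minimal sketch:

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int handler(int arg)
{
    int err;
    char *buf = NULL;

    pthread_mutex_lock(&lock);

    err = -22;                    /* -EINVAL */
    if (arg < 0)
        goto out;                 /* every failure path funnels here */

    err = -12;                    /* -ENOMEM */
    buf = malloc(64);
    if (!buf)
        goto out;

    /* ... real work using buf ... */
    err = 0;
out:
    free(buf);                    /* free(NULL) is a no-op */
    pthread_mutex_unlock(&lock);  /* the unlock can never be skipped */
    return err;
}

int main(void)
{
    return handler(1) ? 1 : 0;
}
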
@@ -839,22 +855,26 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags) | |||
839 | 855 | ||
840 | IRDA_DEBUG(2, "%s()\n", __func__); | 856 | IRDA_DEBUG(2, "%s()\n", __func__); |
841 | 857 | ||
842 | err = irda_create(sock_net(sk), newsock, sk->sk_protocol); | 858 | lock_kernel(); |
859 | err = irda_create(sock_net(sk), newsock, sk->sk_protocol, 0); | ||
843 | if (err) | 860 | if (err) |
844 | return err; | 861 | goto out; |
845 | 862 | ||
863 | err = -EINVAL; | ||
846 | if (sock->state != SS_UNCONNECTED) | 864 | if (sock->state != SS_UNCONNECTED) |
847 | return -EINVAL; | 865 | goto out; |
848 | 866 | ||
849 | if ((sk = sock->sk) == NULL) | 867 | if ((sk = sock->sk) == NULL) |
850 | return -EINVAL; | 868 | goto out; |
851 | 869 | ||
870 | err = -EOPNOTSUPP; | ||
852 | if ((sk->sk_type != SOCK_STREAM) && (sk->sk_type != SOCK_SEQPACKET) && | 871 | if ((sk->sk_type != SOCK_STREAM) && (sk->sk_type != SOCK_SEQPACKET) && |
853 | (sk->sk_type != SOCK_DGRAM)) | 872 | (sk->sk_type != SOCK_DGRAM)) |
854 | return -EOPNOTSUPP; | 873 | goto out; |
855 | 874 | ||
875 | err = -EINVAL; | ||
856 | if (sk->sk_state != TCP_LISTEN) | 876 | if (sk->sk_state != TCP_LISTEN) |
857 | return -EINVAL; | 877 | goto out; |
858 | 878 | ||
859 | /* | 879 | /* |
860 | * The read queue this time is holding sockets ready to use | 880 | * The read queue this time is holding sockets ready to use |
@@ -875,18 +895,20 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags) | |||
875 | break; | 895 | break; |
876 | 896 | ||
877 | /* Non blocking operation */ | 897 | /* Non blocking operation */ |
898 | err = -EWOULDBLOCK; | ||
878 | if (flags & O_NONBLOCK) | 899 | if (flags & O_NONBLOCK) |
879 | return -EWOULDBLOCK; | 900 | goto out; |
880 | 901 | ||
881 | err = wait_event_interruptible(*(sk->sk_sleep), | 902 | err = wait_event_interruptible(*(sk->sk_sleep), |
882 | skb_peek(&sk->sk_receive_queue)); | 903 | skb_peek(&sk->sk_receive_queue)); |
883 | if (err) | 904 | if (err) |
884 | return err; | 905 | goto out; |
885 | } | 906 | } |
886 | 907 | ||
887 | newsk = newsock->sk; | 908 | newsk = newsock->sk; |
909 | err = -EIO; | ||
888 | if (newsk == NULL) | 910 | if (newsk == NULL) |
889 | return -EIO; | 911 | goto out; |
890 | 912 | ||
891 | newsk->sk_state = TCP_ESTABLISHED; | 913 | newsk->sk_state = TCP_ESTABLISHED; |
892 | 914 | ||
@@ -894,10 +916,11 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags) | |||
894 | 916 | ||
895 | /* Now attach up the new socket */ | 917 | /* Now attach up the new socket */ |
896 | new->tsap = irttp_dup(self->tsap, new); | 918 | new->tsap = irttp_dup(self->tsap, new); |
919 | err = -EPERM; /* value does not seem to make sense. -arnd */ | ||
897 | if (!new->tsap) { | 920 | if (!new->tsap) { |
898 | IRDA_DEBUG(0, "%s(), dup failed!\n", __func__); | 921 | IRDA_DEBUG(0, "%s(), dup failed!\n", __func__); |
899 | kfree_skb(skb); | 922 | kfree_skb(skb); |
900 | return -1; | 923 | goto out; |
901 | } | 924 | } |
902 | 925 | ||
903 | new->stsap_sel = new->tsap->stsap_sel; | 926 | new->stsap_sel = new->tsap->stsap_sel; |
@@ -921,8 +944,10 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags) | |||
921 | newsock->state = SS_CONNECTED; | 944 | newsock->state = SS_CONNECTED; |
922 | 945 | ||
923 | irda_connect_response(new); | 946 | irda_connect_response(new); |
924 | 947 | err = 0; | |
925 | return 0; | 948 | out: |
949 | unlock_kernel(); | ||
950 | return err; | ||
926 | } | 951 | } |
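The reworked accept path above is the shape used for most handlers in this file: lock_kernel() is taken on entry, err is preloaded before each check, and every failure branches to a single out label that drops the lock. A minimal sketch of that shape, with a made-up demo_sock in place of the real IrDA state, might look like this:

#include <linux/smp_lock.h>
#include <linux/errno.h>
#include <linux/fcntl.h>

struct demo_sock {
	int listening;
	int pending;
};

static int demo_accept(struct demo_sock *self, int flags)
{
	int err;

	lock_kernel();

	err = -EINVAL;
	if (!self->listening)
		goto out;

	err = -EWOULDBLOCK;
	if ((flags & O_NONBLOCK) && !self->pending)
		goto out;

	err = 0;	/* success and every failure leave through the same exit */
out:
	unlock_kernel();
	return err;
}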
927 | 952 | ||
928 | /* | 953 | /* |
@@ -955,28 +980,34 @@ static int irda_connect(struct socket *sock, struct sockaddr *uaddr, | |||
955 | 980 | ||
956 | IRDA_DEBUG(2, "%s(%p)\n", __func__, self); | 981 | IRDA_DEBUG(2, "%s(%p)\n", __func__, self); |
957 | 982 | ||
983 | lock_kernel(); | ||
958 | /* Don't allow connect for Ultra sockets */ | 984 | /* Don't allow connect for Ultra sockets */ |
985 | err = -ESOCKTNOSUPPORT; | ||
959 | if ((sk->sk_type == SOCK_DGRAM) && (sk->sk_protocol == IRDAPROTO_ULTRA)) | 986 | if ((sk->sk_type == SOCK_DGRAM) && (sk->sk_protocol == IRDAPROTO_ULTRA)) |
960 | return -ESOCKTNOSUPPORT; | 987 | goto out; |
961 | 988 | ||
962 | if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) { | 989 | if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) { |
963 | sock->state = SS_CONNECTED; | 990 | sock->state = SS_CONNECTED; |
964 | return 0; /* Connect completed during a ERESTARTSYS event */ | 991 | err = 0; |
992 | goto out; /* Connect completed during a ERESTARTSYS event */ | ||
965 | } | 993 | } |
966 | 994 | ||
967 | if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) { | 995 | if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) { |
968 | sock->state = SS_UNCONNECTED; | 996 | sock->state = SS_UNCONNECTED; |
969 | return -ECONNREFUSED; | 997 | err = -ECONNREFUSED; |
998 | goto out; | ||
970 | } | 999 | } |
971 | 1000 | ||
1001 | err = -EISCONN; /* No reconnect on a seqpacket socket */ | ||
972 | if (sk->sk_state == TCP_ESTABLISHED) | 1002 | if (sk->sk_state == TCP_ESTABLISHED) |
973 | return -EISCONN; /* No reconnect on a seqpacket socket */ | 1003 | goto out; |
974 | 1004 | ||
975 | sk->sk_state = TCP_CLOSE; | 1005 | sk->sk_state = TCP_CLOSE; |
976 | sock->state = SS_UNCONNECTED; | 1006 | sock->state = SS_UNCONNECTED; |
977 | 1007 | ||
1008 | err = -EINVAL; | ||
978 | if (addr_len != sizeof(struct sockaddr_irda)) | 1009 | if (addr_len != sizeof(struct sockaddr_irda)) |
979 | return -EINVAL; | 1010 | goto out; |
980 | 1011 | ||
981 | /* Check if user supplied any destination device address */ | 1012 | /* Check if user supplied any destination device address */ |
982 | if ((!addr->sir_addr) || (addr->sir_addr == DEV_ADDR_ANY)) { | 1013 | if ((!addr->sir_addr) || (addr->sir_addr == DEV_ADDR_ANY)) { |
@@ -984,7 +1015,7 @@ static int irda_connect(struct socket *sock, struct sockaddr *uaddr, | |||
984 | err = irda_discover_daddr_and_lsap_sel(self, addr->sir_name); | 1015 | err = irda_discover_daddr_and_lsap_sel(self, addr->sir_name); |
985 | if (err) { | 1016 | if (err) { |
986 | IRDA_DEBUG(0, "%s(), auto-connect failed!\n", __func__); | 1017 | IRDA_DEBUG(0, "%s(), auto-connect failed!\n", __func__); |
987 | return err; | 1018 | goto out; |
988 | } | 1019 | } |
989 | } else { | 1020 | } else { |
990 | /* Use the one provided by the user */ | 1021 | /* Use the one provided by the user */ |
@@ -1000,7 +1031,7 @@ static int irda_connect(struct socket *sock, struct sockaddr *uaddr, | |||
1000 | err = irda_find_lsap_sel(self, addr->sir_name); | 1031 | err = irda_find_lsap_sel(self, addr->sir_name); |
1001 | if (err) { | 1032 | if (err) { |
1002 | IRDA_DEBUG(0, "%s(), connect failed!\n", __func__); | 1033 | IRDA_DEBUG(0, "%s(), connect failed!\n", __func__); |
1003 | return err; | 1034 | goto out; |
1004 | } | 1035 | } |
1005 | } else { | 1036 | } else { |
1006 | /* Directly connect to the remote LSAP | 1037 | /* Directly connect to the remote LSAP |
@@ -1025,29 +1056,35 @@ static int irda_connect(struct socket *sock, struct sockaddr *uaddr, | |||
1025 | self->max_sdu_size_rx, NULL); | 1056 | self->max_sdu_size_rx, NULL); |
1026 | if (err) { | 1057 | if (err) { |
1027 | IRDA_DEBUG(0, "%s(), connect failed!\n", __func__); | 1058 | IRDA_DEBUG(0, "%s(), connect failed!\n", __func__); |
1028 | return err; | 1059 | goto out; |
1029 | } | 1060 | } |
1030 | 1061 | ||
1031 | /* Now the loop */ | 1062 | /* Now the loop */ |
1063 | err = -EINPROGRESS; | ||
1032 | if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) | 1064 | if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) |
1033 | return -EINPROGRESS; | 1065 | goto out; |
1034 | 1066 | ||
1067 | err = -ERESTARTSYS; | ||
1035 | if (wait_event_interruptible(*(sk->sk_sleep), | 1068 | if (wait_event_interruptible(*(sk->sk_sleep), |
1036 | (sk->sk_state != TCP_SYN_SENT))) | 1069 | (sk->sk_state != TCP_SYN_SENT))) |
1037 | return -ERESTARTSYS; | 1070 | goto out; |
1038 | 1071 | ||
1039 | if (sk->sk_state != TCP_ESTABLISHED) { | 1072 | if (sk->sk_state != TCP_ESTABLISHED) { |
1040 | sock->state = SS_UNCONNECTED; | 1073 | sock->state = SS_UNCONNECTED; |
1041 | err = sock_error(sk); | 1074 | err = sock_error(sk); |
1042 | return err? err : -ECONNRESET; | 1075 | if (!err) |
1076 | err = -ECONNRESET; | ||
1077 | goto out; | ||
1043 | } | 1078 | } |
1044 | 1079 | ||
1045 | sock->state = SS_CONNECTED; | 1080 | sock->state = SS_CONNECTED; |
1046 | 1081 | ||
1047 | /* At this point, IrLMP has assigned our source address */ | 1082 | /* At this point, IrLMP has assigned our source address */ |
1048 | self->saddr = irttp_get_saddr(self->tsap); | 1083 | self->saddr = irttp_get_saddr(self->tsap); |
1049 | 1084 | err = 0; | |
1050 | return 0; | 1085 | out: |
1086 | unlock_kernel(); | ||
1087 | return err; | ||
1051 | } | 1088 | } |
1052 | 1089 | ||
1053 | static struct proto irda_proto = { | 1090 | static struct proto irda_proto = { |
@@ -1062,7 +1099,8 @@ static struct proto irda_proto = { | |||
1062 | * Create IrDA socket | 1099 | * Create IrDA socket |
1063 | * | 1100 | * |
1064 | */ | 1101 | */ |
1065 | static int irda_create(struct net *net, struct socket *sock, int protocol) | 1102 | static int irda_create(struct net *net, struct socket *sock, int protocol, |
1103 | int kern) | ||
1066 | { | 1104 | { |
1067 | struct sock *sk; | 1105 | struct sock *sk; |
1068 | struct irda_sock *self; | 1106 | struct irda_sock *self; |
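irda_create() picks up the extra kern argument of the updated net_proto_family ->create() prototype, which lets a protocol family tell kernel-internal socket creation apart from a user's socket(2) call. A hedged sketch of a family wired to that prototype; the demo_* names are illustrative and not part of this patch:

#include <linux/net.h>
#include <linux/errno.h>
#include <linux/module.h>

static int demo_create(struct net *net, struct socket *sock, int protocol,
		       int kern)
{
	/* kern != 0: the socket is being created on behalf of the kernel */
	if (protocol)
		return -EPROTONOSUPPORT;
	return 0;
}

static const struct net_proto_family demo_family_ops = {
	.family = PF_IRDA,	/* family number reused here only for illustration */
	.create = demo_create,
	.owner  = THIS_MODULE,
};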
@@ -1192,6 +1230,7 @@ static int irda_release(struct socket *sock) | |||
1192 | if (sk == NULL) | 1230 | if (sk == NULL) |
1193 | return 0; | 1231 | return 0; |
1194 | 1232 | ||
1233 | lock_kernel(); | ||
1195 | lock_sock(sk); | 1234 | lock_sock(sk); |
1196 | sk->sk_state = TCP_CLOSE; | 1235 | sk->sk_state = TCP_CLOSE; |
1197 | sk->sk_shutdown |= SEND_SHUTDOWN; | 1236 | sk->sk_shutdown |= SEND_SHUTDOWN; |
@@ -1210,6 +1249,7 @@ static int irda_release(struct socket *sock) | |||
1210 | /* Destroy networking socket if we are the last reference on it, | 1249 | /* Destroy networking socket if we are the last reference on it, |
1211 | * i.e. if(sk->sk_refcnt == 0) -> sk_free(sk) */ | 1250 | * i.e. if(sk->sk_refcnt == 0) -> sk_free(sk) */ |
1212 | sock_put(sk); | 1251 | sock_put(sk); |
1252 | unlock_kernel(); | ||
1213 | 1253 | ||
1214 | /* Notes on socket locking and deallocation... - Jean II | 1254 | /* Notes on socket locking and deallocation... - Jean II |
1215 | * In theory we should put pairs of sock_hold() / sock_put() to | 1255 | * In theory we should put pairs of sock_hold() / sock_put() to |
@@ -1257,28 +1297,37 @@ static int irda_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
1257 | 1297 | ||
1258 | IRDA_DEBUG(4, "%s(), len=%zd\n", __func__, len); | 1298 | IRDA_DEBUG(4, "%s(), len=%zd\n", __func__, len); |
1259 | 1299 | ||
1300 | lock_kernel(); | ||
1260 | /* Note : socket.c set MSG_EOR on SEQPACKET sockets */ | 1301 | /* Note : socket.c set MSG_EOR on SEQPACKET sockets */ |
1261 | if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_EOR | MSG_CMSG_COMPAT | | 1302 | if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_EOR | MSG_CMSG_COMPAT | |
1262 | MSG_NOSIGNAL)) | 1303 | MSG_NOSIGNAL)) { |
1263 | return -EINVAL; | 1304 | err = -EINVAL; |
1305 | goto out; | ||
1306 | } | ||
1264 | 1307 | ||
1265 | if (sk->sk_shutdown & SEND_SHUTDOWN) | 1308 | if (sk->sk_shutdown & SEND_SHUTDOWN) |
1266 | goto out_err; | 1309 | goto out_err; |
1267 | 1310 | ||
1268 | if (sk->sk_state != TCP_ESTABLISHED) | 1311 | if (sk->sk_state != TCP_ESTABLISHED) { |
1269 | return -ENOTCONN; | 1312 | err = -ENOTCONN; |
1313 | goto out; | ||
1314 | } | ||
1270 | 1315 | ||
1271 | self = irda_sk(sk); | 1316 | self = irda_sk(sk); |
1272 | 1317 | ||
1273 | /* Check if IrTTP is wants us to slow down */ | 1318 | /* Check if IrTTP is wants us to slow down */ |
1274 | 1319 | ||
1275 | if (wait_event_interruptible(*(sk->sk_sleep), | 1320 | if (wait_event_interruptible(*(sk->sk_sleep), |
1276 | (self->tx_flow != FLOW_STOP || sk->sk_state != TCP_ESTABLISHED))) | 1321 | (self->tx_flow != FLOW_STOP || sk->sk_state != TCP_ESTABLISHED))) { |
1277 | return -ERESTARTSYS; | 1322 | err = -ERESTARTSYS; |
1323 | goto out; | ||
1324 | } | ||
1278 | 1325 | ||
1279 | /* Check if we are still connected */ | 1326 | /* Check if we are still connected */ |
1280 | if (sk->sk_state != TCP_ESTABLISHED) | 1327 | if (sk->sk_state != TCP_ESTABLISHED) { |
1281 | return -ENOTCONN; | 1328 | err = -ENOTCONN; |
1329 | goto out; | ||
1330 | } | ||
1282 | 1331 | ||
1283 | /* Check that we don't send out too big frames */ | 1332 | /* Check that we don't send out too big frames */ |
1284 | if (len > self->max_data_size) { | 1333 | if (len > self->max_data_size) { |
@@ -1310,11 +1359,16 @@ static int irda_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
1310 | IRDA_DEBUG(0, "%s(), err=%d\n", __func__, err); | 1359 | IRDA_DEBUG(0, "%s(), err=%d\n", __func__, err); |
1311 | goto out_err; | 1360 | goto out_err; |
1312 | } | 1361 | } |
1362 | |||
1363 | unlock_kernel(); | ||
1313 | /* Tell client how much data we actually sent */ | 1364 | /* Tell client how much data we actually sent */ |
1314 | return len; | 1365 | return len; |
1315 | 1366 | ||
1316 | out_err: | 1367 | out_err: |
1317 | return sk_stream_error(sk, msg->msg_flags, err); | 1368 | err = sk_stream_error(sk, msg->msg_flags, err); |
1369 | out: | ||
1370 | unlock_kernel(); | ||
1371 | return err; | ||
1318 | 1372 | ||
1319 | } | 1373 | } |
1320 | 1374 | ||
@@ -1335,13 +1389,14 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock, | |||
1335 | 1389 | ||
1336 | IRDA_DEBUG(4, "%s()\n", __func__); | 1390 | IRDA_DEBUG(4, "%s()\n", __func__); |
1337 | 1391 | ||
1392 | lock_kernel(); | ||
1338 | if ((err = sock_error(sk)) < 0) | 1393 | if ((err = sock_error(sk)) < 0) |
1339 | return err; | 1394 | goto out; |
1340 | 1395 | ||
1341 | skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, | 1396 | skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, |
1342 | flags & MSG_DONTWAIT, &err); | 1397 | flags & MSG_DONTWAIT, &err); |
1343 | if (!skb) | 1398 | if (!skb) |
1344 | return err; | 1399 | goto out; |
1345 | 1400 | ||
1346 | skb_reset_transport_header(skb); | 1401 | skb_reset_transport_header(skb); |
1347 | copied = skb->len; | 1402 | copied = skb->len; |
@@ -1369,8 +1424,12 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock, | |||
1369 | irttp_flow_request(self->tsap, FLOW_START); | 1424 | irttp_flow_request(self->tsap, FLOW_START); |
1370 | } | 1425 | } |
1371 | } | 1426 | } |
1372 | 1427 | unlock_kernel(); | |
1373 | return copied; | 1428 | return copied; |
1429 | |||
1430 | out: | ||
1431 | unlock_kernel(); | ||
1432 | return err; | ||
1374 | } | 1433 | } |
1375 | 1434 | ||
1376 | /* | 1435 | /* |
@@ -1388,15 +1447,19 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock, | |||
1388 | 1447 | ||
1389 | IRDA_DEBUG(3, "%s()\n", __func__); | 1448 | IRDA_DEBUG(3, "%s()\n", __func__); |
1390 | 1449 | ||
1450 | lock_kernel(); | ||
1391 | if ((err = sock_error(sk)) < 0) | 1451 | if ((err = sock_error(sk)) < 0) |
1392 | return err; | 1452 | goto out; |
1393 | 1453 | ||
1454 | err = -EINVAL; | ||
1394 | if (sock->flags & __SO_ACCEPTCON) | 1455 | if (sock->flags & __SO_ACCEPTCON) |
1395 | return(-EINVAL); | 1456 | goto out; |
1396 | 1457 | ||
1458 | err = -EOPNOTSUPP; | ||
1397 | if (flags & MSG_OOB) | 1459 | if (flags & MSG_OOB) |
1398 | return -EOPNOTSUPP; | 1460 | goto out; |
1399 | 1461 | ||
1462 | err = 0; | ||
1400 | target = sock_rcvlowat(sk, flags & MSG_WAITALL, size); | 1463 | target = sock_rcvlowat(sk, flags & MSG_WAITALL, size); |
1401 | timeo = sock_rcvtimeo(sk, noblock); | 1464 | timeo = sock_rcvtimeo(sk, noblock); |
1402 | 1465 | ||
@@ -1408,7 +1471,7 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock, | |||
1408 | 1471 | ||
1409 | if (skb == NULL) { | 1472 | if (skb == NULL) { |
1410 | DEFINE_WAIT(wait); | 1473 | DEFINE_WAIT(wait); |
1411 | int ret = 0; | 1474 | err = 0; |
1412 | 1475 | ||
1413 | if (copied >= target) | 1476 | if (copied >= target) |
1414 | break; | 1477 | break; |
@@ -1418,25 +1481,25 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock, | |||
1418 | /* | 1481 | /* |
1419 | * POSIX 1003.1g mandates this order. | 1482 | * POSIX 1003.1g mandates this order. |
1420 | */ | 1483 | */ |
1421 | ret = sock_error(sk); | 1484 | err = sock_error(sk); |
1422 | if (ret) | 1485 | if (err) |
1423 | ; | 1486 | ; |
1424 | else if (sk->sk_shutdown & RCV_SHUTDOWN) | 1487 | else if (sk->sk_shutdown & RCV_SHUTDOWN) |
1425 | ; | 1488 | ; |
1426 | else if (noblock) | 1489 | else if (noblock) |
1427 | ret = -EAGAIN; | 1490 | err = -EAGAIN; |
1428 | else if (signal_pending(current)) | 1491 | else if (signal_pending(current)) |
1429 | ret = sock_intr_errno(timeo); | 1492 | err = sock_intr_errno(timeo); |
1430 | else if (sk->sk_state != TCP_ESTABLISHED) | 1493 | else if (sk->sk_state != TCP_ESTABLISHED) |
1431 | ret = -ENOTCONN; | 1494 | err = -ENOTCONN; |
1432 | else if (skb_peek(&sk->sk_receive_queue) == NULL) | 1495 | else if (skb_peek(&sk->sk_receive_queue) == NULL) |
1433 | /* Wait process until data arrives */ | 1496 | /* Wait process until data arrives */ |
1434 | schedule(); | 1497 | schedule(); |
1435 | 1498 | ||
1436 | finish_wait(sk->sk_sleep, &wait); | 1499 | finish_wait(sk->sk_sleep, &wait); |
1437 | 1500 | ||
1438 | if (ret) | 1501 | if (err) |
1439 | return ret; | 1502 | goto out; |
1440 | if (sk->sk_shutdown & RCV_SHUTDOWN) | 1503 | if (sk->sk_shutdown & RCV_SHUTDOWN) |
1441 | break; | 1504 | break; |
1442 | 1505 | ||
@@ -1489,7 +1552,9 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock, | |||
1489 | } | 1552 | } |
1490 | } | 1553 | } |
1491 | 1554 | ||
1492 | return copied; | 1555 | out: |
1556 | unlock_kernel(); | ||
1557 | return err ? : copied; | ||
1493 | } | 1558 | } |
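The new tail of irda_recvmsg_stream() relies on the GNU C conditional with an omitted middle operand: err ? : copied evaluates err once and yields it when it is non-zero, otherwise copied, which is exactly "first error wins, else return the byte count". A small userspace illustration of the same expression (gcc/clang extension, not standard C):

#include <stdio.h>

static int pick(int err, int copied)
{
	return err ? : copied;	/* err if non-zero, otherwise copied */
}

int main(void)
{
	printf("%d\n", pick(0, 42));	/* prints 42 */
	printf("%d\n", pick(-5, 42));	/* prints -5 */
	return 0;
}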
1494 | 1559 | ||
1495 | /* | 1560 | /* |
@@ -1507,18 +1572,23 @@ static int irda_sendmsg_dgram(struct kiocb *iocb, struct socket *sock, | |||
1507 | struct sk_buff *skb; | 1572 | struct sk_buff *skb; |
1508 | int err; | 1573 | int err; |
1509 | 1574 | ||
1575 | lock_kernel(); | ||
1576 | |||
1510 | IRDA_DEBUG(4, "%s(), len=%zd\n", __func__, len); | 1577 | IRDA_DEBUG(4, "%s(), len=%zd\n", __func__, len); |
1511 | 1578 | ||
1579 | err = -EINVAL; | ||
1512 | if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT)) | 1580 | if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT)) |
1513 | return -EINVAL; | 1581 | goto out; |
1514 | 1582 | ||
1515 | if (sk->sk_shutdown & SEND_SHUTDOWN) { | 1583 | if (sk->sk_shutdown & SEND_SHUTDOWN) { |
1516 | send_sig(SIGPIPE, current, 0); | 1584 | send_sig(SIGPIPE, current, 0); |
1517 | return -EPIPE; | 1585 | err = -EPIPE; |
1586 | goto out; | ||
1518 | } | 1587 | } |
1519 | 1588 | ||
1589 | err = -ENOTCONN; | ||
1520 | if (sk->sk_state != TCP_ESTABLISHED) | 1590 | if (sk->sk_state != TCP_ESTABLISHED) |
1521 | return -ENOTCONN; | 1591 | goto out; |
1522 | 1592 | ||
1523 | self = irda_sk(sk); | 1593 | self = irda_sk(sk); |
1524 | 1594 | ||
@@ -1535,8 +1605,9 @@ static int irda_sendmsg_dgram(struct kiocb *iocb, struct socket *sock, | |||
1535 | 1605 | ||
1536 | skb = sock_alloc_send_skb(sk, len + self->max_header_size, | 1606 | skb = sock_alloc_send_skb(sk, len + self->max_header_size, |
1537 | msg->msg_flags & MSG_DONTWAIT, &err); | 1607 | msg->msg_flags & MSG_DONTWAIT, &err); |
1608 | err = -ENOBUFS; | ||
1538 | if (!skb) | 1609 | if (!skb) |
1539 | return -ENOBUFS; | 1610 | goto out; |
1540 | 1611 | ||
1541 | skb_reserve(skb, self->max_header_size); | 1612 | skb_reserve(skb, self->max_header_size); |
1542 | skb_reset_transport_header(skb); | 1613 | skb_reset_transport_header(skb); |
@@ -1546,7 +1617,7 @@ static int irda_sendmsg_dgram(struct kiocb *iocb, struct socket *sock, | |||
1546 | err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len); | 1617 | err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len); |
1547 | if (err) { | 1618 | if (err) { |
1548 | kfree_skb(skb); | 1619 | kfree_skb(skb); |
1549 | return err; | 1620 | goto out; |
1550 | } | 1621 | } |
1551 | 1622 | ||
1552 | /* | 1623 | /* |
@@ -1556,9 +1627,13 @@ static int irda_sendmsg_dgram(struct kiocb *iocb, struct socket *sock, | |||
1556 | err = irttp_udata_request(self->tsap, skb); | 1627 | err = irttp_udata_request(self->tsap, skb); |
1557 | if (err) { | 1628 | if (err) { |
1558 | IRDA_DEBUG(0, "%s(), err=%d\n", __func__, err); | 1629 | IRDA_DEBUG(0, "%s(), err=%d\n", __func__, err); |
1559 | return err; | 1630 | goto out; |
1560 | } | 1631 | } |
1632 | unlock_kernel(); | ||
1561 | return len; | 1633 | return len; |
1634 | out: | ||
1635 | unlock_kernel(); | ||
1636 | return err; | ||
1562 | } | 1637 | } |
1563 | 1638 | ||
1564 | /* | 1639 | /* |
@@ -1580,12 +1655,15 @@ static int irda_sendmsg_ultra(struct kiocb *iocb, struct socket *sock, | |||
1580 | 1655 | ||
1581 | IRDA_DEBUG(4, "%s(), len=%zd\n", __func__, len); | 1656 | IRDA_DEBUG(4, "%s(), len=%zd\n", __func__, len); |
1582 | 1657 | ||
1658 | lock_kernel(); | ||
1659 | err = -EINVAL; | ||
1583 | if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT)) | 1660 | if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT)) |
1584 | return -EINVAL; | 1661 | goto out; |
1585 | 1662 | ||
1663 | err = -EPIPE; | ||
1586 | if (sk->sk_shutdown & SEND_SHUTDOWN) { | 1664 | if (sk->sk_shutdown & SEND_SHUTDOWN) { |
1587 | send_sig(SIGPIPE, current, 0); | 1665 | send_sig(SIGPIPE, current, 0); |
1588 | return -EPIPE; | 1666 | goto out; |
1589 | } | 1667 | } |
1590 | 1668 | ||
1591 | self = irda_sk(sk); | 1669 | self = irda_sk(sk); |
@@ -1593,16 +1671,18 @@ static int irda_sendmsg_ultra(struct kiocb *iocb, struct socket *sock, | |||
1593 | /* Check if an address was specified with sendto. Jean II */ | 1671 | /* Check if an address was specified with sendto. Jean II */ |
1594 | if (msg->msg_name) { | 1672 | if (msg->msg_name) { |
1595 | struct sockaddr_irda *addr = (struct sockaddr_irda *) msg->msg_name; | 1673 | struct sockaddr_irda *addr = (struct sockaddr_irda *) msg->msg_name; |
1674 | err = -EINVAL; | ||
1596 | /* Check address, extract pid. Jean II */ | 1675 | /* Check address, extract pid. Jean II */ |
1597 | if (msg->msg_namelen < sizeof(*addr)) | 1676 | if (msg->msg_namelen < sizeof(*addr)) |
1598 | return -EINVAL; | 1677 | goto out; |
1599 | if (addr->sir_family != AF_IRDA) | 1678 | if (addr->sir_family != AF_IRDA) |
1600 | return -EINVAL; | 1679 | goto out; |
1601 | 1680 | ||
1602 | pid = addr->sir_lsap_sel; | 1681 | pid = addr->sir_lsap_sel; |
1603 | if (pid & 0x80) { | 1682 | if (pid & 0x80) { |
1604 | IRDA_DEBUG(0, "%s(), extension in PID not supp!\n", __func__); | 1683 | IRDA_DEBUG(0, "%s(), extension in PID not supp!\n", __func__); |
1605 | return -EOPNOTSUPP; | 1684 | err = -EOPNOTSUPP; |
1685 | goto out; | ||
1606 | } | 1686 | } |
1607 | } else { | 1687 | } else { |
1608 | /* Check that the socket is properly bound to an Ultra | 1688 | /* Check that the socket is properly bound to an Ultra |
@@ -1611,7 +1691,8 @@ static int irda_sendmsg_ultra(struct kiocb *iocb, struct socket *sock, | |||
1611 | (sk->sk_state != TCP_ESTABLISHED)) { | 1691 | (sk->sk_state != TCP_ESTABLISHED)) { |
1612 | IRDA_DEBUG(0, "%s(), socket not bound to Ultra PID.\n", | 1692 | IRDA_DEBUG(0, "%s(), socket not bound to Ultra PID.\n", |
1613 | __func__); | 1693 | __func__); |
1614 | return -ENOTCONN; | 1694 | err = -ENOTCONN; |
1695 | goto out; | ||
1615 | } | 1696 | } |
1616 | /* Use PID from socket */ | 1697 | /* Use PID from socket */ |
1617 | bound = 1; | 1698 | bound = 1; |
@@ -1630,8 +1711,9 @@ static int irda_sendmsg_ultra(struct kiocb *iocb, struct socket *sock, | |||
1630 | 1711 | ||
1631 | skb = sock_alloc_send_skb(sk, len + self->max_header_size, | 1712 | skb = sock_alloc_send_skb(sk, len + self->max_header_size, |
1632 | msg->msg_flags & MSG_DONTWAIT, &err); | 1713 | msg->msg_flags & MSG_DONTWAIT, &err); |
1714 | err = -ENOBUFS; | ||
1633 | if (!skb) | 1715 | if (!skb) |
1634 | return -ENOBUFS; | 1716 | goto out; |
1635 | 1717 | ||
1636 | skb_reserve(skb, self->max_header_size); | 1718 | skb_reserve(skb, self->max_header_size); |
1637 | skb_reset_transport_header(skb); | 1719 | skb_reset_transport_header(skb); |
@@ -1641,16 +1723,16 @@ static int irda_sendmsg_ultra(struct kiocb *iocb, struct socket *sock, | |||
1641 | err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len); | 1723 | err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len); |
1642 | if (err) { | 1724 | if (err) { |
1643 | kfree_skb(skb); | 1725 | kfree_skb(skb); |
1644 | return err; | 1726 | goto out; |
1645 | } | 1727 | } |
1646 | 1728 | ||
1647 | err = irlmp_connless_data_request((bound ? self->lsap : NULL), | 1729 | err = irlmp_connless_data_request((bound ? self->lsap : NULL), |
1648 | skb, pid); | 1730 | skb, pid); |
1649 | if (err) { | 1731 | if (err) |
1650 | IRDA_DEBUG(0, "%s(), err=%d\n", __func__, err); | 1732 | IRDA_DEBUG(0, "%s(), err=%d\n", __func__, err); |
1651 | return err; | 1733 | out: |
1652 | } | 1734 | unlock_kernel(); |
1653 | return len; | 1735 | return err ? : len; |
1654 | } | 1736 | } |
1655 | #endif /* CONFIG_IRDA_ULTRA */ | 1737 | #endif /* CONFIG_IRDA_ULTRA */ |
1656 | 1738 | ||
@@ -1664,6 +1746,8 @@ static int irda_shutdown(struct socket *sock, int how) | |||
1664 | 1746 | ||
1665 | IRDA_DEBUG(1, "%s(%p)\n", __func__, self); | 1747 | IRDA_DEBUG(1, "%s(%p)\n", __func__, self); |
1666 | 1748 | ||
1749 | lock_kernel(); | ||
1750 | |||
1667 | sk->sk_state = TCP_CLOSE; | 1751 | sk->sk_state = TCP_CLOSE; |
1668 | sk->sk_shutdown |= SEND_SHUTDOWN; | 1752 | sk->sk_shutdown |= SEND_SHUTDOWN; |
1669 | sk->sk_state_change(sk); | 1753 | sk->sk_state_change(sk); |
@@ -1684,6 +1768,8 @@ static int irda_shutdown(struct socket *sock, int how) | |||
1684 | self->daddr = DEV_ADDR_ANY; /* Until we get re-connected */ | 1768 | self->daddr = DEV_ADDR_ANY; /* Until we get re-connected */ |
1685 | self->saddr = 0x0; /* so IrLMP assign us any link */ | 1769 | self->saddr = 0x0; /* so IrLMP assign us any link */ |
1686 | 1770 | ||
1771 | unlock_kernel(); | ||
1772 | |||
1687 | return 0; | 1773 | return 0; |
1688 | } | 1774 | } |
1689 | 1775 | ||
@@ -1699,6 +1785,7 @@ static unsigned int irda_poll(struct file * file, struct socket *sock, | |||
1699 | 1785 | ||
1700 | IRDA_DEBUG(4, "%s()\n", __func__); | 1786 | IRDA_DEBUG(4, "%s()\n", __func__); |
1701 | 1787 | ||
1788 | lock_kernel(); | ||
1702 | poll_wait(file, sk->sk_sleep, wait); | 1789 | poll_wait(file, sk->sk_sleep, wait); |
1703 | mask = 0; | 1790 | mask = 0; |
1704 | 1791 | ||
@@ -1746,18 +1833,34 @@ static unsigned int irda_poll(struct file * file, struct socket *sock, | |||
1746 | default: | 1833 | default: |
1747 | break; | 1834 | break; |
1748 | } | 1835 | } |
1836 | unlock_kernel(); | ||
1749 | return mask; | 1837 | return mask; |
1750 | } | 1838 | } |
1751 | 1839 | ||
1840 | static unsigned int irda_datagram_poll(struct file *file, struct socket *sock, | ||
1841 | poll_table *wait) | ||
1842 | { | ||
1843 | int err; | ||
1844 | |||
1845 | lock_kernel(); | ||
1846 | err = datagram_poll(file, sock, wait); | ||
1847 | unlock_kernel(); | ||
1848 | |||
1849 | return err; | ||
1850 | } | ||
1851 | |||
1752 | /* | 1852 | /* |
1753 | * Function irda_ioctl (sock, cmd, arg) | 1853 | * Function irda_ioctl (sock, cmd, arg) |
1754 | */ | 1854 | */ |
1755 | static int irda_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) | 1855 | static int irda_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) |
1756 | { | 1856 | { |
1757 | struct sock *sk = sock->sk; | 1857 | struct sock *sk = sock->sk; |
1858 | int err; | ||
1758 | 1859 | ||
1759 | IRDA_DEBUG(4, "%s(), cmd=%#x\n", __func__, cmd); | 1860 | IRDA_DEBUG(4, "%s(), cmd=%#x\n", __func__, cmd); |
1760 | 1861 | ||
1862 | lock_kernel(); | ||
1863 | err = -EINVAL; | ||
1761 | switch (cmd) { | 1864 | switch (cmd) { |
1762 | case TIOCOUTQ: { | 1865 | case TIOCOUTQ: { |
1763 | long amount; | 1866 | long amount; |
@@ -1765,9 +1868,8 @@ static int irda_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) | |||
1765 | amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk); | 1868 | amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk); |
1766 | if (amount < 0) | 1869 | if (amount < 0) |
1767 | amount = 0; | 1870 | amount = 0; |
1768 | if (put_user(amount, (unsigned int __user *)arg)) | 1871 | err = put_user(amount, (unsigned int __user *)arg); |
1769 | return -EFAULT; | 1872 | break; |
1770 | return 0; | ||
1771 | } | 1873 | } |
1772 | 1874 | ||
1773 | case TIOCINQ: { | 1875 | case TIOCINQ: { |
@@ -1776,15 +1878,14 @@ static int irda_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) | |||
1776 | /* These two are safe on a single CPU system as only user tasks fiddle here */ | 1878 | /* These two are safe on a single CPU system as only user tasks fiddle here */ |
1777 | if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) | 1879 | if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) |
1778 | amount = skb->len; | 1880 | amount = skb->len; |
1779 | if (put_user(amount, (unsigned int __user *)arg)) | 1881 | err = put_user(amount, (unsigned int __user *)arg); |
1780 | return -EFAULT; | 1882 | break; |
1781 | return 0; | ||
1782 | } | 1883 | } |
1783 | 1884 | ||
1784 | case SIOCGSTAMP: | 1885 | case SIOCGSTAMP: |
1785 | if (sk != NULL) | 1886 | if (sk != NULL) |
1786 | return sock_get_timestamp(sk, (struct timeval __user *)arg); | 1887 | err = sock_get_timestamp(sk, (struct timeval __user *)arg); |
1787 | return -EINVAL; | 1888 | break; |
1788 | 1889 | ||
1789 | case SIOCGIFADDR: | 1890 | case SIOCGIFADDR: |
1790 | case SIOCSIFADDR: | 1891 | case SIOCSIFADDR: |
@@ -1796,14 +1897,14 @@ static int irda_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) | |||
1796 | case SIOCSIFNETMASK: | 1897 | case SIOCSIFNETMASK: |
1797 | case SIOCGIFMETRIC: | 1898 | case SIOCGIFMETRIC: |
1798 | case SIOCSIFMETRIC: | 1899 | case SIOCSIFMETRIC: |
1799 | return -EINVAL; | 1900 | break; |
1800 | default: | 1901 | default: |
1801 | IRDA_DEBUG(1, "%s(), doing device ioctl!\n", __func__); | 1902 | IRDA_DEBUG(1, "%s(), doing device ioctl!\n", __func__); |
1802 | return -ENOIOCTLCMD; | 1903 | err = -ENOIOCTLCMD; |
1803 | } | 1904 | } |
1905 | unlock_kernel(); | ||
1804 | 1906 | ||
1805 | /*NOTREACHED*/ | 1907 | return err; |
1806 | return 0; | ||
1807 | } | 1908 | } |
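With the lock held across the whole switch, irda_ioctl() now funnels every case through one unlock-and-return tail, and put_user(), which already returns 0 or -EFAULT, can be passed through directly instead of being tested and converted. A rough sketch of that shape, with the TIOCOUTQ value trimmed to a placeholder:

#include <linux/net.h>
#include <linux/errno.h>
#include <linux/smp_lock.h>
#include <linux/uaccess.h>
#include <asm/ioctls.h>

static int demo_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	int err = -EINVAL;

	lock_kernel();
	switch (cmd) {
	case TIOCOUTQ:
		/* put_user() returns 0 or -EFAULT; no conversion needed */
		err = put_user(0, (int __user *)arg);
		break;
	default:
		err = -ENOIOCTLCMD;	/* caller falls back to a device ioctl */
	}
	unlock_kernel();

	return err;
}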
1808 | 1909 | ||
1809 | #ifdef CONFIG_COMPAT | 1910 | #ifdef CONFIG_COMPAT |
@@ -1825,7 +1926,7 @@ static int irda_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned lon | |||
1825 | * Set some options for the socket | 1926 | * Set some options for the socket |
1826 | * | 1927 | * |
1827 | */ | 1928 | */ |
1828 | static int irda_setsockopt(struct socket *sock, int level, int optname, | 1929 | static int __irda_setsockopt(struct socket *sock, int level, int optname, |
1829 | char __user *optval, unsigned int optlen) | 1930 | char __user *optval, unsigned int optlen) |
1830 | { | 1931 | { |
1831 | struct sock *sk = sock->sk; | 1932 | struct sock *sk = sock->sk; |
@@ -2083,6 +2184,18 @@ static int irda_setsockopt(struct socket *sock, int level, int optname, | |||
2083 | return 0; | 2184 | return 0; |
2084 | } | 2185 | } |
2085 | 2186 | ||
2187 | static int irda_setsockopt(struct socket *sock, int level, int optname, | ||
2188 | char __user *optval, unsigned int optlen) | ||
2189 | { | ||
2190 | int err; | ||
2191 | |||
2192 | lock_kernel(); | ||
2193 | err = __irda_setsockopt(sock, level, optname, optval, optlen); | ||
2194 | unlock_kernel(); | ||
2195 | |||
2196 | return err; | ||
2197 | } | ||
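The setsockopt body keeps all of its original early returns; only the entry point changes. The same split is used for getsockopt further down: an unlocked __-prefixed helper does the work and a thin wrapper brackets it with the big kernel lock. A generic sketch of the split, with illustrative names:

#include <linux/net.h>
#include <linux/errno.h>
#include <linux/smp_lock.h>

static int __demo_op(struct socket *sock, int val)
{
	if (val < 0)
		return -EINVAL;	/* early returns in the helper stay untouched */
	return 0;
}

static int demo_op(struct socket *sock, int val)
{
	int err;

	lock_kernel();
	err = __demo_op(sock, val);
	unlock_kernel();

	return err;
}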
2198 | |||
2086 | /* | 2199 | /* |
2087 | * Function irda_extract_ias_value(ias_opt, ias_value) | 2200 | * Function irda_extract_ias_value(ias_opt, ias_value) |
2088 | * | 2201 | * |
@@ -2135,7 +2248,7 @@ static int irda_extract_ias_value(struct irda_ias_set *ias_opt, | |||
2135 | /* | 2248 | /* |
2136 | * Function irda_getsockopt (sock, level, optname, optval, optlen) | 2249 | * Function irda_getsockopt (sock, level, optname, optval, optlen) |
2137 | */ | 2250 | */ |
2138 | static int irda_getsockopt(struct socket *sock, int level, int optname, | 2251 | static int __irda_getsockopt(struct socket *sock, int level, int optname, |
2139 | char __user *optval, int __user *optlen) | 2252 | char __user *optval, int __user *optlen) |
2140 | { | 2253 | { |
2141 | struct sock *sk = sock->sk; | 2254 | struct sock *sk = sock->sk; |
@@ -2463,13 +2576,25 @@ bed: | |||
2463 | return 0; | 2576 | return 0; |
2464 | } | 2577 | } |
2465 | 2578 | ||
2466 | static struct net_proto_family irda_family_ops = { | 2579 | static int irda_getsockopt(struct socket *sock, int level, int optname, |
2580 | char __user *optval, int __user *optlen) | ||
2581 | { | ||
2582 | int err; | ||
2583 | |||
2584 | lock_kernel(); | ||
2585 | err = __irda_getsockopt(sock, level, optname, optval, optlen); | ||
2586 | unlock_kernel(); | ||
2587 | |||
2588 | return err; | ||
2589 | } | ||
2590 | |||
2591 | static const struct net_proto_family irda_family_ops = { | ||
2467 | .family = PF_IRDA, | 2592 | .family = PF_IRDA, |
2468 | .create = irda_create, | 2593 | .create = irda_create, |
2469 | .owner = THIS_MODULE, | 2594 | .owner = THIS_MODULE, |
2470 | }; | 2595 | }; |
2471 | 2596 | ||
2472 | static const struct proto_ops SOCKOPS_WRAPPED(irda_stream_ops) = { | 2597 | static const struct proto_ops irda_stream_ops = { |
2473 | .family = PF_IRDA, | 2598 | .family = PF_IRDA, |
2474 | .owner = THIS_MODULE, | 2599 | .owner = THIS_MODULE, |
2475 | .release = irda_release, | 2600 | .release = irda_release, |
@@ -2493,7 +2618,7 @@ static const struct proto_ops SOCKOPS_WRAPPED(irda_stream_ops) = { | |||
2493 | .sendpage = sock_no_sendpage, | 2618 | .sendpage = sock_no_sendpage, |
2494 | }; | 2619 | }; |
2495 | 2620 | ||
2496 | static const struct proto_ops SOCKOPS_WRAPPED(irda_seqpacket_ops) = { | 2621 | static const struct proto_ops irda_seqpacket_ops = { |
2497 | .family = PF_IRDA, | 2622 | .family = PF_IRDA, |
2498 | .owner = THIS_MODULE, | 2623 | .owner = THIS_MODULE, |
2499 | .release = irda_release, | 2624 | .release = irda_release, |
@@ -2502,7 +2627,7 @@ static const struct proto_ops SOCKOPS_WRAPPED(irda_seqpacket_ops) = { | |||
2502 | .socketpair = sock_no_socketpair, | 2627 | .socketpair = sock_no_socketpair, |
2503 | .accept = irda_accept, | 2628 | .accept = irda_accept, |
2504 | .getname = irda_getname, | 2629 | .getname = irda_getname, |
2505 | .poll = datagram_poll, | 2630 | .poll = irda_datagram_poll, |
2506 | .ioctl = irda_ioctl, | 2631 | .ioctl = irda_ioctl, |
2507 | #ifdef CONFIG_COMPAT | 2632 | #ifdef CONFIG_COMPAT |
2508 | .compat_ioctl = irda_compat_ioctl, | 2633 | .compat_ioctl = irda_compat_ioctl, |
@@ -2517,7 +2642,7 @@ static const struct proto_ops SOCKOPS_WRAPPED(irda_seqpacket_ops) = { | |||
2517 | .sendpage = sock_no_sendpage, | 2642 | .sendpage = sock_no_sendpage, |
2518 | }; | 2643 | }; |
2519 | 2644 | ||
2520 | static const struct proto_ops SOCKOPS_WRAPPED(irda_dgram_ops) = { | 2645 | static const struct proto_ops irda_dgram_ops = { |
2521 | .family = PF_IRDA, | 2646 | .family = PF_IRDA, |
2522 | .owner = THIS_MODULE, | 2647 | .owner = THIS_MODULE, |
2523 | .release = irda_release, | 2648 | .release = irda_release, |
@@ -2526,7 +2651,7 @@ static const struct proto_ops SOCKOPS_WRAPPED(irda_dgram_ops) = { | |||
2526 | .socketpair = sock_no_socketpair, | 2651 | .socketpair = sock_no_socketpair, |
2527 | .accept = irda_accept, | 2652 | .accept = irda_accept, |
2528 | .getname = irda_getname, | 2653 | .getname = irda_getname, |
2529 | .poll = datagram_poll, | 2654 | .poll = irda_datagram_poll, |
2530 | .ioctl = irda_ioctl, | 2655 | .ioctl = irda_ioctl, |
2531 | #ifdef CONFIG_COMPAT | 2656 | #ifdef CONFIG_COMPAT |
2532 | .compat_ioctl = irda_compat_ioctl, | 2657 | .compat_ioctl = irda_compat_ioctl, |
@@ -2542,7 +2667,7 @@ static const struct proto_ops SOCKOPS_WRAPPED(irda_dgram_ops) = { | |||
2542 | }; | 2667 | }; |
2543 | 2668 | ||
2544 | #ifdef CONFIG_IRDA_ULTRA | 2669 | #ifdef CONFIG_IRDA_ULTRA |
2545 | static const struct proto_ops SOCKOPS_WRAPPED(irda_ultra_ops) = { | 2670 | static const struct proto_ops irda_ultra_ops = { |
2546 | .family = PF_IRDA, | 2671 | .family = PF_IRDA, |
2547 | .owner = THIS_MODULE, | 2672 | .owner = THIS_MODULE, |
2548 | .release = irda_release, | 2673 | .release = irda_release, |
@@ -2551,7 +2676,7 @@ static const struct proto_ops SOCKOPS_WRAPPED(irda_ultra_ops) = { | |||
2551 | .socketpair = sock_no_socketpair, | 2676 | .socketpair = sock_no_socketpair, |
2552 | .accept = sock_no_accept, | 2677 | .accept = sock_no_accept, |
2553 | .getname = irda_getname, | 2678 | .getname = irda_getname, |
2554 | .poll = datagram_poll, | 2679 | .poll = irda_datagram_poll, |
2555 | .ioctl = irda_ioctl, | 2680 | .ioctl = irda_ioctl, |
2556 | #ifdef CONFIG_COMPAT | 2681 | #ifdef CONFIG_COMPAT |
2557 | .compat_ioctl = irda_compat_ioctl, | 2682 | .compat_ioctl = irda_compat_ioctl, |
@@ -2567,13 +2692,6 @@ static const struct proto_ops SOCKOPS_WRAPPED(irda_ultra_ops) = { | |||
2567 | }; | 2692 | }; |
2568 | #endif /* CONFIG_IRDA_ULTRA */ | 2693 | #endif /* CONFIG_IRDA_ULTRA */ |
2569 | 2694 | ||
2570 | SOCKOPS_WRAP(irda_stream, PF_IRDA); | ||
2571 | SOCKOPS_WRAP(irda_seqpacket, PF_IRDA); | ||
2572 | SOCKOPS_WRAP(irda_dgram, PF_IRDA); | ||
2573 | #ifdef CONFIG_IRDA_ULTRA | ||
2574 | SOCKOPS_WRAP(irda_ultra, PF_IRDA); | ||
2575 | #endif /* CONFIG_IRDA_ULTRA */ | ||
2576 | |||
2577 | /* | 2695 | /* |
2578 | * Function irsock_init (pro) | 2696 | * Function irsock_init (pro) |
2579 | * | 2697 | * |
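Because every handler in this file now takes the big kernel lock itself, the SOCKOPS_WRAPPED()/SOCKOPS_WRAP() machinery, which used to generate lock-taking wrappers around each proto_ops entry, can be dropped, and the ops tables become ordinary const structures. A trimmed sketch of such a table; demo_release is a stand-in and most handlers are omitted for brevity:

#include <linux/net.h>
#include <linux/module.h>

static int demo_release(struct socket *sock)
{
	return 0;
}

/* Nothing patches the table at runtime any more, so it can be const
 * and live in read-only data. */
static const struct proto_ops demo_ops = {
	.family   = PF_IRDA,
	.owner    = THIS_MODULE,
	.release  = demo_release,
	.mmap     = sock_no_mmap,
	.sendpage = sock_no_sendpage,
};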
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index bada1b9c670b..1e428863574f 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c | |||
@@ -428,7 +428,6 @@ static void iucv_sock_close(struct sock *sk) | |||
428 | break; | 428 | break; |
429 | 429 | ||
430 | default: | 430 | default: |
431 | sock_set_flag(sk, SOCK_ZAPPED); | ||
432 | /* nothing to do here */ | 431 | /* nothing to do here */ |
433 | break; | 432 | break; |
434 | } | 433 | } |
@@ -482,7 +481,8 @@ static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio) | |||
482 | } | 481 | } |
483 | 482 | ||
484 | /* Create an IUCV socket */ | 483 | /* Create an IUCV socket */ |
485 | static int iucv_sock_create(struct net *net, struct socket *sock, int protocol) | 484 | static int iucv_sock_create(struct net *net, struct socket *sock, int protocol, |
485 | int kern) | ||
486 | { | 486 | { |
487 | struct sock *sk; | 487 | struct sock *sk; |
488 | 488 | ||
@@ -536,7 +536,7 @@ void iucv_accept_enqueue(struct sock *parent, struct sock *sk) | |||
536 | list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q); | 536 | list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q); |
537 | spin_unlock_irqrestore(&par->accept_q_lock, flags); | 537 | spin_unlock_irqrestore(&par->accept_q_lock, flags); |
538 | iucv_sk(sk)->parent = parent; | 538 | iucv_sk(sk)->parent = parent; |
539 | parent->sk_ack_backlog++; | 539 | sk_acceptq_added(parent); |
540 | } | 540 | } |
541 | 541 | ||
542 | void iucv_accept_unlink(struct sock *sk) | 542 | void iucv_accept_unlink(struct sock *sk) |
@@ -547,7 +547,7 @@ void iucv_accept_unlink(struct sock *sk) | |||
547 | spin_lock_irqsave(&par->accept_q_lock, flags); | 547 | spin_lock_irqsave(&par->accept_q_lock, flags); |
548 | list_del_init(&iucv_sk(sk)->accept_q); | 548 | list_del_init(&iucv_sk(sk)->accept_q); |
549 | spin_unlock_irqrestore(&par->accept_q_lock, flags); | 549 | spin_unlock_irqrestore(&par->accept_q_lock, flags); |
550 | iucv_sk(sk)->parent->sk_ack_backlog--; | 550 | sk_acceptq_removed(iucv_sk(sk)->parent); |
551 | iucv_sk(sk)->parent = NULL; | 551 | iucv_sk(sk)->parent = NULL; |
552 | sock_put(sk); | 552 | sock_put(sk); |
553 | } | 553 | } |
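The open-coded sk_ack_backlog++ and sk_ack_backlog-- in af_iucv are replaced with the sk_acceptq_added() and sk_acceptq_removed() helpers from net/sock.h, keeping the accept-queue accounting consistent with what sk_acceptq_is_full() compares against sk_max_ack_backlog. A small sketch of the intended usage, with the listening socket called parent:

#include <net/sock.h>
#include <linux/errno.h>

static int demo_enqueue_child(struct sock *parent)
{
	if (sk_acceptq_is_full(parent))
		return -ENOMEM;		/* backlog limit already reached */
	sk_acceptq_added(parent);	/* parent->sk_ack_backlog++ */
	return 0;
}

static void demo_dequeue_child(struct sock *parent)
{
	sk_acceptq_removed(parent);	/* parent->sk_ack_backlog-- */
}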
@@ -1715,7 +1715,7 @@ static const struct proto_ops iucv_sock_ops = { | |||
1715 | .getsockopt = iucv_sock_getsockopt, | 1715 | .getsockopt = iucv_sock_getsockopt, |
1716 | }; | 1716 | }; |
1717 | 1717 | ||
1718 | static struct net_proto_family iucv_sock_family_ops = { | 1718 | static const struct net_proto_family iucv_sock_family_ops = { |
1719 | .family = AF_IUCV, | 1719 | .family = AF_IUCV, |
1720 | .owner = THIS_MODULE, | 1720 | .owner = THIS_MODULE, |
1721 | .create = iucv_sock_create, | 1721 | .create = iucv_sock_create, |
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c index 3973d0e61e56..3b1f5f5f8de7 100644 --- a/net/iucv/iucv.c +++ b/net/iucv/iucv.c | |||
@@ -1768,7 +1768,6 @@ static void iucv_tasklet_fn(unsigned long ignored) | |||
1768 | */ | 1768 | */ |
1769 | static void iucv_work_fn(struct work_struct *work) | 1769 | static void iucv_work_fn(struct work_struct *work) |
1770 | { | 1770 | { |
1771 | typedef void iucv_irq_fn(struct iucv_irq_data *); | ||
1772 | LIST_HEAD(work_queue); | 1771 | LIST_HEAD(work_queue); |
1773 | struct iucv_irq_list *p, *n; | 1772 | struct iucv_irq_list *p, *n; |
1774 | 1773 | ||
@@ -1878,14 +1877,25 @@ int iucv_path_table_empty(void) | |||
1878 | static int iucv_pm_freeze(struct device *dev) | 1877 | static int iucv_pm_freeze(struct device *dev) |
1879 | { | 1878 | { |
1880 | int cpu; | 1879 | int cpu; |
1880 | struct iucv_irq_list *p, *n; | ||
1881 | int rc = 0; | 1881 | int rc = 0; |
1882 | 1882 | ||
1883 | #ifdef CONFIG_PM_DEBUG | 1883 | #ifdef CONFIG_PM_DEBUG |
1884 | printk(KERN_WARNING "iucv_pm_freeze\n"); | 1884 | printk(KERN_WARNING "iucv_pm_freeze\n"); |
1885 | #endif | 1885 | #endif |
1886 | if (iucv_pm_state != IUCV_PM_FREEZING) { | ||
1887 | for_each_cpu_mask_nr(cpu, iucv_irq_cpumask) | ||
1888 | smp_call_function_single(cpu, iucv_block_cpu_almost, | ||
1889 | NULL, 1); | ||
1890 | cancel_work_sync(&iucv_work); | ||
1891 | list_for_each_entry_safe(p, n, &iucv_work_queue, list) { | ||
1892 | list_del_init(&p->list); | ||
1893 | iucv_sever_pathid(p->data.ippathid, | ||
1894 | iucv_error_no_listener); | ||
1895 | kfree(p); | ||
1896 | } | ||
1897 | } | ||
1886 | iucv_pm_state = IUCV_PM_FREEZING; | 1898 | iucv_pm_state = IUCV_PM_FREEZING; |
1887 | for_each_cpu_mask_nr(cpu, iucv_irq_cpumask) | ||
1888 | smp_call_function_single(cpu, iucv_block_cpu_almost, NULL, 1); | ||
1889 | if (dev->driver && dev->driver->pm && dev->driver->pm->freeze) | 1899 | if (dev->driver && dev->driver->pm && dev->driver->pm->freeze) |
1890 | rc = dev->driver->pm->freeze(dev); | 1900 | rc = dev->driver->pm->freeze(dev); |
1891 | if (iucv_path_table_empty()) | 1901 | if (iucv_path_table_empty()) |
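iucv_pm_freeze() now quiesces the interrupt path and empties the deferred-work queue before the first freeze, guarded by iucv_pm_state so a repeated freeze does not drain twice: interrupts are blocked on every CPU, cancel_work_sync() waits for the worker, and whatever is still queued is severed and freed. The sketch below shows only the generic drain step, with hypothetical names and without the producer-side locking a real driver would need:

#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_pending {
	struct list_head list;
	u32 pathid;
};

static LIST_HEAD(demo_queue);
static struct work_struct demo_work;

static void demo_drain_on_freeze(void)
{
	struct demo_pending *p, *n;

	cancel_work_sync(&demo_work);	/* the worker has finished or never ran */
	list_for_each_entry_safe(p, n, &demo_queue, list) {
		list_del_init(&p->list);
		/* a real driver would sever or notify here before freeing */
		kfree(p);
	}
}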
diff --git a/net/key/af_key.c b/net/key/af_key.c index 4e98193dfa0f..478c8b32a5fb 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c | |||
@@ -35,7 +35,7 @@ | |||
35 | #define _X2KEY(x) ((x) == XFRM_INF ? 0 : (x)) | 35 | #define _X2KEY(x) ((x) == XFRM_INF ? 0 : (x)) |
36 | #define _KEY2X(x) ((x) == 0 ? XFRM_INF : (x)) | 36 | #define _KEY2X(x) ((x) == 0 ? XFRM_INF : (x)) |
37 | 37 | ||
38 | static int pfkey_net_id; | 38 | static int pfkey_net_id __read_mostly; |
39 | struct netns_pfkey { | 39 | struct netns_pfkey { |
40 | /* List of all pfkey sockets. */ | 40 | /* List of all pfkey sockets. */ |
41 | struct hlist_head table; | 41 | struct hlist_head table; |
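pfkey_net_id is written once when the per-net subsystem registers and only read afterwards, so the __read_mostly annotation groups it with other rarely written data and keeps it off write-hot cache lines. A minimal sketch of the annotation, with an illustrative init assignment:

#include <linux/cache.h>
#include <linux/init.h>

/* Assigned once during init, read on every per-net lookup afterwards. */
static int demo_net_id __read_mostly;

static int __init demo_init(void)
{
	demo_net_id = 1;	/* stand-in for the id handed back by registration */
	return 0;
}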
@@ -177,7 +177,8 @@ static struct proto key_proto = { | |||
177 | .obj_size = sizeof(struct pfkey_sock), | 177 | .obj_size = sizeof(struct pfkey_sock), |
178 | }; | 178 | }; |
179 | 179 | ||
180 | static int pfkey_create(struct net *net, struct socket *sock, int protocol) | 180 | static int pfkey_create(struct net *net, struct socket *sock, int protocol, |
181 | int kern) | ||
181 | { | 182 | { |
182 | struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id); | 183 | struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id); |
183 | struct sock *sk; | 184 | struct sock *sk; |
@@ -3606,7 +3607,7 @@ static int pfkey_recvmsg(struct kiocb *kiocb, | |||
3606 | if (err) | 3607 | if (err) |
3607 | goto out_free; | 3608 | goto out_free; |
3608 | 3609 | ||
3609 | sock_recv_timestamp(msg, sk, skb); | 3610 | sock_recv_ts_and_drops(msg, sk, skb); |
3610 | 3611 | ||
3611 | err = (flags & MSG_TRUNC) ? skb->len : copied; | 3612 | err = (flags & MSG_TRUNC) ? skb->len : copied; |
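pfkey_recvmsg() moves from sock_recv_timestamp() to sock_recv_ts_and_drops(), which emits the timestamp ancillary data as before and, when the socket has asked for it, a receive-queue overflow count as well. A hedged sketch of where the call usually sits in a recvmsg path, with everything around it elided:

#include <net/sock.h>
#include <linux/skbuff.h>
#include <linux/socket.h>

static int demo_recvmsg_tail(struct sock *sk, struct msghdr *msg,
			     struct sk_buff *skb, int copied, int flags)
{
	/* after the payload has been copied out, before returning the count */
	sock_recv_ts_and_drops(msg, sk, skb);

	return (flags & MSG_TRUNC) ? skb->len : copied;
}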
3612 | 3613 | ||
@@ -3644,7 +3645,7 @@ static const struct proto_ops pfkey_ops = { | |||
3644 | .recvmsg = pfkey_recvmsg, | 3645 | .recvmsg = pfkey_recvmsg, |
3645 | }; | 3646 | }; |
3646 | 3647 | ||
3647 | static struct net_proto_family pfkey_family_ops = { | 3648 | static const struct net_proto_family pfkey_family_ops = { |
3648 | .family = PF_KEY, | 3649 | .family = PF_KEY, |
3649 | .create = pfkey_create, | 3650 | .create = pfkey_create, |
3650 | .owner = THIS_MODULE, | 3651 | .owner = THIS_MODULE, |
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index 7aa4fd170104..5266c286b260 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c | |||
@@ -140,14 +140,17 @@ static struct proto llc_proto = { | |||
140 | 140 | ||
141 | /** | 141 | /** |
142 | * llc_ui_create - alloc and init a new llc_ui socket | 142 | * llc_ui_create - alloc and init a new llc_ui socket |
143 | * @net: network namespace (must be default network) | ||
143 | * @sock: Socket to initialize and attach allocated sk to. | 144 | * @sock: Socket to initialize and attach allocated sk to. |
144 | * @protocol: Unused. | 145 | * @protocol: Unused. |
146 | * @kern: on behalf of kernel or userspace | ||
145 | * | 147 | * |
146 | * Allocate and initialize a new llc_ui socket, validate the user wants a | 148 | * Allocate and initialize a new llc_ui socket, validate the user wants a |
147 | * socket type we have available. | 149 | * socket type we have available. |
148 | * Returns 0 upon success, negative upon failure. | 150 | * Returns 0 upon success, negative upon failure. |
149 | */ | 151 | */ |
150 | static int llc_ui_create(struct net *net, struct socket *sock, int protocol) | 152 | static int llc_ui_create(struct net *net, struct socket *sock, int protocol, |
153 | int kern) | ||
151 | { | 154 | { |
152 | struct sock *sk; | 155 | struct sock *sk; |
153 | int rc = -ESOCKTNOSUPPORT; | 156 | int rc = -ESOCKTNOSUPPORT; |
@@ -1092,7 +1095,7 @@ out: | |||
1092 | return rc; | 1095 | return rc; |
1093 | } | 1096 | } |
1094 | 1097 | ||
1095 | static struct net_proto_family llc_ui_family_ops = { | 1098 | static const struct net_proto_family llc_ui_family_ops = { |
1096 | .family = PF_LLC, | 1099 | .family = PF_LLC, |
1097 | .create = llc_ui_create, | 1100 | .create = llc_ui_create, |
1098 | .owner = THIS_MODULE, | 1101 | .owner = THIS_MODULE, |
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig index 4d5543af3123..a10d508b07e1 100644 --- a/net/mac80211/Kconfig +++ b/net/mac80211/Kconfig | |||
@@ -194,6 +194,19 @@ config MAC80211_VERBOSE_MPL_DEBUG | |||
194 | 194 | ||
195 | Do not select this option. | 195 | Do not select this option. |
196 | 196 | ||
197 | config MAC80211_VERBOSE_MHWMP_DEBUG | ||
198 | bool "Verbose mesh HWMP routing debugging" | ||
199 | depends on MAC80211_DEBUG_MENU | ||
200 | depends on MAC80211_MESH | ||
201 | ---help--- | ||
202 | Selecting this option causes mac80211 to print out very | ||
203 | verbose mesh routing (HWMP) debugging messages (when mac80211 | ||
204 | is taking part in a mesh network). | ||
205 | It should not be selected on production systems as those | ||
206 | messages are remotely triggerable. | ||
207 | |||
208 | Do not select this option. | ||
209 | |||
197 | config MAC80211_DEBUG_COUNTERS | 210 | config MAC80211_DEBUG_COUNTERS |
198 | bool "Extra statistics for TX/RX debugging" | 211 | bool "Extra statistics for TX/RX debugging" |
199 | depends on MAC80211_DEBUG_MENU | 212 | depends on MAC80211_DEBUG_MENU |
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 7b5131bd6fa1..7f18c8fa1880 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c | |||
@@ -36,6 +36,24 @@ static bool nl80211_type_check(enum nl80211_iftype type) | |||
36 | } | 36 | } |
37 | } | 37 | } |
38 | 38 | ||
39 | static bool nl80211_params_check(enum nl80211_iftype type, | ||
40 | struct vif_params *params) | ||
41 | { | ||
42 | if (!nl80211_type_check(type)) | ||
43 | return false; | ||
44 | |||
45 | if (params->use_4addr > 0) { | ||
46 | switch(type) { | ||
47 | case NL80211_IFTYPE_AP_VLAN: | ||
48 | case NL80211_IFTYPE_STATION: | ||
49 | break; | ||
50 | default: | ||
51 | return false; | ||
52 | } | ||
53 | } | ||
54 | return true; | ||
55 | } | ||
56 | |||
39 | static int ieee80211_add_iface(struct wiphy *wiphy, char *name, | 57 | static int ieee80211_add_iface(struct wiphy *wiphy, char *name, |
40 | enum nl80211_iftype type, u32 *flags, | 58 | enum nl80211_iftype type, u32 *flags, |
41 | struct vif_params *params) | 59 | struct vif_params *params) |
@@ -45,7 +63,7 @@ static int ieee80211_add_iface(struct wiphy *wiphy, char *name, | |||
45 | struct ieee80211_sub_if_data *sdata; | 63 | struct ieee80211_sub_if_data *sdata; |
46 | int err; | 64 | int err; |
47 | 65 | ||
48 | if (!nl80211_type_check(type)) | 66 | if (!nl80211_params_check(type, params)) |
49 | return -EINVAL; | 67 | return -EINVAL; |
50 | 68 | ||
51 | err = ieee80211_if_add(local, name, &dev, type, params); | 69 | err = ieee80211_if_add(local, name, &dev, type, params); |
@@ -75,7 +93,7 @@ static int ieee80211_change_iface(struct wiphy *wiphy, | |||
75 | if (netif_running(dev)) | 93 | if (netif_running(dev)) |
76 | return -EBUSY; | 94 | return -EBUSY; |
77 | 95 | ||
78 | if (!nl80211_type_check(type)) | 96 | if (!nl80211_params_check(type, params)) |
79 | return -EINVAL; | 97 | return -EINVAL; |
80 | 98 | ||
81 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 99 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
@@ -89,6 +107,9 @@ static int ieee80211_change_iface(struct wiphy *wiphy, | |||
89 | params->mesh_id_len, | 107 | params->mesh_id_len, |
90 | params->mesh_id); | 108 | params->mesh_id); |
91 | 109 | ||
110 | if (params->use_4addr >= 0) | ||
111 | sdata->use_4addr = !!params->use_4addr; | ||
112 | |||
92 | if (sdata->vif.type != NL80211_IFTYPE_MONITOR || !flags) | 113 | if (sdata->vif.type != NL80211_IFTYPE_MONITOR || !flags) |
93 | return 0; | 114 | return 0; |
94 | 115 | ||
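In vif_params, use_4addr acts as a tri-state: a negative value means the attribute was not supplied, so ieee80211_change_iface() only touches sdata->use_4addr when userspace actually sent it, and !! normalizes whatever was sent to 0 or 1. A plain-C sketch of that convention:

/* requested < 0 means "not supplied"; anything else becomes 0 or 1. */
static void apply_use_4addr(int requested, unsigned int *use_4addr)
{
	if (requested >= 0)
		*use_4addr = !!requested;
}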
@@ -738,13 +759,6 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev, | |||
738 | 759 | ||
739 | err = sta_info_insert(sta); | 760 | err = sta_info_insert(sta); |
740 | if (err) { | 761 | if (err) { |
741 | /* STA has been freed */ | ||
742 | if (err == -EEXIST && layer2_update) { | ||
743 | /* Need to update layer 2 devices on reassociation */ | ||
744 | sta = sta_info_get(local, mac); | ||
745 | if (sta) | ||
746 | ieee80211_send_layer2_update(sta); | ||
747 | } | ||
748 | rcu_read_unlock(); | 762 | rcu_read_unlock(); |
749 | return err; | 763 | return err; |
750 | } | 764 | } |
@@ -813,6 +827,13 @@ static int ieee80211_change_station(struct wiphy *wiphy, | |||
813 | return -EINVAL; | 827 | return -EINVAL; |
814 | } | 828 | } |
815 | 829 | ||
830 | if (vlansdata->use_4addr) { | ||
831 | if (vlansdata->u.vlan.sta) | ||
832 | return -EBUSY; | ||
833 | |||
834 | rcu_assign_pointer(vlansdata->u.vlan.sta, sta); | ||
835 | } | ||
836 | |||
816 | sta->sdata = vlansdata; | 837 | sta->sdata = vlansdata; |
817 | ieee80211_send_layer2_update(sta); | 838 | ieee80211_send_layer2_update(sta); |
818 | } | 839 | } |
@@ -914,7 +935,7 @@ static void mpath_set_pinfo(struct mesh_path *mpath, u8 *next_hop, | |||
914 | pinfo->generation = mesh_paths_generation; | 935 | pinfo->generation = mesh_paths_generation; |
915 | 936 | ||
916 | pinfo->filled = MPATH_INFO_FRAME_QLEN | | 937 | pinfo->filled = MPATH_INFO_FRAME_QLEN | |
917 | MPATH_INFO_DSN | | 938 | MPATH_INFO_SN | |
918 | MPATH_INFO_METRIC | | 939 | MPATH_INFO_METRIC | |
919 | MPATH_INFO_EXPTIME | | 940 | MPATH_INFO_EXPTIME | |
920 | MPATH_INFO_DISCOVERY_TIMEOUT | | 941 | MPATH_INFO_DISCOVERY_TIMEOUT | |
@@ -922,7 +943,7 @@ static void mpath_set_pinfo(struct mesh_path *mpath, u8 *next_hop, | |||
922 | MPATH_INFO_FLAGS; | 943 | MPATH_INFO_FLAGS; |
923 | 944 | ||
924 | pinfo->frame_qlen = mpath->frame_queue.qlen; | 945 | pinfo->frame_qlen = mpath->frame_queue.qlen; |
925 | pinfo->dsn = mpath->dsn; | 946 | pinfo->sn = mpath->sn; |
926 | pinfo->metric = mpath->metric; | 947 | pinfo->metric = mpath->metric; |
927 | if (time_before(jiffies, mpath->exp_time)) | 948 | if (time_before(jiffies, mpath->exp_time)) |
928 | pinfo->exptime = jiffies_to_msecs(mpath->exp_time - jiffies); | 949 | pinfo->exptime = jiffies_to_msecs(mpath->exp_time - jiffies); |
@@ -934,8 +955,8 @@ static void mpath_set_pinfo(struct mesh_path *mpath, u8 *next_hop, | |||
934 | pinfo->flags |= NL80211_MPATH_FLAG_ACTIVE; | 955 | pinfo->flags |= NL80211_MPATH_FLAG_ACTIVE; |
935 | if (mpath->flags & MESH_PATH_RESOLVING) | 956 | if (mpath->flags & MESH_PATH_RESOLVING) |
936 | pinfo->flags |= NL80211_MPATH_FLAG_RESOLVING; | 957 | pinfo->flags |= NL80211_MPATH_FLAG_RESOLVING; |
937 | if (mpath->flags & MESH_PATH_DSN_VALID) | 958 | if (mpath->flags & MESH_PATH_SN_VALID) |
938 | pinfo->flags |= NL80211_MPATH_FLAG_DSN_VALID; | 959 | pinfo->flags |= NL80211_MPATH_FLAG_SN_VALID; |
939 | if (mpath->flags & MESH_PATH_FIXED) | 960 | if (mpath->flags & MESH_PATH_FIXED) |
940 | pinfo->flags |= NL80211_MPATH_FLAG_FIXED; | 961 | pinfo->flags |= NL80211_MPATH_FLAG_FIXED; |
941 | if (mpath->flags & MESH_PATH_RESOLVING) | 962 | if (mpath->flags & MESH_PATH_RESOLVING) |
@@ -1008,7 +1029,10 @@ static int ieee80211_set_mesh_params(struct wiphy *wiphy, | |||
1008 | { | 1029 | { |
1009 | struct mesh_config *conf; | 1030 | struct mesh_config *conf; |
1010 | struct ieee80211_sub_if_data *sdata; | 1031 | struct ieee80211_sub_if_data *sdata; |
1032 | struct ieee80211_if_mesh *ifmsh; | ||
1033 | |||
1011 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 1034 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
1035 | ifmsh = &sdata->u.mesh; | ||
1012 | 1036 | ||
1013 | /* Set the config options which we are interested in setting */ | 1037 | /* Set the config options which we are interested in setting */ |
1014 | conf = &(sdata->u.mesh.mshcfg); | 1038 | conf = &(sdata->u.mesh.mshcfg); |
@@ -1043,6 +1067,10 @@ static int ieee80211_set_mesh_params(struct wiphy *wiphy, | |||
1043 | mask)) | 1067 | mask)) |
1044 | conf->dot11MeshHWMPnetDiameterTraversalTime = | 1068 | conf->dot11MeshHWMPnetDiameterTraversalTime = |
1045 | nconf->dot11MeshHWMPnetDiameterTraversalTime; | 1069 | nconf->dot11MeshHWMPnetDiameterTraversalTime; |
1070 | if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_ROOTMODE, mask)) { | ||
1071 | conf->dot11MeshHWMPRootMode = nconf->dot11MeshHWMPRootMode; | ||
1072 | ieee80211_mesh_root_setup(ifmsh); | ||
1073 | } | ||
1046 | return 0; | 1074 | return 0; |
1047 | } | 1075 | } |
1048 | 1076 | ||
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c index 96991b68f048..82c807723b6f 100644 --- a/net/mac80211/debugfs.c +++ b/net/mac80211/debugfs.c | |||
@@ -1,3 +1,4 @@ | |||
1 | |||
1 | /* | 2 | /* |
2 | * mac80211 debugfs for wireless PHYs | 3 | * mac80211 debugfs for wireless PHYs |
3 | * | 4 | * |
@@ -38,16 +39,10 @@ static const struct file_operations name## _ops = { \ | |||
38 | }; | 39 | }; |
39 | 40 | ||
40 | #define DEBUGFS_ADD(name) \ | 41 | #define DEBUGFS_ADD(name) \ |
41 | local->debugfs.name = debugfs_create_file(#name, 0400, phyd, \ | 42 | debugfs_create_file(#name, 0400, phyd, local, &name## _ops); |
42 | local, &name## _ops); | ||
43 | 43 | ||
44 | #define DEBUGFS_ADD_MODE(name, mode) \ | 44 | #define DEBUGFS_ADD_MODE(name, mode) \ |
45 | local->debugfs.name = debugfs_create_file(#name, mode, phyd, \ | 45 | debugfs_create_file(#name, mode, phyd, local, &name## _ops); |
46 | local, &name## _ops); | ||
47 | |||
48 | #define DEBUGFS_DEL(name) \ | ||
49 | debugfs_remove(local->debugfs.name); \ | ||
50 | local->debugfs.name = NULL; | ||
51 | 46 | ||
52 | 47 | ||
53 | DEBUGFS_READONLY_FILE(frequency, 20, "%d", | 48 | DEBUGFS_READONLY_FILE(frequency, 20, "%d", |
@@ -233,12 +228,7 @@ static const struct file_operations stats_ ##name## _ops = { \ | |||
233 | }; | 228 | }; |
234 | 229 | ||
235 | #define DEBUGFS_STATS_ADD(name) \ | 230 | #define DEBUGFS_STATS_ADD(name) \ |
236 | local->debugfs.stats.name = debugfs_create_file(#name, 0400, statsd,\ | 231 | debugfs_create_file(#name, 0400, statsd, local, &stats_ ##name## _ops); |
237 | local, &stats_ ##name## _ops); | ||
238 | |||
239 | #define DEBUGFS_STATS_DEL(name) \ | ||
240 | debugfs_remove(local->debugfs.stats.name); \ | ||
241 | local->debugfs.stats.name = NULL; | ||
242 | 232 | ||
243 | DEBUGFS_STATS_FILE(transmitted_fragment_count, 20, "%u", | 233 | DEBUGFS_STATS_FILE(transmitted_fragment_count, 20, "%u", |
244 | local->dot11TransmittedFragmentCount); | 234 | local->dot11TransmittedFragmentCount); |
@@ -326,7 +316,6 @@ void debugfs_hw_add(struct ieee80211_local *local) | |||
326 | DEBUGFS_ADD(noack); | 316 | DEBUGFS_ADD(noack); |
327 | 317 | ||
328 | statsd = debugfs_create_dir("statistics", phyd); | 318 | statsd = debugfs_create_dir("statistics", phyd); |
329 | local->debugfs.statistics = statsd; | ||
330 | 319 | ||
331 | /* if the dir failed, don't put all the other things into the root! */ | 320 | /* if the dir failed, don't put all the other things into the root! */ |
332 | if (!statsd) | 321 | if (!statsd) |
@@ -367,57 +356,3 @@ void debugfs_hw_add(struct ieee80211_local *local) | |||
367 | DEBUGFS_STATS_ADD(dot11FCSErrorCount); | 356 | DEBUGFS_STATS_ADD(dot11FCSErrorCount); |
368 | DEBUGFS_STATS_ADD(dot11RTSSuccessCount); | 357 | DEBUGFS_STATS_ADD(dot11RTSSuccessCount); |
369 | } | 358 | } |
370 | |||
371 | void debugfs_hw_del(struct ieee80211_local *local) | ||
372 | { | ||
373 | DEBUGFS_DEL(frequency); | ||
374 | DEBUGFS_DEL(total_ps_buffered); | ||
375 | DEBUGFS_DEL(wep_iv); | ||
376 | DEBUGFS_DEL(tsf); | ||
377 | DEBUGFS_DEL(queues); | ||
378 | DEBUGFS_DEL(reset); | ||
379 | DEBUGFS_DEL(noack); | ||
380 | |||
381 | DEBUGFS_STATS_DEL(transmitted_fragment_count); | ||
382 | DEBUGFS_STATS_DEL(multicast_transmitted_frame_count); | ||
383 | DEBUGFS_STATS_DEL(failed_count); | ||
384 | DEBUGFS_STATS_DEL(retry_count); | ||
385 | DEBUGFS_STATS_DEL(multiple_retry_count); | ||
386 | DEBUGFS_STATS_DEL(frame_duplicate_count); | ||
387 | DEBUGFS_STATS_DEL(received_fragment_count); | ||
388 | DEBUGFS_STATS_DEL(multicast_received_frame_count); | ||
389 | DEBUGFS_STATS_DEL(transmitted_frame_count); | ||
390 | DEBUGFS_STATS_DEL(num_scans); | ||
391 | #ifdef CONFIG_MAC80211_DEBUG_COUNTERS | ||
392 | DEBUGFS_STATS_DEL(tx_handlers_drop); | ||
393 | DEBUGFS_STATS_DEL(tx_handlers_queued); | ||
394 | DEBUGFS_STATS_DEL(tx_handlers_drop_unencrypted); | ||
395 | DEBUGFS_STATS_DEL(tx_handlers_drop_fragment); | ||
396 | DEBUGFS_STATS_DEL(tx_handlers_drop_wep); | ||
397 | DEBUGFS_STATS_DEL(tx_handlers_drop_not_assoc); | ||
398 | DEBUGFS_STATS_DEL(tx_handlers_drop_unauth_port); | ||
399 | DEBUGFS_STATS_DEL(rx_handlers_drop); | ||
400 | DEBUGFS_STATS_DEL(rx_handlers_queued); | ||
401 | DEBUGFS_STATS_DEL(rx_handlers_drop_nullfunc); | ||
402 | DEBUGFS_STATS_DEL(rx_handlers_drop_defrag); | ||
403 | DEBUGFS_STATS_DEL(rx_handlers_drop_short); | ||
404 | DEBUGFS_STATS_DEL(rx_handlers_drop_passive_scan); | ||
405 | DEBUGFS_STATS_DEL(tx_expand_skb_head); | ||
406 | DEBUGFS_STATS_DEL(tx_expand_skb_head_cloned); | ||
407 | DEBUGFS_STATS_DEL(rx_expand_skb_head); | ||
408 | DEBUGFS_STATS_DEL(rx_expand_skb_head2); | ||
409 | DEBUGFS_STATS_DEL(rx_handlers_fragments); | ||
410 | DEBUGFS_STATS_DEL(tx_status_drop); | ||
411 | #endif | ||
412 | DEBUGFS_STATS_DEL(dot11ACKFailureCount); | ||
413 | DEBUGFS_STATS_DEL(dot11RTSFailureCount); | ||
414 | DEBUGFS_STATS_DEL(dot11FCSErrorCount); | ||
415 | DEBUGFS_STATS_DEL(dot11RTSSuccessCount); | ||
416 | |||
417 | debugfs_remove(local->debugfs.statistics); | ||
418 | local->debugfs.statistics = NULL; | ||
419 | debugfs_remove(local->debugfs.stations); | ||
420 | local->debugfs.stations = NULL; | ||
421 | debugfs_remove(local->debugfs.keys); | ||
422 | local->debugfs.keys = NULL; | ||
423 | } | ||
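
The debugfs.c hunk above drops debugfs_hw_del() and all of the per-file dentry bookkeeping: files are now created fire-and-forget, and cleanup falls to whoever removes the containing directory. A minimal sketch of that pattern, assuming a hypothetical struct foo_stats and foo_count_ops (neither is from the patch) and using debugfs_remove_recursive() for teardown the way the per-key and per-station paths below do:

    #include <linux/types.h>
    #include <linux/debugfs.h>

    struct foo_stats {
            struct dentry *dir;     /* only the directory is kept */
            u32 count;
    };

    /* placeholder fops; a real file would supply .open/.read */
    static const struct file_operations foo_count_ops = { };

    static void foo_debugfs_add(struct foo_stats *s, struct dentry *parent)
    {
            s->dir = debugfs_create_dir("statistics", parent);
            if (!s->dir)
                    return;
            /* return value deliberately ignored, as in the patch */
            debugfs_create_file("count", 0400, s->dir, s, &foo_count_ops);
    }

    static void foo_debugfs_del(struct foo_stats *s)
    {
            /* removes the directory and every file created under it */
            debugfs_remove_recursive(s->dir);
            s->dir = NULL;
    }
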
diff --git a/net/mac80211/debugfs.h b/net/mac80211/debugfs.h index dd2541935c27..68e6a2050f9a 100644 --- a/net/mac80211/debugfs.h +++ b/net/mac80211/debugfs.h | |||
@@ -3,14 +3,12 @@ | |||
3 | 3 | ||
4 | #ifdef CONFIG_MAC80211_DEBUGFS | 4 | #ifdef CONFIG_MAC80211_DEBUGFS |
5 | extern void debugfs_hw_add(struct ieee80211_local *local); | 5 | extern void debugfs_hw_add(struct ieee80211_local *local); |
6 | extern void debugfs_hw_del(struct ieee80211_local *local); | ||
7 | extern int mac80211_open_file_generic(struct inode *inode, struct file *file); | 6 | extern int mac80211_open_file_generic(struct inode *inode, struct file *file); |
8 | #else | 7 | #else |
9 | static inline void debugfs_hw_add(struct ieee80211_local *local) | 8 | static inline void debugfs_hw_add(struct ieee80211_local *local) |
10 | { | 9 | { |
11 | return; | 10 | return; |
12 | } | 11 | } |
13 | static inline void debugfs_hw_del(struct ieee80211_local *local) {} | ||
14 | #endif | 12 | #endif |
15 | 13 | ||
16 | #endif /* __MAC80211_DEBUGFS_H */ | 14 | #endif /* __MAC80211_DEBUGFS_H */ |
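
debugfs.h keeps the usual config-gated stub pattern: with CONFIG_MAC80211_DEBUGFS disabled, debugfs_hw_add() collapses to an empty static inline, and the deleted debugfs_hw_del() stub disappears along with its real implementation. A generic sketch of the same pattern with placeholder names:

    /* example.h -- placeholder header, not part of the patch */
    struct example_dev;

    #ifdef CONFIG_EXAMPLE_DEBUGFS
    extern void example_debugfs_add(struct example_dev *dev);
    #else
    /* compiles away to nothing when debugfs support is disabled */
    static inline void example_debugfs_add(struct example_dev *dev)
    {
    }
    #endif
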
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c index 99c752588b30..e0f5224630da 100644 --- a/net/mac80211/debugfs_key.c +++ b/net/mac80211/debugfs_key.c | |||
@@ -225,8 +225,8 @@ static ssize_t key_key_read(struct file *file, char __user *userbuf, | |||
225 | KEY_OPS(key); | 225 | KEY_OPS(key); |
226 | 226 | ||
227 | #define DEBUGFS_ADD(name) \ | 227 | #define DEBUGFS_ADD(name) \ |
228 | key->debugfs.name = debugfs_create_file(#name, 0400,\ | 228 | debugfs_create_file(#name, 0400, key->debugfs.dir, \ |
229 | key->debugfs.dir, key, &key_##name##_ops); | 229 | key, &key_##name##_ops); |
230 | 230 | ||
231 | void ieee80211_debugfs_key_add(struct ieee80211_key *key) | 231 | void ieee80211_debugfs_key_add(struct ieee80211_key *key) |
232 | { | 232 | { |
@@ -271,30 +271,12 @@ void ieee80211_debugfs_key_add(struct ieee80211_key *key) | |||
271 | DEBUGFS_ADD(ifindex); | 271 | DEBUGFS_ADD(ifindex); |
272 | }; | 272 | }; |
273 | 273 | ||
274 | #define DEBUGFS_DEL(name) \ | ||
275 | debugfs_remove(key->debugfs.name); key->debugfs.name = NULL; | ||
276 | |||
277 | void ieee80211_debugfs_key_remove(struct ieee80211_key *key) | 274 | void ieee80211_debugfs_key_remove(struct ieee80211_key *key) |
278 | { | 275 | { |
279 | if (!key) | 276 | if (!key) |
280 | return; | 277 | return; |
281 | 278 | ||
282 | DEBUGFS_DEL(keylen); | 279 | debugfs_remove_recursive(key->debugfs.dir); |
283 | DEBUGFS_DEL(flags); | ||
284 | DEBUGFS_DEL(keyidx); | ||
285 | DEBUGFS_DEL(hw_key_idx); | ||
286 | DEBUGFS_DEL(tx_rx_count); | ||
287 | DEBUGFS_DEL(algorithm); | ||
288 | DEBUGFS_DEL(tx_spec); | ||
289 | DEBUGFS_DEL(rx_spec); | ||
290 | DEBUGFS_DEL(replays); | ||
291 | DEBUGFS_DEL(icverrors); | ||
292 | DEBUGFS_DEL(key); | ||
293 | DEBUGFS_DEL(ifindex); | ||
294 | |||
295 | debugfs_remove(key->debugfs.stalink); | ||
296 | key->debugfs.stalink = NULL; | ||
297 | debugfs_remove(key->debugfs.dir); | ||
298 | key->debugfs.dir = NULL; | 280 | key->debugfs.dir = NULL; |
299 | } | 281 | } |
300 | void ieee80211_debugfs_key_add_default(struct ieee80211_sub_if_data *sdata) | 282 | void ieee80211_debugfs_key_add_default(struct ieee80211_sub_if_data *sdata) |
@@ -302,7 +284,7 @@ void ieee80211_debugfs_key_add_default(struct ieee80211_sub_if_data *sdata) | |||
302 | char buf[50]; | 284 | char buf[50]; |
303 | struct ieee80211_key *key; | 285 | struct ieee80211_key *key; |
304 | 286 | ||
305 | if (!sdata->debugfsdir) | 287 | if (!sdata->debugfs.dir) |
306 | return; | 288 | return; |
307 | 289 | ||
308 | /* this is running under the key lock */ | 290 | /* this is running under the key lock */ |
@@ -310,9 +292,9 @@ void ieee80211_debugfs_key_add_default(struct ieee80211_sub_if_data *sdata) | |||
310 | key = sdata->default_key; | 292 | key = sdata->default_key; |
311 | if (key) { | 293 | if (key) { |
312 | sprintf(buf, "../keys/%d", key->debugfs.cnt); | 294 | sprintf(buf, "../keys/%d", key->debugfs.cnt); |
313 | sdata->common_debugfs.default_key = | 295 | sdata->debugfs.default_key = |
314 | debugfs_create_symlink("default_key", | 296 | debugfs_create_symlink("default_key", |
315 | sdata->debugfsdir, buf); | 297 | sdata->debugfs.dir, buf); |
316 | } else | 298 | } else |
317 | ieee80211_debugfs_key_remove_default(sdata); | 299 | ieee80211_debugfs_key_remove_default(sdata); |
318 | } | 300 | } |
@@ -322,8 +304,8 @@ void ieee80211_debugfs_key_remove_default(struct ieee80211_sub_if_data *sdata) | |||
322 | if (!sdata) | 304 | if (!sdata) |
323 | return; | 305 | return; |
324 | 306 | ||
325 | debugfs_remove(sdata->common_debugfs.default_key); | 307 | debugfs_remove(sdata->debugfs.default_key); |
326 | sdata->common_debugfs.default_key = NULL; | 308 | sdata->debugfs.default_key = NULL; |
327 | } | 309 | } |
328 | 310 | ||
329 | void ieee80211_debugfs_key_add_mgmt_default(struct ieee80211_sub_if_data *sdata) | 311 | void ieee80211_debugfs_key_add_mgmt_default(struct ieee80211_sub_if_data *sdata) |
@@ -331,7 +313,7 @@ void ieee80211_debugfs_key_add_mgmt_default(struct ieee80211_sub_if_data *sdata) | |||
331 | char buf[50]; | 313 | char buf[50]; |
332 | struct ieee80211_key *key; | 314 | struct ieee80211_key *key; |
333 | 315 | ||
334 | if (!sdata->debugfsdir) | 316 | if (!sdata->debugfs.dir) |
335 | return; | 317 | return; |
336 | 318 | ||
337 | /* this is running under the key lock */ | 319 | /* this is running under the key lock */ |
@@ -339,9 +321,9 @@ void ieee80211_debugfs_key_add_mgmt_default(struct ieee80211_sub_if_data *sdata) | |||
339 | key = sdata->default_mgmt_key; | 321 | key = sdata->default_mgmt_key; |
340 | if (key) { | 322 | if (key) { |
341 | sprintf(buf, "../keys/%d", key->debugfs.cnt); | 323 | sprintf(buf, "../keys/%d", key->debugfs.cnt); |
342 | sdata->common_debugfs.default_mgmt_key = | 324 | sdata->debugfs.default_mgmt_key = |
343 | debugfs_create_symlink("default_mgmt_key", | 325 | debugfs_create_symlink("default_mgmt_key", |
344 | sdata->debugfsdir, buf); | 326 | sdata->debugfs.dir, buf); |
345 | } else | 327 | } else |
346 | ieee80211_debugfs_key_remove_mgmt_default(sdata); | 328 | ieee80211_debugfs_key_remove_mgmt_default(sdata); |
347 | } | 329 | } |
@@ -351,8 +333,8 @@ void ieee80211_debugfs_key_remove_mgmt_default(struct ieee80211_sub_if_data *sda | |||
351 | if (!sdata) | 333 | if (!sdata) |
352 | return; | 334 | return; |
353 | 335 | ||
354 | debugfs_remove(sdata->common_debugfs.default_mgmt_key); | 336 | debugfs_remove(sdata->debugfs.default_mgmt_key); |
355 | sdata->common_debugfs.default_mgmt_key = NULL; | 337 | sdata->debugfs.default_mgmt_key = NULL; |
356 | } | 338 | } |
357 | 339 | ||
358 | void ieee80211_debugfs_key_sta_del(struct ieee80211_key *key, | 340 | void ieee80211_debugfs_key_sta_del(struct ieee80211_key *key, |
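
In debugfs_key.c the DEBUGFS_ADD macro no longer stores the returned dentry, ieee80211_debugfs_key_remove() is reduced to one debugfs_remove_recursive() on the per-key directory, and the default-key symlinks move into the consolidated sdata->debugfs struct. A hedged sketch of the relative-symlink pattern used for default_key; parent and keyidx are placeholder names:

    #include <linux/kernel.h>
    #include <linux/debugfs.h>

    /*
     * Illustrative only: link "default_key" to the per-key directory
     * "../keys/<n>", mirroring the code above.
     */
    static struct dentry *example_link_default_key(struct dentry *parent,
                                                   int keyidx)
    {
            char buf[50];

            sprintf(buf, "../keys/%d", keyidx);
            return debugfs_create_symlink("default_key", parent, buf);
    }
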
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c index 61234e79022b..472b2039906c 100644 --- a/net/mac80211/debugfs_netdev.c +++ b/net/mac80211/debugfs_netdev.c | |||
@@ -149,12 +149,14 @@ IEEE80211_IF_FILE(path_refresh_time, | |||
149 | u.mesh.mshcfg.path_refresh_time, DEC); | 149 | u.mesh.mshcfg.path_refresh_time, DEC); |
150 | IEEE80211_IF_FILE(min_discovery_timeout, | 150 | IEEE80211_IF_FILE(min_discovery_timeout, |
151 | u.mesh.mshcfg.min_discovery_timeout, DEC); | 151 | u.mesh.mshcfg.min_discovery_timeout, DEC); |
152 | IEEE80211_IF_FILE(dot11MeshHWMPRootMode, | ||
153 | u.mesh.mshcfg.dot11MeshHWMPRootMode, DEC); | ||
152 | #endif | 154 | #endif |
153 | 155 | ||
154 | 156 | ||
155 | #define DEBUGFS_ADD(name, type)\ | 157 | #define DEBUGFS_ADD(name, type) \ |
156 | sdata->debugfs.type.name = debugfs_create_file(#name, 0400,\ | 158 | debugfs_create_file(#name, 0400, sdata->debugfs.dir, \ |
157 | sdata->debugfsdir, sdata, &name##_ops); | 159 | sdata, &name##_ops); |
158 | 160 | ||
159 | static void add_sta_files(struct ieee80211_sub_if_data *sdata) | 161 | static void add_sta_files(struct ieee80211_sub_if_data *sdata) |
160 | { | 162 | { |
@@ -199,30 +201,32 @@ static void add_monitor_files(struct ieee80211_sub_if_data *sdata) | |||
199 | } | 201 | } |
200 | 202 | ||
201 | #ifdef CONFIG_MAC80211_MESH | 203 | #ifdef CONFIG_MAC80211_MESH |
202 | #define MESHSTATS_ADD(name)\ | ||
203 | sdata->mesh_stats.name = debugfs_create_file(#name, 0400,\ | ||
204 | sdata->mesh_stats_dir, sdata, &name##_ops); | ||
205 | 204 | ||
206 | static void add_mesh_stats(struct ieee80211_sub_if_data *sdata) | 205 | static void add_mesh_stats(struct ieee80211_sub_if_data *sdata) |
207 | { | 206 | { |
208 | sdata->mesh_stats_dir = debugfs_create_dir("mesh_stats", | 207 | struct dentry *dir = debugfs_create_dir("mesh_stats", |
209 | sdata->debugfsdir); | 208 | sdata->debugfs.dir); |
209 | |||
210 | #define MESHSTATS_ADD(name)\ | ||
211 | debugfs_create_file(#name, 0400, dir, sdata, &name##_ops); | ||
212 | |||
210 | MESHSTATS_ADD(fwded_mcast); | 213 | MESHSTATS_ADD(fwded_mcast); |
211 | MESHSTATS_ADD(fwded_unicast); | 214 | MESHSTATS_ADD(fwded_unicast); |
212 | MESHSTATS_ADD(fwded_frames); | 215 | MESHSTATS_ADD(fwded_frames); |
213 | MESHSTATS_ADD(dropped_frames_ttl); | 216 | MESHSTATS_ADD(dropped_frames_ttl); |
214 | MESHSTATS_ADD(dropped_frames_no_route); | 217 | MESHSTATS_ADD(dropped_frames_no_route); |
215 | MESHSTATS_ADD(estab_plinks); | 218 | MESHSTATS_ADD(estab_plinks); |
219 | #undef MESHSTATS_ADD | ||
216 | } | 220 | } |
217 | 221 | ||
218 | #define MESHPARAMS_ADD(name)\ | ||
219 | sdata->mesh_config.name = debugfs_create_file(#name, 0600,\ | ||
220 | sdata->mesh_config_dir, sdata, &name##_ops); | ||
221 | |||
222 | static void add_mesh_config(struct ieee80211_sub_if_data *sdata) | 222 | static void add_mesh_config(struct ieee80211_sub_if_data *sdata) |
223 | { | 223 | { |
224 | sdata->mesh_config_dir = debugfs_create_dir("mesh_config", | 224 | struct dentry *dir = debugfs_create_dir("mesh_config", |
225 | sdata->debugfsdir); | 225 | sdata->debugfs.dir); |
226 | |||
227 | #define MESHPARAMS_ADD(name) \ | ||
228 | debugfs_create_file(#name, 0600, dir, sdata, &name##_ops); | ||
229 | |||
226 | MESHPARAMS_ADD(dot11MeshMaxRetries); | 230 | MESHPARAMS_ADD(dot11MeshMaxRetries); |
227 | MESHPARAMS_ADD(dot11MeshRetryTimeout); | 231 | MESHPARAMS_ADD(dot11MeshRetryTimeout); |
228 | MESHPARAMS_ADD(dot11MeshConfirmTimeout); | 232 | MESHPARAMS_ADD(dot11MeshConfirmTimeout); |
@@ -236,12 +240,14 @@ static void add_mesh_config(struct ieee80211_sub_if_data *sdata) | |||
236 | MESHPARAMS_ADD(dot11MeshHWMPmaxPREQretries); | 240 | MESHPARAMS_ADD(dot11MeshHWMPmaxPREQretries); |
237 | MESHPARAMS_ADD(path_refresh_time); | 241 | MESHPARAMS_ADD(path_refresh_time); |
238 | MESHPARAMS_ADD(min_discovery_timeout); | 242 | MESHPARAMS_ADD(min_discovery_timeout); |
243 | |||
244 | #undef MESHPARAMS_ADD | ||
239 | } | 245 | } |
240 | #endif | 246 | #endif |
241 | 247 | ||
242 | static void add_files(struct ieee80211_sub_if_data *sdata) | 248 | static void add_files(struct ieee80211_sub_if_data *sdata) |
243 | { | 249 | { |
244 | if (!sdata->debugfsdir) | 250 | if (!sdata->debugfs.dir) |
245 | return; | 251 | return; |
246 | 252 | ||
247 | switch (sdata->vif.type) { | 253 | switch (sdata->vif.type) { |
@@ -274,134 +280,6 @@ static void add_files(struct ieee80211_sub_if_data *sdata) | |||
274 | } | 280 | } |
275 | } | 281 | } |
276 | 282 | ||
277 | #define DEBUGFS_DEL(name, type) \ | ||
278 | do { \ | ||
279 | debugfs_remove(sdata->debugfs.type.name); \ | ||
280 | sdata->debugfs.type.name = NULL; \ | ||
281 | } while (0) | ||
282 | |||
283 | static void del_sta_files(struct ieee80211_sub_if_data *sdata) | ||
284 | { | ||
285 | DEBUGFS_DEL(drop_unencrypted, sta); | ||
286 | DEBUGFS_DEL(force_unicast_rateidx, sta); | ||
287 | DEBUGFS_DEL(max_ratectrl_rateidx, sta); | ||
288 | |||
289 | DEBUGFS_DEL(bssid, sta); | ||
290 | DEBUGFS_DEL(aid, sta); | ||
291 | DEBUGFS_DEL(capab, sta); | ||
292 | } | ||
293 | |||
294 | static void del_ap_files(struct ieee80211_sub_if_data *sdata) | ||
295 | { | ||
296 | DEBUGFS_DEL(drop_unencrypted, ap); | ||
297 | DEBUGFS_DEL(force_unicast_rateidx, ap); | ||
298 | DEBUGFS_DEL(max_ratectrl_rateidx, ap); | ||
299 | |||
300 | DEBUGFS_DEL(num_sta_ps, ap); | ||
301 | DEBUGFS_DEL(dtim_count, ap); | ||
302 | DEBUGFS_DEL(num_buffered_multicast, ap); | ||
303 | } | ||
304 | |||
305 | static void del_wds_files(struct ieee80211_sub_if_data *sdata) | ||
306 | { | ||
307 | DEBUGFS_DEL(drop_unencrypted, wds); | ||
308 | DEBUGFS_DEL(force_unicast_rateidx, wds); | ||
309 | DEBUGFS_DEL(max_ratectrl_rateidx, wds); | ||
310 | |||
311 | DEBUGFS_DEL(peer, wds); | ||
312 | } | ||
313 | |||
314 | static void del_vlan_files(struct ieee80211_sub_if_data *sdata) | ||
315 | { | ||
316 | DEBUGFS_DEL(drop_unencrypted, vlan); | ||
317 | DEBUGFS_DEL(force_unicast_rateidx, vlan); | ||
318 | DEBUGFS_DEL(max_ratectrl_rateidx, vlan); | ||
319 | } | ||
320 | |||
321 | static void del_monitor_files(struct ieee80211_sub_if_data *sdata) | ||
322 | { | ||
323 | } | ||
324 | |||
325 | #ifdef CONFIG_MAC80211_MESH | ||
326 | #define MESHSTATS_DEL(name) \ | ||
327 | do { \ | ||
328 | debugfs_remove(sdata->mesh_stats.name); \ | ||
329 | sdata->mesh_stats.name = NULL; \ | ||
330 | } while (0) | ||
331 | |||
332 | static void del_mesh_stats(struct ieee80211_sub_if_data *sdata) | ||
333 | { | ||
334 | MESHSTATS_DEL(fwded_mcast); | ||
335 | MESHSTATS_DEL(fwded_unicast); | ||
336 | MESHSTATS_DEL(fwded_frames); | ||
337 | MESHSTATS_DEL(dropped_frames_ttl); | ||
338 | MESHSTATS_DEL(dropped_frames_no_route); | ||
339 | MESHSTATS_DEL(estab_plinks); | ||
340 | debugfs_remove(sdata->mesh_stats_dir); | ||
341 | sdata->mesh_stats_dir = NULL; | ||
342 | } | ||
343 | |||
344 | #define MESHPARAMS_DEL(name) \ | ||
345 | do { \ | ||
346 | debugfs_remove(sdata->mesh_config.name); \ | ||
347 | sdata->mesh_config.name = NULL; \ | ||
348 | } while (0) | ||
349 | |||
350 | static void del_mesh_config(struct ieee80211_sub_if_data *sdata) | ||
351 | { | ||
352 | MESHPARAMS_DEL(dot11MeshMaxRetries); | ||
353 | MESHPARAMS_DEL(dot11MeshRetryTimeout); | ||
354 | MESHPARAMS_DEL(dot11MeshConfirmTimeout); | ||
355 | MESHPARAMS_DEL(dot11MeshHoldingTimeout); | ||
356 | MESHPARAMS_DEL(dot11MeshTTL); | ||
357 | MESHPARAMS_DEL(auto_open_plinks); | ||
358 | MESHPARAMS_DEL(dot11MeshMaxPeerLinks); | ||
359 | MESHPARAMS_DEL(dot11MeshHWMPactivePathTimeout); | ||
360 | MESHPARAMS_DEL(dot11MeshHWMPpreqMinInterval); | ||
361 | MESHPARAMS_DEL(dot11MeshHWMPnetDiameterTraversalTime); | ||
362 | MESHPARAMS_DEL(dot11MeshHWMPmaxPREQretries); | ||
363 | MESHPARAMS_DEL(path_refresh_time); | ||
364 | MESHPARAMS_DEL(min_discovery_timeout); | ||
365 | debugfs_remove(sdata->mesh_config_dir); | ||
366 | sdata->mesh_config_dir = NULL; | ||
367 | } | ||
368 | #endif | ||
369 | |||
370 | static void del_files(struct ieee80211_sub_if_data *sdata) | ||
371 | { | ||
372 | if (!sdata->debugfsdir) | ||
373 | return; | ||
374 | |||
375 | switch (sdata->vif.type) { | ||
376 | case NL80211_IFTYPE_MESH_POINT: | ||
377 | #ifdef CONFIG_MAC80211_MESH | ||
378 | del_mesh_stats(sdata); | ||
379 | del_mesh_config(sdata); | ||
380 | #endif | ||
381 | break; | ||
382 | case NL80211_IFTYPE_STATION: | ||
383 | del_sta_files(sdata); | ||
384 | break; | ||
385 | case NL80211_IFTYPE_ADHOC: | ||
386 | /* XXX */ | ||
387 | break; | ||
388 | case NL80211_IFTYPE_AP: | ||
389 | del_ap_files(sdata); | ||
390 | break; | ||
391 | case NL80211_IFTYPE_WDS: | ||
392 | del_wds_files(sdata); | ||
393 | break; | ||
394 | case NL80211_IFTYPE_MONITOR: | ||
395 | del_monitor_files(sdata); | ||
396 | break; | ||
397 | case NL80211_IFTYPE_AP_VLAN: | ||
398 | del_vlan_files(sdata); | ||
399 | break; | ||
400 | default: | ||
401 | break; | ||
402 | } | ||
403 | } | ||
404 | |||
405 | static int notif_registered; | 283 | static int notif_registered; |
406 | 284 | ||
407 | void ieee80211_debugfs_add_netdev(struct ieee80211_sub_if_data *sdata) | 285 | void ieee80211_debugfs_add_netdev(struct ieee80211_sub_if_data *sdata) |
@@ -412,16 +290,18 @@ void ieee80211_debugfs_add_netdev(struct ieee80211_sub_if_data *sdata) | |||
412 | return; | 290 | return; |
413 | 291 | ||
414 | sprintf(buf, "netdev:%s", sdata->dev->name); | 292 | sprintf(buf, "netdev:%s", sdata->dev->name); |
415 | sdata->debugfsdir = debugfs_create_dir(buf, | 293 | sdata->debugfs.dir = debugfs_create_dir(buf, |
416 | sdata->local->hw.wiphy->debugfsdir); | 294 | sdata->local->hw.wiphy->debugfsdir); |
417 | add_files(sdata); | 295 | add_files(sdata); |
418 | } | 296 | } |
419 | 297 | ||
420 | void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata) | 298 | void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata) |
421 | { | 299 | { |
422 | del_files(sdata); | 300 | if (!sdata->debugfs.dir) |
423 | debugfs_remove(sdata->debugfsdir); | 301 | return; |
424 | sdata->debugfsdir = NULL; | 302 | |
303 | debugfs_remove_recursive(sdata->debugfs.dir); | ||
304 | sdata->debugfs.dir = NULL; | ||
425 | } | 305 | } |
426 | 306 | ||
427 | static int netdev_notify(struct notifier_block *nb, | 307 | static int netdev_notify(struct notifier_block *nb, |
@@ -444,7 +324,7 @@ static int netdev_notify(struct notifier_block *nb, | |||
444 | 324 | ||
445 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 325 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
446 | 326 | ||
447 | dir = sdata->debugfsdir; | 327 | dir = sdata->debugfs.dir; |
448 | 328 | ||
449 | if (!dir) | 329 | if (!dir) |
450 | return 0; | 330 | return 0; |
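
debugfs_netdev.c now builds the mesh_stats and mesh_config subdirectories into a local dir dentry, defines the ADD macros locally and #undefs them again, and ieee80211_debugfs_remove_netdev() simply removes sdata->debugfs.dir recursively, which made all of the del_*_files() helpers redundant. A sketch of the local-macro pattern; priv, ifdir and the *_ops symbols are placeholders standing in for the real ones:

    #include <linux/debugfs.h>

    /* placeholder per-file fops, standing in for name##_ops above */
    static const struct file_operations fwded_mcast_ops = { };
    static const struct file_operations fwded_unicast_ops = { };

    static void example_add_mesh_stats(void *priv, struct dentry *ifdir)
    {
            struct dentry *dir = debugfs_create_dir("mesh_stats", ifdir);

    #define MESHSTATS_ADD(name) \
            debugfs_create_file(#name, 0400, dir, priv, &name##_ops)

            MESHSTATS_ADD(fwded_mcast);
            MESHSTATS_ADD(fwded_unicast);
    #undef MESHSTATS_ADD
    }
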
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c index 33a2e892115b..f043c29070d7 100644 --- a/net/mac80211/debugfs_sta.c +++ b/net/mac80211/debugfs_sta.c | |||
@@ -57,7 +57,6 @@ STA_FILE(tx_filtered, tx_filtered_count, LU); | |||
57 | STA_FILE(tx_retry_failed, tx_retry_failed, LU); | 57 | STA_FILE(tx_retry_failed, tx_retry_failed, LU); |
58 | STA_FILE(tx_retry_count, tx_retry_count, LU); | 58 | STA_FILE(tx_retry_count, tx_retry_count, LU); |
59 | STA_FILE(last_signal, last_signal, D); | 59 | STA_FILE(last_signal, last_signal, D); |
60 | STA_FILE(last_qual, last_qual, D); | ||
61 | STA_FILE(last_noise, last_noise, D); | 60 | STA_FILE(last_noise, last_noise, D); |
62 | STA_FILE(wep_weak_iv_count, wep_weak_iv_count, LU); | 61 | STA_FILE(wep_weak_iv_count, wep_weak_iv_count, LU); |
63 | 62 | ||
@@ -67,10 +66,11 @@ static ssize_t sta_flags_read(struct file *file, char __user *userbuf, | |||
67 | char buf[100]; | 66 | char buf[100]; |
68 | struct sta_info *sta = file->private_data; | 67 | struct sta_info *sta = file->private_data; |
69 | u32 staflags = get_sta_flags(sta); | 68 | u32 staflags = get_sta_flags(sta); |
70 | int res = scnprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s%s", | 69 | int res = scnprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s%s%s", |
71 | staflags & WLAN_STA_AUTH ? "AUTH\n" : "", | 70 | staflags & WLAN_STA_AUTH ? "AUTH\n" : "", |
72 | staflags & WLAN_STA_ASSOC ? "ASSOC\n" : "", | 71 | staflags & WLAN_STA_ASSOC ? "ASSOC\n" : "", |
73 | staflags & WLAN_STA_PS ? "PS\n" : "", | 72 | staflags & WLAN_STA_PS_STA ? "PS (sta)\n" : "", |
73 | staflags & WLAN_STA_PS_DRIVER ? "PS (driver)\n" : "", | ||
74 | staflags & WLAN_STA_AUTHORIZED ? "AUTHORIZED\n" : "", | 74 | staflags & WLAN_STA_AUTHORIZED ? "AUTHORIZED\n" : "", |
75 | staflags & WLAN_STA_SHORT_PREAMBLE ? "SHORT PREAMBLE\n" : "", | 75 | staflags & WLAN_STA_SHORT_PREAMBLE ? "SHORT PREAMBLE\n" : "", |
76 | staflags & WLAN_STA_WME ? "WME\n" : "", | 76 | staflags & WLAN_STA_WME ? "WME\n" : "", |
@@ -158,13 +158,9 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf, | |||
158 | STA_OPS(agg_status); | 158 | STA_OPS(agg_status); |
159 | 159 | ||
160 | #define DEBUGFS_ADD(name) \ | 160 | #define DEBUGFS_ADD(name) \ |
161 | sta->debugfs.name = debugfs_create_file(#name, 0400, \ | 161 | debugfs_create_file(#name, 0400, \ |
162 | sta->debugfs.dir, sta, &sta_ ##name## _ops); | 162 | sta->debugfs.dir, sta, &sta_ ##name## _ops); |
163 | 163 | ||
164 | #define DEBUGFS_DEL(name) \ | ||
165 | debugfs_remove(sta->debugfs.name);\ | ||
166 | sta->debugfs.name = NULL; | ||
167 | |||
168 | 164 | ||
169 | void ieee80211_sta_debugfs_add(struct sta_info *sta) | 165 | void ieee80211_sta_debugfs_add(struct sta_info *sta) |
170 | { | 166 | { |
@@ -209,36 +205,12 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta) | |||
209 | DEBUGFS_ADD(tx_retry_failed); | 205 | DEBUGFS_ADD(tx_retry_failed); |
210 | DEBUGFS_ADD(tx_retry_count); | 206 | DEBUGFS_ADD(tx_retry_count); |
211 | DEBUGFS_ADD(last_signal); | 207 | DEBUGFS_ADD(last_signal); |
212 | DEBUGFS_ADD(last_qual); | ||
213 | DEBUGFS_ADD(last_noise); | 208 | DEBUGFS_ADD(last_noise); |
214 | DEBUGFS_ADD(wep_weak_iv_count); | 209 | DEBUGFS_ADD(wep_weak_iv_count); |
215 | } | 210 | } |
216 | 211 | ||
217 | void ieee80211_sta_debugfs_remove(struct sta_info *sta) | 212 | void ieee80211_sta_debugfs_remove(struct sta_info *sta) |
218 | { | 213 | { |
219 | DEBUGFS_DEL(flags); | 214 | debugfs_remove_recursive(sta->debugfs.dir); |
220 | DEBUGFS_DEL(num_ps_buf_frames); | ||
221 | DEBUGFS_DEL(inactive_ms); | ||
222 | DEBUGFS_DEL(last_seq_ctrl); | ||
223 | DEBUGFS_DEL(agg_status); | ||
224 | DEBUGFS_DEL(aid); | ||
225 | DEBUGFS_DEL(dev); | ||
226 | DEBUGFS_DEL(rx_packets); | ||
227 | DEBUGFS_DEL(tx_packets); | ||
228 | DEBUGFS_DEL(rx_bytes); | ||
229 | DEBUGFS_DEL(tx_bytes); | ||
230 | DEBUGFS_DEL(rx_duplicates); | ||
231 | DEBUGFS_DEL(rx_fragments); | ||
232 | DEBUGFS_DEL(rx_dropped); | ||
233 | DEBUGFS_DEL(tx_fragments); | ||
234 | DEBUGFS_DEL(tx_filtered); | ||
235 | DEBUGFS_DEL(tx_retry_failed); | ||
236 | DEBUGFS_DEL(tx_retry_count); | ||
237 | DEBUGFS_DEL(last_signal); | ||
238 | DEBUGFS_DEL(last_qual); | ||
239 | DEBUGFS_DEL(last_noise); | ||
240 | DEBUGFS_DEL(wep_weak_iv_count); | ||
241 | |||
242 | debugfs_remove(sta->debugfs.dir); | ||
243 | sta->debugfs.dir = NULL; | 215 | sta->debugfs.dir = NULL; |
244 | } | 216 | } |
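
debugfs_sta.c drops the last_qual file, splits the power-save flag display into PS (sta) and PS (driver), and again replaces the long DEBUGFS_DEL list with one recursive remove. A hedged sketch of building such a flag dump with scnprintf; the EX_FLAG_* bits are placeholders, not mac80211's WLAN_STA_* values:

    #include <linux/kernel.h>
    #include <linux/bitops.h>

    #define EX_FLAG_AUTH    BIT(0)  /* placeholder flag bits */
    #define EX_FLAG_PS_STA  BIT(1)
    #define EX_FLAG_PS_DRV  BIT(2)

    static int example_format_flags(char *buf, size_t len, u32 flags)
    {
            /* scnprintf never writes past len and returns the bytes used */
            return scnprintf(buf, len, "%s%s%s",
                             flags & EX_FLAG_AUTH   ? "AUTH\n" : "",
                             flags & EX_FLAG_PS_STA ? "PS (sta)\n" : "",
                             flags & EX_FLAG_PS_DRV ? "PS (driver)\n" : "");
    }
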
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index f1362f32c17d..fbffce90edbc 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c | |||
@@ -455,6 +455,10 @@ static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata) | |||
455 | 455 | ||
456 | ieee80211_sta_expire(sdata, IEEE80211_IBSS_INACTIVITY_LIMIT); | 456 | ieee80211_sta_expire(sdata, IEEE80211_IBSS_INACTIVITY_LIMIT); |
457 | 457 | ||
458 | if (time_before(jiffies, ifibss->last_scan_completed + | ||
459 | IEEE80211_IBSS_MERGE_INTERVAL)) | ||
460 | return; | ||
461 | |||
458 | if (ieee80211_sta_active_ibss(sdata)) | 462 | if (ieee80211_sta_active_ibss(sdata)) |
459 | return; | 463 | return; |
460 | 464 | ||
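
The ibss.c change rate-limits IBSS merging: if the last scan finished less than IEEE80211_IBSS_MERGE_INTERVAL ago, ieee80211_sta_merge_ibss() returns before deciding whether to scan again. A hedged sketch of that jiffies hold-off, with placeholder names and interval:

    #include <linux/types.h>
    #include <linux/jiffies.h>

    #define EXAMPLE_MERGE_INTERVAL  (30 * HZ)       /* placeholder value */

    static bool example_merge_allowed(unsigned long last_scan_completed)
    {
            /*
             * time_before() is wraparound-safe; it stays true while we
             * are inside the hold-off window after the last scan.
             */
            if (time_before(jiffies,
                            last_scan_completed + EXAMPLE_MERGE_INTERVAL))
                    return false;
            return true;
    }
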
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 588005c84a6d..b63b99fb2fd3 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h | |||
@@ -176,7 +176,6 @@ struct ieee80211_rx_data { | |||
176 | struct ieee80211_rate *rate; | 176 | struct ieee80211_rate *rate; |
177 | 177 | ||
178 | unsigned int flags; | 178 | unsigned int flags; |
179 | int sent_ps_buffered; | ||
180 | int queue; | 179 | int queue; |
181 | u32 tkip_iv32; | 180 | u32 tkip_iv32; |
182 | u16 tkip_iv16; | 181 | u16 tkip_iv16; |
@@ -209,6 +208,9 @@ struct ieee80211_if_wds { | |||
209 | 208 | ||
210 | struct ieee80211_if_vlan { | 209 | struct ieee80211_if_vlan { |
211 | struct list_head list; | 210 | struct list_head list; |
211 | |||
212 | /* used for all tx if the VLAN is configured to 4-addr mode */ | ||
213 | struct sta_info *sta; | ||
212 | }; | 214 | }; |
213 | 215 | ||
214 | struct mesh_stats { | 216 | struct mesh_stats { |
@@ -353,6 +355,7 @@ struct ieee80211_if_mesh { | |||
353 | struct work_struct work; | 355 | struct work_struct work; |
354 | struct timer_list housekeeping_timer; | 356 | struct timer_list housekeeping_timer; |
355 | struct timer_list mesh_path_timer; | 357 | struct timer_list mesh_path_timer; |
358 | struct timer_list mesh_path_root_timer; | ||
356 | struct sk_buff_head skb_queue; | 359 | struct sk_buff_head skb_queue; |
357 | 360 | ||
358 | unsigned long timers_running; | 361 | unsigned long timers_running; |
@@ -362,23 +365,23 @@ struct ieee80211_if_mesh { | |||
362 | u8 mesh_id[IEEE80211_MAX_MESH_ID_LEN]; | 365 | u8 mesh_id[IEEE80211_MAX_MESH_ID_LEN]; |
363 | size_t mesh_id_len; | 366 | size_t mesh_id_len; |
364 | /* Active Path Selection Protocol Identifier */ | 367 | /* Active Path Selection Protocol Identifier */ |
365 | u8 mesh_pp_id[4]; | 368 | u8 mesh_pp_id; |
366 | /* Active Path Selection Metric Identifier */ | 369 | /* Active Path Selection Metric Identifier */ |
367 | u8 mesh_pm_id[4]; | 370 | u8 mesh_pm_id; |
368 | /* Congestion Control Mode Identifier */ | 371 | /* Congestion Control Mode Identifier */ |
369 | u8 mesh_cc_id[4]; | 372 | u8 mesh_cc_id; |
370 | /* Synchronization Protocol Identifier */ | 373 | /* Synchronization Protocol Identifier */ |
371 | u8 mesh_sp_id[4]; | 374 | u8 mesh_sp_id; |
372 | /* Authentication Protocol Identifier */ | 375 | /* Authentication Protocol Identifier */ |
373 | u8 mesh_auth_id[4]; | 376 | u8 mesh_auth_id; |
374 | /* Local mesh Destination Sequence Number */ | 377 | /* Local mesh Sequence Number */ |
375 | u32 dsn; | 378 | u32 sn; |
376 | /* Last used PREQ ID */ | 379 | /* Last used PREQ ID */ |
377 | u32 preq_id; | 380 | u32 preq_id; |
378 | atomic_t mpaths; | 381 | atomic_t mpaths; |
379 | /* Timestamp of last DSN update */ | 382 | /* Timestamp of last SN update */ |
380 | unsigned long last_dsn_update; | 383 | unsigned long last_sn_update; |
381 | /* Timestamp of last DSN sent */ | 384 | /* Timestamp of last SN sent */ |
382 | unsigned long last_preq; | 385 | unsigned long last_preq; |
383 | struct mesh_rmc *rmc; | 386 | struct mesh_rmc *rmc; |
384 | spinlock_t mesh_preq_queue_lock; | 387 | spinlock_t mesh_preq_queue_lock; |
@@ -458,6 +461,8 @@ struct ieee80211_sub_if_data { | |||
458 | int force_unicast_rateidx; /* forced TX rateidx for unicast frames */ | 461 | int force_unicast_rateidx; /* forced TX rateidx for unicast frames */ |
459 | int max_ratectrl_rateidx; /* max TX rateidx for rate control */ | 462 | int max_ratectrl_rateidx; /* max TX rateidx for rate control */ |
460 | 463 | ||
464 | bool use_4addr; /* use 4-address frames */ | ||
465 | |||
461 | union { | 466 | union { |
462 | struct ieee80211_if_ap ap; | 467 | struct ieee80211_if_ap ap; |
463 | struct ieee80211_if_wds wds; | 468 | struct ieee80211_if_wds wds; |
@@ -471,74 +476,11 @@ struct ieee80211_sub_if_data { | |||
471 | } u; | 476 | } u; |
472 | 477 | ||
473 | #ifdef CONFIG_MAC80211_DEBUGFS | 478 | #ifdef CONFIG_MAC80211_DEBUGFS |
474 | struct dentry *debugfsdir; | ||
475 | union { | ||
476 | struct { | ||
477 | struct dentry *drop_unencrypted; | ||
478 | struct dentry *bssid; | ||
479 | struct dentry *aid; | ||
480 | struct dentry *capab; | ||
481 | struct dentry *force_unicast_rateidx; | ||
482 | struct dentry *max_ratectrl_rateidx; | ||
483 | } sta; | ||
484 | struct { | ||
485 | struct dentry *drop_unencrypted; | ||
486 | struct dentry *num_sta_ps; | ||
487 | struct dentry *dtim_count; | ||
488 | struct dentry *force_unicast_rateidx; | ||
489 | struct dentry *max_ratectrl_rateidx; | ||
490 | struct dentry *num_buffered_multicast; | ||
491 | } ap; | ||
492 | struct { | ||
493 | struct dentry *drop_unencrypted; | ||
494 | struct dentry *peer; | ||
495 | struct dentry *force_unicast_rateidx; | ||
496 | struct dentry *max_ratectrl_rateidx; | ||
497 | } wds; | ||
498 | struct { | ||
499 | struct dentry *drop_unencrypted; | ||
500 | struct dentry *force_unicast_rateidx; | ||
501 | struct dentry *max_ratectrl_rateidx; | ||
502 | } vlan; | ||
503 | struct { | ||
504 | struct dentry *mode; | ||
505 | } monitor; | ||
506 | } debugfs; | ||
507 | struct { | 479 | struct { |
480 | struct dentry *dir; | ||
508 | struct dentry *default_key; | 481 | struct dentry *default_key; |
509 | struct dentry *default_mgmt_key; | 482 | struct dentry *default_mgmt_key; |
510 | } common_debugfs; | 483 | } debugfs; |
511 | |||
512 | #ifdef CONFIG_MAC80211_MESH | ||
513 | struct dentry *mesh_stats_dir; | ||
514 | struct { | ||
515 | struct dentry *fwded_mcast; | ||
516 | struct dentry *fwded_unicast; | ||
517 | struct dentry *fwded_frames; | ||
518 | struct dentry *dropped_frames_ttl; | ||
519 | struct dentry *dropped_frames_no_route; | ||
520 | struct dentry *estab_plinks; | ||
521 | struct timer_list mesh_path_timer; | ||
522 | } mesh_stats; | ||
523 | |||
524 | struct dentry *mesh_config_dir; | ||
525 | struct { | ||
526 | struct dentry *dot11MeshRetryTimeout; | ||
527 | struct dentry *dot11MeshConfirmTimeout; | ||
528 | struct dentry *dot11MeshHoldingTimeout; | ||
529 | struct dentry *dot11MeshMaxRetries; | ||
530 | struct dentry *dot11MeshTTL; | ||
531 | struct dentry *auto_open_plinks; | ||
532 | struct dentry *dot11MeshMaxPeerLinks; | ||
533 | struct dentry *dot11MeshHWMPactivePathTimeout; | ||
534 | struct dentry *dot11MeshHWMPpreqMinInterval; | ||
535 | struct dentry *dot11MeshHWMPnetDiameterTraversalTime; | ||
536 | struct dentry *dot11MeshHWMPmaxPREQretries; | ||
537 | struct dentry *path_refresh_time; | ||
538 | struct dentry *min_discovery_timeout; | ||
539 | } mesh_config; | ||
540 | #endif | ||
541 | |||
542 | #endif | 484 | #endif |
543 | /* must be last, dynamically sized area in this! */ | 485 | /* must be last, dynamically sized area in this! */ |
544 | struct ieee80211_vif vif; | 486 | struct ieee80211_vif vif; |
@@ -730,10 +672,9 @@ struct ieee80211_local { | |||
730 | unsigned long scanning; | 672 | unsigned long scanning; |
731 | struct cfg80211_ssid scan_ssid; | 673 | struct cfg80211_ssid scan_ssid; |
732 | struct cfg80211_scan_request *int_scan_req; | 674 | struct cfg80211_scan_request *int_scan_req; |
733 | struct cfg80211_scan_request *scan_req; | 675 | struct cfg80211_scan_request *scan_req, *hw_scan_req; |
734 | struct ieee80211_channel *scan_channel; | 676 | struct ieee80211_channel *scan_channel; |
735 | const u8 *orig_ies; | 677 | enum ieee80211_band hw_scan_band; |
736 | int orig_ies_len; | ||
737 | int scan_channel_idx; | 678 | int scan_channel_idx; |
738 | int scan_ies_len; | 679 | int scan_ies_len; |
739 | 680 | ||
@@ -818,53 +759,6 @@ struct ieee80211_local { | |||
818 | #ifdef CONFIG_MAC80211_DEBUGFS | 759 | #ifdef CONFIG_MAC80211_DEBUGFS |
819 | struct local_debugfsdentries { | 760 | struct local_debugfsdentries { |
820 | struct dentry *rcdir; | 761 | struct dentry *rcdir; |
821 | struct dentry *rcname; | ||
822 | struct dentry *frequency; | ||
823 | struct dentry *total_ps_buffered; | ||
824 | struct dentry *wep_iv; | ||
825 | struct dentry *tsf; | ||
826 | struct dentry *queues; | ||
827 | struct dentry *reset; | ||
828 | struct dentry *noack; | ||
829 | struct dentry *statistics; | ||
830 | struct local_debugfsdentries_statsdentries { | ||
831 | struct dentry *transmitted_fragment_count; | ||
832 | struct dentry *multicast_transmitted_frame_count; | ||
833 | struct dentry *failed_count; | ||
834 | struct dentry *retry_count; | ||
835 | struct dentry *multiple_retry_count; | ||
836 | struct dentry *frame_duplicate_count; | ||
837 | struct dentry *received_fragment_count; | ||
838 | struct dentry *multicast_received_frame_count; | ||
839 | struct dentry *transmitted_frame_count; | ||
840 | struct dentry *wep_undecryptable_count; | ||
841 | struct dentry *num_scans; | ||
842 | #ifdef CONFIG_MAC80211_DEBUG_COUNTERS | ||
843 | struct dentry *tx_handlers_drop; | ||
844 | struct dentry *tx_handlers_queued; | ||
845 | struct dentry *tx_handlers_drop_unencrypted; | ||
846 | struct dentry *tx_handlers_drop_fragment; | ||
847 | struct dentry *tx_handlers_drop_wep; | ||
848 | struct dentry *tx_handlers_drop_not_assoc; | ||
849 | struct dentry *tx_handlers_drop_unauth_port; | ||
850 | struct dentry *rx_handlers_drop; | ||
851 | struct dentry *rx_handlers_queued; | ||
852 | struct dentry *rx_handlers_drop_nullfunc; | ||
853 | struct dentry *rx_handlers_drop_defrag; | ||
854 | struct dentry *rx_handlers_drop_short; | ||
855 | struct dentry *rx_handlers_drop_passive_scan; | ||
856 | struct dentry *tx_expand_skb_head; | ||
857 | struct dentry *tx_expand_skb_head_cloned; | ||
858 | struct dentry *rx_expand_skb_head; | ||
859 | struct dentry *rx_expand_skb_head2; | ||
860 | struct dentry *rx_handlers_fragments; | ||
861 | struct dentry *tx_status_drop; | ||
862 | #endif | ||
863 | struct dentry *dot11ACKFailureCount; | ||
864 | struct dentry *dot11RTSFailureCount; | ||
865 | struct dentry *dot11FCSErrorCount; | ||
866 | struct dentry *dot11RTSSuccessCount; | ||
867 | } stats; | ||
868 | struct dentry *stations; | 762 | struct dentry *stations; |
869 | struct dentry *keys; | 763 | struct dentry *keys; |
870 | } debugfs; | 764 | } debugfs; |
@@ -911,6 +805,7 @@ struct ieee802_11_elems { | |||
911 | u8 *preq; | 805 | u8 *preq; |
912 | u8 *prep; | 806 | u8 *prep; |
913 | u8 *perr; | 807 | u8 *perr; |
808 | struct ieee80211_rann_ie *rann; | ||
914 | u8 *ch_switch_elem; | 809 | u8 *ch_switch_elem; |
915 | u8 *country_elem; | 810 | u8 *country_elem; |
916 | u8 *pwr_constr_elem; | 811 | u8 *pwr_constr_elem; |
@@ -1160,7 +1055,8 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata, | |||
1160 | u8 *extra, size_t extra_len, const u8 *bssid, | 1055 | u8 *extra, size_t extra_len, const u8 *bssid, |
1161 | const u8 *key, u8 key_len, u8 key_idx); | 1056 | const u8 *key, u8 key_len, u8 key_idx); |
1162 | int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer, | 1057 | int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer, |
1163 | const u8 *ie, size_t ie_len); | 1058 | const u8 *ie, size_t ie_len, |
1059 | enum ieee80211_band band); | ||
1164 | void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst, | 1060 | void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst, |
1165 | const u8 *ssid, size_t ssid_len, | 1061 | const u8 *ssid, size_t ssid_len, |
1166 | const u8 *ie, size_t ie_len); | 1062 | const u8 *ie, size_t ie_len); |
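
ieee80211_i.h shrinks the mesh protocol identifiers from 4-byte vendor OUIs to single-octet IDs, renames the mesh DSN to SN, and collapses the big per-interface debugfs union into one small struct holding just the directory and the two default-key symlinks. A sketch of what that trimmed struct amounts to and how it is torn down; example_if_debugfs is a placeholder name, its members mirror the fields above:

    #include <linux/debugfs.h>

    struct example_if_debugfs {
            struct dentry *dir;             /* netdev:<name> directory */
            struct dentry *default_key;     /* symlink, removed explicitly */
            struct dentry *default_mgmt_key;
    };

    static void example_if_debugfs_remove(struct example_if_debugfs *d)
    {
            if (!d->dir)
                    return;
            debugfs_remove_recursive(d->dir);       /* takes the files too */
            d->dir = NULL;
    }
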
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index b8295cbd7e8f..1f02b0610e82 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c | |||
@@ -184,10 +184,12 @@ static int ieee80211_open(struct net_device *dev) | |||
184 | * No need to check netif_running since we do not allow | 184 | * No need to check netif_running since we do not allow |
185 | * it to start up with this invalid address. | 185 | * it to start up with this invalid address. |
186 | */ | 186 | */ |
187 | if (compare_ether_addr(null_addr, ndev->dev_addr) == 0) | 187 | if (compare_ether_addr(null_addr, ndev->dev_addr) == 0) { |
188 | memcpy(ndev->dev_addr, | 188 | memcpy(ndev->dev_addr, |
189 | local->hw.wiphy->perm_addr, | 189 | local->hw.wiphy->perm_addr, |
190 | ETH_ALEN); | 190 | ETH_ALEN); |
191 | memcpy(ndev->perm_addr, ndev->dev_addr, ETH_ALEN); | ||
192 | } | ||
191 | } | 193 | } |
192 | 194 | ||
193 | /* | 195 | /* |
@@ -212,8 +214,8 @@ static int ieee80211_open(struct net_device *dev) | |||
212 | /* must be before the call to ieee80211_configure_filter */ | 214 | /* must be before the call to ieee80211_configure_filter */ |
213 | local->monitors++; | 215 | local->monitors++; |
214 | if (local->monitors == 1) { | 216 | if (local->monitors == 1) { |
215 | local->hw.conf.flags |= IEEE80211_CONF_RADIOTAP; | 217 | local->hw.conf.flags |= IEEE80211_CONF_MONITOR; |
216 | hw_reconf_flags |= IEEE80211_CONF_CHANGE_RADIOTAP; | 218 | hw_reconf_flags |= IEEE80211_CONF_CHANGE_MONITOR; |
217 | } | 219 | } |
218 | 220 | ||
219 | if (sdata->u.mntr_flags & MONITOR_FLAG_FCSFAIL) | 221 | if (sdata->u.mntr_flags & MONITOR_FLAG_FCSFAIL) |
@@ -312,7 +314,7 @@ static int ieee80211_open(struct net_device *dev) | |||
312 | if (sdata->vif.type == NL80211_IFTYPE_STATION) | 314 | if (sdata->vif.type == NL80211_IFTYPE_STATION) |
313 | ieee80211_queue_work(&local->hw, &sdata->u.mgd.work); | 315 | ieee80211_queue_work(&local->hw, &sdata->u.mgd.work); |
314 | 316 | ||
315 | netif_tx_start_all_queues(dev); | 317 | netif_start_queue(dev); |
316 | 318 | ||
317 | return 0; | 319 | return 0; |
318 | err_del_interface: | 320 | err_del_interface: |
@@ -341,7 +343,7 @@ static int ieee80211_stop(struct net_device *dev) | |||
341 | /* | 343 | /* |
342 | * Stop TX on this interface first. | 344 | * Stop TX on this interface first. |
343 | */ | 345 | */ |
344 | netif_tx_stop_all_queues(dev); | 346 | netif_stop_queue(dev); |
345 | 347 | ||
346 | /* | 348 | /* |
347 | * Now delete all active aggregation sessions. | 349 | * Now delete all active aggregation sessions. |
@@ -433,8 +435,8 @@ static int ieee80211_stop(struct net_device *dev) | |||
433 | 435 | ||
434 | local->monitors--; | 436 | local->monitors--; |
435 | if (local->monitors == 0) { | 437 | if (local->monitors == 0) { |
436 | local->hw.conf.flags &= ~IEEE80211_CONF_RADIOTAP; | 438 | local->hw.conf.flags &= ~IEEE80211_CONF_MONITOR; |
437 | hw_reconf_flags |= IEEE80211_CONF_CHANGE_RADIOTAP; | 439 | hw_reconf_flags |= IEEE80211_CONF_CHANGE_MONITOR; |
438 | } | 440 | } |
439 | 441 | ||
440 | if (sdata->u.mntr_flags & MONITOR_FLAG_FCSFAIL) | 442 | if (sdata->u.mntr_flags & MONITOR_FLAG_FCSFAIL) |
@@ -750,14 +752,11 @@ int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata, | |||
750 | ieee80211_mandatory_rates(sdata->local, | 752 | ieee80211_mandatory_rates(sdata->local, |
751 | sdata->local->hw.conf.channel->band); | 753 | sdata->local->hw.conf.channel->band); |
752 | sdata->drop_unencrypted = 0; | 754 | sdata->drop_unencrypted = 0; |
755 | sdata->use_4addr = 0; | ||
753 | 756 | ||
754 | return 0; | 757 | return 0; |
755 | } | 758 | } |
756 | 759 | ||
757 | static struct device_type wiphy_type = { | ||
758 | .name = "wlan", | ||
759 | }; | ||
760 | |||
761 | int ieee80211_if_add(struct ieee80211_local *local, const char *name, | 760 | int ieee80211_if_add(struct ieee80211_local *local, const char *name, |
762 | struct net_device **new_dev, enum nl80211_iftype type, | 761 | struct net_device **new_dev, enum nl80211_iftype type, |
763 | struct vif_params *params) | 762 | struct vif_params *params) |
@@ -788,8 +787,8 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name, | |||
788 | goto fail; | 787 | goto fail; |
789 | 788 | ||
790 | memcpy(ndev->dev_addr, local->hw.wiphy->perm_addr, ETH_ALEN); | 789 | memcpy(ndev->dev_addr, local->hw.wiphy->perm_addr, ETH_ALEN); |
790 | memcpy(ndev->perm_addr, ndev->dev_addr, ETH_ALEN); | ||
791 | SET_NETDEV_DEV(ndev, wiphy_dev(local->hw.wiphy)); | 791 | SET_NETDEV_DEV(ndev, wiphy_dev(local->hw.wiphy)); |
792 | SET_NETDEV_DEVTYPE(ndev, &wiphy_type); | ||
793 | 792 | ||
794 | /* don't use IEEE80211_DEV_TO_SUB_IF because it checks too much */ | 793 | /* don't use IEEE80211_DEV_TO_SUB_IF because it checks too much */ |
795 | sdata = netdev_priv(ndev); | 794 | sdata = netdev_priv(ndev); |
@@ -821,6 +820,9 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name, | |||
821 | params->mesh_id_len, | 820 | params->mesh_id_len, |
822 | params->mesh_id); | 821 | params->mesh_id); |
823 | 822 | ||
823 | if (params && params->use_4addr >= 0) | ||
824 | sdata->use_4addr = !!params->use_4addr; | ||
825 | |||
824 | mutex_lock(&local->iflist_mtx); | 826 | mutex_lock(&local->iflist_mtx); |
825 | list_add_tail_rcu(&sdata->list, &local->interfaces); | 827 | list_add_tail_rcu(&sdata->list, &local->interfaces); |
826 | mutex_unlock(&local->iflist_mtx); | 828 | mutex_unlock(&local->iflist_mtx); |
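
iface.c also records the fallback MAC address as ndev->perm_addr, switches monitor mode to the renamed IEEE80211_CONF_MONITOR flag, uses plain netif_start_queue()/netif_stop_queue(), and plumbs the new use_4addr parameter through interface creation. A hedged sketch of the address fallback; example_fixup_addr and its arguments are placeholders:

    #include <linux/string.h>
    #include <linux/netdevice.h>
    #include <linux/etherdevice.h>

    /*
     * If the interface still carries the all-zero address, fall back to
     * the permanent address and mirror it into perm_addr as well.
     * compare_ether_addr() is memcmp-like and returns 0 on a match.
     */
    static void example_fixup_addr(struct net_device *ndev, const u8 *perm_addr)
    {
            static const u8 null_addr[ETH_ALEN];

            if (compare_ether_addr(null_addr, ndev->dev_addr) == 0) {
                    memcpy(ndev->dev_addr, perm_addr, ETH_ALEN);
                    memcpy(ndev->perm_addr, ndev->dev_addr, ETH_ALEN);
            }
    }
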
diff --git a/net/mac80211/key.h b/net/mac80211/key.h index 9572e00f532c..a49f93b79e92 100644 --- a/net/mac80211/key.h +++ b/net/mac80211/key.h | |||
@@ -118,18 +118,6 @@ struct ieee80211_key { | |||
118 | struct { | 118 | struct { |
119 | struct dentry *stalink; | 119 | struct dentry *stalink; |
120 | struct dentry *dir; | 120 | struct dentry *dir; |
121 | struct dentry *keylen; | ||
122 | struct dentry *flags; | ||
123 | struct dentry *keyidx; | ||
124 | struct dentry *hw_key_idx; | ||
125 | struct dentry *tx_rx_count; | ||
126 | struct dentry *algorithm; | ||
127 | struct dentry *tx_spec; | ||
128 | struct dentry *rx_spec; | ||
129 | struct dentry *replays; | ||
130 | struct dentry *icverrors; | ||
131 | struct dentry *key; | ||
132 | struct dentry *ifindex; | ||
133 | int cnt; | 121 | int cnt; |
134 | } debugfs; | 122 | } debugfs; |
135 | #endif | 123 | #endif |
diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 797f53942e5f..beb8718d905e 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c | |||
@@ -385,13 +385,13 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local, | |||
385 | * can be unknown, for example with different interrupt status | 385 | * can be unknown, for example with different interrupt status |
386 | * bits. | 386 | * bits. |
387 | */ | 387 | */ |
388 | if (test_sta_flags(sta, WLAN_STA_PS) && | 388 | if (test_sta_flags(sta, WLAN_STA_PS_STA) && |
389 | skb_queue_len(&sta->tx_filtered) < STA_MAX_TX_BUFFER) { | 389 | skb_queue_len(&sta->tx_filtered) < STA_MAX_TX_BUFFER) { |
390 | skb_queue_tail(&sta->tx_filtered, skb); | 390 | skb_queue_tail(&sta->tx_filtered, skb); |
391 | return; | 391 | return; |
392 | } | 392 | } |
393 | 393 | ||
394 | if (!test_sta_flags(sta, WLAN_STA_PS) && | 394 | if (!test_sta_flags(sta, WLAN_STA_PS_STA) && |
395 | !(info->flags & IEEE80211_TX_INTFL_RETRIED)) { | 395 | !(info->flags & IEEE80211_TX_INTFL_RETRIED)) { |
396 | /* Software retry the packet once */ | 396 | /* Software retry the packet once */ |
397 | info->flags |= IEEE80211_TX_INTFL_RETRIED; | 397 | info->flags |= IEEE80211_TX_INTFL_RETRIED; |
@@ -406,7 +406,7 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local, | |||
406 | "queue_len=%d PS=%d @%lu\n", | 406 | "queue_len=%d PS=%d @%lu\n", |
407 | wiphy_name(local->hw.wiphy), | 407 | wiphy_name(local->hw.wiphy), |
408 | skb_queue_len(&sta->tx_filtered), | 408 | skb_queue_len(&sta->tx_filtered), |
409 | !!test_sta_flags(sta, WLAN_STA_PS), jiffies); | 409 | !!test_sta_flags(sta, WLAN_STA_PS_STA), jiffies); |
410 | #endif | 410 | #endif |
411 | dev_kfree_skb(skb); | 411 | dev_kfree_skb(skb); |
412 | } | 412 | } |
@@ -446,7 +446,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) | |||
446 | 446 | ||
447 | if (sta) { | 447 | if (sta) { |
448 | if (!(info->flags & IEEE80211_TX_STAT_ACK) && | 448 | if (!(info->flags & IEEE80211_TX_STAT_ACK) && |
449 | test_sta_flags(sta, WLAN_STA_PS)) { | 449 | test_sta_flags(sta, WLAN_STA_PS_STA)) { |
450 | /* | 450 | /* |
451 | * The STA is in power save mode, so assume | 451 | * The STA is in power save mode, so assume |
452 | * that this TX packet failed because of that. | 452 | * that this TX packet failed because of that. |
@@ -901,6 +901,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) | |||
901 | i++; | 901 | i++; |
902 | } | 902 | } |
903 | } | 903 | } |
904 | local->int_scan_req->n_channels = i; | ||
904 | 905 | ||
905 | local->network_latency_notifier.notifier_call = | 906 | local->network_latency_notifier.notifier_call = |
906 | ieee80211_max_network_latency; | 907 | ieee80211_max_network_latency; |
@@ -923,7 +924,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) | |||
923 | fail_wep: | 924 | fail_wep: |
924 | sta_info_stop(local); | 925 | sta_info_stop(local); |
925 | fail_sta_info: | 926 | fail_sta_info: |
926 | debugfs_hw_del(local); | ||
927 | destroy_workqueue(local->workqueue); | 927 | destroy_workqueue(local->workqueue); |
928 | fail_workqueue: | 928 | fail_workqueue: |
929 | wiphy_unregister(local->hw.wiphy); | 929 | wiphy_unregister(local->hw.wiphy); |
@@ -959,7 +959,6 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw) | |||
959 | ieee80211_clear_tx_pending(local); | 959 | ieee80211_clear_tx_pending(local); |
960 | sta_info_stop(local); | 960 | sta_info_stop(local); |
961 | rate_control_deinitialize(local); | 961 | rate_control_deinitialize(local); |
962 | debugfs_hw_del(local); | ||
963 | 962 | ||
964 | if (skb_queue_len(&local->skb_queue) | 963 | if (skb_queue_len(&local->skb_queue) |
965 | || skb_queue_len(&local->skb_queue_unreliable)) | 964 | || skb_queue_len(&local->skb_queue_unreliable)) |
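
main.c follows the WLAN_STA_PS to WLAN_STA_PS_STA rename: a filtered frame is re-queued on the station's tx_filtered list while the peer sleeps and the buffer has room, an awake station gets one software retry, and everything else is dropped. A hedged sketch of the buffering decision; the names and the buffer limit are placeholders:

    #include <linux/types.h>
    #include <linux/skbuff.h>

    #define EX_MAX_TX_BUFFER 64     /* placeholder for STA_MAX_TX_BUFFER */

    /*
     * Returns true if the filtered frame was buffered for a sleeping
     * peer; the caller retries or drops it otherwise.
     */
    static bool example_buffer_filtered_frame(struct sk_buff_head *tx_filtered,
                                              struct sk_buff *skb,
                                              bool peer_asleep)
    {
            if (peer_asleep && skb_queue_len(tx_filtered) < EX_MAX_TX_BUFFER) {
                    skb_queue_tail(tx_filtered, skb);
                    return true;
            }
            return false;
    }
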
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index f7364e56f1ee..bbd56b087899 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2008 open80211s Ltd. | 2 | * Copyright (c) 2008, 2009 open80211s Ltd. |
3 | * Authors: Luis Carlos Cobo <luisca@cozybit.com> | 3 | * Authors: Luis Carlos Cobo <luisca@cozybit.com> |
4 | * Javier Cardona <javier@cozybit.com> | 4 | * Javier Cardona <javier@cozybit.com> |
5 | * | 5 | * |
@@ -14,18 +14,20 @@ | |||
14 | 14 | ||
15 | #define IEEE80211_MESH_PEER_INACTIVITY_LIMIT (1800 * HZ) | 15 | #define IEEE80211_MESH_PEER_INACTIVITY_LIMIT (1800 * HZ) |
16 | #define IEEE80211_MESH_HOUSEKEEPING_INTERVAL (60 * HZ) | 16 | #define IEEE80211_MESH_HOUSEKEEPING_INTERVAL (60 * HZ) |
17 | #define IEEE80211_MESH_RANN_INTERVAL (1 * HZ) | ||
17 | 18 | ||
18 | #define PP_OFFSET 1 /* Path Selection Protocol */ | 19 | #define MESHCONF_PP_OFFSET 0 /* Path Selection Protocol */ |
19 | #define PM_OFFSET 5 /* Path Selection Metric */ | 20 | #define MESHCONF_PM_OFFSET 1 /* Path Selection Metric */ |
20 | #define CC_OFFSET 9 /* Congestion Control Mode */ | 21 | #define MESHCONF_CC_OFFSET 2 /* Congestion Control Mode */ |
21 | #define SP_OFFSET 13 /* Synchronization Protocol */ | 22 | #define MESHCONF_SP_OFFSET 3 /* Synchronization Protocol */ |
22 | #define AUTH_OFFSET 17 /* Authentication Protocol */ | 23 | #define MESHCONF_AUTH_OFFSET 4 /* Authentication Protocol */ |
23 | #define CAPAB_OFFSET 22 | 24 | #define MESHCONF_CAPAB_OFFSET 6 |
24 | #define CAPAB_ACCEPT_PLINKS 0x80 | 25 | #define MESHCONF_CAPAB_ACCEPT_PLINKS 0x01 |
25 | #define CAPAB_FORWARDING 0x10 | 26 | #define MESHCONF_CAPAB_FORWARDING 0x08 |
26 | 27 | ||
27 | #define TMR_RUNNING_HK 0 | 28 | #define TMR_RUNNING_HK 0 |
28 | #define TMR_RUNNING_MP 1 | 29 | #define TMR_RUNNING_MP 1 |
30 | #define TMR_RUNNING_MPR 2 | ||
29 | 31 | ||
30 | int mesh_allocated; | 32 | int mesh_allocated; |
31 | static struct kmem_cache *rm_cache; | 33 | static struct kmem_cache *rm_cache; |
@@ -50,7 +52,7 @@ static void ieee80211_mesh_housekeeping_timer(unsigned long data) | |||
50 | struct ieee80211_local *local = sdata->local; | 52 | struct ieee80211_local *local = sdata->local; |
51 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; | 53 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; |
52 | 54 | ||
53 | ifmsh->wrkq_flags |= MESH_WORK_HOUSEKEEPING; | 55 | set_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags); |
54 | 56 | ||
55 | if (local->quiescing) { | 57 | if (local->quiescing) { |
56 | set_bit(TMR_RUNNING_HK, &ifmsh->timers_running); | 58 | set_bit(TMR_RUNNING_HK, &ifmsh->timers_running); |
@@ -85,11 +87,12 @@ bool mesh_matches_local(struct ieee802_11_elems *ie, struct ieee80211_sub_if_dat | |||
85 | */ | 87 | */ |
86 | if (ifmsh->mesh_id_len == ie->mesh_id_len && | 88 | if (ifmsh->mesh_id_len == ie->mesh_id_len && |
87 | memcmp(ifmsh->mesh_id, ie->mesh_id, ie->mesh_id_len) == 0 && | 89 | memcmp(ifmsh->mesh_id, ie->mesh_id, ie->mesh_id_len) == 0 && |
88 | memcmp(ifmsh->mesh_pp_id, ie->mesh_config + PP_OFFSET, 4) == 0 && | 90 | (ifmsh->mesh_pp_id == *(ie->mesh_config + MESHCONF_PP_OFFSET))&& |
89 | memcmp(ifmsh->mesh_pm_id, ie->mesh_config + PM_OFFSET, 4) == 0 && | 91 | (ifmsh->mesh_pm_id == *(ie->mesh_config + MESHCONF_PM_OFFSET))&& |
90 | memcmp(ifmsh->mesh_cc_id, ie->mesh_config + CC_OFFSET, 4) == 0 && | 92 | (ifmsh->mesh_cc_id == *(ie->mesh_config + MESHCONF_CC_OFFSET))&& |
91 | memcmp(ifmsh->mesh_sp_id, ie->mesh_config + SP_OFFSET, 4) == 0 && | 93 | (ifmsh->mesh_sp_id == *(ie->mesh_config + MESHCONF_SP_OFFSET))&& |
92 | memcmp(ifmsh->mesh_auth_id, ie->mesh_config + AUTH_OFFSET, 4) == 0) | 94 | (ifmsh->mesh_auth_id == *(ie->mesh_config + |
95 | MESHCONF_AUTH_OFFSET))) | ||
93 | return true; | 96 | return true; |
94 | 97 | ||
95 | return false; | 98 | return false; |
@@ -102,7 +105,8 @@ bool mesh_matches_local(struct ieee802_11_elems *ie, struct ieee80211_sub_if_dat | |||
102 | */ | 105 | */ |
103 | bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie) | 106 | bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie) |
104 | { | 107 | { |
105 | return (*(ie->mesh_config + CAPAB_OFFSET) & CAPAB_ACCEPT_PLINKS) != 0; | 108 | return (*(ie->mesh_config + MESHCONF_CAPAB_OFFSET) & |
109 | MESHCONF_CAPAB_ACCEPT_PLINKS) != 0; | ||
106 | } | 110 | } |
107 | 111 | ||
108 | /** | 112 | /** |
@@ -128,18 +132,11 @@ void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata) | |||
128 | 132 | ||
129 | void mesh_ids_set_default(struct ieee80211_if_mesh *sta) | 133 | void mesh_ids_set_default(struct ieee80211_if_mesh *sta) |
130 | { | 134 | { |
131 | u8 oui[3] = {0x00, 0x0F, 0xAC}; | 135 | sta->mesh_pp_id = 0; /* HWMP */ |
132 | 136 | sta->mesh_pm_id = 0; /* Airtime */ | |
133 | memcpy(sta->mesh_pp_id, oui, sizeof(oui)); | 137 | sta->mesh_cc_id = 0; /* Disabled */ |
134 | memcpy(sta->mesh_pm_id, oui, sizeof(oui)); | 138 | sta->mesh_sp_id = 0; /* Neighbor Offset */ |
135 | memcpy(sta->mesh_cc_id, oui, sizeof(oui)); | 139 | sta->mesh_auth_id = 0; /* Disabled */ |
136 | memcpy(sta->mesh_sp_id, oui, sizeof(oui)); | ||
137 | memcpy(sta->mesh_auth_id, oui, sizeof(oui)); | ||
138 | sta->mesh_pp_id[sizeof(oui)] = 0; | ||
139 | sta->mesh_pm_id[sizeof(oui)] = 0; | ||
140 | sta->mesh_cc_id[sizeof(oui)] = 0xff; | ||
141 | sta->mesh_sp_id[sizeof(oui)] = 0xff; | ||
142 | sta->mesh_auth_id[sizeof(oui)] = 0x0; | ||
143 | } | 140 | } |
144 | 141 | ||
145 | int mesh_rmc_init(struct ieee80211_sub_if_data *sdata) | 142 | int mesh_rmc_init(struct ieee80211_sub_if_data *sdata) |
@@ -228,6 +225,7 @@ void mesh_mgmt_ies_add(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata) | |||
228 | struct ieee80211_supported_band *sband; | 225 | struct ieee80211_supported_band *sband; |
229 | u8 *pos; | 226 | u8 *pos; |
230 | int len, i, rate; | 227 | int len, i, rate; |
228 | u8 neighbors; | ||
231 | 229 | ||
232 | sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; | 230 | sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; |
233 | len = sband->n_bitrates; | 231 | len = sband->n_bitrates; |
@@ -251,6 +249,13 @@ void mesh_mgmt_ies_add(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata) | |||
251 | } | 249 | } |
252 | } | 250 | } |
253 | 251 | ||
252 | if (sband->band == IEEE80211_BAND_2GHZ) { | ||
253 | pos = skb_put(skb, 2 + 1); | ||
254 | *pos++ = WLAN_EID_DS_PARAMS; | ||
255 | *pos++ = 1; | ||
256 | *pos++ = ieee80211_frequency_to_channel(local->hw.conf.channel->center_freq); | ||
257 | } | ||
258 | |||
254 | pos = skb_put(skb, 2 + sdata->u.mesh.mesh_id_len); | 259 | pos = skb_put(skb, 2 + sdata->u.mesh.mesh_id_len); |
255 | *pos++ = WLAN_EID_MESH_ID; | 260 | *pos++ = WLAN_EID_MESH_ID; |
256 | *pos++ = sdata->u.mesh.mesh_id_len; | 261 | *pos++ = sdata->u.mesh.mesh_id_len; |
@@ -260,37 +265,33 @@ void mesh_mgmt_ies_add(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata) | |||
260 | pos = skb_put(skb, 2 + IEEE80211_MESH_CONFIG_LEN); | 265 | pos = skb_put(skb, 2 + IEEE80211_MESH_CONFIG_LEN); |
261 | *pos++ = WLAN_EID_MESH_CONFIG; | 266 | *pos++ = WLAN_EID_MESH_CONFIG; |
262 | *pos++ = IEEE80211_MESH_CONFIG_LEN; | 267 | *pos++ = IEEE80211_MESH_CONFIG_LEN; |
263 | /* Version */ | ||
264 | *pos++ = 1; | ||
265 | 268 | ||
266 | /* Active path selection protocol ID */ | 269 | /* Active path selection protocol ID */ |
267 | memcpy(pos, sdata->u.mesh.mesh_pp_id, 4); | 270 | *pos++ = sdata->u.mesh.mesh_pp_id; |
268 | pos += 4; | ||
269 | 271 | ||
270 | /* Active path selection metric ID */ | 272 | /* Active path selection metric ID */ |
271 | memcpy(pos, sdata->u.mesh.mesh_pm_id, 4); | 273 | *pos++ = sdata->u.mesh.mesh_pm_id; |
272 | pos += 4; | ||
273 | 274 | ||
274 | /* Congestion control mode identifier */ | 275 | /* Congestion control mode identifier */ |
275 | memcpy(pos, sdata->u.mesh.mesh_cc_id, 4); | 276 | *pos++ = sdata->u.mesh.mesh_cc_id; |
276 | pos += 4; | ||
277 | 277 | ||
278 | /* Synchronization protocol identifier */ | 278 | /* Synchronization protocol identifier */ |
279 | memcpy(pos, sdata->u.mesh.mesh_sp_id, 4); | 279 | *pos++ = sdata->u.mesh.mesh_sp_id; |
280 | pos += 4; | ||
281 | 280 | ||
282 | /* Authentication Protocol identifier */ | 281 | /* Authentication Protocol identifier */ |
283 | memcpy(pos, sdata->u.mesh.mesh_auth_id, 4); | 282 | *pos++ = sdata->u.mesh.mesh_auth_id; |
284 | pos += 4; | ||
285 | 283 | ||
286 | /* Mesh Formation Info */ | 284 | /* Mesh Formation Info - number of neighbors */ |
287 | memset(pos, 0x00, 1); | 285 | neighbors = atomic_read(&sdata->u.mesh.mshstats.estab_plinks); |
288 | pos += 1; | 286 | /* Number of neighbor mesh STAs or 15 whichever is smaller */ |
287 | neighbors = (neighbors > 15) ? 15 : neighbors; | ||
288 | *pos++ = neighbors << 1; | ||
289 | 289 | ||
290 | /* Mesh capability */ | 290 | /* Mesh capability */ |
291 | sdata->u.mesh.accepting_plinks = mesh_plink_availables(sdata); | 291 | sdata->u.mesh.accepting_plinks = mesh_plink_availables(sdata); |
292 | *pos = CAPAB_FORWARDING; | 292 | *pos = MESHCONF_CAPAB_FORWARDING; |
293 | *pos++ |= sdata->u.mesh.accepting_plinks ? CAPAB_ACCEPT_PLINKS : 0x00; | 293 | *pos++ |= sdata->u.mesh.accepting_plinks ? |
294 | MESHCONF_CAPAB_ACCEPT_PLINKS : 0x00; | ||
294 | *pos++ = 0x00; | 295 | *pos++ = 0x00; |
295 | 296 | ||
296 | return; | 297 | return; |
@@ -355,6 +356,34 @@ static void ieee80211_mesh_path_timer(unsigned long data) | |||
355 | ieee80211_queue_work(&local->hw, &ifmsh->work); | 356 | ieee80211_queue_work(&local->hw, &ifmsh->work); |
356 | } | 357 | } |
357 | 358 | ||
359 | static void ieee80211_mesh_path_root_timer(unsigned long data) | ||
360 | { | ||
361 | struct ieee80211_sub_if_data *sdata = | ||
362 | (struct ieee80211_sub_if_data *) data; | ||
363 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; | ||
364 | struct ieee80211_local *local = sdata->local; | ||
365 | |||
366 | set_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags); | ||
367 | |||
368 | if (local->quiescing) { | ||
369 | set_bit(TMR_RUNNING_MPR, &ifmsh->timers_running); | ||
370 | return; | ||
371 | } | ||
372 | |||
373 | ieee80211_queue_work(&local->hw, &ifmsh->work); | ||
374 | } | ||
375 | |||
376 | void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh) | ||
377 | { | ||
378 | if (ifmsh->mshcfg.dot11MeshHWMPRootMode) | ||
379 | set_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags); | ||
380 | else { | ||
381 | clear_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags); | ||
382 | /* stop running timer */ | ||
383 | del_timer_sync(&ifmsh->mesh_path_root_timer); | ||
384 | } | ||
385 | } | ||
386 | |||
358 | /** | 387 | /** |
359 | * ieee80211_fill_mesh_addresses - fill addresses of a locally originated mesh frame | 388 | * ieee80211_fill_mesh_addresses - fill addresses of a locally originated mesh frame |
360 | * @hdr: 802.11 frame header | 389 | * @hdr: 802.11 frame header |
@@ -448,6 +477,15 @@ static void ieee80211_mesh_housekeeping(struct ieee80211_sub_if_data *sdata, | |||
448 | round_jiffies(jiffies + IEEE80211_MESH_HOUSEKEEPING_INTERVAL)); | 477 | round_jiffies(jiffies + IEEE80211_MESH_HOUSEKEEPING_INTERVAL)); |
449 | } | 478 | } |
450 | 479 | ||
480 | static void ieee80211_mesh_rootpath(struct ieee80211_sub_if_data *sdata) | ||
481 | { | ||
482 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; | ||
483 | |||
484 | mesh_path_tx_root_frame(sdata); | ||
485 | mod_timer(&ifmsh->mesh_path_root_timer, | ||
486 | round_jiffies(jiffies + IEEE80211_MESH_RANN_INTERVAL)); | ||
487 | } | ||
488 | |||
451 | #ifdef CONFIG_PM | 489 | #ifdef CONFIG_PM |
452 | void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata) | 490 | void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata) |
453 | { | 491 | { |
@@ -462,6 +500,8 @@ void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata) | |||
462 | set_bit(TMR_RUNNING_HK, &ifmsh->timers_running); | 500 | set_bit(TMR_RUNNING_HK, &ifmsh->timers_running); |
463 | if (del_timer_sync(&ifmsh->mesh_path_timer)) | 501 | if (del_timer_sync(&ifmsh->mesh_path_timer)) |
464 | set_bit(TMR_RUNNING_MP, &ifmsh->timers_running); | 502 | set_bit(TMR_RUNNING_MP, &ifmsh->timers_running); |
503 | if (del_timer_sync(&ifmsh->mesh_path_root_timer)) | ||
504 | set_bit(TMR_RUNNING_MPR, &ifmsh->timers_running); | ||
465 | } | 505 | } |
466 | 506 | ||
467 | void ieee80211_mesh_restart(struct ieee80211_sub_if_data *sdata) | 507 | void ieee80211_mesh_restart(struct ieee80211_sub_if_data *sdata) |
@@ -472,6 +512,9 @@ void ieee80211_mesh_restart(struct ieee80211_sub_if_data *sdata) | |||
472 | add_timer(&ifmsh->housekeeping_timer); | 512 | add_timer(&ifmsh->housekeeping_timer); |
473 | if (test_and_clear_bit(TMR_RUNNING_MP, &ifmsh->timers_running)) | 513 | if (test_and_clear_bit(TMR_RUNNING_MP, &ifmsh->timers_running)) |
474 | add_timer(&ifmsh->mesh_path_timer); | 514 | add_timer(&ifmsh->mesh_path_timer); |
515 | if (test_and_clear_bit(TMR_RUNNING_MPR, &ifmsh->timers_running)) | ||
516 | add_timer(&ifmsh->mesh_path_root_timer); | ||
517 | ieee80211_mesh_root_setup(ifmsh); | ||
475 | } | 518 | } |
476 | #endif | 519 | #endif |
477 | 520 | ||
@@ -480,7 +523,8 @@ void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata) | |||
480 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; | 523 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; |
481 | struct ieee80211_local *local = sdata->local; | 524 | struct ieee80211_local *local = sdata->local; |
482 | 525 | ||
483 | ifmsh->wrkq_flags |= MESH_WORK_HOUSEKEEPING; | 526 | set_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags); |
527 | ieee80211_mesh_root_setup(ifmsh); | ||
484 | ieee80211_queue_work(&local->hw, &ifmsh->work); | 528 | ieee80211_queue_work(&local->hw, &ifmsh->work); |
485 | sdata->vif.bss_conf.beacon_int = MESH_DEFAULT_BEACON_INTERVAL; | 529 | sdata->vif.bss_conf.beacon_int = MESH_DEFAULT_BEACON_INTERVAL; |
486 | ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON | | 530 | ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON | |
@@ -491,6 +535,7 @@ void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata) | |||
491 | void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata) | 535 | void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata) |
492 | { | 536 | { |
493 | del_timer_sync(&sdata->u.mesh.housekeeping_timer); | 537 | del_timer_sync(&sdata->u.mesh.housekeeping_timer); |
538 | del_timer_sync(&sdata->u.mesh.mesh_path_root_timer); | ||
494 | /* | 539 | /* |
495 | * If the timer fired while we waited for it, it will have | 540 | * If the timer fired while we waited for it, it will have |
496 | * requeued the work. Now the work will be running again | 541 | * requeued the work. Now the work will be running again |
@@ -561,7 +606,7 @@ static void ieee80211_mesh_rx_mgmt_action(struct ieee80211_sub_if_data *sdata, | |||
561 | struct ieee80211_rx_status *rx_status) | 606 | struct ieee80211_rx_status *rx_status) |
562 | { | 607 | { |
563 | switch (mgmt->u.action.category) { | 608 | switch (mgmt->u.action.category) { |
564 | case PLINK_CATEGORY: | 609 | case MESH_PLINK_CATEGORY: |
565 | mesh_rx_plink_frame(sdata, mgmt, len, rx_status); | 610 | mesh_rx_plink_frame(sdata, mgmt, len, rx_status); |
566 | break; | 611 | break; |
567 | case MESH_PATH_SEL_CATEGORY: | 612 | case MESH_PATH_SEL_CATEGORY: |
@@ -628,6 +673,9 @@ static void ieee80211_mesh_work(struct work_struct *work) | |||
628 | 673 | ||
629 | if (test_and_clear_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags)) | 674 | if (test_and_clear_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags)) |
630 | ieee80211_mesh_housekeeping(sdata, ifmsh); | 675 | ieee80211_mesh_housekeeping(sdata, ifmsh); |
676 | |||
677 | if (test_and_clear_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags)) | ||
678 | ieee80211_mesh_rootpath(sdata); | ||
631 | } | 679 | } |
632 | 680 | ||
633 | void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local) | 681 | void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local) |
@@ -673,7 +721,7 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata) | |||
673 | MESH_MIN_DISCOVERY_TIMEOUT; | 721 | MESH_MIN_DISCOVERY_TIMEOUT; |
674 | ifmsh->accepting_plinks = true; | 722 | ifmsh->accepting_plinks = true; |
675 | ifmsh->preq_id = 0; | 723 | ifmsh->preq_id = 0; |
676 | ifmsh->dsn = 0; | 724 | ifmsh->sn = 0; |
677 | atomic_set(&ifmsh->mpaths, 0); | 725 | atomic_set(&ifmsh->mpaths, 0); |
678 | mesh_rmc_init(sdata); | 726 | mesh_rmc_init(sdata); |
679 | ifmsh->last_preq = jiffies; | 727 | ifmsh->last_preq = jiffies; |
@@ -684,6 +732,9 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata) | |||
684 | setup_timer(&ifmsh->mesh_path_timer, | 732 | setup_timer(&ifmsh->mesh_path_timer, |
685 | ieee80211_mesh_path_timer, | 733 | ieee80211_mesh_path_timer, |
686 | (unsigned long) sdata); | 734 | (unsigned long) sdata); |
735 | setup_timer(&ifmsh->mesh_path_root_timer, | ||
736 | ieee80211_mesh_path_root_timer, | ||
737 | (unsigned long) sdata); | ||
687 | INIT_LIST_HEAD(&ifmsh->preq_queue.list); | 738 | INIT_LIST_HEAD(&ifmsh->preq_queue.list); |
688 | spin_lock_init(&ifmsh->mesh_preq_queue_lock); | 739 | spin_lock_init(&ifmsh->mesh_preq_queue_lock); |
689 | } | 740 | } |
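A note on the mesh.c changes above: the new root announcement path reuses the deferred-work pattern already used for housekeeping. The timer callback only marks MESH_WORK_ROOT and queues the interface work (parking the timer via TMR_RUNNING_MPR while quiescing), and the work handler consumes the bit, transmits the root frame and re-arms the timer. The switch from a plain |= on wrkq_flags to set_bit() matters here because the enum values are bit numbers later consumed by test_and_clear_bit(). Below is a minimal userspace sketch of that producer/consumer handshake, using GCC atomic builtins in place of the kernel's set_bit()/test_and_clear_bit(); the names are illustrative, not mac80211 symbols.

#include <stdio.h>
#include <stdbool.h>

/* Model of the deferred-work flags word (ifmsh->wrkq_flags). */
static unsigned long wrkq_flags;

#define WORK_HOUSEKEEPING 0
#define WORK_ROOT         1

/* Stand-ins for the kernel's set_bit()/test_and_clear_bit(). */
static void flag_set(int bit, unsigned long *word)
{
    __atomic_fetch_or(word, 1UL << bit, __ATOMIC_SEQ_CST);
}

static bool flag_test_and_clear(int bit, unsigned long *word)
{
    unsigned long old = __atomic_fetch_and(word, ~(1UL << bit), __ATOMIC_SEQ_CST);
    return (old >> bit) & 1;
}

/* Timer context: only record that work is pending. */
static void root_timer_fired(void)
{
    flag_set(WORK_ROOT, &wrkq_flags);
    /* ieee80211_queue_work() would be called here */
}

/* Work context: consume the flag, send the RANN, re-arm the timer. */
static void mesh_work(void)
{
    if (flag_test_and_clear(WORK_ROOT, &wrkq_flags))
        printf("tx RANN, re-arm root timer\n");
}

int main(void)
{
    root_timer_fired();
    mesh_work();   /* prints once */
    mesh_work();   /* flag already consumed, prints nothing */
    return 0;
}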
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h index dd1c19319f0a..bd0e1cbb9a1e 100644 --- a/net/mac80211/mesh.h +++ b/net/mac80211/mesh.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2008 open80211s Ltd. | 2 | * Copyright (c) 2008, 2009 open80211s Ltd. |
3 | * Authors: Luis Carlos Cobo <luisca@cozybit.com> | 3 | * Authors: Luis Carlos Cobo <luisca@cozybit.com> |
4 | * Javier Cardona <javier@cozybit.com> | 4 | * Javier Cardona <javier@cozybit.com> |
5 | * | 5 | * |
@@ -26,7 +26,7 @@ | |||
26 | * | 26 | * |
27 | * @MESH_PATH_ACTIVE: the mesh path can be used for forwarding | 27 | * @MESH_PATH_ACTIVE: the mesh path can be used for forwarding |
28 | * @MESH_PATH_RESOLVING: the discovery process is running for this mesh path | 28 | * @MESH_PATH_RESOLVING: the discovery process is running for this mesh path |
29 | * @MESH_PATH_DSN_VALID: the mesh path contains a valid destination sequence | 29 | * @MESH_PATH_SN_VALID: the mesh path contains a valid destination sequence |
30 | * number | 30 | * number |
31 | * @MESH_PATH_FIXED: the mesh path has been manually set and should not be | 31 | * @MESH_PATH_FIXED: the mesh path has been manually set and should not be |
32 | * modified | 32 | * modified |
@@ -38,7 +38,7 @@ | |||
38 | enum mesh_path_flags { | 38 | enum mesh_path_flags { |
39 | MESH_PATH_ACTIVE = BIT(0), | 39 | MESH_PATH_ACTIVE = BIT(0), |
40 | MESH_PATH_RESOLVING = BIT(1), | 40 | MESH_PATH_RESOLVING = BIT(1), |
41 | MESH_PATH_DSN_VALID = BIT(2), | 41 | MESH_PATH_SN_VALID = BIT(2), |
42 | MESH_PATH_FIXED = BIT(3), | 42 | MESH_PATH_FIXED = BIT(3), |
43 | MESH_PATH_RESOLVED = BIT(4), | 43 | MESH_PATH_RESOLVED = BIT(4), |
44 | }; | 44 | }; |
@@ -53,11 +53,13 @@ enum mesh_path_flags { | |||
53 | * to grow. | 53 | * to grow. |
54 | * @MESH_WORK_GROW_MPP_TABLE: the mesh portals table is full and needs to | 54 | * @MESH_WORK_GROW_MPP_TABLE: the mesh portals table is full and needs to |
55 | * grow | 55 | * grow |
56 | * @MESH_WORK_ROOT: the mesh root station needs to send a frame | ||
56 | */ | 57 | */ |
57 | enum mesh_deferred_task_flags { | 58 | enum mesh_deferred_task_flags { |
58 | MESH_WORK_HOUSEKEEPING, | 59 | MESH_WORK_HOUSEKEEPING, |
59 | MESH_WORK_GROW_MPATH_TABLE, | 60 | MESH_WORK_GROW_MPATH_TABLE, |
60 | MESH_WORK_GROW_MPP_TABLE, | 61 | MESH_WORK_GROW_MPP_TABLE, |
62 | MESH_WORK_ROOT, | ||
61 | }; | 63 | }; |
62 | 64 | ||
63 | /** | 65 | /** |
@@ -70,7 +72,7 @@ enum mesh_deferred_task_flags { | |||
70 | * @timer: mesh path discovery timer | 72 | * @timer: mesh path discovery timer |
71 | * @frame_queue: pending queue for frames sent to this destination while the | 73 | * @frame_queue: pending queue for frames sent to this destination while the |
72 | * path is unresolved | 74 | * path is unresolved |
73 | * @dsn: destination sequence number of the destination | 75 | * @sn: target sequence number |
74 | * @metric: current metric to this destination | 76 | * @metric: current metric to this destination |
75 | * @hop_count: hops to destination | 77 | * @hop_count: hops to destination |
76 | * @exp_time: in jiffies, when the path will expire or when it expired | 78 | * @exp_time: in jiffies, when the path will expire or when it expired |
@@ -94,7 +96,7 @@ struct mesh_path { | |||
94 | struct timer_list timer; | 96 | struct timer_list timer; |
95 | struct sk_buff_head frame_queue; | 97 | struct sk_buff_head frame_queue; |
96 | struct rcu_head rcu; | 98 | struct rcu_head rcu; |
97 | u32 dsn; | 99 | u32 sn; |
98 | u32 metric; | 100 | u32 metric; |
99 | u8 hop_count; | 101 | u8 hop_count; |
100 | unsigned long exp_time; | 102 | unsigned long exp_time; |
@@ -174,7 +176,7 @@ struct mesh_rmc { | |||
174 | #define MESH_CFG_CMP_LEN (IEEE80211_MESH_CONFIG_LEN - 2) | 176 | #define MESH_CFG_CMP_LEN (IEEE80211_MESH_CONFIG_LEN - 2) |
175 | 177 | ||
176 | /* Default values, timeouts in ms */ | 178 | /* Default values, timeouts in ms */ |
177 | #define MESH_TTL 5 | 179 | #define MESH_TTL 31 |
178 | #define MESH_MAX_RETR 3 | 180 | #define MESH_MAX_RETR 3 |
179 | #define MESH_RET_T 100 | 181 | #define MESH_RET_T 100 |
180 | #define MESH_CONF_T 100 | 182 | #define MESH_CONF_T 100 |
@@ -206,8 +208,14 @@ struct mesh_rmc { | |||
206 | #define MESH_MAX_MPATHS 1024 | 208 | #define MESH_MAX_MPATHS 1024 |
207 | 209 | ||
208 | /* Pending ANA approval */ | 210 | /* Pending ANA approval */ |
209 | #define PLINK_CATEGORY 30 | 211 | #define MESH_PLINK_CATEGORY 30 |
210 | #define MESH_PATH_SEL_CATEGORY 32 | 212 | #define MESH_PATH_SEL_CATEGORY 32 |
213 | #define MESH_PATH_SEL_ACTION 0 | ||
214 | |||
215 | /* PERR reason codes */ | ||
216 | #define PEER_RCODE_UNSPECIFIED 11 | ||
217 | #define PERR_RCODE_NO_ROUTE 12 | ||
218 | #define PERR_RCODE_DEST_UNREACH 13 | ||
211 | 219 | ||
212 | /* Public interfaces */ | 220 | /* Public interfaces */ |
213 | /* Various */ | 221 | /* Various */ |
@@ -234,6 +242,7 @@ ieee80211_rx_result | |||
234 | ieee80211_mesh_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); | 242 | ieee80211_mesh_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); |
235 | void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata); | 243 | void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata); |
236 | void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata); | 244 | void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata); |
245 | void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh); | ||
237 | 246 | ||
238 | /* Mesh paths */ | 247 | /* Mesh paths */ |
239 | int mesh_nexthop_lookup(struct sk_buff *skb, | 248 | int mesh_nexthop_lookup(struct sk_buff *skb, |
@@ -274,8 +283,8 @@ void mesh_mpp_table_grow(void); | |||
274 | u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata, | 283 | u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata, |
275 | struct mesh_table *tbl); | 284 | struct mesh_table *tbl); |
276 | /* Mesh paths */ | 285 | /* Mesh paths */ |
277 | int mesh_path_error_tx(u8 *dest, __le32 dest_dsn, u8 *ra, | 286 | int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn, __le16 target_rcode, |
278 | struct ieee80211_sub_if_data *sdata); | 287 | u8 *ra, struct ieee80211_sub_if_data *sdata); |
279 | void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta); | 288 | void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta); |
280 | void mesh_path_flush_pending(struct mesh_path *mpath); | 289 | void mesh_path_flush_pending(struct mesh_path *mpath); |
281 | void mesh_path_tx_pending(struct mesh_path *mpath); | 290 | void mesh_path_tx_pending(struct mesh_path *mpath); |
@@ -288,6 +297,7 @@ void mesh_path_discard_frame(struct sk_buff *skb, | |||
288 | struct ieee80211_sub_if_data *sdata); | 297 | struct ieee80211_sub_if_data *sdata); |
289 | void mesh_path_quiesce(struct ieee80211_sub_if_data *sdata); | 298 | void mesh_path_quiesce(struct ieee80211_sub_if_data *sdata); |
290 | void mesh_path_restart(struct ieee80211_sub_if_data *sdata); | 299 | void mesh_path_restart(struct ieee80211_sub_if_data *sdata); |
300 | void mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata); | ||
291 | 301 | ||
292 | extern int mesh_paths_generation; | 302 | extern int mesh_paths_generation; |
293 | 303 | ||
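The dsn -> sn rename above tracks the draft's single target sequence number, and every freshness check on mesh_path.sn has to tolerate 32-bit wraparound. The HWMP hunks below compare sequence numbers through a signed difference (SN_GT/SN_LT); the same rule is shown here in a standalone, runnable form using an explicit 32-bit signed cast of the unsigned difference. This is a sketch of the comparison rule, not the kernel macro itself.

#include <assert.h>
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* "a is newer than b" under modulo-2^32 sequence numbering:
 * take the unsigned difference and interpret it as signed 32-bit. */
static bool sn_gt(uint32_t a, uint32_t b)
{
    return (int32_t)(b - a) < 0;
}

int main(void)
{
    assert(sn_gt(10, 5));                    /* plain case */
    assert(!sn_gt(5, 10));
    assert(sn_gt(0x00000005, 0xFFFFFFF0));   /* 5 is newer: the counter wrapped */
    assert(!sn_gt(0xFFFFFFF0, 0x00000005));
    puts("sequence-number comparison ok");
    return 0;
}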
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c index 29b82e98effa..5c67e7b8790f 100644 --- a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2008 open80211s Ltd. | 2 | * Copyright (c) 2008, 2009 open80211s Ltd. |
3 | * Author: Luis Carlos Cobo <luisca@cozybit.com> | 3 | * Author: Luis Carlos Cobo <luisca@cozybit.com> |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
@@ -9,6 +9,12 @@ | |||
9 | 9 | ||
10 | #include "mesh.h" | 10 | #include "mesh.h" |
11 | 11 | ||
12 | #ifdef CONFIG_MAC80211_VERBOSE_MHWMP_DEBUG | ||
13 | #define mhwmp_dbg(fmt, args...) printk(KERN_DEBUG "Mesh HWMP: " fmt, ##args) | ||
14 | #else | ||
15 | #define mhwmp_dbg(fmt, args...) do { (void)(0); } while (0) | ||
16 | #endif | ||
17 | |||
12 | #define TEST_FRAME_LEN 8192 | 18 | #define TEST_FRAME_LEN 8192 |
13 | #define MAX_METRIC 0xffffffff | 19 | #define MAX_METRIC 0xffffffff |
14 | #define ARITH_SHIFT 8 | 20 | #define ARITH_SHIFT 8 |
@@ -21,6 +27,12 @@ | |||
21 | #define MP_F_DO 0x1 | 27 | #define MP_F_DO 0x1 |
22 | /* Reply and forward */ | 28 | /* Reply and forward */ |
23 | #define MP_F_RF 0x2 | 29 | #define MP_F_RF 0x2 |
30 | /* Unknown Sequence Number */ | ||
31 | #define MP_F_USN 0x01 | ||
32 | /* Reason code Present */ | ||
33 | #define MP_F_RCODE 0x02 | ||
34 | |||
35 | static void mesh_queue_preq(struct mesh_path *, u8); | ||
24 | 36 | ||
25 | static inline u32 u32_field_get(u8 *preq_elem, int offset, bool ae) | 37 | static inline u32 u32_field_get(u8 *preq_elem, int offset, bool ae) |
26 | { | 38 | { |
@@ -29,6 +41,13 @@ static inline u32 u32_field_get(u8 *preq_elem, int offset, bool ae) | |||
29 | return get_unaligned_le32(preq_elem + offset); | 41 | return get_unaligned_le32(preq_elem + offset); |
30 | } | 42 | } |
31 | 43 | ||
44 | static inline u32 u16_field_get(u8 *preq_elem, int offset, bool ae) | ||
45 | { | ||
46 | if (ae) | ||
47 | offset += 6; | ||
48 | return get_unaligned_le16(preq_elem + offset); | ||
49 | } | ||
50 | |||
32 | /* HWMP IE processing macros */ | 51 | /* HWMP IE processing macros */ |
33 | #define AE_F (1<<6) | 52 | #define AE_F (1<<6) |
34 | #define AE_F_SET(x) (*x & AE_F) | 53 | #define AE_F_SET(x) (*x & AE_F) |
@@ -37,30 +56,33 @@ static inline u32 u32_field_get(u8 *preq_elem, int offset, bool ae) | |||
37 | #define PREQ_IE_TTL(x) (*(x + 2)) | 56 | #define PREQ_IE_TTL(x) (*(x + 2)) |
38 | #define PREQ_IE_PREQ_ID(x) u32_field_get(x, 3, 0) | 57 | #define PREQ_IE_PREQ_ID(x) u32_field_get(x, 3, 0) |
39 | #define PREQ_IE_ORIG_ADDR(x) (x + 7) | 58 | #define PREQ_IE_ORIG_ADDR(x) (x + 7) |
40 | #define PREQ_IE_ORIG_DSN(x) u32_field_get(x, 13, 0); | 59 | #define PREQ_IE_ORIG_SN(x) u32_field_get(x, 13, 0); |
41 | #define PREQ_IE_LIFETIME(x) u32_field_get(x, 17, AE_F_SET(x)); | 60 | #define PREQ_IE_LIFETIME(x) u32_field_get(x, 17, AE_F_SET(x)); |
42 | #define PREQ_IE_METRIC(x) u32_field_get(x, 21, AE_F_SET(x)); | 61 | #define PREQ_IE_METRIC(x) u32_field_get(x, 21, AE_F_SET(x)); |
43 | #define PREQ_IE_DST_F(x) (*(AE_F_SET(x) ? x + 32 : x + 26)) | 62 | #define PREQ_IE_TARGET_F(x) (*(AE_F_SET(x) ? x + 32 : x + 26)) |
44 | #define PREQ_IE_DST_ADDR(x) (AE_F_SET(x) ? x + 33 : x + 27) | 63 | #define PREQ_IE_TARGET_ADDR(x) (AE_F_SET(x) ? x + 33 : x + 27) |
45 | #define PREQ_IE_DST_DSN(x) u32_field_get(x, 33, AE_F_SET(x)); | 64 | #define PREQ_IE_TARGET_SN(x) u32_field_get(x, 33, AE_F_SET(x)); |
46 | 65 | ||
47 | 66 | ||
48 | #define PREP_IE_FLAGS(x) PREQ_IE_FLAGS(x) | 67 | #define PREP_IE_FLAGS(x) PREQ_IE_FLAGS(x) |
49 | #define PREP_IE_HOPCOUNT(x) PREQ_IE_HOPCOUNT(x) | 68 | #define PREP_IE_HOPCOUNT(x) PREQ_IE_HOPCOUNT(x) |
50 | #define PREP_IE_TTL(x) PREQ_IE_TTL(x) | 69 | #define PREP_IE_TTL(x) PREQ_IE_TTL(x) |
51 | #define PREP_IE_ORIG_ADDR(x) (x + 3) | 70 | #define PREP_IE_ORIG_ADDR(x) (x + 3) |
52 | #define PREP_IE_ORIG_DSN(x) u32_field_get(x, 9, 0); | 71 | #define PREP_IE_ORIG_SN(x) u32_field_get(x, 9, 0); |
53 | #define PREP_IE_LIFETIME(x) u32_field_get(x, 13, AE_F_SET(x)); | 72 | #define PREP_IE_LIFETIME(x) u32_field_get(x, 13, AE_F_SET(x)); |
54 | #define PREP_IE_METRIC(x) u32_field_get(x, 17, AE_F_SET(x)); | 73 | #define PREP_IE_METRIC(x) u32_field_get(x, 17, AE_F_SET(x)); |
55 | #define PREP_IE_DST_ADDR(x) (AE_F_SET(x) ? x + 27 : x + 21) | 74 | #define PREP_IE_TARGET_ADDR(x) (AE_F_SET(x) ? x + 27 : x + 21) |
56 | #define PREP_IE_DST_DSN(x) u32_field_get(x, 27, AE_F_SET(x)); | 75 | #define PREP_IE_TARGET_SN(x) u32_field_get(x, 27, AE_F_SET(x)); |
57 | 76 | ||
58 | #define PERR_IE_DST_ADDR(x) (x + 2) | 77 | #define PERR_IE_TTL(x) (*(x)) |
59 | #define PERR_IE_DST_DSN(x) u32_field_get(x, 8, 0); | 78 | #define PERR_IE_TARGET_FLAGS(x) (*(x + 2)) |
79 | #define PERR_IE_TARGET_ADDR(x) (x + 3) | ||
80 | #define PERR_IE_TARGET_SN(x) u32_field_get(x, 9, 0); | ||
81 | #define PERR_IE_TARGET_RCODE(x) u16_field_get(x, 13, 0); | ||
60 | 82 | ||
61 | #define MSEC_TO_TU(x) (x*1000/1024) | 83 | #define MSEC_TO_TU(x) (x*1000/1024) |
62 | #define DSN_GT(x, y) ((long) (y) - (long) (x) < 0) | 84 | #define SN_GT(x, y) ((long) (y) - (long) (x) < 0) |
63 | #define DSN_LT(x, y) ((long) (x) - (long) (y) < 0) | 85 | #define SN_LT(x, y) ((long) (x) - (long) (y) < 0) |
64 | 86 | ||
65 | #define net_traversal_jiffies(s) \ | 87 | #define net_traversal_jiffies(s) \ |
66 | msecs_to_jiffies(s->u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime) | 88 | msecs_to_jiffies(s->u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime) |
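The PREQ_IE_*/PREP_IE_*/PERR_IE_* accessors above are plain byte offsets into the information element body, shifted by 6 when the Address Extension flag is set. For the single-target, no-AE case the code accepts a PREQ element of exactly 37 bytes; the parser below lays out those offsets in standalone form (hypothetical struct and helper names, with byte-wise little-endian reads standing in for get_unaligned_le32(), and the flags/hopcount offsets assumed to be 0 and 1).

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t le32_at(const uint8_t *p)
{
    return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
           (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

/* Field layout of a single-target PREQ element without the Address
 * Extension flag (37 bytes): flags(1) hopcount(1) ttl(1) preq_id(4)
 * orig addr(6) orig SN(4) lifetime(4) metric(4) target count(1)
 * target flags(1) target addr(6) target SN(4). */
struct preq_fields {
    uint8_t  flags, hopcount, ttl;
    uint32_t preq_id;
    uint8_t  orig_addr[6];
    uint32_t orig_sn, lifetime, metric;
    uint8_t  target_flags;
    uint8_t  target_addr[6];
    uint32_t target_sn;
};

static void preq_parse(const uint8_t elem[37], struct preq_fields *f)
{
    f->flags        = elem[0];
    f->hopcount     = elem[1];
    f->ttl          = elem[2];
    f->preq_id      = le32_at(elem + 3);
    memcpy(f->orig_addr, elem + 7, 6);
    f->orig_sn      = le32_at(elem + 13);
    f->lifetime     = le32_at(elem + 17);
    f->metric       = le32_at(elem + 21);
    /* elem[25] is the target count, always 1 here */
    f->target_flags = elem[26];
    memcpy(f->target_addr, elem + 27, 6);
    f->target_sn    = le32_at(elem + 33);
}

int main(void)
{
    uint8_t elem[37] = { 0 };
    struct preq_fields f;

    elem[2]  = 31;     /* TTL */
    elem[13] = 0x2a;   /* orig SN = 42, little endian */
    preq_parse(elem, &f);
    printf("ttl=%u orig_sn=%u\n", f.ttl, f.orig_sn);
    return 0;
}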
@@ -75,13 +97,15 @@ static inline u32 u32_field_get(u8 *preq_elem, int offset, bool ae) | |||
75 | enum mpath_frame_type { | 97 | enum mpath_frame_type { |
76 | MPATH_PREQ = 0, | 98 | MPATH_PREQ = 0, |
77 | MPATH_PREP, | 99 | MPATH_PREP, |
78 | MPATH_PERR | 100 | MPATH_PERR, |
101 | MPATH_RANN | ||
79 | }; | 102 | }; |
80 | 103 | ||
81 | static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags, | 104 | static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags, |
82 | u8 *orig_addr, __le32 orig_dsn, u8 dst_flags, u8 *dst, | 105 | u8 *orig_addr, __le32 orig_sn, u8 target_flags, u8 *target, |
83 | __le32 dst_dsn, u8 *da, u8 hop_count, u8 ttl, __le32 lifetime, | 106 | __le32 target_sn, u8 *da, u8 hop_count, u8 ttl,__le32 lifetime, |
84 | __le32 metric, __le32 preq_id, struct ieee80211_sub_if_data *sdata) | 107 | __le32 metric, __le32 preq_id, |
108 | struct ieee80211_sub_if_data *sdata) | ||
85 | { | 109 | { |
86 | struct ieee80211_local *local = sdata->local; | 110 | struct ieee80211_local *local = sdata->local; |
87 | struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400); | 111 | struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400); |
@@ -103,21 +127,30 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags, | |||
103 | 127 | ||
104 | memcpy(mgmt->da, da, ETH_ALEN); | 128 | memcpy(mgmt->da, da, ETH_ALEN); |
105 | memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); | 129 | memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); |
106 | /* BSSID is left zeroed, wildcard value */ | 130 | /* BSSID == SA */ |
131 | memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN); | ||
107 | mgmt->u.action.category = MESH_PATH_SEL_CATEGORY; | 132 | mgmt->u.action.category = MESH_PATH_SEL_CATEGORY; |
108 | mgmt->u.action.u.mesh_action.action_code = action; | 133 | mgmt->u.action.u.mesh_action.action_code = MESH_PATH_SEL_ACTION; |
109 | 134 | ||
110 | switch (action) { | 135 | switch (action) { |
111 | case MPATH_PREQ: | 136 | case MPATH_PREQ: |
137 | mhwmp_dbg("sending PREQ to %pM\n", target); | ||
112 | ie_len = 37; | 138 | ie_len = 37; |
113 | pos = skb_put(skb, 2 + ie_len); | 139 | pos = skb_put(skb, 2 + ie_len); |
114 | *pos++ = WLAN_EID_PREQ; | 140 | *pos++ = WLAN_EID_PREQ; |
115 | break; | 141 | break; |
116 | case MPATH_PREP: | 142 | case MPATH_PREP: |
143 | mhwmp_dbg("sending PREP to %pM\n", target); | ||
117 | ie_len = 31; | 144 | ie_len = 31; |
118 | pos = skb_put(skb, 2 + ie_len); | 145 | pos = skb_put(skb, 2 + ie_len); |
119 | *pos++ = WLAN_EID_PREP; | 146 | *pos++ = WLAN_EID_PREP; |
120 | break; | 147 | break; |
148 | case MPATH_RANN: | ||
149 | mhwmp_dbg("sending RANN from %pM\n", orig_addr); | ||
150 | ie_len = sizeof(struct ieee80211_rann_ie); | ||
151 | pos = skb_put(skb, 2 + ie_len); | ||
152 | *pos++ = WLAN_EID_RANN; | ||
153 | break; | ||
121 | default: | 154 | default: |
122 | kfree_skb(skb); | 155 | kfree_skb(skb); |
123 | return -ENOTSUPP; | 156 | return -ENOTSUPP; |
@@ -133,20 +166,24 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags, | |||
133 | } | 166 | } |
134 | memcpy(pos, orig_addr, ETH_ALEN); | 167 | memcpy(pos, orig_addr, ETH_ALEN); |
135 | pos += ETH_ALEN; | 168 | pos += ETH_ALEN; |
136 | memcpy(pos, &orig_dsn, 4); | 169 | memcpy(pos, &orig_sn, 4); |
137 | pos += 4; | ||
138 | memcpy(pos, &lifetime, 4); | ||
139 | pos += 4; | 170 | pos += 4; |
171 | if (action != MPATH_RANN) { | ||
172 | memcpy(pos, &lifetime, 4); | ||
173 | pos += 4; | ||
174 | } | ||
140 | memcpy(pos, &metric, 4); | 175 | memcpy(pos, &metric, 4); |
141 | pos += 4; | 176 | pos += 4; |
142 | if (action == MPATH_PREQ) { | 177 | if (action == MPATH_PREQ) { |
143 | /* destination count */ | 178 | /* destination count */ |
144 | *pos++ = 1; | 179 | *pos++ = 1; |
145 | *pos++ = dst_flags; | 180 | *pos++ = target_flags; |
181 | } | ||
182 | if (action != MPATH_RANN) { | ||
183 | memcpy(pos, target, ETH_ALEN); | ||
184 | pos += ETH_ALEN; | ||
185 | memcpy(pos, &target_sn, 4); | ||
146 | } | 186 | } |
147 | memcpy(pos, dst, ETH_ALEN); | ||
148 | pos += ETH_ALEN; | ||
149 | memcpy(pos, &dst_dsn, 4); | ||
150 | 187 | ||
151 | ieee80211_tx_skb(sdata, skb, 1); | 188 | ieee80211_tx_skb(sdata, skb, 1); |
152 | return 0; | 189 | return 0; |
@@ -155,11 +192,13 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags, | |||
155 | /** | 192 | /** |
156 | * mesh_send_path error - Sends a PERR mesh management frame | 193 | * mesh_send_path error - Sends a PERR mesh management frame |
157 | * | 194 | * |
158 | * @dst: broken destination | 195 | * @target: broken destination |
159 | * @dst_dsn: dsn of the broken destination | 196 | * @target_sn: SN of the broken destination |
197 | * @target_rcode: reason code for this PERR | ||
160 | * @ra: node this frame is addressed to | 198 | * @ra: node this frame is addressed to |
161 | */ | 199 | */ |
162 | int mesh_path_error_tx(u8 *dst, __le32 dst_dsn, u8 *ra, | 200 | int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn, |
201 | __le16 target_rcode, u8 *ra, | ||
163 | struct ieee80211_sub_if_data *sdata) | 202 | struct ieee80211_sub_if_data *sdata) |
164 | { | 203 | { |
165 | struct ieee80211_local *local = sdata->local; | 204 | struct ieee80211_local *local = sdata->local; |
@@ -184,18 +223,30 @@ int mesh_path_error_tx(u8 *dst, __le32 dst_dsn, u8 *ra, | |||
184 | memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); | 223 | memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); |
185 | /* BSSID is left zeroed, wildcard value */ | 224 | /* BSSID is left zeroed, wildcard value */ |
186 | mgmt->u.action.category = MESH_PATH_SEL_CATEGORY; | 225 | mgmt->u.action.category = MESH_PATH_SEL_CATEGORY; |
187 | mgmt->u.action.u.mesh_action.action_code = MPATH_PERR; | 226 | mgmt->u.action.u.mesh_action.action_code = MESH_PATH_SEL_ACTION; |
188 | ie_len = 12; | 227 | ie_len = 15; |
189 | pos = skb_put(skb, 2 + ie_len); | 228 | pos = skb_put(skb, 2 + ie_len); |
190 | *pos++ = WLAN_EID_PERR; | 229 | *pos++ = WLAN_EID_PERR; |
191 | *pos++ = ie_len; | 230 | *pos++ = ie_len; |
192 | /* mode flags, reserved */ | 231 | /* ttl */ |
193 | *pos++ = 0; | 232 | *pos++ = MESH_TTL; |
194 | /* number of destinations */ | 233 | /* number of destinations */ |
195 | *pos++ = 1; | 234 | *pos++ = 1; |
196 | memcpy(pos, dst, ETH_ALEN); | 235 | /* |
236 | * flags bit, bit 1 is unset if we know the sequence number and | ||
237 | * bit 2 is set if we have a reason code | ||
238 | */ | ||
239 | *pos = 0; | ||
240 | if (!target_sn) | ||
241 | *pos |= MP_F_USN; | ||
242 | if (target_rcode) | ||
243 | *pos |= MP_F_RCODE; | ||
244 | pos++; | ||
245 | memcpy(pos, target, ETH_ALEN); | ||
197 | pos += ETH_ALEN; | 246 | pos += ETH_ALEN; |
198 | memcpy(pos, &dst_dsn, 4); | 247 | memcpy(pos, &target_sn, 4); |
248 | pos += 4; | ||
249 | memcpy(pos, &target_rcode, 2); | ||
199 | 250 | ||
200 | ieee80211_tx_skb(sdata, skb, 1); | 251 | ieee80211_tx_skb(sdata, skb, 1); |
201 | return 0; | 252 | return 0; |
@@ -269,18 +320,17 @@ static u32 airtime_link_metric_get(struct ieee80211_local *local, | |||
269 | */ | 320 | */ |
270 | static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata, | 321 | static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata, |
271 | struct ieee80211_mgmt *mgmt, | 322 | struct ieee80211_mgmt *mgmt, |
272 | u8 *hwmp_ie) | 323 | u8 *hwmp_ie, enum mpath_frame_type action) |
273 | { | 324 | { |
274 | struct ieee80211_local *local = sdata->local; | 325 | struct ieee80211_local *local = sdata->local; |
275 | struct mesh_path *mpath; | 326 | struct mesh_path *mpath; |
276 | struct sta_info *sta; | 327 | struct sta_info *sta; |
277 | bool fresh_info; | 328 | bool fresh_info; |
278 | u8 *orig_addr, *ta; | 329 | u8 *orig_addr, *ta; |
279 | u32 orig_dsn, orig_metric; | 330 | u32 orig_sn, orig_metric; |
280 | unsigned long orig_lifetime, exp_time; | 331 | unsigned long orig_lifetime, exp_time; |
281 | u32 last_hop_metric, new_metric; | 332 | u32 last_hop_metric, new_metric; |
282 | bool process = true; | 333 | bool process = true; |
283 | u8 action = mgmt->u.action.u.mesh_action.action_code; | ||
284 | 334 | ||
285 | rcu_read_lock(); | 335 | rcu_read_lock(); |
286 | sta = sta_info_get(local, mgmt->sa); | 336 | sta = sta_info_get(local, mgmt->sa); |
@@ -296,7 +346,7 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata, | |||
296 | switch (action) { | 346 | switch (action) { |
297 | case MPATH_PREQ: | 347 | case MPATH_PREQ: |
298 | orig_addr = PREQ_IE_ORIG_ADDR(hwmp_ie); | 348 | orig_addr = PREQ_IE_ORIG_ADDR(hwmp_ie); |
299 | orig_dsn = PREQ_IE_ORIG_DSN(hwmp_ie); | 349 | orig_sn = PREQ_IE_ORIG_SN(hwmp_ie); |
300 | orig_lifetime = PREQ_IE_LIFETIME(hwmp_ie); | 350 | orig_lifetime = PREQ_IE_LIFETIME(hwmp_ie); |
301 | orig_metric = PREQ_IE_METRIC(hwmp_ie); | 351 | orig_metric = PREQ_IE_METRIC(hwmp_ie); |
302 | break; | 352 | break; |
@@ -309,7 +359,7 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata, | |||
309 | * information from both PREQ and PREP frames. | 359 | * information from both PREQ and PREP frames. |
310 | */ | 360 | */ |
311 | orig_addr = PREP_IE_ORIG_ADDR(hwmp_ie); | 361 | orig_addr = PREP_IE_ORIG_ADDR(hwmp_ie); |
312 | orig_dsn = PREP_IE_ORIG_DSN(hwmp_ie); | 362 | orig_sn = PREP_IE_ORIG_SN(hwmp_ie); |
313 | orig_lifetime = PREP_IE_LIFETIME(hwmp_ie); | 363 | orig_lifetime = PREP_IE_LIFETIME(hwmp_ie); |
314 | orig_metric = PREP_IE_METRIC(hwmp_ie); | 364 | orig_metric = PREP_IE_METRIC(hwmp_ie); |
315 | break; | 365 | break; |
@@ -335,9 +385,9 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata, | |||
335 | if (mpath->flags & MESH_PATH_FIXED) | 385 | if (mpath->flags & MESH_PATH_FIXED) |
336 | fresh_info = false; | 386 | fresh_info = false; |
337 | else if ((mpath->flags & MESH_PATH_ACTIVE) && | 387 | else if ((mpath->flags & MESH_PATH_ACTIVE) && |
338 | (mpath->flags & MESH_PATH_DSN_VALID)) { | 388 | (mpath->flags & MESH_PATH_SN_VALID)) { |
339 | if (DSN_GT(mpath->dsn, orig_dsn) || | 389 | if (SN_GT(mpath->sn, orig_sn) || |
340 | (mpath->dsn == orig_dsn && | 390 | (mpath->sn == orig_sn && |
341 | action == MPATH_PREQ && | 391 | action == MPATH_PREQ && |
342 | new_metric > mpath->metric)) { | 392 | new_metric > mpath->metric)) { |
343 | process = false; | 393 | process = false; |
@@ -356,9 +406,9 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata, | |||
356 | 406 | ||
357 | if (fresh_info) { | 407 | if (fresh_info) { |
358 | mesh_path_assign_nexthop(mpath, sta); | 408 | mesh_path_assign_nexthop(mpath, sta); |
359 | mpath->flags |= MESH_PATH_DSN_VALID; | 409 | mpath->flags |= MESH_PATH_SN_VALID; |
360 | mpath->metric = new_metric; | 410 | mpath->metric = new_metric; |
361 | mpath->dsn = orig_dsn; | 411 | mpath->sn = orig_sn; |
362 | mpath->exp_time = time_after(mpath->exp_time, exp_time) | 412 | mpath->exp_time = time_after(mpath->exp_time, exp_time) |
363 | ? mpath->exp_time : exp_time; | 413 | ? mpath->exp_time : exp_time; |
364 | mesh_path_activate(mpath); | 414 | mesh_path_activate(mpath); |
@@ -397,7 +447,7 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata, | |||
397 | 447 | ||
398 | if (fresh_info) { | 448 | if (fresh_info) { |
399 | mesh_path_assign_nexthop(mpath, sta); | 449 | mesh_path_assign_nexthop(mpath, sta); |
400 | mpath->flags &= ~MESH_PATH_DSN_VALID; | 450 | mpath->flags &= ~MESH_PATH_SN_VALID; |
401 | mpath->metric = last_hop_metric; | 451 | mpath->metric = last_hop_metric; |
402 | mpath->exp_time = time_after(mpath->exp_time, exp_time) | 452 | mpath->exp_time = time_after(mpath->exp_time, exp_time) |
403 | ? mpath->exp_time : exp_time; | 453 | ? mpath->exp_time : exp_time; |
@@ -419,44 +469,47 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata, | |||
419 | { | 469 | { |
420 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; | 470 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; |
421 | struct mesh_path *mpath; | 471 | struct mesh_path *mpath; |
422 | u8 *dst_addr, *orig_addr; | 472 | u8 *target_addr, *orig_addr; |
423 | u8 dst_flags, ttl; | 473 | u8 target_flags, ttl; |
424 | u32 orig_dsn, dst_dsn, lifetime; | 474 | u32 orig_sn, target_sn, lifetime; |
425 | bool reply = false; | 475 | bool reply = false; |
426 | bool forward = true; | 476 | bool forward = true; |
427 | 477 | ||
428 | /* Update destination DSN, if present */ | 478 | /* Update target SN, if present */ |
429 | dst_addr = PREQ_IE_DST_ADDR(preq_elem); | 479 | target_addr = PREQ_IE_TARGET_ADDR(preq_elem); |
430 | orig_addr = PREQ_IE_ORIG_ADDR(preq_elem); | 480 | orig_addr = PREQ_IE_ORIG_ADDR(preq_elem); |
431 | dst_dsn = PREQ_IE_DST_DSN(preq_elem); | 481 | target_sn = PREQ_IE_TARGET_SN(preq_elem); |
432 | orig_dsn = PREQ_IE_ORIG_DSN(preq_elem); | 482 | orig_sn = PREQ_IE_ORIG_SN(preq_elem); |
433 | dst_flags = PREQ_IE_DST_F(preq_elem); | 483 | target_flags = PREQ_IE_TARGET_F(preq_elem); |
434 | 484 | ||
435 | if (memcmp(dst_addr, sdata->dev->dev_addr, ETH_ALEN) == 0) { | 485 | mhwmp_dbg("received PREQ from %pM\n", orig_addr); |
486 | |||
487 | if (memcmp(target_addr, sdata->dev->dev_addr, ETH_ALEN) == 0) { | ||
488 | mhwmp_dbg("PREQ is for us\n"); | ||
436 | forward = false; | 489 | forward = false; |
437 | reply = true; | 490 | reply = true; |
438 | metric = 0; | 491 | metric = 0; |
439 | if (time_after(jiffies, ifmsh->last_dsn_update + | 492 | if (time_after(jiffies, ifmsh->last_sn_update + |
440 | net_traversal_jiffies(sdata)) || | 493 | net_traversal_jiffies(sdata)) || |
441 | time_before(jiffies, ifmsh->last_dsn_update)) { | 494 | time_before(jiffies, ifmsh->last_sn_update)) { |
442 | dst_dsn = ++ifmsh->dsn; | 495 | target_sn = ++ifmsh->sn; |
443 | ifmsh->last_dsn_update = jiffies; | 496 | ifmsh->last_sn_update = jiffies; |
444 | } | 497 | } |
445 | } else { | 498 | } else { |
446 | rcu_read_lock(); | 499 | rcu_read_lock(); |
447 | mpath = mesh_path_lookup(dst_addr, sdata); | 500 | mpath = mesh_path_lookup(target_addr, sdata); |
448 | if (mpath) { | 501 | if (mpath) { |
449 | if ((!(mpath->flags & MESH_PATH_DSN_VALID)) || | 502 | if ((!(mpath->flags & MESH_PATH_SN_VALID)) || |
450 | DSN_LT(mpath->dsn, dst_dsn)) { | 503 | SN_LT(mpath->sn, target_sn)) { |
451 | mpath->dsn = dst_dsn; | 504 | mpath->sn = target_sn; |
452 | mpath->flags |= MESH_PATH_DSN_VALID; | 505 | mpath->flags |= MESH_PATH_SN_VALID; |
453 | } else if ((!(dst_flags & MP_F_DO)) && | 506 | } else if ((!(target_flags & MP_F_DO)) && |
454 | (mpath->flags & MESH_PATH_ACTIVE)) { | 507 | (mpath->flags & MESH_PATH_ACTIVE)) { |
455 | reply = true; | 508 | reply = true; |
456 | metric = mpath->metric; | 509 | metric = mpath->metric; |
457 | dst_dsn = mpath->dsn; | 510 | target_sn = mpath->sn; |
458 | if (dst_flags & MP_F_RF) | 511 | if (target_flags & MP_F_RF) |
459 | dst_flags |= MP_F_DO; | 512 | target_flags |= MP_F_DO; |
460 | else | 513 | else |
461 | forward = false; | 514 | forward = false; |
462 | } | 515 | } |
@@ -467,13 +520,14 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata, | |||
467 | if (reply) { | 520 | if (reply) { |
468 | lifetime = PREQ_IE_LIFETIME(preq_elem); | 521 | lifetime = PREQ_IE_LIFETIME(preq_elem); |
469 | ttl = ifmsh->mshcfg.dot11MeshTTL; | 522 | ttl = ifmsh->mshcfg.dot11MeshTTL; |
470 | if (ttl != 0) | 523 | if (ttl != 0) { |
471 | mesh_path_sel_frame_tx(MPATH_PREP, 0, dst_addr, | 524 | mhwmp_dbg("replying to the PREQ\n"); |
472 | cpu_to_le32(dst_dsn), 0, orig_addr, | 525 | mesh_path_sel_frame_tx(MPATH_PREP, 0, target_addr, |
473 | cpu_to_le32(orig_dsn), mgmt->sa, 0, ttl, | 526 | cpu_to_le32(target_sn), 0, orig_addr, |
527 | cpu_to_le32(orig_sn), mgmt->sa, 0, ttl, | ||
474 | cpu_to_le32(lifetime), cpu_to_le32(metric), | 528 | cpu_to_le32(lifetime), cpu_to_le32(metric), |
475 | 0, sdata); | 529 | 0, sdata); |
476 | else | 530 | } else |
477 | ifmsh->mshstats.dropped_frames_ttl++; | 531 | ifmsh->mshstats.dropped_frames_ttl++; |
478 | } | 532 | } |
479 | 533 | ||
@@ -487,13 +541,14 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata, | |||
487 | ifmsh->mshstats.dropped_frames_ttl++; | 541 | ifmsh->mshstats.dropped_frames_ttl++; |
488 | return; | 542 | return; |
489 | } | 543 | } |
544 | mhwmp_dbg("forwarding the PREQ from %pM\n", orig_addr); | ||
490 | --ttl; | 545 | --ttl; |
491 | flags = PREQ_IE_FLAGS(preq_elem); | 546 | flags = PREQ_IE_FLAGS(preq_elem); |
492 | preq_id = PREQ_IE_PREQ_ID(preq_elem); | 547 | preq_id = PREQ_IE_PREQ_ID(preq_elem); |
493 | hopcount = PREQ_IE_HOPCOUNT(preq_elem) + 1; | 548 | hopcount = PREQ_IE_HOPCOUNT(preq_elem) + 1; |
494 | mesh_path_sel_frame_tx(MPATH_PREQ, flags, orig_addr, | 549 | mesh_path_sel_frame_tx(MPATH_PREQ, flags, orig_addr, |
495 | cpu_to_le32(orig_dsn), dst_flags, dst_addr, | 550 | cpu_to_le32(orig_sn), target_flags, target_addr, |
496 | cpu_to_le32(dst_dsn), sdata->dev->broadcast, | 551 | cpu_to_le32(target_sn), sdata->dev->broadcast, |
497 | hopcount, ttl, cpu_to_le32(lifetime), | 552 | hopcount, ttl, cpu_to_le32(lifetime), |
498 | cpu_to_le32(metric), cpu_to_le32(preq_id), | 553 | cpu_to_le32(metric), cpu_to_le32(preq_id), |
499 | sdata); | 554 | sdata); |
@@ -508,10 +563,12 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata, | |||
508 | u8 *prep_elem, u32 metric) | 563 | u8 *prep_elem, u32 metric) |
509 | { | 564 | { |
510 | struct mesh_path *mpath; | 565 | struct mesh_path *mpath; |
511 | u8 *dst_addr, *orig_addr; | 566 | u8 *target_addr, *orig_addr; |
512 | u8 ttl, hopcount, flags; | 567 | u8 ttl, hopcount, flags; |
513 | u8 next_hop[ETH_ALEN]; | 568 | u8 next_hop[ETH_ALEN]; |
514 | u32 dst_dsn, orig_dsn, lifetime; | 569 | u32 target_sn, orig_sn, lifetime; |
570 | |||
571 | mhwmp_dbg("received PREP from %pM\n", PREP_IE_ORIG_ADDR(prep_elem)); | ||
515 | 572 | ||
516 | /* Note that we divert from the draft nomenclature and denominate | 573 | /* Note that we divert from the draft nomenclature and denominate |
517 | * destination to what the draft refers to as origininator. So in this | 574 | * destination to what the draft refers to as origininator. So in this |
@@ -519,8 +576,8 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata, | |||
519 | * which corresponds with the originator of the PREQ which this PREP | 576 | * which corresponds with the originator of the PREQ which this PREP |
520 | * replies | 577 | * replies |
521 | */ | 578 | */ |
522 | dst_addr = PREP_IE_DST_ADDR(prep_elem); | 579 | target_addr = PREP_IE_TARGET_ADDR(prep_elem); |
523 | if (memcmp(dst_addr, sdata->dev->dev_addr, ETH_ALEN) == 0) | 580 | if (memcmp(target_addr, sdata->dev->dev_addr, ETH_ALEN) == 0) |
524 | /* destination, no forwarding required */ | 581 | /* destination, no forwarding required */ |
525 | return; | 582 | return; |
526 | 583 | ||
@@ -531,7 +588,7 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata, | |||
531 | } | 588 | } |
532 | 589 | ||
533 | rcu_read_lock(); | 590 | rcu_read_lock(); |
534 | mpath = mesh_path_lookup(dst_addr, sdata); | 591 | mpath = mesh_path_lookup(target_addr, sdata); |
535 | if (mpath) | 592 | if (mpath) |
536 | spin_lock_bh(&mpath->state_lock); | 593 | spin_lock_bh(&mpath->state_lock); |
537 | else | 594 | else |
@@ -547,13 +604,13 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata, | |||
547 | lifetime = PREP_IE_LIFETIME(prep_elem); | 604 | lifetime = PREP_IE_LIFETIME(prep_elem); |
548 | hopcount = PREP_IE_HOPCOUNT(prep_elem) + 1; | 605 | hopcount = PREP_IE_HOPCOUNT(prep_elem) + 1; |
549 | orig_addr = PREP_IE_ORIG_ADDR(prep_elem); | 606 | orig_addr = PREP_IE_ORIG_ADDR(prep_elem); |
550 | dst_dsn = PREP_IE_DST_DSN(prep_elem); | 607 | target_sn = PREP_IE_TARGET_SN(prep_elem); |
551 | orig_dsn = PREP_IE_ORIG_DSN(prep_elem); | 608 | orig_sn = PREP_IE_ORIG_SN(prep_elem); |
552 | 609 | ||
553 | mesh_path_sel_frame_tx(MPATH_PREP, flags, orig_addr, | 610 | mesh_path_sel_frame_tx(MPATH_PREP, flags, orig_addr, |
554 | cpu_to_le32(orig_dsn), 0, dst_addr, | 611 | cpu_to_le32(orig_sn), 0, target_addr, |
555 | cpu_to_le32(dst_dsn), mpath->next_hop->sta.addr, hopcount, ttl, | 612 | cpu_to_le32(target_sn), mpath->next_hop->sta.addr, hopcount, |
556 | cpu_to_le32(lifetime), cpu_to_le32(metric), | 613 | ttl, cpu_to_le32(lifetime), cpu_to_le32(metric), |
557 | 0, sdata); | 614 | 0, sdata); |
558 | rcu_read_unlock(); | 615 | rcu_read_unlock(); |
559 | 616 | ||
@@ -570,25 +627,39 @@ fail: | |||
570 | static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata, | 627 | static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata, |
571 | struct ieee80211_mgmt *mgmt, u8 *perr_elem) | 628 | struct ieee80211_mgmt *mgmt, u8 *perr_elem) |
572 | { | 629 | { |
630 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; | ||
573 | struct mesh_path *mpath; | 631 | struct mesh_path *mpath; |
574 | u8 *ta, *dst_addr; | 632 | u8 ttl; |
575 | u32 dst_dsn; | 633 | u8 *ta, *target_addr; |
634 | u8 target_flags; | ||
635 | u32 target_sn; | ||
636 | u16 target_rcode; | ||
576 | 637 | ||
577 | ta = mgmt->sa; | 638 | ta = mgmt->sa; |
578 | dst_addr = PERR_IE_DST_ADDR(perr_elem); | 639 | ttl = PERR_IE_TTL(perr_elem); |
579 | dst_dsn = PERR_IE_DST_DSN(perr_elem); | 640 | if (ttl <= 1) { |
641 | ifmsh->mshstats.dropped_frames_ttl++; | ||
642 | return; | ||
643 | } | ||
644 | ttl--; | ||
645 | target_flags = PERR_IE_TARGET_FLAGS(perr_elem); | ||
646 | target_addr = PERR_IE_TARGET_ADDR(perr_elem); | ||
647 | target_sn = PERR_IE_TARGET_SN(perr_elem); | ||
648 | target_rcode = PERR_IE_TARGET_RCODE(perr_elem); | ||
649 | |||
580 | rcu_read_lock(); | 650 | rcu_read_lock(); |
581 | mpath = mesh_path_lookup(dst_addr, sdata); | 651 | mpath = mesh_path_lookup(target_addr, sdata); |
582 | if (mpath) { | 652 | if (mpath) { |
583 | spin_lock_bh(&mpath->state_lock); | 653 | spin_lock_bh(&mpath->state_lock); |
584 | if (mpath->flags & MESH_PATH_ACTIVE && | 654 | if (mpath->flags & MESH_PATH_ACTIVE && |
585 | memcmp(ta, mpath->next_hop->sta.addr, ETH_ALEN) == 0 && | 655 | memcmp(ta, mpath->next_hop->sta.addr, ETH_ALEN) == 0 && |
586 | (!(mpath->flags & MESH_PATH_DSN_VALID) || | 656 | (!(mpath->flags & MESH_PATH_SN_VALID) || |
587 | DSN_GT(dst_dsn, mpath->dsn))) { | 657 | SN_GT(target_sn, mpath->sn))) { |
588 | mpath->flags &= ~MESH_PATH_ACTIVE; | 658 | mpath->flags &= ~MESH_PATH_ACTIVE; |
589 | mpath->dsn = dst_dsn; | 659 | mpath->sn = target_sn; |
590 | spin_unlock_bh(&mpath->state_lock); | 660 | spin_unlock_bh(&mpath->state_lock); |
591 | mesh_path_error_tx(dst_addr, cpu_to_le32(dst_dsn), | 661 | mesh_path_error_tx(ttl, target_addr, cpu_to_le32(target_sn), |
662 | cpu_to_le16(target_rcode), | ||
592 | sdata->dev->broadcast, sdata); | 663 | sdata->dev->broadcast, sdata); |
593 | } else | 664 | } else |
594 | spin_unlock_bh(&mpath->state_lock); | 665 | spin_unlock_bh(&mpath->state_lock); |
@@ -596,6 +667,56 @@ static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata, | |||
596 | rcu_read_unlock(); | 667 | rcu_read_unlock(); |
597 | } | 668 | } |
598 | 669 | ||
670 | static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata, | ||
671 | struct ieee80211_mgmt *mgmt, | ||
672 | struct ieee80211_rann_ie *rann) | ||
673 | { | ||
674 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; | ||
675 | struct mesh_path *mpath; | ||
676 | u8 *ta; | ||
677 | u8 ttl, flags, hopcount; | ||
678 | u8 *orig_addr; | ||
679 | u32 orig_sn, metric; | ||
680 | |||
681 | ta = mgmt->sa; | ||
682 | ttl = rann->rann_ttl; | ||
683 | if (ttl <= 1) { | ||
684 | ifmsh->mshstats.dropped_frames_ttl++; | ||
685 | return; | ||
686 | } | ||
687 | ttl--; | ||
688 | flags = rann->rann_flags; | ||
689 | orig_addr = rann->rann_addr; | ||
690 | orig_sn = rann->rann_seq; | ||
691 | hopcount = rann->rann_hopcount; | ||
692 | hopcount++; | ||
693 | metric = rann->rann_metric; | ||
694 | mhwmp_dbg("received RANN from %pM\n", orig_addr); | ||
695 | |||
696 | rcu_read_lock(); | ||
697 | mpath = mesh_path_lookup(orig_addr, sdata); | ||
698 | if (!mpath) { | ||
699 | mesh_path_add(orig_addr, sdata); | ||
700 | mpath = mesh_path_lookup(orig_addr, sdata); | ||
701 | if (!mpath) { | ||
702 | rcu_read_unlock(); | ||
703 | sdata->u.mesh.mshstats.dropped_frames_no_route++; | ||
704 | return; | ||
705 | } | ||
706 | mesh_queue_preq(mpath, | ||
707 | PREQ_Q_F_START | PREQ_Q_F_REFRESH); | ||
708 | } | ||
709 | if (mpath->sn < orig_sn) { | ||
710 | mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr, | ||
711 | cpu_to_le32(orig_sn), | ||
712 | 0, NULL, 0, sdata->dev->broadcast, | ||
713 | hopcount, ttl, 0, | ||
714 | cpu_to_le32(metric + mpath->metric), | ||
715 | 0, sdata); | ||
716 | mpath->sn = orig_sn; | ||
717 | } | ||
718 | rcu_read_unlock(); | ||
719 | } | ||
599 | 720 | ||
600 | 721 | ||
601 | void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata, | 722 | void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata, |
@@ -614,34 +735,34 @@ void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata, | |||
614 | ieee802_11_parse_elems(mgmt->u.action.u.mesh_action.variable, | 735 | ieee802_11_parse_elems(mgmt->u.action.u.mesh_action.variable, |
615 | len - baselen, &elems); | 736 | len - baselen, &elems); |
616 | 737 | ||
617 | switch (mgmt->u.action.u.mesh_action.action_code) { | 738 | if (elems.preq) { |
618 | case MPATH_PREQ: | 739 | if (elems.preq_len != 37) |
619 | if (!elems.preq || elems.preq_len != 37) | ||
620 | /* Right now we support just 1 destination and no AE */ | 740 | /* Right now we support just 1 destination and no AE */ |
621 | return; | 741 | return; |
622 | last_hop_metric = hwmp_route_info_get(sdata, mgmt, elems.preq); | 742 | last_hop_metric = hwmp_route_info_get(sdata, mgmt, elems.preq, |
623 | if (!last_hop_metric) | 743 | MPATH_PREQ); |
624 | return; | 744 | if (last_hop_metric) |
625 | hwmp_preq_frame_process(sdata, mgmt, elems.preq, last_hop_metric); | 745 | hwmp_preq_frame_process(sdata, mgmt, elems.preq, |
626 | break; | 746 | last_hop_metric); |
627 | case MPATH_PREP: | 747 | } |
628 | if (!elems.prep || elems.prep_len != 31) | 748 | if (elems.prep) { |
749 | if (elems.prep_len != 31) | ||
629 | /* Right now we support no AE */ | 750 | /* Right now we support no AE */ |
630 | return; | 751 | return; |
631 | last_hop_metric = hwmp_route_info_get(sdata, mgmt, elems.prep); | 752 | last_hop_metric = hwmp_route_info_get(sdata, mgmt, elems.prep, |
632 | if (!last_hop_metric) | 753 | MPATH_PREP); |
633 | return; | 754 | if (last_hop_metric) |
634 | hwmp_prep_frame_process(sdata, mgmt, elems.prep, last_hop_metric); | 755 | hwmp_prep_frame_process(sdata, mgmt, elems.prep, |
635 | break; | 756 | last_hop_metric); |
636 | case MPATH_PERR: | 757 | } |
637 | if (!elems.perr || elems.perr_len != 12) | 758 | if (elems.perr) { |
759 | if (elems.perr_len != 15) | ||
638 | /* Right now we support only one destination per PERR */ | 760 | /* Right now we support only one destination per PERR */ |
639 | return; | 761 | return; |
640 | hwmp_perr_frame_process(sdata, mgmt, elems.perr); | 762 | hwmp_perr_frame_process(sdata, mgmt, elems.perr); |
641 | default: | ||
642 | return; | ||
643 | } | 763 | } |
644 | 764 | if (elems.rann) | |
765 | hwmp_rann_frame_process(sdata, mgmt, elems.rann); | ||
645 | } | 766 | } |
646 | 767 | ||
647 | /** | 768 | /** |
@@ -661,7 +782,7 @@ static void mesh_queue_preq(struct mesh_path *mpath, u8 flags) | |||
661 | 782 | ||
662 | preq_node = kmalloc(sizeof(struct mesh_preq_queue), GFP_ATOMIC); | 783 | preq_node = kmalloc(sizeof(struct mesh_preq_queue), GFP_ATOMIC); |
663 | if (!preq_node) { | 784 | if (!preq_node) { |
664 | printk(KERN_DEBUG "Mesh HWMP: could not allocate PREQ node\n"); | 785 | mhwmp_dbg("could not allocate PREQ node\n"); |
665 | return; | 786 | return; |
666 | } | 787 | } |
667 | 788 | ||
@@ -670,7 +791,7 @@ static void mesh_queue_preq(struct mesh_path *mpath, u8 flags) | |||
670 | spin_unlock(&ifmsh->mesh_preq_queue_lock); | 791 | spin_unlock(&ifmsh->mesh_preq_queue_lock); |
671 | kfree(preq_node); | 792 | kfree(preq_node); |
672 | if (printk_ratelimit()) | 793 | if (printk_ratelimit()) |
673 | printk(KERN_DEBUG "Mesh HWMP: PREQ node queue full\n"); | 794 | mhwmp_dbg("PREQ node queue full\n"); |
674 | return; | 795 | return; |
675 | } | 796 | } |
676 | 797 | ||
@@ -705,7 +826,7 @@ void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata) | |||
705 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; | 826 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; |
706 | struct mesh_preq_queue *preq_node; | 827 | struct mesh_preq_queue *preq_node; |
707 | struct mesh_path *mpath; | 828 | struct mesh_path *mpath; |
708 | u8 ttl, dst_flags; | 829 | u8 ttl, target_flags; |
709 | u32 lifetime; | 830 | u32 lifetime; |
710 | 831 | ||
711 | spin_lock_bh(&ifmsh->mesh_preq_queue_lock); | 832 | spin_lock_bh(&ifmsh->mesh_preq_queue_lock); |
@@ -747,11 +868,11 @@ void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata) | |||
747 | 868 | ||
748 | ifmsh->last_preq = jiffies; | 869 | ifmsh->last_preq = jiffies; |
749 | 870 | ||
750 | if (time_after(jiffies, ifmsh->last_dsn_update + | 871 | if (time_after(jiffies, ifmsh->last_sn_update + |
751 | net_traversal_jiffies(sdata)) || | 872 | net_traversal_jiffies(sdata)) || |
752 | time_before(jiffies, ifmsh->last_dsn_update)) { | 873 | time_before(jiffies, ifmsh->last_sn_update)) { |
753 | ++ifmsh->dsn; | 874 | ++ifmsh->sn; |
754 | sdata->u.mesh.last_dsn_update = jiffies; | 875 | sdata->u.mesh.last_sn_update = jiffies; |
755 | } | 876 | } |
756 | lifetime = default_lifetime(sdata); | 877 | lifetime = default_lifetime(sdata); |
757 | ttl = sdata->u.mesh.mshcfg.dot11MeshTTL; | 878 | ttl = sdata->u.mesh.mshcfg.dot11MeshTTL; |
@@ -762,14 +883,14 @@ void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata) | |||
762 | } | 883 | } |
763 | 884 | ||
764 | if (preq_node->flags & PREQ_Q_F_REFRESH) | 885 | if (preq_node->flags & PREQ_Q_F_REFRESH) |
765 | dst_flags = MP_F_DO; | 886 | target_flags = MP_F_DO; |
766 | else | 887 | else |
767 | dst_flags = MP_F_RF; | 888 | target_flags = MP_F_RF; |
768 | 889 | ||
769 | spin_unlock_bh(&mpath->state_lock); | 890 | spin_unlock_bh(&mpath->state_lock); |
770 | mesh_path_sel_frame_tx(MPATH_PREQ, 0, sdata->dev->dev_addr, | 891 | mesh_path_sel_frame_tx(MPATH_PREQ, 0, sdata->dev->dev_addr, |
771 | cpu_to_le32(ifmsh->dsn), dst_flags, mpath->dst, | 892 | cpu_to_le32(ifmsh->sn), target_flags, mpath->dst, |
772 | cpu_to_le32(mpath->dsn), sdata->dev->broadcast, 0, | 893 | cpu_to_le32(mpath->sn), sdata->dev->broadcast, 0, |
773 | ttl, cpu_to_le32(lifetime), 0, | 894 | ttl, cpu_to_le32(lifetime), 0, |
774 | cpu_to_le32(ifmsh->preq_id++), sdata); | 895 | cpu_to_le32(ifmsh->preq_id++), sdata); |
775 | mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout); | 896 | mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout); |
@@ -796,15 +917,15 @@ int mesh_nexthop_lookup(struct sk_buff *skb, | |||
796 | struct sk_buff *skb_to_free = NULL; | 917 | struct sk_buff *skb_to_free = NULL; |
797 | struct mesh_path *mpath; | 918 | struct mesh_path *mpath; |
798 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | 919 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; |
799 | u8 *dst_addr = hdr->addr3; | 920 | u8 *target_addr = hdr->addr3; |
800 | int err = 0; | 921 | int err = 0; |
801 | 922 | ||
802 | rcu_read_lock(); | 923 | rcu_read_lock(); |
803 | mpath = mesh_path_lookup(dst_addr, sdata); | 924 | mpath = mesh_path_lookup(target_addr, sdata); |
804 | 925 | ||
805 | if (!mpath) { | 926 | if (!mpath) { |
806 | mesh_path_add(dst_addr, sdata); | 927 | mesh_path_add(target_addr, sdata); |
807 | mpath = mesh_path_lookup(dst_addr, sdata); | 928 | mpath = mesh_path_lookup(target_addr, sdata); |
808 | if (!mpath) { | 929 | if (!mpath) { |
809 | sdata->u.mesh.mshstats.dropped_frames_no_route++; | 930 | sdata->u.mesh.mshstats.dropped_frames_no_route++; |
810 | err = -ENOSPC; | 931 | err = -ENOSPC; |
@@ -882,3 +1003,14 @@ void mesh_path_timer(unsigned long data) | |||
882 | endmpathtimer: | 1003 | endmpathtimer: |
883 | rcu_read_unlock(); | 1004 | rcu_read_unlock(); |
884 | } | 1005 | } |
1006 | |||
1007 | void | ||
1008 | mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata) | ||
1009 | { | ||
1010 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; | ||
1011 | |||
1012 | mesh_path_sel_frame_tx(MPATH_RANN, 0, sdata->dev->dev_addr, | ||
1013 | cpu_to_le32(++ifmsh->sn), | ||
1014 | 0, NULL, 0, sdata->dev->broadcast, | ||
1015 | 0, MESH_TTL, 0, 0, 0, sdata); | ||
1016 | } | ||
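The reworked mesh_path_error_tx() above grows the PERR element from 12 to 15 bytes: a TTL now leads the element, and a per-target flags octet (MP_F_USN when the sequence number is unknown, MP_F_RCODE when a reason code is carried) precedes the target address, sequence number and 2-byte reason code. A standalone sketch of that body layout follows; the kernel builds it in place in the skb with cpu_to_le32()/cpu_to_le16(), while the helper below packs bytes little endian by hand, and its name is illustrative.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MP_F_USN   0x01  /* target SN unknown */
#define MP_F_RCODE 0x02  /* reason code present */

/* Pack the 15-byte PERR element body: TTL, destination count,
 * per-target flags, target address, target SN (LE), reason code (LE).
 * Returns the number of bytes written. */
static size_t perr_body_pack(uint8_t *buf, uint8_t ttl,
                             const uint8_t target[6],
                             uint32_t target_sn, uint16_t rcode)
{
    uint8_t *pos = buf;

    *pos++ = ttl;
    *pos++ = 1;                       /* number of destinations */
    *pos = 0;
    if (!target_sn)
        *pos |= MP_F_USN;
    if (rcode)
        *pos |= MP_F_RCODE;
    pos++;
    memcpy(pos, target, 6);
    pos += 6;
    for (int i = 0; i < 4; i++)       /* cpu_to_le32 equivalent */
        *pos++ = (target_sn >> (8 * i)) & 0xff;
    for (int i = 0; i < 2; i++)       /* cpu_to_le16 equivalent */
        *pos++ = (rcode >> (8 * i)) & 0xff;

    return (size_t)(pos - buf);
}

int main(void)
{
    uint8_t body[15];
    const uint8_t target[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
    size_t n = perr_body_pack(body, 31, target, 0x1234, 12 /* no route */);

    printf("PERR body (%zu bytes):", n);
    for (size_t i = 0; i < n; i++)
        printf(" %02x", body[i]);
    printf("\n");
    return 0;
}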
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c index 751c4d0e2b36..5399e7a9ec6e 100644 --- a/net/mac80211/mesh_pathtbl.c +++ b/net/mac80211/mesh_pathtbl.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2008 open80211s Ltd. | 2 | * Copyright (c) 2008, 2009 open80211s Ltd. |
3 | * Author: Luis Carlos Cobo <luisca@cozybit.com> | 3 | * Author: Luis Carlos Cobo <luisca@cozybit.com> |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
@@ -463,10 +463,11 @@ void mesh_plink_broken(struct sta_info *sta) | |||
463 | mpath->flags & MESH_PATH_ACTIVE && | 463 | mpath->flags & MESH_PATH_ACTIVE && |
464 | !(mpath->flags & MESH_PATH_FIXED)) { | 464 | !(mpath->flags & MESH_PATH_FIXED)) { |
465 | mpath->flags &= ~MESH_PATH_ACTIVE; | 465 | mpath->flags &= ~MESH_PATH_ACTIVE; |
466 | ++mpath->dsn; | 466 | ++mpath->sn; |
467 | spin_unlock_bh(&mpath->state_lock); | 467 | spin_unlock_bh(&mpath->state_lock); |
468 | mesh_path_error_tx(mpath->dst, | 468 | mesh_path_error_tx(MESH_TTL, mpath->dst, |
469 | cpu_to_le32(mpath->dsn), | 469 | cpu_to_le32(mpath->sn), |
470 | PERR_RCODE_DEST_UNREACH, | ||
470 | sdata->dev->broadcast, sdata); | 471 | sdata->dev->broadcast, sdata); |
471 | } else | 472 | } else |
472 | spin_unlock_bh(&mpath->state_lock); | 473 | spin_unlock_bh(&mpath->state_lock); |
@@ -601,7 +602,7 @@ void mesh_path_discard_frame(struct sk_buff *skb, | |||
601 | { | 602 | { |
602 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | 603 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; |
603 | struct mesh_path *mpath; | 604 | struct mesh_path *mpath; |
604 | u32 dsn = 0; | 605 | u32 sn = 0; |
605 | 606 | ||
606 | if (memcmp(hdr->addr4, sdata->dev->dev_addr, ETH_ALEN) != 0) { | 607 | if (memcmp(hdr->addr4, sdata->dev->dev_addr, ETH_ALEN) != 0) { |
607 | u8 *ra, *da; | 608 | u8 *ra, *da; |
@@ -610,8 +611,9 @@ void mesh_path_discard_frame(struct sk_buff *skb, | |||
610 | ra = hdr->addr1; | 611 | ra = hdr->addr1; |
611 | mpath = mesh_path_lookup(da, sdata); | 612 | mpath = mesh_path_lookup(da, sdata); |
612 | if (mpath) | 613 | if (mpath) |
613 | dsn = ++mpath->dsn; | 614 | sn = ++mpath->sn; |
614 | mesh_path_error_tx(skb->data, cpu_to_le32(dsn), ra, sdata); | 615 | mesh_path_error_tx(MESH_TTL, skb->data, cpu_to_le32(sn), |
616 | PERR_RCODE_NO_ROUTE, ra, sdata); | ||
615 | } | 617 | } |
616 | 618 | ||
617 | kfree_skb(skb); | 619 | kfree_skb(skb); |
@@ -646,7 +648,7 @@ void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop) | |||
646 | { | 648 | { |
647 | spin_lock_bh(&mpath->state_lock); | 649 | spin_lock_bh(&mpath->state_lock); |
648 | mesh_path_assign_nexthop(mpath, next_hop); | 650 | mesh_path_assign_nexthop(mpath, next_hop); |
649 | mpath->dsn = 0xffff; | 651 | mpath->sn = 0xffff; |
650 | mpath->metric = 0; | 652 | mpath->metric = 0; |
651 | mpath->hop_count = 0; | 653 | mpath->hop_count = 0; |
652 | mpath->exp_time = 0; | 654 | mpath->exp_time = 0; |
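In the path table changes above, both PERR call sites now advance the path sequence number before broadcasting the error and pick different reason codes: PERR_RCODE_DEST_UNREACH when an established peer link breaks in mesh_plink_broken(), PERR_RCODE_NO_ROUTE when a frame is discarded for lack of a route. A toy model of that bookkeeping is sketched below with illustrative types, not the mac80211 structures.

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* Reason codes from the mesh.h hunk (pending ANA assignment). */
enum perr_rcode {
    PERR_RCODE_UNSPECIFIED  = 11,
    PERR_RCODE_NO_ROUTE     = 12,
    PERR_RCODE_DEST_UNREACH = 13,
};

struct path {
    uint32_t sn;
    bool     active;
};

/* Before broadcasting a PERR: drop the path from active use and advance
 * its sequence number so the error outranks the route information that
 * neighbours still hold. */
static enum perr_rcode path_fail(struct path *p, bool next_hop_lost)
{
    p->active = false;
    p->sn++;
    return next_hop_lost ? PERR_RCODE_DEST_UNREACH : PERR_RCODE_NO_ROUTE;
}

int main(void)
{
    struct path p = { .sn = 7, .active = true };
    printf("rcode=%d sn=%u\n", path_fail(&p, true), p.sn);
    return 0;
}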
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c index ffcbad75e09b..f21329afdae3 100644 --- a/net/mac80211/mesh_plink.c +++ b/net/mac80211/mesh_plink.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2008 open80211s Ltd. | 2 | * Copyright (c) 2008, 2009 open80211s Ltd. |
3 | * Author: Luis Carlos Cobo <luisca@cozybit.com> | 3 | * Author: Luis Carlos Cobo <luisca@cozybit.com> |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
@@ -18,9 +18,8 @@ | |||
18 | #define mpl_dbg(fmt, args...) do { (void)(0); } while (0) | 18 | #define mpl_dbg(fmt, args...) do { (void)(0); } while (0) |
19 | #endif | 19 | #endif |
20 | 20 | ||
21 | #define PLINK_GET_FRAME_SUBTYPE(p) (p) | 21 | #define PLINK_GET_LLID(p) (p + 4) |
22 | #define PLINK_GET_LLID(p) (p + 1) | 22 | #define PLINK_GET_PLID(p) (p + 6) |
23 | #define PLINK_GET_PLID(p) (p + 3) | ||
24 | 23 | ||
25 | #define mod_plink_timer(s, t) (mod_timer(&s->plink_timer, \ | 24 | #define mod_plink_timer(s, t) (mod_timer(&s->plink_timer, \ |
26 | jiffies + HZ * t / 1000)) | 25 | jiffies + HZ * t / 1000)) |
@@ -65,6 +64,7 @@ void mesh_plink_inc_estab_count(struct ieee80211_sub_if_data *sdata) | |||
65 | { | 64 | { |
66 | atomic_inc(&sdata->u.mesh.mshstats.estab_plinks); | 65 | atomic_inc(&sdata->u.mesh.mshstats.estab_plinks); |
67 | mesh_accept_plinks_update(sdata); | 66 | mesh_accept_plinks_update(sdata); |
67 | ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON); | ||
68 | } | 68 | } |
69 | 69 | ||
70 | static inline | 70 | static inline |
@@ -72,12 +72,13 @@ void mesh_plink_dec_estab_count(struct ieee80211_sub_if_data *sdata) | |||
72 | { | 72 | { |
73 | atomic_dec(&sdata->u.mesh.mshstats.estab_plinks); | 73 | atomic_dec(&sdata->u.mesh.mshstats.estab_plinks); |
74 | mesh_accept_plinks_update(sdata); | 74 | mesh_accept_plinks_update(sdata); |
75 | ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON); | ||
75 | } | 76 | } |
76 | 77 | ||
77 | /** | 78 | /** |
78 | * mesh_plink_fsm_restart - restart a mesh peer link finite state machine | 79 | * mesh_plink_fsm_restart - restart a mesh peer link finite state machine |
79 | * | 80 | * |
80 | * @sta: mes peer link to restart | 81 | * @sta: mesh peer link to restart |
81 | * | 82 | * |
82 | * Locking: this function must be called holding sta->lock | 83 | * Locking: this function must be called holding sta->lock |
83 | */ | 84 | */ |
@@ -152,6 +153,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata, | |||
152 | struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400); | 153 | struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400); |
153 | struct ieee80211_mgmt *mgmt; | 154 | struct ieee80211_mgmt *mgmt; |
154 | bool include_plid = false; | 155 | bool include_plid = false; |
156 | static const u8 meshpeeringproto[] = { 0x00, 0x0F, 0xAC, 0x2A }; | ||
155 | u8 *pos; | 157 | u8 *pos; |
156 | int ie_len; | 158 | int ie_len; |
157 | 159 | ||
@@ -169,7 +171,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata, | |||
169 | memcpy(mgmt->da, da, ETH_ALEN); | 171 | memcpy(mgmt->da, da, ETH_ALEN); |
170 | memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); | 172 | memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); |
171 | /* BSSID is left zeroed, wildcard value */ | 173 | /* BSSID is left zeroed, wildcard value */ |
172 | mgmt->u.action.category = PLINK_CATEGORY; | 174 | mgmt->u.action.category = MESH_PLINK_CATEGORY; |
173 | mgmt->u.action.u.plink_action.action_code = action; | 175 | mgmt->u.action.u.plink_action.action_code = action; |
174 | 176 | ||
175 | if (action == PLINK_CLOSE) | 177 | if (action == PLINK_CLOSE) |
@@ -179,7 +181,8 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata, | |||
179 | if (action == PLINK_CONFIRM) { | 181 | if (action == PLINK_CONFIRM) { |
180 | pos = skb_put(skb, 4); | 182 | pos = skb_put(skb, 4); |
181 | /* two-byte status code followed by two-byte AID */ | 183 | /* two-byte status code followed by two-byte AID */ |
182 | memset(pos, 0, 4); | 184 | memset(pos, 0, 2); |
185 | memcpy(pos + 2, &plid, 2); | ||
183 | } | 186 | } |
184 | mesh_mgmt_ies_add(skb, sdata); | 187 | mesh_mgmt_ies_add(skb, sdata); |
185 | } | 188 | } |
@@ -187,18 +190,18 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata, | |||
187 | /* Add Peer Link Management element */ | 190 | /* Add Peer Link Management element */ |
188 | switch (action) { | 191 | switch (action) { |
189 | case PLINK_OPEN: | 192 | case PLINK_OPEN: |
190 | ie_len = 3; | 193 | ie_len = 6; |
191 | break; | 194 | break; |
192 | case PLINK_CONFIRM: | 195 | case PLINK_CONFIRM: |
193 | ie_len = 5; | 196 | ie_len = 8; |
194 | include_plid = true; | 197 | include_plid = true; |
195 | break; | 198 | break; |
196 | case PLINK_CLOSE: | 199 | case PLINK_CLOSE: |
197 | default: | 200 | default: |
198 | if (!plid) | 201 | if (!plid) |
199 | ie_len = 5; | 202 | ie_len = 8; |
200 | else { | 203 | else { |
201 | ie_len = 7; | 204 | ie_len = 10; |
202 | include_plid = true; | 205 | include_plid = true; |
203 | } | 206 | } |
204 | break; | 207 | break; |
@@ -207,7 +210,8 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata, | |||
207 | pos = skb_put(skb, 2 + ie_len); | 210 | pos = skb_put(skb, 2 + ie_len); |
208 | *pos++ = WLAN_EID_PEER_LINK; | 211 | *pos++ = WLAN_EID_PEER_LINK; |
209 | *pos++ = ie_len; | 212 | *pos++ = ie_len; |
210 | *pos++ = action; | 213 | memcpy(pos, meshpeeringproto, sizeof(meshpeeringproto)); |
214 | pos += 4; | ||
211 | memcpy(pos, &llid, 2); | 215 | memcpy(pos, &llid, 2); |
212 | if (include_plid) { | 216 | if (include_plid) { |
213 | pos += 2; | 217 | pos += 2; |
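Editor's note: every element length grows by three bytes here, because the old one-byte frame subtype leaves the element (it now rides in the action frame's action_code field, see the RX hunk below) while the four-byte protocol identifier takes its place. A standalone mirror of the length selection; the enum values are placeholders and only the lengths match the hunk above:

	#include <stdio.h>

	/* Body length of the peer link management element (element ID and
	 * length byte excluded); enum values here are illustrative only. */
	enum plink_action { PLINK_OPEN, PLINK_CONFIRM, PLINK_CLOSE };

	static int plink_ie_len(enum plink_action action, int have_plid)
	{
		switch (action) {
		case PLINK_OPEN:
			return 6;		/* proto ID + LLID */
		case PLINK_CONFIRM:
			return 8;		/* proto ID + LLID + PLID */
		case PLINK_CLOSE:
		default:
			/* proto ID + LLID + reason, plus PLID when known */
			return have_plid ? 10 : 8;
		}
	}

	int main(void)
	{
		printf("%d %d %d\n", plink_ie_len(PLINK_OPEN, 0),
		       plink_ie_len(PLINK_CONFIRM, 1),
		       plink_ie_len(PLINK_CLOSE, 1));	/* 6 8 10 */
		return 0;
	}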
@@ -395,6 +399,17 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m | |||
395 | u8 ie_len; | 399 | u8 ie_len; |
396 | u8 *baseaddr; | 400 | u8 *baseaddr; |
397 | __le16 plid, llid, reason; | 401 | __le16 plid, llid, reason; |
402 | #ifdef CONFIG_MAC80211_VERBOSE_MPL_DEBUG | ||
403 | static const char *mplstates[] = { | ||
404 | [PLINK_LISTEN] = "LISTEN", | ||
405 | [PLINK_OPN_SNT] = "OPN-SNT", | ||
406 | [PLINK_OPN_RCVD] = "OPN-RCVD", | ||
407 | [PLINK_CNF_RCVD] = "CNF_RCVD", | ||
408 | [PLINK_ESTAB] = "ESTAB", | ||
409 | [PLINK_HOLDING] = "HOLDING", | ||
410 | [PLINK_BLOCKED] = "BLOCKED" | ||
411 | }; | ||
412 | #endif | ||
398 | 413 | ||
399 | /* need action_code, aux */ | 414 | /* need action_code, aux */ |
400 | if (len < IEEE80211_MIN_ACTION_SIZE + 3) | 415 | if (len < IEEE80211_MIN_ACTION_SIZE + 3) |
@@ -417,12 +432,13 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m | |||
417 | return; | 432 | return; |
418 | } | 433 | } |
419 | 434 | ||
420 | ftype = *((u8 *)PLINK_GET_FRAME_SUBTYPE(elems.peer_link)); | 435 | ftype = mgmt->u.action.u.plink_action.action_code; |
421 | ie_len = elems.peer_link_len; | 436 | ie_len = elems.peer_link_len; |
422 | if ((ftype == PLINK_OPEN && ie_len != 3) || | 437 | if ((ftype == PLINK_OPEN && ie_len != 6) || |
423 | (ftype == PLINK_CONFIRM && ie_len != 5) || | 438 | (ftype == PLINK_CONFIRM && ie_len != 8) || |
424 | (ftype == PLINK_CLOSE && ie_len != 5 && ie_len != 7)) { | 439 | (ftype == PLINK_CLOSE && ie_len != 8 && ie_len != 10)) { |
425 | mpl_dbg("Mesh plink: incorrect plink ie length\n"); | 440 | mpl_dbg("Mesh plink: incorrect plink ie length %d %d\n", |
441 | ftype, ie_len); | ||
426 | return; | 442 | return; |
427 | } | 443 | } |
428 | 444 | ||
@@ -434,7 +450,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m | |||
434 | * from the point of view of this host. | 450 | * from the point of view of this host. |
435 | */ | 451 | */ |
436 | memcpy(&plid, PLINK_GET_LLID(elems.peer_link), 2); | 452 | memcpy(&plid, PLINK_GET_LLID(elems.peer_link), 2); |
437 | if (ftype == PLINK_CONFIRM || (ftype == PLINK_CLOSE && ie_len == 7)) | 453 | if (ftype == PLINK_CONFIRM || (ftype == PLINK_CLOSE && ie_len == 10)) |
438 | memcpy(&llid, PLINK_GET_PLID(elems.peer_link), 2); | 454 | memcpy(&llid, PLINK_GET_PLID(elems.peer_link), 2); |
439 | 455 | ||
440 | rcu_read_lock(); | 456 | rcu_read_lock(); |
@@ -532,8 +548,8 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m | |||
532 | } | 548 | } |
533 | } | 549 | } |
534 | 550 | ||
535 | mpl_dbg("Mesh plink (peer, state, llid, plid, event): %pM %d %d %d %d\n", | 551 | mpl_dbg("Mesh plink (peer, state, llid, plid, event): %pM %s %d %d %d\n", |
536 | mgmt->sa, sta->plink_state, | 552 | mgmt->sa, mplstates[sta->plink_state], |
537 | le16_to_cpu(sta->llid), le16_to_cpu(sta->plid), | 553 | le16_to_cpu(sta->llid), le16_to_cpu(sta->plid), |
538 | event); | 554 | event); |
539 | reason = 0; | 555 | reason = 0; |
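Editor's note: the mplstates[] table added in this hunk uses designated initializers so the debug line prints a state name rather than a raw number. The same pattern, shown standalone; the enum values here are placeholders, not the kernel's:

	#include <stdio.h>

	/* Designated-initializer string table indexed by the state enum. */
	enum plink_state { PLINK_LISTEN, PLINK_OPN_SNT, PLINK_OPN_RCVD,
			   PLINK_CNF_RCVD, PLINK_ESTAB, PLINK_HOLDING,
			   PLINK_BLOCKED };

	static const char *mplstates[] = {
		[PLINK_LISTEN]   = "LISTEN",
		[PLINK_OPN_SNT]  = "OPN-SNT",
		[PLINK_OPN_RCVD] = "OPN-RCVD",
		[PLINK_CNF_RCVD] = "CNF_RCVD",
		[PLINK_ESTAB]    = "ESTAB",
		[PLINK_HOLDING]  = "HOLDING",
		[PLINK_BLOCKED]  = "BLOCKED",
	};

	int main(void)
	{
		printf("state %s\n", mplstates[PLINK_ESTAB]);
		return 0;
	}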
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index dc5049d58c51..2af306f67d78 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c | |||
@@ -458,9 +458,15 @@ static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata, | |||
458 | mgmt->u.deauth.reason_code = cpu_to_le16(reason); | 458 | mgmt->u.deauth.reason_code = cpu_to_le16(reason); |
459 | 459 | ||
460 | if (stype == IEEE80211_STYPE_DEAUTH) | 460 | if (stype == IEEE80211_STYPE_DEAUTH) |
461 | cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len, cookie); | 461 | if (cookie) |
462 | __cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len); | ||
463 | else | ||
464 | cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len); | ||
462 | else | 465 | else |
463 | cfg80211_send_disassoc(sdata->dev, (u8 *)mgmt, skb->len, cookie); | 466 | if (cookie) |
467 | __cfg80211_send_disassoc(sdata->dev, (u8 *)mgmt, skb->len); | ||
468 | else | ||
469 | cfg80211_send_disassoc(sdata->dev, (u8 *)mgmt, skb->len); | ||
464 | ieee80211_tx_skb(sdata, skb, ifmgd->flags & IEEE80211_STA_MFP_ENABLED); | 470 | ieee80211_tx_skb(sdata, skb, ifmgd->flags & IEEE80211_STA_MFP_ENABLED); |
465 | } | 471 | } |
466 | 472 | ||
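Editor's note: the cookie argument now only selects between the already-locked __cfg80211_send_*() helpers and the locking cfg80211_send_*() ones. The nested if/else above relies on the dangling-else rule; a brace-explicit rendering of the same control flow, as a fragment of the function above rather than a standalone program:

	if (stype == IEEE80211_STYPE_DEAUTH) {
		if (cookie)
			__cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len);
		else
			cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len);
	} else {
		if (cookie)
			__cfg80211_send_disassoc(sdata->dev, (u8 *)mgmt, skb->len);
		else
			cfg80211_send_disassoc(sdata->dev, (u8 *)mgmt, skb->len);
	}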
@@ -923,7 +929,7 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata, | |||
923 | ieee80211_recalc_ps(local, -1); | 929 | ieee80211_recalc_ps(local, -1); |
924 | mutex_unlock(&local->iflist_mtx); | 930 | mutex_unlock(&local->iflist_mtx); |
925 | 931 | ||
926 | netif_tx_start_all_queues(sdata->dev); | 932 | netif_start_queue(sdata->dev); |
927 | netif_carrier_on(sdata->dev); | 933 | netif_carrier_on(sdata->dev); |
928 | } | 934 | } |
929 | 935 | ||
@@ -1055,7 +1061,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, | |||
1055 | * time -- we don't want the scan code to enable queues. | 1061 | * time -- we don't want the scan code to enable queues. |
1056 | */ | 1062 | */ |
1057 | 1063 | ||
1058 | netif_tx_stop_all_queues(sdata->dev); | 1064 | netif_stop_queue(sdata->dev); |
1059 | netif_carrier_off(sdata->dev); | 1065 | netif_carrier_off(sdata->dev); |
1060 | 1066 | ||
1061 | rcu_read_lock(); | 1067 | rcu_read_lock(); |
@@ -1892,7 +1898,6 @@ ieee80211_rx_result ieee80211_sta_rx_mgmt(struct ieee80211_sub_if_data *sdata, | |||
1892 | fc = le16_to_cpu(mgmt->frame_control); | 1898 | fc = le16_to_cpu(mgmt->frame_control); |
1893 | 1899 | ||
1894 | switch (fc & IEEE80211_FCTL_STYPE) { | 1900 | switch (fc & IEEE80211_FCTL_STYPE) { |
1895 | case IEEE80211_STYPE_PROBE_REQ: | ||
1896 | case IEEE80211_STYPE_PROBE_RESP: | 1901 | case IEEE80211_STYPE_PROBE_RESP: |
1897 | case IEEE80211_STYPE_BEACON: | 1902 | case IEEE80211_STYPE_BEACON: |
1898 | case IEEE80211_STYPE_AUTH: | 1903 | case IEEE80211_STYPE_AUTH: |
@@ -1958,12 +1963,10 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, | |||
1958 | /* no action */ | 1963 | /* no action */ |
1959 | break; | 1964 | break; |
1960 | case RX_MGMT_CFG80211_DEAUTH: | 1965 | case RX_MGMT_CFG80211_DEAUTH: |
1961 | cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len, | 1966 | cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len); |
1962 | NULL); | ||
1963 | break; | 1967 | break; |
1964 | case RX_MGMT_CFG80211_DISASSOC: | 1968 | case RX_MGMT_CFG80211_DISASSOC: |
1965 | cfg80211_send_disassoc(sdata->dev, (u8 *)mgmt, skb->len, | 1969 | cfg80211_send_disassoc(sdata->dev, (u8 *)mgmt, skb->len); |
1966 | NULL); | ||
1967 | break; | 1970 | break; |
1968 | default: | 1971 | default: |
1969 | WARN(1, "unexpected: %d", rma); | 1972 | WARN(1, "unexpected: %d", rma); |
@@ -2018,7 +2021,7 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, | |||
2018 | cfg80211_send_rx_assoc(sdata->dev, (u8 *) mgmt, skb->len); | 2021 | cfg80211_send_rx_assoc(sdata->dev, (u8 *) mgmt, skb->len); |
2019 | break; | 2022 | break; |
2020 | case RX_MGMT_CFG80211_DEAUTH: | 2023 | case RX_MGMT_CFG80211_DEAUTH: |
2021 | cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len, NULL); | 2024 | cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len); |
2022 | break; | 2025 | break; |
2023 | default: | 2026 | default: |
2024 | WARN(1, "unexpected: %d", rma); | 2027 | WARN(1, "unexpected: %d", rma); |
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c index b33efc4fc267..ccda7454fb17 100644 --- a/net/mac80211/rate.c +++ b/net/mac80211/rate.c | |||
@@ -163,8 +163,7 @@ struct rate_control_ref *rate_control_alloc(const char *name, | |||
163 | #ifdef CONFIG_MAC80211_DEBUGFS | 163 | #ifdef CONFIG_MAC80211_DEBUGFS |
164 | debugfsdir = debugfs_create_dir("rc", local->hw.wiphy->debugfsdir); | 164 | debugfsdir = debugfs_create_dir("rc", local->hw.wiphy->debugfsdir); |
165 | local->debugfs.rcdir = debugfsdir; | 165 | local->debugfs.rcdir = debugfsdir; |
166 | local->debugfs.rcname = debugfs_create_file("name", 0400, debugfsdir, | 166 | debugfs_create_file("name", 0400, debugfsdir, ref, &rcname_ops); |
167 | ref, &rcname_ops); | ||
168 | #endif | 167 | #endif |
169 | 168 | ||
170 | ref->priv = ref->ops->alloc(&local->hw, debugfsdir); | 169 | ref->priv = ref->ops->alloc(&local->hw, debugfsdir); |
@@ -188,9 +187,7 @@ static void rate_control_release(struct kref *kref) | |||
188 | ctrl_ref->ops->free(ctrl_ref->priv); | 187 | ctrl_ref->ops->free(ctrl_ref->priv); |
189 | 188 | ||
190 | #ifdef CONFIG_MAC80211_DEBUGFS | 189 | #ifdef CONFIG_MAC80211_DEBUGFS |
191 | debugfs_remove(ctrl_ref->local->debugfs.rcname); | 190 | debugfs_remove_recursive(ctrl_ref->local->debugfs.rcdir); |
192 | ctrl_ref->local->debugfs.rcname = NULL; | ||
193 | debugfs_remove(ctrl_ref->local->debugfs.rcdir); | ||
194 | ctrl_ref->local->debugfs.rcdir = NULL; | 191 | ctrl_ref->local->debugfs.rcdir = NULL; |
195 | #endif | 192 | #endif |
196 | 193 | ||
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 7170bf4565a8..6bce97ee2534 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
@@ -39,11 +39,8 @@ static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw, | |||
39 | * only useful for monitoring. | 39 | * only useful for monitoring. |
40 | */ | 40 | */ |
41 | static struct sk_buff *remove_monitor_info(struct ieee80211_local *local, | 41 | static struct sk_buff *remove_monitor_info(struct ieee80211_local *local, |
42 | struct sk_buff *skb, | 42 | struct sk_buff *skb) |
43 | int rtap_len) | ||
44 | { | 43 | { |
45 | skb_pull(skb, rtap_len); | ||
46 | |||
47 | if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) { | 44 | if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) { |
48 | if (likely(skb->len > FCS_LEN)) | 45 | if (likely(skb->len > FCS_LEN)) |
49 | skb_trim(skb, skb->len - FCS_LEN); | 46 | skb_trim(skb, skb->len - FCS_LEN); |
@@ -59,15 +56,14 @@ static struct sk_buff *remove_monitor_info(struct ieee80211_local *local, | |||
59 | } | 56 | } |
60 | 57 | ||
61 | static inline int should_drop_frame(struct sk_buff *skb, | 58 | static inline int should_drop_frame(struct sk_buff *skb, |
62 | int present_fcs_len, | 59 | int present_fcs_len) |
63 | int radiotap_len) | ||
64 | { | 60 | { |
65 | struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); | 61 | struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); |
66 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; | 62 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; |
67 | 63 | ||
68 | if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC)) | 64 | if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC)) |
69 | return 1; | 65 | return 1; |
70 | if (unlikely(skb->len < 16 + present_fcs_len + radiotap_len)) | 66 | if (unlikely(skb->len < 16 + present_fcs_len)) |
71 | return 1; | 67 | return 1; |
72 | if (ieee80211_is_ctl(hdr->frame_control) && | 68 | if (ieee80211_is_ctl(hdr->frame_control) && |
73 | !ieee80211_is_pspoll(hdr->frame_control) && | 69 | !ieee80211_is_pspoll(hdr->frame_control) && |
@@ -95,10 +91,6 @@ ieee80211_rx_radiotap_len(struct ieee80211_local *local, | |||
95 | if (len & 1) /* padding for RX_FLAGS if necessary */ | 91 | if (len & 1) /* padding for RX_FLAGS if necessary */ |
96 | len++; | 92 | len++; |
97 | 93 | ||
98 | /* make sure radiotap starts at a naturally aligned address */ | ||
99 | if (len % 8) | ||
100 | len = roundup(len, 8); | ||
101 | |||
102 | return len; | 94 | return len; |
103 | } | 95 | } |
104 | 96 | ||
@@ -116,6 +108,7 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, | |||
116 | struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); | 108 | struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); |
117 | struct ieee80211_radiotap_header *rthdr; | 109 | struct ieee80211_radiotap_header *rthdr; |
118 | unsigned char *pos; | 110 | unsigned char *pos; |
111 | u16 rx_flags = 0; | ||
119 | 112 | ||
120 | rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len); | 113 | rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len); |
121 | memset(rthdr, 0, rtap_len); | 114 | memset(rthdr, 0, rtap_len); |
@@ -134,7 +127,7 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, | |||
134 | 127 | ||
135 | /* IEEE80211_RADIOTAP_TSFT */ | 128 | /* IEEE80211_RADIOTAP_TSFT */ |
136 | if (status->flag & RX_FLAG_TSFT) { | 129 | if (status->flag & RX_FLAG_TSFT) { |
137 | *(__le64 *)pos = cpu_to_le64(status->mactime); | 130 | put_unaligned_le64(status->mactime, pos); |
138 | rthdr->it_present |= | 131 | rthdr->it_present |= |
139 | cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT); | 132 | cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT); |
140 | pos += 8; | 133 | pos += 8; |
@@ -166,17 +159,17 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, | |||
166 | pos++; | 159 | pos++; |
167 | 160 | ||
168 | /* IEEE80211_RADIOTAP_CHANNEL */ | 161 | /* IEEE80211_RADIOTAP_CHANNEL */ |
169 | *(__le16 *)pos = cpu_to_le16(status->freq); | 162 | put_unaligned_le16(status->freq, pos); |
170 | pos += 2; | 163 | pos += 2; |
171 | if (status->band == IEEE80211_BAND_5GHZ) | 164 | if (status->band == IEEE80211_BAND_5GHZ) |
172 | *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_OFDM | | 165 | put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ, |
173 | IEEE80211_CHAN_5GHZ); | 166 | pos); |
174 | else if (rate->flags & IEEE80211_RATE_ERP_G) | 167 | else if (rate->flags & IEEE80211_RATE_ERP_G) |
175 | *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_OFDM | | 168 | put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ, |
176 | IEEE80211_CHAN_2GHZ); | 169 | pos); |
177 | else | 170 | else |
178 | *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_CCK | | 171 | put_unaligned_le16(IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ, |
179 | IEEE80211_CHAN_2GHZ); | 172 | pos); |
180 | pos += 2; | 173 | pos += 2; |
181 | 174 | ||
182 | /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */ | 175 | /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */ |
@@ -205,10 +198,11 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, | |||
205 | 198 | ||
206 | /* IEEE80211_RADIOTAP_RX_FLAGS */ | 199 | /* IEEE80211_RADIOTAP_RX_FLAGS */ |
207 | /* ensure 2 byte alignment for the 2 byte field as required */ | 200 | /* ensure 2 byte alignment for the 2 byte field as required */ |
208 | if ((pos - (unsigned char *)rthdr) & 1) | 201 | if ((pos - (u8 *)rthdr) & 1) |
209 | pos++; | 202 | pos++; |
210 | if (status->flag & RX_FLAG_FAILED_PLCP_CRC) | 203 | if (status->flag & RX_FLAG_FAILED_PLCP_CRC) |
211 | *(__le16 *)pos |= cpu_to_le16(IEEE80211_RADIOTAP_F_RX_BADPLCP); | 204 | rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP; |
205 | put_unaligned_le16(rx_flags, pos); | ||
212 | pos += 2; | 206 | pos += 2; |
213 | } | 207 | } |
214 | 208 | ||
@@ -227,7 +221,6 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb, | |||
227 | struct sk_buff *skb, *skb2; | 221 | struct sk_buff *skb, *skb2; |
228 | struct net_device *prev_dev = NULL; | 222 | struct net_device *prev_dev = NULL; |
229 | int present_fcs_len = 0; | 223 | int present_fcs_len = 0; |
230 | int rtap_len = 0; | ||
231 | 224 | ||
232 | /* | 225 | /* |
233 | * First, we may need to make a copy of the skb because | 226 | * First, we may need to make a copy of the skb because |
@@ -237,25 +230,23 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb, | |||
237 | * We don't need to, of course, if we aren't going to return | 230 | * We don't need to, of course, if we aren't going to return |
238 | * the SKB because it has a bad FCS/PLCP checksum. | 231 | * the SKB because it has a bad FCS/PLCP checksum. |
239 | */ | 232 | */ |
240 | if (status->flag & RX_FLAG_RADIOTAP) | 233 | |
241 | rtap_len = ieee80211_get_radiotap_len(origskb->data); | 234 | /* room for the radiotap header based on driver features */ |
242 | else | 235 | needed_headroom = ieee80211_rx_radiotap_len(local, status); |
243 | /* room for the radiotap header based on driver features */ | ||
244 | needed_headroom = ieee80211_rx_radiotap_len(local, status); | ||
245 | 236 | ||
246 | if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) | 237 | if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) |
247 | present_fcs_len = FCS_LEN; | 238 | present_fcs_len = FCS_LEN; |
248 | 239 | ||
249 | if (!local->monitors) { | 240 | if (!local->monitors) { |
250 | if (should_drop_frame(origskb, present_fcs_len, rtap_len)) { | 241 | if (should_drop_frame(origskb, present_fcs_len)) { |
251 | dev_kfree_skb(origskb); | 242 | dev_kfree_skb(origskb); |
252 | return NULL; | 243 | return NULL; |
253 | } | 244 | } |
254 | 245 | ||
255 | return remove_monitor_info(local, origskb, rtap_len); | 246 | return remove_monitor_info(local, origskb); |
256 | } | 247 | } |
257 | 248 | ||
258 | if (should_drop_frame(origskb, present_fcs_len, rtap_len)) { | 249 | if (should_drop_frame(origskb, present_fcs_len)) { |
259 | /* only need to expand headroom if necessary */ | 250 | /* only need to expand headroom if necessary */ |
260 | skb = origskb; | 251 | skb = origskb; |
261 | origskb = NULL; | 252 | origskb = NULL; |
@@ -279,16 +270,14 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb, | |||
279 | */ | 270 | */ |
280 | skb = skb_copy_expand(origskb, needed_headroom, 0, GFP_ATOMIC); | 271 | skb = skb_copy_expand(origskb, needed_headroom, 0, GFP_ATOMIC); |
281 | 272 | ||
282 | origskb = remove_monitor_info(local, origskb, rtap_len); | 273 | origskb = remove_monitor_info(local, origskb); |
283 | 274 | ||
284 | if (!skb) | 275 | if (!skb) |
285 | return origskb; | 276 | return origskb; |
286 | } | 277 | } |
287 | 278 | ||
288 | /* if necessary, prepend radiotap information */ | 279 | /* prepend radiotap information */ |
289 | if (!(status->flag & RX_FLAG_RADIOTAP)) | 280 | ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom); |
290 | ieee80211_add_rx_radiotap_header(local, skb, rate, | ||
291 | needed_headroom); | ||
292 | 281 | ||
293 | skb_reset_mac_header(skb); | 282 | skb_reset_mac_header(skb); |
294 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 283 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
@@ -518,7 +507,7 @@ ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx) | |||
518 | 507 | ||
519 | if (ieee80211_is_action(hdr->frame_control)) { | 508 | if (ieee80211_is_action(hdr->frame_control)) { |
520 | mgmt = (struct ieee80211_mgmt *)hdr; | 509 | mgmt = (struct ieee80211_mgmt *)hdr; |
521 | if (mgmt->u.action.category != PLINK_CATEGORY) | 510 | if (mgmt->u.action.category != MESH_PLINK_CATEGORY) |
522 | return RX_DROP_MONITOR; | 511 | return RX_DROP_MONITOR; |
523 | return RX_CONTINUE; | 512 | return RX_CONTINUE; |
524 | } | 513 | } |
@@ -792,7 +781,7 @@ static void ap_sta_ps_start(struct sta_info *sta) | |||
792 | struct ieee80211_local *local = sdata->local; | 781 | struct ieee80211_local *local = sdata->local; |
793 | 782 | ||
794 | atomic_inc(&sdata->bss->num_sta_ps); | 783 | atomic_inc(&sdata->bss->num_sta_ps); |
795 | set_sta_flags(sta, WLAN_STA_PS); | 784 | set_sta_flags(sta, WLAN_STA_PS_STA); |
796 | drv_sta_notify(local, &sdata->vif, STA_NOTIFY_SLEEP, &sta->sta); | 785 | drv_sta_notify(local, &sdata->vif, STA_NOTIFY_SLEEP, &sta->sta); |
797 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG | 786 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG |
798 | printk(KERN_DEBUG "%s: STA %pM aid %d enters power save mode\n", | 787 | printk(KERN_DEBUG "%s: STA %pM aid %d enters power save mode\n", |
@@ -800,38 +789,28 @@ static void ap_sta_ps_start(struct sta_info *sta) | |||
800 | #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ | 789 | #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ |
801 | } | 790 | } |
802 | 791 | ||
803 | static int ap_sta_ps_end(struct sta_info *sta) | 792 | static void ap_sta_ps_end(struct sta_info *sta) |
804 | { | 793 | { |
805 | struct ieee80211_sub_if_data *sdata = sta->sdata; | 794 | struct ieee80211_sub_if_data *sdata = sta->sdata; |
806 | struct ieee80211_local *local = sdata->local; | ||
807 | int sent, buffered; | ||
808 | 795 | ||
809 | atomic_dec(&sdata->bss->num_sta_ps); | 796 | atomic_dec(&sdata->bss->num_sta_ps); |
810 | 797 | ||
811 | clear_sta_flags(sta, WLAN_STA_PS); | 798 | clear_sta_flags(sta, WLAN_STA_PS_STA); |
812 | drv_sta_notify(local, &sdata->vif, STA_NOTIFY_AWAKE, &sta->sta); | ||
813 | |||
814 | if (!skb_queue_empty(&sta->ps_tx_buf)) | ||
815 | sta_info_clear_tim_bit(sta); | ||
816 | 799 | ||
817 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG | 800 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG |
818 | printk(KERN_DEBUG "%s: STA %pM aid %d exits power save mode\n", | 801 | printk(KERN_DEBUG "%s: STA %pM aid %d exits power save mode\n", |
819 | sdata->dev->name, sta->sta.addr, sta->sta.aid); | 802 | sdata->dev->name, sta->sta.addr, sta->sta.aid); |
820 | #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ | 803 | #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ |
821 | 804 | ||
822 | /* Send all buffered frames to the station */ | 805 | if (test_sta_flags(sta, WLAN_STA_PS_DRIVER)) { |
823 | sent = ieee80211_add_pending_skbs(local, &sta->tx_filtered); | ||
824 | buffered = ieee80211_add_pending_skbs(local, &sta->ps_tx_buf); | ||
825 | sent += buffered; | ||
826 | local->total_ps_buffered -= buffered; | ||
827 | |||
828 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG | 806 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG |
829 | printk(KERN_DEBUG "%s: STA %pM aid %d sending %d filtered/%d PS frames " | 807 | printk(KERN_DEBUG "%s: STA %pM aid %d driver-ps-blocked\n", |
830 | "since STA not sleeping anymore\n", sdata->dev->name, | 808 | sdata->dev->name, sta->sta.addr, sta->sta.aid); |
831 | sta->sta.addr, sta->sta.aid, sent - buffered, buffered); | ||
832 | #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ | 809 | #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ |
810 | return; | ||
811 | } | ||
833 | 812 | ||
834 | return sent; | 813 | ieee80211_sta_ps_deliver_wakeup(sta); |
835 | } | 814 | } |
836 | 815 | ||
837 | static ieee80211_rx_result debug_noinline | 816 | static ieee80211_rx_result debug_noinline |
@@ -870,7 +849,6 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) | |||
870 | sta->rx_fragments++; | 849 | sta->rx_fragments++; |
871 | sta->rx_bytes += rx->skb->len; | 850 | sta->rx_bytes += rx->skb->len; |
872 | sta->last_signal = rx->status->signal; | 851 | sta->last_signal = rx->status->signal; |
873 | sta->last_qual = rx->status->qual; | ||
874 | sta->last_noise = rx->status->noise; | 852 | sta->last_noise = rx->status->noise; |
875 | 853 | ||
876 | /* | 854 | /* |
@@ -880,7 +858,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) | |||
880 | if (!ieee80211_has_morefrags(hdr->frame_control) && | 858 | if (!ieee80211_has_morefrags(hdr->frame_control) && |
881 | (rx->sdata->vif.type == NL80211_IFTYPE_AP || | 859 | (rx->sdata->vif.type == NL80211_IFTYPE_AP || |
882 | rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) { | 860 | rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) { |
883 | if (test_sta_flags(sta, WLAN_STA_PS)) { | 861 | if (test_sta_flags(sta, WLAN_STA_PS_STA)) { |
884 | /* | 862 | /* |
885 | * Ignore doze->wake transitions that are | 863 | * Ignore doze->wake transitions that are |
886 | * indicated by non-data frames, the standard | 864 | * indicated by non-data frames, the standard |
@@ -891,19 +869,24 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) | |||
891 | */ | 869 | */ |
892 | if (ieee80211_is_data(hdr->frame_control) && | 870 | if (ieee80211_is_data(hdr->frame_control) && |
893 | !ieee80211_has_pm(hdr->frame_control)) | 871 | !ieee80211_has_pm(hdr->frame_control)) |
894 | rx->sent_ps_buffered += ap_sta_ps_end(sta); | 872 | ap_sta_ps_end(sta); |
895 | } else { | 873 | } else { |
896 | if (ieee80211_has_pm(hdr->frame_control)) | 874 | if (ieee80211_has_pm(hdr->frame_control)) |
897 | ap_sta_ps_start(sta); | 875 | ap_sta_ps_start(sta); |
898 | } | 876 | } |
899 | } | 877 | } |
900 | 878 | ||
901 | /* Drop data::nullfunc frames silently, since they are used only to | 879 | /* |
902 | * control station power saving mode. */ | 880 | * Drop (qos-)data::nullfunc frames silently, since they |
903 | if (ieee80211_is_nullfunc(hdr->frame_control)) { | 881 | * are used only to control station power saving mode. |
882 | */ | ||
883 | if (ieee80211_is_nullfunc(hdr->frame_control) || | ||
884 | ieee80211_is_qos_nullfunc(hdr->frame_control)) { | ||
904 | I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc); | 885 | I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc); |
905 | /* Update counter and free packet here to avoid counting this | 886 | /* |
906 | * as a dropped packed. */ | 887 | * Update counter and free packet here to avoid |
888 | * counting this as a dropped packed. | ||
889 | */ | ||
907 | sta->rx_packets++; | 890 | sta->rx_packets++; |
908 | dev_kfree_skb(rx->skb); | 891 | dev_kfree_skb(rx->skb); |
909 | return RX_QUEUED; | 892 | return RX_QUEUED; |
@@ -1103,9 +1086,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) | |||
1103 | static ieee80211_rx_result debug_noinline | 1086 | static ieee80211_rx_result debug_noinline |
1104 | ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx) | 1087 | ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx) |
1105 | { | 1088 | { |
1106 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev); | 1089 | struct ieee80211_sub_if_data *sdata = rx->sdata; |
1107 | struct sk_buff *skb; | ||
1108 | int no_pending_pkts; | ||
1109 | __le16 fc = ((struct ieee80211_hdr *)rx->skb->data)->frame_control; | 1090 | __le16 fc = ((struct ieee80211_hdr *)rx->skb->data)->frame_control; |
1110 | 1091 | ||
1111 | if (likely(!rx->sta || !ieee80211_is_pspoll(fc) || | 1092 | if (likely(!rx->sta || !ieee80211_is_pspoll(fc) || |
@@ -1116,56 +1097,10 @@ ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx) | |||
1116 | (sdata->vif.type != NL80211_IFTYPE_AP_VLAN)) | 1097 | (sdata->vif.type != NL80211_IFTYPE_AP_VLAN)) |
1117 | return RX_DROP_UNUSABLE; | 1098 | return RX_DROP_UNUSABLE; |
1118 | 1099 | ||
1119 | skb = skb_dequeue(&rx->sta->tx_filtered); | 1100 | if (!test_sta_flags(rx->sta, WLAN_STA_PS_DRIVER)) |
1120 | if (!skb) { | 1101 | ieee80211_sta_ps_deliver_poll_response(rx->sta); |
1121 | skb = skb_dequeue(&rx->sta->ps_tx_buf); | 1102 | else |
1122 | if (skb) | 1103 | set_sta_flags(rx->sta, WLAN_STA_PSPOLL); |
1123 | rx->local->total_ps_buffered--; | ||
1124 | } | ||
1125 | no_pending_pkts = skb_queue_empty(&rx->sta->tx_filtered) && | ||
1126 | skb_queue_empty(&rx->sta->ps_tx_buf); | ||
1127 | |||
1128 | if (skb) { | ||
1129 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | ||
1130 | struct ieee80211_hdr *hdr = | ||
1131 | (struct ieee80211_hdr *) skb->data; | ||
1132 | |||
1133 | /* | ||
1134 | * Tell TX path to send this frame even though the STA may | ||
1135 | * still remain is PS mode after this frame exchange. | ||
1136 | */ | ||
1137 | info->flags |= IEEE80211_TX_CTL_PSPOLL_RESPONSE; | ||
1138 | |||
1139 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG | ||
1140 | printk(KERN_DEBUG "STA %pM aid %d: PS Poll (entries after %d)\n", | ||
1141 | rx->sta->sta.addr, rx->sta->sta.aid, | ||
1142 | skb_queue_len(&rx->sta->ps_tx_buf)); | ||
1143 | #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ | ||
1144 | |||
1145 | /* Use MoreData flag to indicate whether there are more | ||
1146 | * buffered frames for this STA */ | ||
1147 | if (no_pending_pkts) | ||
1148 | hdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_MOREDATA); | ||
1149 | else | ||
1150 | hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA); | ||
1151 | |||
1152 | ieee80211_add_pending_skb(rx->local, skb); | ||
1153 | |||
1154 | if (no_pending_pkts) | ||
1155 | sta_info_clear_tim_bit(rx->sta); | ||
1156 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG | ||
1157 | } else if (!rx->sent_ps_buffered) { | ||
1158 | /* | ||
1159 | * FIXME: This can be the result of a race condition between | ||
1160 | * us expiring a frame and the station polling for it. | ||
1161 | * Should we send it a null-func frame indicating we | ||
1162 | * have nothing buffered for it? | ||
1163 | */ | ||
1164 | printk(KERN_DEBUG "%s: STA %pM sent PS Poll even " | ||
1165 | "though there are no buffered frames for it\n", | ||
1166 | rx->dev->name, rx->sta->sta.addr); | ||
1167 | #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ | ||
1168 | } | ||
1169 | 1104 | ||
1170 | /* Free PS Poll skb here instead of returning RX_DROP that would | 1105 | /* Free PS Poll skb here instead of returning RX_DROP that would |
1171 | * count as an dropped frame. */ | 1106 | * count as an dropped frame. */ |
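Editor's note: taken together with the ap_sta_ps_end() change above, power save now has two independent blockers: WLAN_STA_PS_STA (what the station signalled) and WLAN_STA_PS_DRIVER (the driver still flushing its queues), with WLAN_STA_PSPOLL remembering a poll that arrived while blocked. A standalone, much simplified model of the interplay; the flag names follow the patch, the logic is an approximation:

	#include <stdio.h>
	#include <stdbool.h>

	struct sta { bool ps_sta, ps_driver, pspoll; };

	static void sta_wakes(struct sta *s)		/* data frame, PM clear */
	{
		s->ps_sta = false;
		if (s->ps_driver)
			printf("wakeup deferred until driver unblocks\n");
		else
			printf("deliver buffered frames now\n");
	}

	static void ps_poll_rx(struct sta *s)		/* PS-Poll received */
	{
		if (s->ps_driver) {
			s->pspoll = true;
			printf("poll recorded, answered on unblock\n");
		} else {
			printf("deliver one buffered frame now\n");
		}
	}

	static void driver_unblock(struct sta *s)	/* sta_unblock() work */
	{
		s->ps_driver = false;
		if (!s->ps_sta)
			printf("deliver all buffered frames\n");
		else if (s->pspoll) {
			s->pspoll = false;
			printf("answer the pending poll\n");
		}
	}

	int main(void)
	{
		struct sta s = { .ps_sta = true, .ps_driver = true };

		ps_poll_rx(&s);
		sta_wakes(&s);
		driver_unblock(&s);
		return 0;
	}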
@@ -1246,6 +1181,13 @@ __ieee80211_data_to_8023(struct ieee80211_rx_data *rx) | |||
1246 | { | 1181 | { |
1247 | struct net_device *dev = rx->dev; | 1182 | struct net_device *dev = rx->dev; |
1248 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 1183 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
1184 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; | ||
1185 | |||
1186 | if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->use_4addr && | ||
1187 | ieee80211_has_a4(hdr->frame_control)) | ||
1188 | return -1; | ||
1189 | if (sdata->use_4addr && is_multicast_ether_addr(hdr->addr1)) | ||
1190 | return -1; | ||
1249 | 1191 | ||
1250 | return ieee80211_data_to_8023(rx->skb, dev->dev_addr, sdata->vif.type); | 1192 | return ieee80211_data_to_8023(rx->skb, dev->dev_addr, sdata->vif.type); |
1251 | } | 1193 | } |
@@ -1294,7 +1236,7 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx) | |||
1294 | if ((sdata->vif.type == NL80211_IFTYPE_AP || | 1236 | if ((sdata->vif.type == NL80211_IFTYPE_AP || |
1295 | sdata->vif.type == NL80211_IFTYPE_AP_VLAN) && | 1237 | sdata->vif.type == NL80211_IFTYPE_AP_VLAN) && |
1296 | !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) && | 1238 | !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) && |
1297 | (rx->flags & IEEE80211_RX_RA_MATCH)) { | 1239 | (rx->flags & IEEE80211_RX_RA_MATCH) && !rx->sdata->use_4addr) { |
1298 | if (is_multicast_ether_addr(ehdr->h_dest)) { | 1240 | if (is_multicast_ether_addr(ehdr->h_dest)) { |
1299 | /* | 1241 | /* |
1300 | * send multicast frames both to higher layers in | 1242 | * send multicast frames both to higher layers in |
@@ -1337,10 +1279,10 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx) | |||
1337 | skb = NULL; | 1279 | skb = NULL; |
1338 | } else { | 1280 | } else { |
1339 | u8 *data = skb->data; | 1281 | u8 *data = skb->data; |
1340 | size_t len = skb->len; | 1282 | size_t len = skb_headlen(skb); |
1341 | u8 *new = __skb_push(skb, align); | 1283 | skb->data -= align; |
1342 | memmove(new, data, len); | 1284 | memmove(skb->data, data, len); |
1343 | __skb_trim(skb, len); | 1285 | skb_set_tail_pointer(skb, len); |
1344 | } | 1286 | } |
1345 | } | 1287 | } |
1346 | #endif | 1288 | #endif |
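Editor's note: the realignment now moves only the linear part of the skb: skb_headlen() replaces skb->len (which would include paged data that is not in the linear buffer), and data/tail are adjusted directly rather than via __skb_push()/__skb_trim(). A standalone model of the shift on a flat buffer:

	#include <stdio.h>
	#include <string.h>

	/* Standalone model of the realignment: shift only the linear payload
	 * back into available headroom; 'len' plays the role of skb_headlen(). */
	int main(void)
	{
		char buf[64];
		char *data = buf + 8 + 2;	/* misaligned payload start */
		size_t len = 24;		/* linear length only */
		unsigned int align = 2;

		memset(data, 0xab, len);
		memmove(data - align, data, len);	/* skb->data -= align */
		data -= align;				/* tail moves with it */

		printf("payload now starts at offset %ld\n", (long)(data - buf));
		return 0;
	}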
@@ -1504,19 +1446,28 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) | |||
1504 | /* illegal frame */ | 1446 | /* illegal frame */ |
1505 | return RX_DROP_MONITOR; | 1447 | return RX_DROP_MONITOR; |
1506 | 1448 | ||
1507 | if (!is_multicast_ether_addr(hdr->addr1) && | 1449 | if (mesh_hdr->flags & MESH_FLAGS_AE) { |
1508 | (mesh_hdr->flags & MESH_FLAGS_AE_A5_A6)) { | ||
1509 | struct mesh_path *mppath; | 1450 | struct mesh_path *mppath; |
1451 | char *proxied_addr; | ||
1452 | char *mpp_addr; | ||
1453 | |||
1454 | if (is_multicast_ether_addr(hdr->addr1)) { | ||
1455 | mpp_addr = hdr->addr3; | ||
1456 | proxied_addr = mesh_hdr->eaddr1; | ||
1457 | } else { | ||
1458 | mpp_addr = hdr->addr4; | ||
1459 | proxied_addr = mesh_hdr->eaddr2; | ||
1460 | } | ||
1510 | 1461 | ||
1511 | rcu_read_lock(); | 1462 | rcu_read_lock(); |
1512 | mppath = mpp_path_lookup(mesh_hdr->eaddr2, sdata); | 1463 | mppath = mpp_path_lookup(proxied_addr, sdata); |
1513 | if (!mppath) { | 1464 | if (!mppath) { |
1514 | mpp_path_add(mesh_hdr->eaddr2, hdr->addr4, sdata); | 1465 | mpp_path_add(proxied_addr, mpp_addr, sdata); |
1515 | } else { | 1466 | } else { |
1516 | spin_lock_bh(&mppath->state_lock); | 1467 | spin_lock_bh(&mppath->state_lock); |
1517 | mppath->exp_time = jiffies; | 1468 | mppath->exp_time = jiffies; |
1518 | if (compare_ether_addr(mppath->mpp, hdr->addr4) != 0) | 1469 | if (compare_ether_addr(mppath->mpp, mpp_addr) != 0) |
1519 | memcpy(mppath->mpp, hdr->addr4, ETH_ALEN); | 1470 | memcpy(mppath->mpp, mpp_addr, ETH_ALEN); |
1520 | spin_unlock_bh(&mppath->state_lock); | 1471 | spin_unlock_bh(&mppath->state_lock); |
1521 | } | 1472 | } |
1522 | rcu_read_unlock(); | 1473 | rcu_read_unlock(); |
@@ -1590,6 +1541,7 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx) | |||
1590 | { | 1541 | { |
1591 | struct net_device *dev = rx->dev; | 1542 | struct net_device *dev = rx->dev; |
1592 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; | 1543 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; |
1544 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
1593 | __le16 fc = hdr->frame_control; | 1545 | __le16 fc = hdr->frame_control; |
1594 | int err; | 1546 | int err; |
1595 | 1547 | ||
@@ -1599,6 +1551,14 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx) | |||
1599 | if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) | 1551 | if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) |
1600 | return RX_DROP_MONITOR; | 1552 | return RX_DROP_MONITOR; |
1601 | 1553 | ||
1554 | /* | ||
1555 | * Allow the cooked monitor interface of an AP to see 4-addr frames so | ||
1556 | * that a 4-addr station can be detected and moved into a separate VLAN | ||
1557 | */ | ||
1558 | if (ieee80211_has_a4(hdr->frame_control) && | ||
1559 | sdata->vif.type == NL80211_IFTYPE_AP) | ||
1560 | return RX_DROP_MONITOR; | ||
1561 | |||
1602 | err = __ieee80211_data_to_8023(rx); | 1562 | err = __ieee80211_data_to_8023(rx); |
1603 | if (unlikely(err)) | 1563 | if (unlikely(err)) |
1604 | return RX_DROP_UNUSABLE; | 1564 | return RX_DROP_UNUSABLE; |
@@ -2039,7 +1999,7 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata, | |||
2039 | 1999 | ||
2040 | switch (sdata->vif.type) { | 2000 | switch (sdata->vif.type) { |
2041 | case NL80211_IFTYPE_STATION: | 2001 | case NL80211_IFTYPE_STATION: |
2042 | if (!bssid) | 2002 | if (!bssid && !sdata->use_4addr) |
2043 | return 0; | 2003 | return 0; |
2044 | if (!multicast && | 2004 | if (!multicast && |
2045 | compare_ether_addr(sdata->dev->dev_addr, hdr->addr1) != 0) { | 2005 | compare_ether_addr(sdata->dev->dev_addr, hdr->addr1) != 0) { |
@@ -2481,9 +2441,21 @@ void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb) | |||
2481 | goto drop; | 2441 | goto drop; |
2482 | 2442 | ||
2483 | if (status->flag & RX_FLAG_HT) { | 2443 | if (status->flag & RX_FLAG_HT) { |
2484 | /* rate_idx is MCS index */ | 2444 | /* |
2485 | if (WARN_ON(status->rate_idx < 0 || | 2445 | * rate_idx is MCS index, which can be [0-76] as documented on: |
2486 | status->rate_idx >= 76)) | 2446 | * |
2447 | * http://wireless.kernel.org/en/developers/Documentation/ieee80211/802.11n | ||
2448 | * | ||
2449 | * Anything else would be some sort of driver or hardware error. | ||
2450 | * The driver should catch hardware errors. | ||
2451 | */ | ||
2452 | if (WARN((status->rate_idx < 0 || | ||
2453 | status->rate_idx > 76), | ||
2454 | "Rate marked as an HT rate but passed " | ||
2455 | "status->rate_idx is not " | ||
2456 | "an MCS index [0-76]: %d (0x%02x)\n", | ||
2457 | status->rate_idx, | ||
2458 | status->rate_idx)) | ||
2487 | goto drop; | 2459 | goto drop; |
2488 | /* HT rates are not in the table - use the highest legacy rate | 2460 | /* HT rates are not in the table - use the highest legacy rate |
2489 | * for now since other parts of mac80211 may not yet be fully | 2461 | * for now since other parts of mac80211 may not yet be fully |
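Editor's note: the HT branch now warns with the offending value instead of a bare WARN_ON, and the bound changes from >= 76 to > 76, so the last documented MCS index (76) is accepted. A standalone model of the corrected check:

	#include <stdio.h>

	/* MCS indices 0..76 are valid; anything else indicates a driver or
	 * hardware bug and the caller drops the frame. */
	static int ht_rate_idx_ok(int rate_idx)
	{
		if (rate_idx < 0 || rate_idx > 76) {
			fprintf(stderr,
				"HT rate but rate_idx is not an MCS index [0-76]: %d (0x%02x)\n",
				rate_idx, rate_idx);
			return 0;
		}
		return 1;
	}

	int main(void)
	{
		printf("%d %d %d\n", ht_rate_idx_ok(0), ht_rate_idx_ok(76),
		       ht_rate_idx_ok(77));	/* 1 1 0 */
		return 0;
	}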
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index 71e10cabf811..4cf387c944bf 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c | |||
@@ -12,8 +12,6 @@ | |||
12 | * published by the Free Software Foundation. | 12 | * published by the Free Software Foundation. |
13 | */ | 13 | */ |
14 | 14 | ||
15 | /* TODO: figure out how to avoid that the "current BSS" expires */ | ||
16 | |||
17 | #include <linux/wireless.h> | 15 | #include <linux/wireless.h> |
18 | #include <linux/if_arp.h> | 16 | #include <linux/if_arp.h> |
19 | #include <linux/rtnetlink.h> | 17 | #include <linux/rtnetlink.h> |
@@ -189,6 +187,39 @@ ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) | |||
189 | return RX_QUEUED; | 187 | return RX_QUEUED; |
190 | } | 188 | } |
191 | 189 | ||
190 | /* return false if no more work */ | ||
191 | static bool ieee80211_prep_hw_scan(struct ieee80211_local *local) | ||
192 | { | ||
193 | struct cfg80211_scan_request *req = local->scan_req; | ||
194 | enum ieee80211_band band; | ||
195 | int i, ielen, n_chans; | ||
196 | |||
197 | do { | ||
198 | if (local->hw_scan_band == IEEE80211_NUM_BANDS) | ||
199 | return false; | ||
200 | |||
201 | band = local->hw_scan_band; | ||
202 | n_chans = 0; | ||
203 | for (i = 0; i < req->n_channels; i++) { | ||
204 | if (req->channels[i]->band == band) { | ||
205 | local->hw_scan_req->channels[n_chans] = | ||
206 | req->channels[i]; | ||
207 | n_chans++; | ||
208 | } | ||
209 | } | ||
210 | |||
211 | local->hw_scan_band++; | ||
212 | } while (!n_chans); | ||
213 | |||
214 | local->hw_scan_req->n_channels = n_chans; | ||
215 | |||
216 | ielen = ieee80211_build_preq_ies(local, (u8 *)local->hw_scan_req->ie, | ||
217 | req->ie, req->ie_len, band); | ||
218 | local->hw_scan_req->ie_len = ielen; | ||
219 | |||
220 | return true; | ||
221 | } | ||
222 | |||
192 | /* | 223 | /* |
193 | * inform AP that we will go to sleep so that it will buffer the frames | 224 | * inform AP that we will go to sleep so that it will buffer the frames |
194 | * while we scan | 225 | * while we scan |
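Editor's note: ieee80211_prep_hw_scan() feeds the hardware one band at a time: each call gathers the requested channels for the next non-empty band, advances hw_scan_band, and returns false once every band has been handed out, at which point the scan completes. A standalone model of that iteration:

	#include <stdio.h>

	/* Standalone model of the per-band splitting; types and values are
	 * simplified stand-ins for the cfg80211/mac80211 structures. */
	enum band { BAND_2GHZ, BAND_5GHZ, NUM_BANDS };

	struct chan { enum band band; int freq; };

	static struct chan req[] = {
		{ BAND_2GHZ, 2412 }, { BAND_5GHZ, 5180 }, { BAND_2GHZ, 2437 },
	};
	static const struct chan *out[3];
	static int scan_band;			/* models local->hw_scan_band */

	static int prep_hw_scan(int *n_chans)	/* returns 0 when done */
	{
		int i, n;

		do {
			if (scan_band == NUM_BANDS)
				return 0;
			n = 0;
			for (i = 0; i < 3; i++)
				if (req[i].band == scan_band)
					out[n++] = &req[i];
			scan_band++;
		} while (!n);

		*n_chans = n;
		return 1;
	}

	int main(void)
	{
		int n;

		while (prep_hw_scan(&n))
			printf("scan %d channel(s) in band %d\n", n, scan_band - 1);
		return 0;
	}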
@@ -249,13 +280,6 @@ static void ieee80211_scan_ps_disable(struct ieee80211_sub_if_data *sdata) | |||
249 | } | 280 | } |
250 | } | 281 | } |
251 | 282 | ||
252 | static void ieee80211_restore_scan_ies(struct ieee80211_local *local) | ||
253 | { | ||
254 | kfree(local->scan_req->ie); | ||
255 | local->scan_req->ie = local->orig_ies; | ||
256 | local->scan_req->ie_len = local->orig_ies_len; | ||
257 | } | ||
258 | |||
259 | void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted) | 283 | void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted) |
260 | { | 284 | { |
261 | struct ieee80211_local *local = hw_to_local(hw); | 285 | struct ieee80211_local *local = hw_to_local(hw); |
@@ -264,25 +288,36 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted) | |||
264 | 288 | ||
265 | mutex_lock(&local->scan_mtx); | 289 | mutex_lock(&local->scan_mtx); |
266 | 290 | ||
267 | if (WARN_ON(!local->scanning)) { | 291 | /* |
292 | * It's ok to abort a not-yet-running scan (that | ||
293 | * we have one at all will be verified by checking | ||
294 | * local->scan_req next), but not to complete it | ||
295 | * successfully. | ||
296 | */ | ||
297 | if (WARN_ON(!local->scanning && !aborted)) | ||
298 | aborted = true; | ||
299 | |||
300 | if (WARN_ON(!local->scan_req)) { | ||
268 | mutex_unlock(&local->scan_mtx); | 301 | mutex_unlock(&local->scan_mtx); |
269 | return; | 302 | return; |
270 | } | 303 | } |
271 | 304 | ||
272 | if (WARN_ON(!local->scan_req)) { | 305 | was_hw_scan = test_bit(SCAN_HW_SCANNING, &local->scanning); |
306 | if (was_hw_scan && !aborted && ieee80211_prep_hw_scan(local)) { | ||
307 | ieee80211_queue_delayed_work(&local->hw, | ||
308 | &local->scan_work, 0); | ||
273 | mutex_unlock(&local->scan_mtx); | 309 | mutex_unlock(&local->scan_mtx); |
274 | return; | 310 | return; |
275 | } | 311 | } |
276 | 312 | ||
277 | if (test_bit(SCAN_HW_SCANNING, &local->scanning)) | 313 | kfree(local->hw_scan_req); |
278 | ieee80211_restore_scan_ies(local); | 314 | local->hw_scan_req = NULL; |
279 | 315 | ||
280 | if (local->scan_req != local->int_scan_req) | 316 | if (local->scan_req != local->int_scan_req) |
281 | cfg80211_scan_done(local->scan_req, aborted); | 317 | cfg80211_scan_done(local->scan_req, aborted); |
282 | local->scan_req = NULL; | 318 | local->scan_req = NULL; |
283 | local->scan_sdata = NULL; | 319 | local->scan_sdata = NULL; |
284 | 320 | ||
285 | was_hw_scan = test_bit(SCAN_HW_SCANNING, &local->scanning); | ||
286 | local->scanning = 0; | 321 | local->scanning = 0; |
287 | local->scan_channel = NULL; | 322 | local->scan_channel = NULL; |
288 | 323 | ||
@@ -306,10 +341,10 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted) | |||
306 | if (sdata->vif.type == NL80211_IFTYPE_STATION) { | 341 | if (sdata->vif.type == NL80211_IFTYPE_STATION) { |
307 | if (sdata->u.mgd.associated) { | 342 | if (sdata->u.mgd.associated) { |
308 | ieee80211_scan_ps_disable(sdata); | 343 | ieee80211_scan_ps_disable(sdata); |
309 | netif_tx_wake_all_queues(sdata->dev); | 344 | netif_wake_queue(sdata->dev); |
310 | } | 345 | } |
311 | } else | 346 | } else |
312 | netif_tx_wake_all_queues(sdata->dev); | 347 | netif_wake_queue(sdata->dev); |
313 | 348 | ||
314 | /* re-enable beaconing */ | 349 | /* re-enable beaconing */ |
315 | if (sdata->vif.type == NL80211_IFTYPE_AP || | 350 | if (sdata->vif.type == NL80211_IFTYPE_AP || |
@@ -364,7 +399,7 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local) | |||
364 | * are handled in the scan state machine | 399 | * are handled in the scan state machine |
365 | */ | 400 | */ |
366 | if (sdata->vif.type != NL80211_IFTYPE_STATION) | 401 | if (sdata->vif.type != NL80211_IFTYPE_STATION) |
367 | netif_tx_stop_all_queues(sdata->dev); | 402 | netif_stop_queue(sdata->dev); |
368 | } | 403 | } |
369 | mutex_unlock(&local->iflist_mtx); | 404 | mutex_unlock(&local->iflist_mtx); |
370 | 405 | ||
@@ -394,19 +429,23 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata, | |||
394 | 429 | ||
395 | if (local->ops->hw_scan) { | 430 | if (local->ops->hw_scan) { |
396 | u8 *ies; | 431 | u8 *ies; |
397 | int ielen; | ||
398 | 432 | ||
399 | ies = kmalloc(2 + IEEE80211_MAX_SSID_LEN + | 433 | local->hw_scan_req = kmalloc( |
400 | local->scan_ies_len + req->ie_len, GFP_KERNEL); | 434 | sizeof(*local->hw_scan_req) + |
401 | if (!ies) | 435 | req->n_channels * sizeof(req->channels[0]) + |
436 | 2 + IEEE80211_MAX_SSID_LEN + local->scan_ies_len + | ||
437 | req->ie_len, GFP_KERNEL); | ||
438 | if (!local->hw_scan_req) | ||
402 | return -ENOMEM; | 439 | return -ENOMEM; |
403 | 440 | ||
404 | ielen = ieee80211_build_preq_ies(local, ies, | 441 | local->hw_scan_req->ssids = req->ssids; |
405 | req->ie, req->ie_len); | 442 | local->hw_scan_req->n_ssids = req->n_ssids; |
406 | local->orig_ies = req->ie; | 443 | ies = (u8 *)local->hw_scan_req + |
407 | local->orig_ies_len = req->ie_len; | 444 | sizeof(*local->hw_scan_req) + |
408 | req->ie = ies; | 445 | req->n_channels * sizeof(req->channels[0]); |
409 | req->ie_len = ielen; | 446 | local->hw_scan_req->ie = ies; |
447 | |||
448 | local->hw_scan_band = 0; | ||
410 | } | 449 | } |
411 | 450 | ||
412 | local->scan_req = req; | 451 | local->scan_req = req; |
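Editor's note: the hw_scan_req allocation packs the request header, the per-band channel pointer array, and the probe-request IE buffer into one kmalloc(), so the single kfree() in ieee80211_scan_completed() (and in the error path below) releases all of it. A standalone model of that layout; the struct here is a stand-in, not the real cfg80211_scan_request:

	#include <stdlib.h>
	#include <stdint.h>
	#include <stdio.h>

	struct chan;				/* opaque stand-in */

	struct hw_scan_req {
		struct chan **channels;		/* points just past the struct */
		int n_channels;
		uint8_t *ie;			/* points past the channel array */
		size_t ie_len;
		/* channel pointers and IE bytes follow in the same allocation */
	};

	int main(void)
	{
		int n_channels = 4;
		size_t ie_room = 64;
		struct hw_scan_req *req;

		req = malloc(sizeof(*req) +
			     n_channels * sizeof(struct chan *) + ie_room);
		if (!req)
			return 1;

		req->channels = (struct chan **)(req + 1);
		req->ie = (uint8_t *)(req->channels + n_channels);
		printf("struct=%p channels=%p ie=%p\n",
		       (void *)req, (void *)req->channels, (void *)req->ie);
		free(req);			/* one free releases everything */
		return 0;
	}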
@@ -438,16 +477,17 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata, | |||
438 | ieee80211_recalc_idle(local); | 477 | ieee80211_recalc_idle(local); |
439 | mutex_unlock(&local->scan_mtx); | 478 | mutex_unlock(&local->scan_mtx); |
440 | 479 | ||
441 | if (local->ops->hw_scan) | 480 | if (local->ops->hw_scan) { |
442 | rc = drv_hw_scan(local, local->scan_req); | 481 | WARN_ON(!ieee80211_prep_hw_scan(local)); |
443 | else | 482 | rc = drv_hw_scan(local, local->hw_scan_req); |
483 | } else | ||
444 | rc = ieee80211_start_sw_scan(local); | 484 | rc = ieee80211_start_sw_scan(local); |
445 | 485 | ||
446 | mutex_lock(&local->scan_mtx); | 486 | mutex_lock(&local->scan_mtx); |
447 | 487 | ||
448 | if (rc) { | 488 | if (rc) { |
449 | if (local->ops->hw_scan) | 489 | kfree(local->hw_scan_req); |
450 | ieee80211_restore_scan_ies(local); | 490 | local->hw_scan_req = NULL; |
451 | local->scanning = 0; | 491 | local->scanning = 0; |
452 | 492 | ||
453 | ieee80211_recalc_idle(local); | 493 | ieee80211_recalc_idle(local); |
@@ -523,7 +563,7 @@ static void ieee80211_scan_state_leave_oper_channel(struct ieee80211_local *loca | |||
523 | continue; | 563 | continue; |
524 | 564 | ||
525 | if (sdata->vif.type == NL80211_IFTYPE_STATION) { | 565 | if (sdata->vif.type == NL80211_IFTYPE_STATION) { |
526 | netif_tx_stop_all_queues(sdata->dev); | 566 | netif_stop_queue(sdata->dev); |
527 | if (sdata->u.mgd.associated) | 567 | if (sdata->u.mgd.associated) |
528 | ieee80211_scan_ps_enable(sdata); | 568 | ieee80211_scan_ps_enable(sdata); |
529 | } | 569 | } |
@@ -558,7 +598,7 @@ static void ieee80211_scan_state_enter_oper_channel(struct ieee80211_local *loca | |||
558 | if (sdata->vif.type == NL80211_IFTYPE_STATION) { | 598 | if (sdata->vif.type == NL80211_IFTYPE_STATION) { |
559 | if (sdata->u.mgd.associated) | 599 | if (sdata->u.mgd.associated) |
560 | ieee80211_scan_ps_disable(sdata); | 600 | ieee80211_scan_ps_disable(sdata); |
561 | netif_tx_wake_all_queues(sdata->dev); | 601 | netif_wake_queue(sdata->dev); |
562 | } | 602 | } |
563 | } | 603 | } |
564 | mutex_unlock(&local->iflist_mtx); | 604 | mutex_unlock(&local->iflist_mtx); |
@@ -574,23 +614,14 @@ static void ieee80211_scan_state_set_channel(struct ieee80211_local *local, | |||
574 | { | 614 | { |
575 | int skip; | 615 | int skip; |
576 | struct ieee80211_channel *chan; | 616 | struct ieee80211_channel *chan; |
577 | struct ieee80211_sub_if_data *sdata = local->scan_sdata; | ||
578 | 617 | ||
579 | skip = 0; | 618 | skip = 0; |
580 | chan = local->scan_req->channels[local->scan_channel_idx]; | 619 | chan = local->scan_req->channels[local->scan_channel_idx]; |
581 | 620 | ||
582 | if (chan->flags & IEEE80211_CHAN_DISABLED || | 621 | local->scan_channel = chan; |
583 | (sdata->vif.type == NL80211_IFTYPE_ADHOC && | 622 | if (ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL)) |
584 | chan->flags & IEEE80211_CHAN_NO_IBSS)) | ||
585 | skip = 1; | 623 | skip = 1; |
586 | 624 | ||
587 | if (!skip) { | ||
588 | local->scan_channel = chan; | ||
589 | if (ieee80211_hw_config(local, | ||
590 | IEEE80211_CONF_CHANGE_CHANNEL)) | ||
591 | skip = 1; | ||
592 | } | ||
593 | |||
594 | /* advance state machine to next channel/band */ | 625 | /* advance state machine to next channel/band */ |
595 | local->scan_channel_idx++; | 626 | local->scan_channel_idx++; |
596 | 627 | ||
@@ -656,6 +687,14 @@ void ieee80211_scan_work(struct work_struct *work) | |||
656 | return; | 687 | return; |
657 | } | 688 | } |
658 | 689 | ||
690 | if (local->hw_scan_req) { | ||
691 | int rc = drv_hw_scan(local, local->hw_scan_req); | ||
692 | mutex_unlock(&local->scan_mtx); | ||
693 | if (rc) | ||
694 | ieee80211_scan_completed(&local->hw, true); | ||
695 | return; | ||
696 | } | ||
697 | |||
659 | if (local->scan_req && !local->scanning) { | 698 | if (local->scan_req && !local->scanning) { |
660 | struct cfg80211_scan_request *req = local->scan_req; | 699 | struct cfg80211_scan_request *req = local->scan_req; |
661 | int rc; | 700 | int rc; |
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 594f2318c3d8..396a94806de9 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c | |||
@@ -171,6 +171,8 @@ void sta_info_destroy(struct sta_info *sta) | |||
171 | 171 | ||
172 | local = sta->local; | 172 | local = sta->local; |
173 | 173 | ||
174 | cancel_work_sync(&sta->drv_unblock_wk); | ||
175 | |||
174 | rate_control_remove_sta_debugfs(sta); | 176 | rate_control_remove_sta_debugfs(sta); |
175 | ieee80211_sta_debugfs_remove(sta); | 177 | ieee80211_sta_debugfs_remove(sta); |
176 | 178 | ||
@@ -259,6 +261,21 @@ static void sta_info_hash_add(struct ieee80211_local *local, | |||
259 | rcu_assign_pointer(local->sta_hash[STA_HASH(sta->sta.addr)], sta); | 261 | rcu_assign_pointer(local->sta_hash[STA_HASH(sta->sta.addr)], sta); |
260 | } | 262 | } |
261 | 263 | ||
264 | static void sta_unblock(struct work_struct *wk) | ||
265 | { | ||
266 | struct sta_info *sta; | ||
267 | |||
268 | sta = container_of(wk, struct sta_info, drv_unblock_wk); | ||
269 | |||
270 | if (sta->dead) | ||
271 | return; | ||
272 | |||
273 | if (!test_sta_flags(sta, WLAN_STA_PS_STA)) | ||
274 | ieee80211_sta_ps_deliver_wakeup(sta); | ||
275 | else if (test_and_clear_sta_flags(sta, WLAN_STA_PSPOLL)) | ||
276 | ieee80211_sta_ps_deliver_poll_response(sta); | ||
277 | } | ||
278 | |||
262 | struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, | 279 | struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, |
263 | u8 *addr, gfp_t gfp) | 280 | u8 *addr, gfp_t gfp) |
264 | { | 281 | { |
@@ -272,6 +289,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, | |||
272 | 289 | ||
273 | spin_lock_init(&sta->lock); | 290 | spin_lock_init(&sta->lock); |
274 | spin_lock_init(&sta->flaglock); | 291 | spin_lock_init(&sta->flaglock); |
292 | INIT_WORK(&sta->drv_unblock_wk, sta_unblock); | ||
275 | 293 | ||
276 | memcpy(sta->sta.addr, addr, ETH_ALEN); | 294 | memcpy(sta->sta.addr, addr, ETH_ALEN); |
277 | sta->local = local; | 295 | sta->local = local; |
@@ -478,8 +496,10 @@ static void __sta_info_unlink(struct sta_info **sta) | |||
478 | } | 496 | } |
479 | 497 | ||
480 | list_del(&(*sta)->list); | 498 | list_del(&(*sta)->list); |
499 | (*sta)->dead = true; | ||
481 | 500 | ||
482 | if (test_and_clear_sta_flags(*sta, WLAN_STA_PS)) { | 501 | if (test_and_clear_sta_flags(*sta, |
502 | WLAN_STA_PS_STA | WLAN_STA_PS_DRIVER)) { | ||
483 | BUG_ON(!sdata->bss); | 503 | BUG_ON(!sdata->bss); |
484 | 504 | ||
485 | atomic_dec(&sdata->bss->num_sta_ps); | 505 | atomic_dec(&sdata->bss->num_sta_ps); |
@@ -489,6 +509,9 @@ static void __sta_info_unlink(struct sta_info **sta) | |||
489 | local->num_sta--; | 509 | local->num_sta--; |
490 | local->sta_generation++; | 510 | local->sta_generation++; |
491 | 511 | ||
512 | if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) | ||
513 | rcu_assign_pointer(sdata->u.vlan.sta, NULL); | ||
514 | |||
492 | if (local->ops->sta_notify) { | 515 | if (local->ops->sta_notify) { |
493 | if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) | 516 | if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) |
494 | sdata = container_of(sdata->bss, | 517 | sdata = container_of(sdata->bss, |
@@ -801,8 +824,8 @@ void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata, | |||
801 | sta_info_destroy(sta); | 824 | sta_info_destroy(sta); |
802 | } | 825 | } |
803 | 826 | ||
804 | struct ieee80211_sta *ieee80211_find_sta(struct ieee80211_hw *hw, | 827 | struct ieee80211_sta *ieee80211_find_sta_by_hw(struct ieee80211_hw *hw, |
805 | const u8 *addr) | 828 | const u8 *addr) |
806 | { | 829 | { |
807 | struct sta_info *sta = sta_info_get(hw_to_local(hw), addr); | 830 | struct sta_info *sta = sta_info_get(hw_to_local(hw), addr); |
808 | 831 | ||
@@ -810,4 +833,114 @@ struct ieee80211_sta *ieee80211_find_sta(struct ieee80211_hw *hw, | |||
810 | return NULL; | 833 | return NULL; |
811 | return &sta->sta; | 834 | return &sta->sta; |
812 | } | 835 | } |
836 | EXPORT_SYMBOL_GPL(ieee80211_find_sta_by_hw); | ||
837 | |||
838 | struct ieee80211_sta *ieee80211_find_sta(struct ieee80211_vif *vif, | ||
839 | const u8 *addr) | ||
840 | { | ||
841 | struct ieee80211_sub_if_data *sdata; | ||
842 | |||
843 | if (!vif) | ||
844 | return NULL; | ||
845 | |||
846 | sdata = vif_to_sdata(vif); | ||
847 | |||
848 | return ieee80211_find_sta_by_hw(&sdata->local->hw, addr); | ||
849 | } | ||
813 | EXPORT_SYMBOL(ieee80211_find_sta); | 850 | EXPORT_SYMBOL(ieee80211_find_sta); |
851 | |||
852 | /* powersave support code */ | ||
853 | void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta) | ||
854 | { | ||
855 | struct ieee80211_sub_if_data *sdata = sta->sdata; | ||
856 | struct ieee80211_local *local = sdata->local; | ||
857 | int sent, buffered; | ||
858 | |||
859 | drv_sta_notify(local, &sdata->vif, STA_NOTIFY_AWAKE, &sta->sta); | ||
860 | |||
861 | if (!skb_queue_empty(&sta->ps_tx_buf)) | ||
862 | sta_info_clear_tim_bit(sta); | ||
863 | |||
864 | /* Send all buffered frames to the station */ | ||
865 | sent = ieee80211_add_pending_skbs(local, &sta->tx_filtered); | ||
866 | buffered = ieee80211_add_pending_skbs(local, &sta->ps_tx_buf); | ||
867 | sent += buffered; | ||
868 | local->total_ps_buffered -= buffered; | ||
869 | |||
870 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG | ||
871 | printk(KERN_DEBUG "%s: STA %pM aid %d sending %d filtered/%d PS frames " | ||
872 | "since STA not sleeping anymore\n", sdata->dev->name, | ||
873 | sta->sta.addr, sta->sta.aid, sent - buffered, buffered); | ||
874 | #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ | ||
875 | } | ||
876 | |||
877 | void ieee80211_sta_ps_deliver_poll_response(struct sta_info *sta) | ||
878 | { | ||
879 | struct ieee80211_sub_if_data *sdata = sta->sdata; | ||
880 | struct ieee80211_local *local = sdata->local; | ||
881 | struct sk_buff *skb; | ||
882 | int no_pending_pkts; | ||
883 | |||
884 | skb = skb_dequeue(&sta->tx_filtered); | ||
885 | if (!skb) { | ||
886 | skb = skb_dequeue(&sta->ps_tx_buf); | ||
887 | if (skb) | ||
888 | local->total_ps_buffered--; | ||
889 | } | ||
890 | no_pending_pkts = skb_queue_empty(&sta->tx_filtered) && | ||
891 | skb_queue_empty(&sta->ps_tx_buf); | ||
892 | |||
893 | if (skb) { | ||
894 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | ||
895 | struct ieee80211_hdr *hdr = | ||
896 | (struct ieee80211_hdr *) skb->data; | ||
897 | |||
898 | /* | ||
899 | * Tell TX path to send this frame even though the STA may | ||
900 | * still remain is PS mode after this frame exchange. | ||
901 | */ | ||
902 | info->flags |= IEEE80211_TX_CTL_PSPOLL_RESPONSE; | ||
903 | |||
904 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG | ||
905 | printk(KERN_DEBUG "STA %pM aid %d: PS Poll (entries after %d)\n", | ||
906 | sta->sta.addr, sta->sta.aid, | ||
907 | skb_queue_len(&sta->ps_tx_buf)); | ||
908 | #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ | ||
909 | |||
910 | /* Use MoreData flag to indicate whether there are more | ||
911 | * buffered frames for this STA */ | ||
912 | if (no_pending_pkts) | ||
913 | hdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_MOREDATA); | ||
914 | else | ||
915 | hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA); | ||
916 | |||
917 | ieee80211_add_pending_skb(local, skb); | ||
918 | |||
919 | if (no_pending_pkts) | ||
920 | sta_info_clear_tim_bit(sta); | ||
921 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG | ||
922 | } else { | ||
923 | /* | ||
924 | * FIXME: This can be the result of a race condition between | ||
925 | * us expiring a frame and the station polling for it. | ||
926 | * Should we send it a null-func frame indicating we | ||
927 | * have nothing buffered for it? | ||
928 | */ | ||
929 | printk(KERN_DEBUG "%s: STA %pM sent PS Poll even " | ||
930 | "though there are no buffered frames for it\n", | ||
931 | sdata->dev->name, sta->sta.addr); | ||
932 | #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ | ||
933 | } | ||
934 | } | ||
935 | |||
936 | void ieee80211_sta_block_awake(struct ieee80211_hw *hw, | ||
937 | struct ieee80211_sta *pubsta, bool block) | ||
938 | { | ||
939 | struct sta_info *sta = container_of(pubsta, struct sta_info, sta); | ||
940 | |||
941 | if (block) | ||
942 | set_sta_flags(sta, WLAN_STA_PS_DRIVER); | ||
943 | else | ||
944 | ieee80211_queue_work(hw, &sta->drv_unblock_wk); | ||
945 | } | ||
946 | EXPORT_SYMBOL(ieee80211_sta_block_awake); | ||
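
The new ieee80211_sta_block_awake() export lets a driver keep a station logically in power save while it flushes frames that may still sit in its own queues. A minimal sketch of the intended driver-side usage follows; only the mac80211 call itself comes from this patch, the flush helper is hypothetical:

#include <net/mac80211.h>

/* Hedged sketch: block the station, drain hardware queues, then unblock.
 * mac80211 sets WLAN_STA_PS_DRIVER while blocked and buffers frames;
 * unblocking schedules sta->drv_unblock_wk to deliver them again. */
static void drv_flush_sta(struct ieee80211_hw *hw, struct ieee80211_sta *sta)
{
	ieee80211_sta_block_awake(hw, sta, true);
	drv_hw_flush_queues(hw);		/* hypothetical driver helper */
	ieee80211_sta_block_awake(hw, sta, false);
}
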
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index ccc3adf962c7..4673454176ed 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/list.h> | 12 | #include <linux/list.h> |
13 | #include <linux/types.h> | 13 | #include <linux/types.h> |
14 | #include <linux/if_ether.h> | 14 | #include <linux/if_ether.h> |
15 | #include <linux/workqueue.h> | ||
15 | #include "key.h" | 16 | #include "key.h" |
16 | 17 | ||
17 | /** | 18 | /** |
@@ -21,7 +22,7 @@ | |||
21 | * | 22 | * |
22 | * @WLAN_STA_AUTH: Station is authenticated. | 23 | * @WLAN_STA_AUTH: Station is authenticated. |
23 | * @WLAN_STA_ASSOC: Station is associated. | 24 | * @WLAN_STA_ASSOC: Station is associated. |
24 | * @WLAN_STA_PS: Station is in power-save mode | 25 | * @WLAN_STA_PS_STA: Station is in power-save mode |
25 | * @WLAN_STA_AUTHORIZED: Station is authorized to send/receive traffic. | 26 | * @WLAN_STA_AUTHORIZED: Station is authorized to send/receive traffic. |
26 | * This bit is always checked so needs to be enabled for all stations | 27 | * This bit is always checked so needs to be enabled for all stations |
27 | * when virtual port control is not in use. | 28 | * when virtual port control is not in use. |
@@ -36,11 +37,16 @@ | |||
36 | * @WLAN_STA_MFP: Management frame protection is used with this STA. | 37 | * @WLAN_STA_MFP: Management frame protection is used with this STA. |
37 | * @WLAN_STA_SUSPEND: Set/cleared during a suspend/resume cycle. | 38 | * @WLAN_STA_SUSPEND: Set/cleared during a suspend/resume cycle. |
38 | * Used to deny ADDBA requests (both TX and RX). | 39 | * Used to deny ADDBA requests (both TX and RX). |
40 | * @WLAN_STA_PS_DRIVER: driver requires keeping this station in | ||
41 | * power-save mode logically to flush frames that might still | ||
42 | * be in the queues | ||
43 | * @WLAN_STA_PSPOLL: Station sent PS-poll while driver was keeping | ||
44 | * station in power-save mode, reply when the driver unblocks. | ||
39 | */ | 45 | */ |
40 | enum ieee80211_sta_info_flags { | 46 | enum ieee80211_sta_info_flags { |
41 | WLAN_STA_AUTH = 1<<0, | 47 | WLAN_STA_AUTH = 1<<0, |
42 | WLAN_STA_ASSOC = 1<<1, | 48 | WLAN_STA_ASSOC = 1<<1, |
43 | WLAN_STA_PS = 1<<2, | 49 | WLAN_STA_PS_STA = 1<<2, |
44 | WLAN_STA_AUTHORIZED = 1<<3, | 50 | WLAN_STA_AUTHORIZED = 1<<3, |
45 | WLAN_STA_SHORT_PREAMBLE = 1<<4, | 51 | WLAN_STA_SHORT_PREAMBLE = 1<<4, |
46 | WLAN_STA_ASSOC_AP = 1<<5, | 52 | WLAN_STA_ASSOC_AP = 1<<5, |
@@ -48,7 +54,9 @@ enum ieee80211_sta_info_flags { | |||
48 | WLAN_STA_WDS = 1<<7, | 54 | WLAN_STA_WDS = 1<<7, |
49 | WLAN_STA_CLEAR_PS_FILT = 1<<9, | 55 | WLAN_STA_CLEAR_PS_FILT = 1<<9, |
50 | WLAN_STA_MFP = 1<<10, | 56 | WLAN_STA_MFP = 1<<10, |
51 | WLAN_STA_SUSPEND = 1<<11 | 57 | WLAN_STA_SUSPEND = 1<<11, |
58 | WLAN_STA_PS_DRIVER = 1<<12, | ||
59 | WLAN_STA_PSPOLL = 1<<13, | ||
52 | }; | 60 | }; |
53 | 61 | ||
54 | #define STA_TID_NUM 16 | 62 | #define STA_TID_NUM 16 |
@@ -193,7 +201,6 @@ struct sta_ampdu_mlme { | |||
193 | * @rx_fragments: number of received MPDUs | 201 | * @rx_fragments: number of received MPDUs |
194 | * @rx_dropped: number of dropped MPDUs from this STA | 202 | * @rx_dropped: number of dropped MPDUs from this STA |
195 | * @last_signal: signal of last received frame from this STA | 203 | * @last_signal: signal of last received frame from this STA |
196 | * @last_qual: qual of last received frame from this STA | ||
197 | * @last_noise: noise of last received frame from this STA | 204 | * @last_noise: noise of last received frame from this STA |
198 | * @last_seq_ctrl: last received seq/frag number from this STA (per RX queue) | 205 | * @last_seq_ctrl: last received seq/frag number from this STA (per RX queue) |
199 | * @tx_filtered_count: number of frames the hardware filtered for this STA | 206 | * @tx_filtered_count: number of frames the hardware filtered for this STA |
@@ -217,6 +224,8 @@ struct sta_ampdu_mlme { | |||
217 | * @plink_timer_was_running: used by suspend/resume to restore timers | 224 | * @plink_timer_was_running: used by suspend/resume to restore timers |
218 | * @debugfs: debug filesystem info | 225 | * @debugfs: debug filesystem info |
219 | * @sta: station information we share with the driver | 226 | * @sta: station information we share with the driver |
227 | * @dead: set to true when sta is unlinked | ||
228 | * @drv_unblock_wk: used for driver PS unblocking | ||
220 | */ | 229 | */ |
221 | struct sta_info { | 230 | struct sta_info { |
222 | /* General information, mostly static */ | 231 | /* General information, mostly static */ |
@@ -230,8 +239,12 @@ struct sta_info { | |||
230 | spinlock_t lock; | 239 | spinlock_t lock; |
231 | spinlock_t flaglock; | 240 | spinlock_t flaglock; |
232 | 241 | ||
242 | struct work_struct drv_unblock_wk; | ||
243 | |||
233 | u16 listen_interval; | 244 | u16 listen_interval; |
234 | 245 | ||
246 | bool dead; | ||
247 | |||
235 | /* | 248 | /* |
236 | * for use by the internal lifetime management, | 249 | * for use by the internal lifetime management, |
237 | * see __sta_info_unlink | 250 | * see __sta_info_unlink |
@@ -259,7 +272,6 @@ struct sta_info { | |||
259 | unsigned long rx_fragments; | 272 | unsigned long rx_fragments; |
260 | unsigned long rx_dropped; | 273 | unsigned long rx_dropped; |
261 | int last_signal; | 274 | int last_signal; |
262 | int last_qual; | ||
263 | int last_noise; | 275 | int last_noise; |
264 | __le16 last_seq_ctrl[NUM_RX_DATA_QUEUES]; | 276 | __le16 last_seq_ctrl[NUM_RX_DATA_QUEUES]; |
265 | 277 | ||
@@ -301,28 +313,6 @@ struct sta_info { | |||
301 | #ifdef CONFIG_MAC80211_DEBUGFS | 313 | #ifdef CONFIG_MAC80211_DEBUGFS |
302 | struct sta_info_debugfsdentries { | 314 | struct sta_info_debugfsdentries { |
303 | struct dentry *dir; | 315 | struct dentry *dir; |
304 | struct dentry *flags; | ||
305 | struct dentry *num_ps_buf_frames; | ||
306 | struct dentry *inactive_ms; | ||
307 | struct dentry *last_seq_ctrl; | ||
308 | struct dentry *agg_status; | ||
309 | struct dentry *aid; | ||
310 | struct dentry *dev; | ||
311 | struct dentry *rx_packets; | ||
312 | struct dentry *tx_packets; | ||
313 | struct dentry *rx_bytes; | ||
314 | struct dentry *tx_bytes; | ||
315 | struct dentry *rx_duplicates; | ||
316 | struct dentry *rx_fragments; | ||
317 | struct dentry *rx_dropped; | ||
318 | struct dentry *tx_fragments; | ||
319 | struct dentry *tx_filtered; | ||
320 | struct dentry *tx_retry_failed; | ||
321 | struct dentry *tx_retry_count; | ||
322 | struct dentry *last_signal; | ||
323 | struct dentry *last_qual; | ||
324 | struct dentry *last_noise; | ||
325 | struct dentry *wep_weak_iv_count; | ||
326 | bool add_has_run; | 316 | bool add_has_run; |
327 | } debugfs; | 317 | } debugfs; |
328 | #endif | 318 | #endif |
@@ -454,4 +444,7 @@ int sta_info_flush(struct ieee80211_local *local, | |||
454 | void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata, | 444 | void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata, |
455 | unsigned long exp_time); | 445 | unsigned long exp_time); |
456 | 446 | ||
447 | void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta); | ||
448 | void ieee80211_sta_ps_deliver_poll_response(struct sta_info *sta); | ||
449 | |||
457 | #endif /* STA_INFO_H */ | 450 | #endif /* STA_INFO_H */ |
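
Both power-save reasons are plain bits in the same flag word, so callers that only care whether the station is asleep test them together, exactly as the tx.c hunk further down does. A rough illustration, assuming the existing get_sta_flags() helper:

	u32 staflags = get_sta_flags(sta);

	/* asleep for either reason: the STA itself, or the driver block */
	if (staflags & (WLAN_STA_PS_STA | WLAN_STA_PS_DRIVER)) {
		/* buffer the frame instead of transmitting it */
	}
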
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index eaa4118de988..3ad053f6de12 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c | |||
@@ -317,12 +317,11 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx) | |||
317 | if (!atomic_read(&tx->sdata->bss->num_sta_ps)) | 317 | if (!atomic_read(&tx->sdata->bss->num_sta_ps)) |
318 | return TX_CONTINUE; | 318 | return TX_CONTINUE; |
319 | 319 | ||
320 | /* buffered in hardware */ | 320 | info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM; |
321 | if (!(tx->local->hw.flags & IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING)) { | ||
322 | info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM; | ||
323 | 321 | ||
322 | /* device releases frame after DTIM beacon */ | ||
323 | if (!(tx->local->hw.flags & IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING)) | ||
324 | return TX_CONTINUE; | 324 | return TX_CONTINUE; |
325 | } | ||
326 | 325 | ||
327 | /* buffered in mac80211 */ | 326 | /* buffered in mac80211 */ |
328 | if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER) | 327 | if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER) |
@@ -375,7 +374,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx) | |||
375 | 374 | ||
376 | staflags = get_sta_flags(sta); | 375 | staflags = get_sta_flags(sta); |
377 | 376 | ||
378 | if (unlikely((staflags & WLAN_STA_PS) && | 377 | if (unlikely((staflags & (WLAN_STA_PS_STA | WLAN_STA_PS_DRIVER)) && |
379 | !(info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE))) { | 378 | !(info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE))) { |
380 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG | 379 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG |
381 | printk(KERN_DEBUG "STA %pM aid %d: PS buffer (entries " | 380 | printk(KERN_DEBUG "STA %pM aid %d: PS buffer (entries " |
@@ -398,8 +397,13 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx) | |||
398 | } else | 397 | } else |
399 | tx->local->total_ps_buffered++; | 398 | tx->local->total_ps_buffered++; |
400 | 399 | ||
401 | /* Queue frame to be sent after STA sends an PS Poll frame */ | 400 | /* |
402 | if (skb_queue_empty(&sta->ps_tx_buf)) | 401 | * Queue frame to be sent after STA wakes up/polls, |
402 | * but don't set the TIM bit if the driver is blocking | ||
403 | * wakeup or poll response transmissions anyway. | ||
404 | */ | ||
405 | if (skb_queue_empty(&sta->ps_tx_buf) && | ||
406 | !(staflags & WLAN_STA_PS_DRIVER)) | ||
403 | sta_info_set_tim_bit(sta); | 407 | sta_info_set_tim_bit(sta); |
404 | 408 | ||
405 | info->control.jiffies = jiffies; | 409 | info->control.jiffies = jiffies; |
@@ -409,7 +413,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx) | |||
409 | return TX_QUEUED; | 413 | return TX_QUEUED; |
410 | } | 414 | } |
411 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG | 415 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG |
412 | else if (unlikely(test_sta_flags(sta, WLAN_STA_PS))) { | 416 | else if (unlikely(staflags & WLAN_STA_PS_STA)) { |
413 | printk(KERN_DEBUG "%s: STA %pM in PS mode, but pspoll " | 417 | printk(KERN_DEBUG "%s: STA %pM in PS mode, but pspoll " |
414 | "set -> send frame\n", tx->dev->name, | 418 | "set -> send frame\n", tx->dev->name, |
415 | sta->sta.addr); | 419 | sta->sta.addr); |
@@ -1047,7 +1051,10 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata, | |||
1047 | 1051 | ||
1048 | hdr = (struct ieee80211_hdr *) skb->data; | 1052 | hdr = (struct ieee80211_hdr *) skb->data; |
1049 | 1053 | ||
1050 | tx->sta = sta_info_get(local, hdr->addr1); | 1054 | if ((sdata->vif.type == NL80211_IFTYPE_AP_VLAN) && sdata->use_4addr) |
1055 | tx->sta = rcu_dereference(sdata->u.vlan.sta); | ||
1056 | if (!tx->sta) | ||
1057 | tx->sta = sta_info_get(local, hdr->addr1); | ||
1051 | 1058 | ||
1052 | if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) && | 1059 | if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) && |
1053 | (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION)) { | 1060 | (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION)) { |
@@ -1201,23 +1208,25 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx) | |||
1201 | struct sk_buff *skb = tx->skb; | 1208 | struct sk_buff *skb = tx->skb; |
1202 | ieee80211_tx_result res = TX_DROP; | 1209 | ieee80211_tx_result res = TX_DROP; |
1203 | 1210 | ||
1204 | #define CALL_TXH(txh) \ | 1211 | #define CALL_TXH(txh) \ |
1205 | res = txh(tx); \ | 1212 | do { \ |
1206 | if (res != TX_CONTINUE) \ | 1213 | res = txh(tx); \ |
1207 | goto txh_done; | 1214 | if (res != TX_CONTINUE) \ |
1208 | | 1215 | goto txh_done; \ |
1209 | CALL_TXH(ieee80211_tx_h_check_assoc) | 1216 | } while (0) |
1210 | CALL_TXH(ieee80211_tx_h_ps_buf) | 1217 | |
1211 | CALL_TXH(ieee80211_tx_h_select_key) | 1218 | CALL_TXH(ieee80211_tx_h_check_assoc); |
1212 | CALL_TXH(ieee80211_tx_h_michael_mic_add) | 1219 | CALL_TXH(ieee80211_tx_h_ps_buf); |
1213 | CALL_TXH(ieee80211_tx_h_rate_ctrl) | 1220 | CALL_TXH(ieee80211_tx_h_select_key); |
1214 | CALL_TXH(ieee80211_tx_h_misc) | 1221 | CALL_TXH(ieee80211_tx_h_michael_mic_add); |
1215 | CALL_TXH(ieee80211_tx_h_sequence) | 1222 | CALL_TXH(ieee80211_tx_h_rate_ctrl); |
1216 | CALL_TXH(ieee80211_tx_h_fragment) | 1223 | CALL_TXH(ieee80211_tx_h_misc); |
1224 | CALL_TXH(ieee80211_tx_h_sequence); | ||
1225 | CALL_TXH(ieee80211_tx_h_fragment); | ||
1217 | /* handlers after fragment must be aware of tx info fragmentation! */ | 1226 | /* handlers after fragment must be aware of tx info fragmentation! */ |
1218 | CALL_TXH(ieee80211_tx_h_stats) | 1227 | CALL_TXH(ieee80211_tx_h_stats); |
1219 | CALL_TXH(ieee80211_tx_h_encrypt) | 1228 | CALL_TXH(ieee80211_tx_h_encrypt); |
1220 | CALL_TXH(ieee80211_tx_h_calculate_duration) | 1229 | CALL_TXH(ieee80211_tx_h_calculate_duration); |
1221 | #undef CALL_TXH | 1230 | #undef CALL_TXH |
1222 | 1231 | ||
1223 | txh_done: | 1232 | txh_done: |
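
The do { ... } while (0) wrapper around CALL_TXH() is the standard idiom for multi-statement macros: the expansion behaves as a single statement, the call site needs a terminating semicolon, and the macro nests safely inside if/else. A stand-alone illustration with a hypothetical macro and helpers:

#define RUN_STEP(x)			\
	do {				\
		prepare(x);		\
		if (!commit(x))		\
			goto fail;	\
	} while (0)

	if (fast_path)
		RUN_STEP(a);	/* expands to exactly one statement */
	else
		RUN_STEP(b);	/* no dangling-else or stray-semicolon issues */
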
@@ -1387,6 +1396,30 @@ static int ieee80211_skb_resize(struct ieee80211_local *local, | |||
1387 | return 0; | 1396 | return 0; |
1388 | } | 1397 | } |
1389 | 1398 | ||
1399 | static bool need_dynamic_ps(struct ieee80211_local *local) | ||
1400 | { | ||
1401 | /* driver doesn't support power save */ | ||
1402 | if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS)) | ||
1403 | return false; | ||
1404 | |||
1405 | /* hardware does dynamic power save */ | ||
1406 | if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS) | ||
1407 | return false; | ||
1408 | |||
1409 | /* dynamic power save disabled */ | ||
1410 | if (local->hw.conf.dynamic_ps_timeout <= 0) | ||
1411 | return false; | ||
1412 | |||
1413 | /* we are scanning, don't enable power save */ | ||
1414 | if (local->scanning) | ||
1415 | return false; | ||
1416 | |||
1417 | if (!local->ps_sdata) | ||
1418 | return false; | ||
1419 | |||
1420 | return true; | ||
1421 | } | ||
1422 | |||
1390 | static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, | 1423 | static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, |
1391 | struct sk_buff *skb) | 1424 | struct sk_buff *skb) |
1392 | { | 1425 | { |
@@ -1399,9 +1432,7 @@ static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, | |||
1399 | 1432 | ||
1400 | dev_hold(sdata->dev); | 1433 | dev_hold(sdata->dev); |
1401 | 1434 | ||
1402 | if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) && | 1435 | if (need_dynamic_ps(local)) { |
1403 | local->hw.conf.dynamic_ps_timeout > 0 && | ||
1404 | !(local->scanning) && local->ps_sdata) { | ||
1405 | if (local->hw.conf.flags & IEEE80211_CONF_PS) { | 1436 | if (local->hw.conf.flags & IEEE80211_CONF_PS) { |
1406 | ieee80211_stop_queues_by_reason(&local->hw, | 1437 | ieee80211_stop_queues_by_reason(&local->hw, |
1407 | IEEE80211_QUEUE_STOP_REASON_PS); | 1438 | IEEE80211_QUEUE_STOP_REASON_PS); |
@@ -1585,7 +1616,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1585 | const u8 *encaps_data; | 1616 | const u8 *encaps_data; |
1586 | int encaps_len, skip_header_bytes; | 1617 | int encaps_len, skip_header_bytes; |
1587 | int nh_pos, h_pos; | 1618 | int nh_pos, h_pos; |
1588 | struct sta_info *sta; | 1619 | struct sta_info *sta = NULL; |
1589 | u32 sta_flags = 0; | 1620 | u32 sta_flags = 0; |
1590 | 1621 | ||
1591 | if (unlikely(skb->len < ETH_HLEN)) { | 1622 | if (unlikely(skb->len < ETH_HLEN)) { |
@@ -1602,8 +1633,25 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1602 | fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA); | 1633 | fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA); |
1603 | 1634 | ||
1604 | switch (sdata->vif.type) { | 1635 | switch (sdata->vif.type) { |
1605 | case NL80211_IFTYPE_AP: | ||
1606 | case NL80211_IFTYPE_AP_VLAN: | 1636 | case NL80211_IFTYPE_AP_VLAN: |
1637 | rcu_read_lock(); | ||
1638 | if (sdata->use_4addr) | ||
1639 | sta = rcu_dereference(sdata->u.vlan.sta); | ||
1640 | if (sta) { | ||
1641 | fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); | ||
1642 | /* RA TA DA SA */ | ||
1643 | memcpy(hdr.addr1, sta->sta.addr, ETH_ALEN); | ||
1644 | memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN); | ||
1645 | memcpy(hdr.addr3, skb->data, ETH_ALEN); | ||
1646 | memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN); | ||
1647 | hdrlen = 30; | ||
1648 | sta_flags = get_sta_flags(sta); | ||
1649 | } | ||
1650 | rcu_read_unlock(); | ||
1651 | if (sta) | ||
1652 | break; | ||
1653 | /* fall through */ | ||
1654 | case NL80211_IFTYPE_AP: | ||
1607 | fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS); | 1655 | fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS); |
1608 | /* DA BSSID SA */ | 1656 | /* DA BSSID SA */ |
1609 | memcpy(hdr.addr1, skb->data, ETH_ALEN); | 1657 | memcpy(hdr.addr1, skb->data, ETH_ALEN); |
@@ -1677,12 +1725,21 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1677 | break; | 1725 | break; |
1678 | #endif | 1726 | #endif |
1679 | case NL80211_IFTYPE_STATION: | 1727 | case NL80211_IFTYPE_STATION: |
1680 | fc |= cpu_to_le16(IEEE80211_FCTL_TODS); | ||
1681 | /* BSSID SA DA */ | ||
1682 | memcpy(hdr.addr1, sdata->u.mgd.bssid, ETH_ALEN); | 1728 | memcpy(hdr.addr1, sdata->u.mgd.bssid, ETH_ALEN); |
1683 | memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN); | 1729 | if (sdata->use_4addr && ethertype != ETH_P_PAE) { |
1684 | memcpy(hdr.addr3, skb->data, ETH_ALEN); | 1730 | fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); |
1685 | hdrlen = 24; | 1731 | /* RA TA DA SA */ |
1732 | memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN); | ||
1733 | memcpy(hdr.addr3, skb->data, ETH_ALEN); | ||
1734 | memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN); | ||
1735 | hdrlen = 30; | ||
1736 | } else { | ||
1737 | fc |= cpu_to_le16(IEEE80211_FCTL_TODS); | ||
1738 | /* BSSID SA DA */ | ||
1739 | memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN); | ||
1740 | memcpy(hdr.addr3, skb->data, ETH_ALEN); | ||
1741 | hdrlen = 24; | ||
1742 | } | ||
1686 | break; | 1743 | break; |
1687 | case NL80211_IFTYPE_ADHOC: | 1744 | case NL80211_IFTYPE_ADHOC: |
1688 | /* DA SA BSSID */ | 1745 | /* DA SA BSSID */ |
@@ -1990,8 +2047,9 @@ static void ieee80211_beacon_add_tim(struct ieee80211_if_ap *bss, | |||
1990 | } | 2047 | } |
1991 | } | 2048 | } |
1992 | 2049 | ||
1993 | struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw, | 2050 | struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw, |
1994 | struct ieee80211_vif *vif) | 2051 | struct ieee80211_vif *vif, |
2052 | u16 *tim_offset, u16 *tim_length) | ||
1995 | { | 2053 | { |
1996 | struct ieee80211_local *local = hw_to_local(hw); | 2054 | struct ieee80211_local *local = hw_to_local(hw); |
1997 | struct sk_buff *skb = NULL; | 2055 | struct sk_buff *skb = NULL; |
@@ -2008,6 +2066,11 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw, | |||
2008 | 2066 | ||
2009 | sdata = vif_to_sdata(vif); | 2067 | sdata = vif_to_sdata(vif); |
2010 | 2068 | ||
2069 | if (tim_offset) | ||
2070 | *tim_offset = 0; | ||
2071 | if (tim_length) | ||
2072 | *tim_length = 0; | ||
2073 | |||
2011 | if (sdata->vif.type == NL80211_IFTYPE_AP) { | 2074 | if (sdata->vif.type == NL80211_IFTYPE_AP) { |
2012 | ap = &sdata->u.ap; | 2075 | ap = &sdata->u.ap; |
2013 | beacon = rcu_dereference(ap->beacon); | 2076 | beacon = rcu_dereference(ap->beacon); |
@@ -2043,6 +2106,11 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw, | |||
2043 | spin_unlock_irqrestore(&local->sta_lock, flags); | 2106 | spin_unlock_irqrestore(&local->sta_lock, flags); |
2044 | } | 2107 | } |
2045 | 2108 | ||
2109 | if (tim_offset) | ||
2110 | *tim_offset = beacon->head_len; | ||
2111 | if (tim_length) | ||
2112 | *tim_length = skb->len - beacon->head_len; | ||
2113 | |||
2046 | if (beacon->tail) | 2114 | if (beacon->tail) |
2047 | memcpy(skb_put(skb, beacon->tail_len), | 2115 | memcpy(skb_put(skb, beacon->tail_len), |
2048 | beacon->tail, beacon->tail_len); | 2116 | beacon->tail, beacon->tail_len); |
@@ -2080,7 +2148,7 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw, | |||
2080 | cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON); | 2148 | cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON); |
2081 | memset(mgmt->da, 0xff, ETH_ALEN); | 2149 | memset(mgmt->da, 0xff, ETH_ALEN); |
2082 | memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); | 2150 | memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); |
2083 | /* BSSID is left zeroed, wildcard value */ | 2151 | memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN); |
2084 | mgmt->u.beacon.beacon_int = | 2152 | mgmt->u.beacon.beacon_int = |
2085 | cpu_to_le16(sdata->vif.bss_conf.beacon_int); | 2153 | cpu_to_le16(sdata->vif.bss_conf.beacon_int); |
2086 | mgmt->u.beacon.capab_info = 0x0; /* 0x0 for MPs */ | 2154 | mgmt->u.beacon.capab_info = 0x0; /* 0x0 for MPs */ |
@@ -2119,7 +2187,7 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw, | |||
2119 | rcu_read_unlock(); | 2187 | rcu_read_unlock(); |
2120 | return skb; | 2188 | return skb; |
2121 | } | 2189 | } |
2122 | EXPORT_SYMBOL(ieee80211_beacon_get); | 2190 | EXPORT_SYMBOL(ieee80211_beacon_get_tim); |
2123 | 2191 | ||
2124 | void ieee80211_rts_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif, | 2192 | void ieee80211_rts_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif, |
2125 | const void *frame, size_t frame_len, | 2193 | const void *frame, size_t frame_len, |
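
ieee80211_beacon_get_tim() supersedes ieee80211_beacon_get() for drivers that want to locate or rewrite the TIM element themselves; the two out-parameters give the element's offset and length inside the returned beacon. A hedged sketch of driver usage, where hw and vif are the driver's usual handles and error handling is trimmed:

	u16 tim_offset, tim_len;
	struct sk_buff *beacon;

	beacon = ieee80211_beacon_get_tim(hw, vif, &tim_offset, &tim_len);
	if (beacon && tim_len) {
		u8 *tim = beacon->data + tim_offset;
		/* tim[0] is the element ID, tim[1] its body length;
		 * the driver can upload the beacon and point the
		 * hardware at the TIM for per-TBTT updates. */
	}
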
diff --git a/net/mac80211/util.c b/net/mac80211/util.c index aeb65b3d2295..da86e1592f8c 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c | |||
@@ -685,6 +685,10 @@ u32 ieee802_11_parse_elems_crc(u8 *start, size_t len, | |||
685 | elems->perr = pos; | 685 | elems->perr = pos; |
686 | elems->perr_len = elen; | 686 | elems->perr_len = elen; |
687 | break; | 687 | break; |
688 | case WLAN_EID_RANN: | ||
689 | if (elen >= sizeof(struct ieee80211_rann_ie)) | ||
690 | elems->rann = (void *)pos; | ||
691 | break; | ||
688 | case WLAN_EID_CHANNEL_SWITCH: | 692 | case WLAN_EID_CHANNEL_SWITCH: |
689 | elems->ch_switch_elem = pos; | 693 | elems->ch_switch_elem = pos; |
690 | elems->ch_switch_elem_len = elen; | 694 | elems->ch_switch_elem_len = elen; |
@@ -872,13 +876,14 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata, | |||
872 | } | 876 | } |
873 | 877 | ||
874 | int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer, | 878 | int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer, |
875 | const u8 *ie, size_t ie_len) | 879 | const u8 *ie, size_t ie_len, |
880 | enum ieee80211_band band) | ||
876 | { | 881 | { |
877 | struct ieee80211_supported_band *sband; | 882 | struct ieee80211_supported_band *sband; |
878 | u8 *pos, *supp_rates_len, *esupp_rates_len = NULL; | 883 | u8 *pos, *supp_rates_len, *esupp_rates_len = NULL; |
879 | int i; | 884 | int i; |
880 | 885 | ||
881 | sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; | 886 | sband = local->hw.wiphy->bands[band]; |
882 | 887 | ||
883 | pos = buffer; | 888 | pos = buffer; |
884 | 889 | ||
@@ -966,7 +971,8 @@ void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst, | |||
966 | memcpy(pos, ssid, ssid_len); | 971 | memcpy(pos, ssid, ssid_len); |
967 | pos += ssid_len; | 972 | pos += ssid_len; |
968 | 973 | ||
969 | skb_put(skb, ieee80211_build_preq_ies(local, pos, ie, ie_len)); | 974 | skb_put(skb, ieee80211_build_preq_ies(local, pos, ie, ie_len, |
975 | local->hw.conf.channel->band)); | ||
970 | 976 | ||
971 | ieee80211_tx_skb(sdata, skb, 0); | 977 | ieee80211_tx_skb(sdata, skb, 0); |
972 | } | 978 | } |
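
Passing the band explicitly means ieee80211_build_preq_ies() no longer has to trust local->hw.conf.channel->band, so probe-request IEs can be built for a band the hardware is not currently tuned to. A sketch of what that enables, e.g. pre-building IEs per band when preparing a scan; buf[] and len[] are hypothetical per-band buffers:

	enum ieee80211_band band;
	int len[IEEE80211_NUM_BANDS];	/* hypothetical per-band lengths */

	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
		if (!local->hw.wiphy->bands[band])
			continue;
		len[band] = ieee80211_build_preq_ies(local, buf[band],
						     ie, ie_len, band);
	}
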
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c index 1b816a2ea813..80abdf297b36 100644 --- a/net/netfilter/nf_conntrack_proto_dccp.c +++ b/net/netfilter/nf_conntrack_proto_dccp.c | |||
@@ -384,7 +384,7 @@ dccp_state_table[CT_DCCP_ROLE_MAX + 1][DCCP_PKT_SYNCACK + 1][CT_DCCP_MAX + 1] = | |||
384 | }; | 384 | }; |
385 | 385 | ||
386 | /* this module per-net specifics */ | 386 | /* this module per-net specifics */ |
387 | static int dccp_net_id; | 387 | static int dccp_net_id __read_mostly; |
388 | struct dccp_net { | 388 | struct dccp_net { |
389 | int dccp_loose; | 389 | int dccp_loose; |
390 | unsigned int dccp_timeout[CT_DCCP_MAX + 1]; | 390 | unsigned int dccp_timeout[CT_DCCP_MAX + 1]; |
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c index a54a0af0edba..91d0e719d67c 100644 --- a/net/netfilter/nf_conntrack_proto_gre.c +++ b/net/netfilter/nf_conntrack_proto_gre.c | |||
@@ -43,7 +43,7 @@ | |||
43 | #define GRE_TIMEOUT (30 * HZ) | 43 | #define GRE_TIMEOUT (30 * HZ) |
44 | #define GRE_STREAM_TIMEOUT (180 * HZ) | 44 | #define GRE_STREAM_TIMEOUT (180 * HZ) |
45 | 45 | ||
46 | static int proto_gre_net_id; | 46 | static int proto_gre_net_id __read_mostly; |
47 | struct netns_proto_gre { | 47 | struct netns_proto_gre { |
48 | rwlock_t keymap_lock; | 48 | rwlock_t keymap_lock; |
49 | struct list_head keymap_list; | 49 | struct list_head keymap_list; |
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c index ebf00ad5b194..362afbd60a96 100644 --- a/net/netfilter/xt_socket.c +++ b/net/netfilter/xt_socket.c | |||
@@ -149,7 +149,7 @@ socket_match(const struct sk_buff *skb, const struct xt_match_param *par, | |||
149 | 149 | ||
150 | /* Ignore sockets listening on INADDR_ANY */ | 150 | /* Ignore sockets listening on INADDR_ANY */ |
151 | wildcard = (sk->sk_state != TCP_TIME_WAIT && | 151 | wildcard = (sk->sk_state != TCP_TIME_WAIT && |
152 | inet_sk(sk)->rcv_saddr == 0); | 152 | inet_sk(sk)->inet_rcv_saddr == 0); |
153 | 153 | ||
154 | /* Ignore non-transparent sockets, | 154 | /* Ignore non-transparent sockets, |
155 | if XT_SOCKET_TRANSPARENT is used */ | 155 | if XT_SOCKET_TRANSPARENT is used */ |
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c index fb357f010189..3dfe2bac8623 100644 --- a/net/netlabel/netlabel_unlabeled.c +++ b/net/netlabel/netlabel_unlabeled.c | |||
@@ -472,13 +472,12 @@ int netlbl_unlhsh_add(struct net *net, | |||
472 | 472 | ||
473 | rcu_read_lock(); | 473 | rcu_read_lock(); |
474 | if (dev_name != NULL) { | 474 | if (dev_name != NULL) { |
475 | dev = dev_get_by_name(net, dev_name); | 475 | dev = dev_get_by_name_rcu(net, dev_name); |
476 | if (dev == NULL) { | 476 | if (dev == NULL) { |
477 | ret_val = -ENODEV; | 477 | ret_val = -ENODEV; |
478 | goto unlhsh_add_return; | 478 | goto unlhsh_add_return; |
479 | } | 479 | } |
480 | ifindex = dev->ifindex; | 480 | ifindex = dev->ifindex; |
481 | dev_put(dev); | ||
482 | iface = netlbl_unlhsh_search_iface(ifindex); | 481 | iface = netlbl_unlhsh_search_iface(ifindex); |
483 | } else { | 482 | } else { |
484 | ifindex = 0; | 483 | ifindex = 0; |
@@ -737,13 +736,12 @@ int netlbl_unlhsh_remove(struct net *net, | |||
737 | 736 | ||
738 | rcu_read_lock(); | 737 | rcu_read_lock(); |
739 | if (dev_name != NULL) { | 738 | if (dev_name != NULL) { |
740 | dev = dev_get_by_name(net, dev_name); | 739 | dev = dev_get_by_name_rcu(net, dev_name); |
741 | if (dev == NULL) { | 740 | if (dev == NULL) { |
742 | ret_val = -ENODEV; | 741 | ret_val = -ENODEV; |
743 | goto unlhsh_remove_return; | 742 | goto unlhsh_remove_return; |
744 | } | 743 | } |
745 | iface = netlbl_unlhsh_search_iface(dev->ifindex); | 744 | iface = netlbl_unlhsh_search_iface(dev->ifindex); |
746 | dev_put(dev); | ||
747 | } else | 745 | } else |
748 | iface = rcu_dereference(netlbl_unlhsh_def); | 746 | iface = rcu_dereference(netlbl_unlhsh_def); |
749 | if (iface == NULL) { | 747 | if (iface == NULL) { |
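
dev_get_by_name_rcu() drops the refcount round-trip of dev_get_by_name()/dev_put(): the device is guaranteed to stay around for as long as the caller remains inside rcu_read_lock(). The general pattern, independent of the NetLabel specifics ("eth0" is just an example name):

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, "eth0");
	if (dev)
		ifindex = dev->ifindex;	/* valid while the read lock is held */
	rcu_read_unlock();
	/* no dev_put(); dev must not be dereferenced after the unlock */
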
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 19e98007691c..eff5b0ddc5ca 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
@@ -428,7 +428,8 @@ static int __netlink_create(struct net *net, struct socket *sock, | |||
428 | return 0; | 428 | return 0; |
429 | } | 429 | } |
430 | 430 | ||
431 | static int netlink_create(struct net *net, struct socket *sock, int protocol) | 431 | static int netlink_create(struct net *net, struct socket *sock, int protocol, |
432 | int kern) | ||
432 | { | 433 | { |
433 | struct module *module = NULL; | 434 | struct module *module = NULL; |
434 | struct mutex *cb_mutex; | 435 | struct mutex *cb_mutex; |
@@ -497,7 +498,7 @@ static int netlink_release(struct socket *sock) | |||
497 | 498 | ||
498 | skb_queue_purge(&sk->sk_write_queue); | 499 | skb_queue_purge(&sk->sk_write_queue); |
499 | 500 | ||
500 | if (nlk->pid && !nlk->subscriptions) { | 501 | if (nlk->pid) { |
501 | struct netlink_notify n = { | 502 | struct netlink_notify n = { |
502 | .net = sock_net(sk), | 503 | .net = sock_net(sk), |
503 | .protocol = sk->sk_protocol, | 504 | .protocol = sk->sk_protocol, |
@@ -707,7 +708,7 @@ static int netlink_getname(struct socket *sock, struct sockaddr *addr, | |||
707 | { | 708 | { |
708 | struct sock *sk = sock->sk; | 709 | struct sock *sk = sock->sk; |
709 | struct netlink_sock *nlk = nlk_sk(sk); | 710 | struct netlink_sock *nlk = nlk_sk(sk); |
710 | struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr; | 711 | DECLARE_SOCKADDR(struct sockaddr_nl *, nladdr, addr); |
711 | 712 | ||
712 | nladdr->nl_family = AF_NETLINK; | 713 | nladdr->nl_family = AF_NETLINK; |
713 | nladdr->nl_pad = 0; | 714 | nladdr->nl_pad = 0; |
@@ -2050,7 +2051,7 @@ static const struct proto_ops netlink_ops = { | |||
2050 | .sendpage = sock_no_sendpage, | 2051 | .sendpage = sock_no_sendpage, |
2051 | }; | 2052 | }; |
2052 | 2053 | ||
2053 | static struct net_proto_family netlink_family_ops = { | 2054 | static const struct net_proto_family netlink_family_ops = { |
2054 | .family = PF_NETLINK, | 2055 | .family = PF_NETLINK, |
2055 | .create = netlink_create, | 2056 | .create = netlink_create, |
2056 | .owner = THIS_MODULE, /* for consistency 8) */ | 2057 | .owner = THIS_MODULE, /* for consistency 8) */ |
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c index 44ff3f3810fa..d07ecda0a92d 100644 --- a/net/netlink/genetlink.c +++ b/net/netlink/genetlink.c | |||
@@ -97,25 +97,17 @@ static struct genl_ops *genl_get_cmd(u8 cmd, struct genl_family *family) | |||
97 | */ | 97 | */ |
98 | static inline u16 genl_generate_id(void) | 98 | static inline u16 genl_generate_id(void) |
99 | { | 99 | { |
100 | static u16 id_gen_idx; | 100 | static u16 id_gen_idx = GENL_MIN_ID; |
101 | int overflowed = 0; | 101 | int i; |
102 | 102 | ||
103 | do { | 103 | for (i = 0; i <= GENL_MAX_ID - GENL_MIN_ID; i++) { |
104 | if (id_gen_idx == 0) | 104 | if (!genl_family_find_byid(id_gen_idx)) |
105 | return id_gen_idx; | ||
106 | if (++id_gen_idx > GENL_MAX_ID) | ||
105 | id_gen_idx = GENL_MIN_ID; | 107 | id_gen_idx = GENL_MIN_ID; |
108 | } | ||
106 | 109 | ||
107 | if (++id_gen_idx > GENL_MAX_ID) { | 110 | return 0; |
108 | if (!overflowed) { | ||
109 | overflowed = 1; | ||
110 | id_gen_idx = 0; | ||
111 | continue; | ||
112 | } else | ||
113 | return 0; | ||
114 | } | ||
115 | |||
116 | } while (genl_family_find_byid(id_gen_idx)); | ||
117 | |||
118 | return id_gen_idx; | ||
119 | } | 111 | } |
120 | 112 | ||
121 | static struct genl_multicast_group notify_grp; | 113 | static struct genl_multicast_group notify_grp; |
@@ -374,11 +366,6 @@ int genl_register_family(struct genl_family *family) | |||
374 | goto errout_locked; | 366 | goto errout_locked; |
375 | } | 367 | } |
376 | 368 | ||
377 | if (genl_family_find_byid(family->id)) { | ||
378 | err = -EEXIST; | ||
379 | goto errout_locked; | ||
380 | } | ||
381 | |||
382 | if (family->id == GENL_ID_GENERATE) { | 369 | if (family->id == GENL_ID_GENERATE) { |
383 | u16 newid = genl_generate_id(); | 370 | u16 newid = genl_generate_id(); |
384 | 371 | ||
@@ -388,6 +375,9 @@ int genl_register_family(struct genl_family *family) | |||
388 | } | 375 | } |
389 | 376 | ||
390 | family->id = newid; | 377 | family->id = newid; |
378 | } else if (genl_family_find_byid(family->id)) { | ||
379 | err = -EEXIST; | ||
380 | goto errout_locked; | ||
391 | } | 381 | } |
392 | 382 | ||
393 | if (family->maxattr) { | 383 | if (family->maxattr) { |
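
genl_generate_id() only runs for families registered with GENL_ID_GENERATE, which is why the duplicate-ID check can move into the else branch. A hedged sketch of a family that relies on the allocator; the family name and init function are made up, the registration call and fields are the standard API:

#include <net/genetlink.h>

static struct genl_family example_family = {
	.id		= GENL_ID_GENERATE,	/* let genl_generate_id() pick one */
	.name		= "example",
	.version	= 1,
	.maxattr	= 0,
};

static int __init example_init(void)
{
	return genl_register_family(&example_family);
}
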
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c index 7a834952f67f..4bdd5697f63b 100644 --- a/net/netrom/af_netrom.c +++ b/net/netrom/af_netrom.c | |||
@@ -425,7 +425,8 @@ static struct proto nr_proto = { | |||
425 | .obj_size = sizeof(struct nr_sock), | 425 | .obj_size = sizeof(struct nr_sock), |
426 | }; | 426 | }; |
427 | 427 | ||
428 | static int nr_create(struct net *net, struct socket *sock, int protocol) | 428 | static int nr_create(struct net *net, struct socket *sock, int protocol, |
429 | int kern) | ||
429 | { | 430 | { |
430 | struct sock *sk; | 431 | struct sock *sk; |
431 | struct nr_sock *nr; | 432 | struct nr_sock *nr; |
@@ -1372,7 +1373,7 @@ static const struct file_operations nr_info_fops = { | |||
1372 | }; | 1373 | }; |
1373 | #endif /* CONFIG_PROC_FS */ | 1374 | #endif /* CONFIG_PROC_FS */ |
1374 | 1375 | ||
1375 | static struct net_proto_family nr_family_ops = { | 1376 | static const struct net_proto_family nr_family_ops = { |
1376 | .family = PF_NETROM, | 1377 | .family = PF_NETROM, |
1377 | .create = nr_create, | 1378 | .create = nr_create, |
1378 | .owner = THIS_MODULE, | 1379 | .owner = THIS_MODULE, |
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c index 4eb1ac9a7679..aacba76070fc 100644 --- a/net/netrom/nr_route.c +++ b/net/netrom/nr_route.c | |||
@@ -597,15 +597,15 @@ struct net_device *nr_dev_first(void) | |||
597 | { | 597 | { |
598 | struct net_device *dev, *first = NULL; | 598 | struct net_device *dev, *first = NULL; |
599 | 599 | ||
600 | read_lock(&dev_base_lock); | 600 | rcu_read_lock(); |
601 | for_each_netdev(&init_net, dev) { | 601 | for_each_netdev_rcu(&init_net, dev) { |
602 | if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM) | 602 | if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM) |
603 | if (first == NULL || strncmp(dev->name, first->name, 3) < 0) | 603 | if (first == NULL || strncmp(dev->name, first->name, 3) < 0) |
604 | first = dev; | 604 | first = dev; |
605 | } | 605 | } |
606 | if (first) | 606 | if (first) |
607 | dev_hold(first); | 607 | dev_hold(first); |
608 | read_unlock(&dev_base_lock); | 608 | rcu_read_unlock(); |
609 | 609 | ||
610 | return first; | 610 | return first; |
611 | } | 611 | } |
@@ -617,16 +617,17 @@ struct net_device *nr_dev_get(ax25_address *addr) | |||
617 | { | 617 | { |
618 | struct net_device *dev; | 618 | struct net_device *dev; |
619 | 619 | ||
620 | read_lock(&dev_base_lock); | 620 | rcu_read_lock(); |
621 | for_each_netdev(&init_net, dev) { | 621 | for_each_netdev_rcu(&init_net, dev) { |
622 | if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM && ax25cmp(addr, (ax25_address *)dev->dev_addr) == 0) { | 622 | if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM && |
623 | ax25cmp(addr, (ax25_address *)dev->dev_addr) == 0) { | ||
623 | dev_hold(dev); | 624 | dev_hold(dev); |
624 | goto out; | 625 | goto out; |
625 | } | 626 | } |
626 | } | 627 | } |
627 | dev = NULL; | 628 | dev = NULL; |
628 | out: | 629 | out: |
629 | read_unlock(&dev_base_lock); | 630 | rcu_read_unlock(); |
630 | return dev; | 631 | return dev; |
631 | } | 632 | } |
632 | 633 | ||
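
The conversion from dev_base_lock to RCU keeps the same lookup semantics: iterate under rcu_read_lock() with for_each_netdev_rcu(), and take a dev_hold() only on the device that is handed back to the caller. The shape of the pattern:

	struct net_device *dev, *found = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, dev) {
		if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM) {
			dev_hold(dev);	/* keep it alive past the unlock */
			found = dev;
			break;
		}
	}
	rcu_read_unlock();
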
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index f2d116a5cb35..c620bd9ae3de 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
@@ -79,6 +79,7 @@ | |||
79 | #include <linux/module.h> | 79 | #include <linux/module.h> |
80 | #include <linux/init.h> | 80 | #include <linux/init.h> |
81 | #include <linux/mutex.h> | 81 | #include <linux/mutex.h> |
82 | #include <linux/if_vlan.h> | ||
82 | 83 | ||
83 | #ifdef CONFIG_INET | 84 | #ifdef CONFIG_INET |
84 | #include <net/inet_common.h> | 85 | #include <net/inet_common.h> |
@@ -188,7 +189,6 @@ struct packet_sock { | |||
188 | struct packet_ring_buffer tx_ring; | 189 | struct packet_ring_buffer tx_ring; |
189 | int copy_thresh; | 190 | int copy_thresh; |
190 | #endif | 191 | #endif |
191 | struct packet_type prot_hook; | ||
192 | spinlock_t bind_lock; | 192 | spinlock_t bind_lock; |
193 | struct mutex pg_vec_lock; | 193 | struct mutex pg_vec_lock; |
194 | unsigned int running:1, /* prot_hook is attached*/ | 194 | unsigned int running:1, /* prot_hook is attached*/ |
@@ -204,6 +204,7 @@ struct packet_sock { | |||
204 | unsigned int tp_reserve; | 204 | unsigned int tp_reserve; |
205 | unsigned int tp_loss:1; | 205 | unsigned int tp_loss:1; |
206 | #endif | 206 | #endif |
207 | struct packet_type prot_hook ____cacheline_aligned_in_smp; | ||
207 | }; | 208 | }; |
208 | 209 | ||
209 | struct packet_skb_cb { | 210 | struct packet_skb_cb { |
@@ -436,7 +437,8 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock, | |||
436 | */ | 437 | */ |
437 | 438 | ||
438 | saddr->spkt_device[13] = 0; | 439 | saddr->spkt_device[13] = 0; |
439 | dev = dev_get_by_name(sock_net(sk), saddr->spkt_device); | 440 | rcu_read_lock(); |
441 | dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device); | ||
440 | err = -ENODEV; | 442 | err = -ENODEV; |
441 | if (dev == NULL) | 443 | if (dev == NULL) |
442 | goto out_unlock; | 444 | goto out_unlock; |
@@ -490,6 +492,7 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock, | |||
490 | skb->protocol = proto; | 492 | skb->protocol = proto; |
491 | skb->dev = dev; | 493 | skb->dev = dev; |
492 | skb->priority = sk->sk_priority; | 494 | skb->priority = sk->sk_priority; |
495 | skb->mark = sk->sk_mark; | ||
493 | if (err) | 496 | if (err) |
494 | goto out_free; | 497 | goto out_free; |
495 | 498 | ||
@@ -498,14 +501,13 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock, | |||
498 | */ | 501 | */ |
499 | 502 | ||
500 | dev_queue_xmit(skb); | 503 | dev_queue_xmit(skb); |
501 | dev_put(dev); | 504 | rcu_read_unlock(); |
502 | return len; | 505 | return len; |
503 | 506 | ||
504 | out_free: | 507 | out_free: |
505 | kfree_skb(skb); | 508 | kfree_skb(skb); |
506 | out_unlock: | 509 | out_unlock: |
507 | if (dev) | 510 | rcu_read_unlock(); |
508 | dev_put(dev); | ||
509 | return err; | 511 | return err; |
510 | } | 512 | } |
511 | 513 | ||
@@ -626,15 +628,14 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev, | |||
626 | 628 | ||
627 | spin_lock(&sk->sk_receive_queue.lock); | 629 | spin_lock(&sk->sk_receive_queue.lock); |
628 | po->stats.tp_packets++; | 630 | po->stats.tp_packets++; |
631 | skb->dropcount = atomic_read(&sk->sk_drops); | ||
629 | __skb_queue_tail(&sk->sk_receive_queue, skb); | 632 | __skb_queue_tail(&sk->sk_receive_queue, skb); |
630 | spin_unlock(&sk->sk_receive_queue.lock); | 633 | spin_unlock(&sk->sk_receive_queue.lock); |
631 | sk->sk_data_ready(sk, skb->len); | 634 | sk->sk_data_ready(sk, skb->len); |
632 | return 0; | 635 | return 0; |
633 | 636 | ||
634 | drop_n_acct: | 637 | drop_n_acct: |
635 | spin_lock(&sk->sk_receive_queue.lock); | 638 | po->stats.tp_drops = atomic_inc_return(&sk->sk_drops); |
636 | po->stats.tp_drops++; | ||
637 | spin_unlock(&sk->sk_receive_queue.lock); | ||
638 | 639 | ||
639 | drop_n_restore: | 640 | drop_n_restore: |
640 | if (skb_head != skb->data && skb_shared(skb)) { | 641 | if (skb_head != skb->data && skb_shared(skb)) { |
@@ -766,7 +767,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, | |||
766 | getnstimeofday(&ts); | 767 | getnstimeofday(&ts); |
767 | h.h2->tp_sec = ts.tv_sec; | 768 | h.h2->tp_sec = ts.tv_sec; |
768 | h.h2->tp_nsec = ts.tv_nsec; | 769 | h.h2->tp_nsec = ts.tv_nsec; |
769 | h.h2->tp_vlan_tci = skb->vlan_tci; | 770 | h.h2->tp_vlan_tci = vlan_tx_tag_get(skb); |
770 | hdrlen = sizeof(*h.h2); | 771 | hdrlen = sizeof(*h.h2); |
771 | break; | 772 | break; |
772 | default: | 773 | default: |
@@ -856,6 +857,7 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, | |||
856 | skb->protocol = proto; | 857 | skb->protocol = proto; |
857 | skb->dev = dev; | 858 | skb->dev = dev; |
858 | skb->priority = po->sk.sk_priority; | 859 | skb->priority = po->sk.sk_priority; |
860 | skb->mark = po->sk.sk_mark; | ||
859 | skb_shinfo(skb)->destructor_arg = ph.raw; | 861 | skb_shinfo(skb)->destructor_arg = ph.raw; |
860 | 862 | ||
861 | switch (po->tp_version) { | 863 | switch (po->tp_version) { |
@@ -1122,6 +1124,7 @@ static int packet_snd(struct socket *sock, | |||
1122 | skb->protocol = proto; | 1124 | skb->protocol = proto; |
1123 | skb->dev = dev; | 1125 | skb->dev = dev; |
1124 | skb->priority = sk->sk_priority; | 1126 | skb->priority = sk->sk_priority; |
1127 | skb->mark = sk->sk_mark; | ||
1125 | 1128 | ||
1126 | /* | 1129 | /* |
1127 | * Now send it | 1130 | * Now send it |
@@ -1341,7 +1344,8 @@ static struct proto packet_proto = { | |||
1341 | * Create a packet of type SOCK_PACKET. | 1344 | * Create a packet of type SOCK_PACKET. |
1342 | */ | 1345 | */ |
1343 | 1346 | ||
1344 | static int packet_create(struct net *net, struct socket *sock, int protocol) | 1347 | static int packet_create(struct net *net, struct socket *sock, int protocol, |
1348 | int kern) | ||
1345 | { | 1349 | { |
1346 | struct sock *sk; | 1350 | struct sock *sk; |
1347 | struct packet_sock *po; | 1351 | struct packet_sock *po; |
@@ -1472,7 +1476,7 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1472 | if (err) | 1476 | if (err) |
1473 | goto out_free; | 1477 | goto out_free; |
1474 | 1478 | ||
1475 | sock_recv_timestamp(msg, sk, skb); | 1479 | sock_recv_ts_and_drops(msg, sk, skb); |
1476 | 1480 | ||
1477 | if (msg->msg_name) | 1481 | if (msg->msg_name) |
1478 | memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, | 1482 | memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, |
@@ -1488,7 +1492,7 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1488 | aux.tp_snaplen = skb->len; | 1492 | aux.tp_snaplen = skb->len; |
1489 | aux.tp_mac = 0; | 1493 | aux.tp_mac = 0; |
1490 | aux.tp_net = skb_network_offset(skb); | 1494 | aux.tp_net = skb_network_offset(skb); |
1491 | aux.tp_vlan_tci = skb->vlan_tci; | 1495 | aux.tp_vlan_tci = vlan_tx_tag_get(skb); |
1492 | 1496 | ||
1493 | put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux); | 1497 | put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux); |
1494 | } | 1498 | } |
@@ -1515,12 +1519,13 @@ static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr, | |||
1515 | return -EOPNOTSUPP; | 1519 | return -EOPNOTSUPP; |
1516 | 1520 | ||
1517 | uaddr->sa_family = AF_PACKET; | 1521 | uaddr->sa_family = AF_PACKET; |
1518 | dev = dev_get_by_index(sock_net(sk), pkt_sk(sk)->ifindex); | 1522 | rcu_read_lock(); |
1519 | if (dev) { | 1523 | dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex); |
1524 | if (dev) | ||
1520 | strlcpy(uaddr->sa_data, dev->name, 15); | 1525 | strlcpy(uaddr->sa_data, dev->name, 15); |
1521 | dev_put(dev); | 1526 | else |
1522 | } else | ||
1523 | memset(uaddr->sa_data, 0, 14); | 1527 | memset(uaddr->sa_data, 0, 14); |
1528 | rcu_read_unlock(); | ||
1524 | *uaddr_len = sizeof(*uaddr); | 1529 | *uaddr_len = sizeof(*uaddr); |
1525 | 1530 | ||
1526 | return 0; | 1531 | return 0; |
@@ -1532,7 +1537,7 @@ static int packet_getname(struct socket *sock, struct sockaddr *uaddr, | |||
1532 | struct net_device *dev; | 1537 | struct net_device *dev; |
1533 | struct sock *sk = sock->sk; | 1538 | struct sock *sk = sock->sk; |
1534 | struct packet_sock *po = pkt_sk(sk); | 1539 | struct packet_sock *po = pkt_sk(sk); |
1535 | struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr; | 1540 | DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr); |
1536 | 1541 | ||
1537 | if (peer) | 1542 | if (peer) |
1538 | return -EOPNOTSUPP; | 1543 | return -EOPNOTSUPP; |
@@ -1540,16 +1545,17 @@ static int packet_getname(struct socket *sock, struct sockaddr *uaddr, | |||
1540 | sll->sll_family = AF_PACKET; | 1545 | sll->sll_family = AF_PACKET; |
1541 | sll->sll_ifindex = po->ifindex; | 1546 | sll->sll_ifindex = po->ifindex; |
1542 | sll->sll_protocol = po->num; | 1547 | sll->sll_protocol = po->num; |
1543 | dev = dev_get_by_index(sock_net(sk), po->ifindex); | 1548 | rcu_read_lock(); |
1549 | dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex); | ||
1544 | if (dev) { | 1550 | if (dev) { |
1545 | sll->sll_hatype = dev->type; | 1551 | sll->sll_hatype = dev->type; |
1546 | sll->sll_halen = dev->addr_len; | 1552 | sll->sll_halen = dev->addr_len; |
1547 | memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len); | 1553 | memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len); |
1548 | dev_put(dev); | ||
1549 | } else { | 1554 | } else { |
1550 | sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */ | 1555 | sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */ |
1551 | sll->sll_halen = 0; | 1556 | sll->sll_halen = 0; |
1552 | } | 1557 | } |
1558 | rcu_read_unlock(); | ||
1553 | *uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen; | 1559 | *uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen; |
1554 | 1560 | ||
1555 | return 0; | 1561 | return 0; |
@@ -1659,11 +1665,9 @@ static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq) | |||
1659 | if (--ml->count == 0) { | 1665 | if (--ml->count == 0) { |
1660 | struct net_device *dev; | 1666 | struct net_device *dev; |
1661 | *mlp = ml->next; | 1667 | *mlp = ml->next; |
1662 | dev = dev_get_by_index(sock_net(sk), ml->ifindex); | 1668 | dev = __dev_get_by_index(sock_net(sk), ml->ifindex); |
1663 | if (dev) { | 1669 | if (dev) |
1664 | packet_dev_mc(dev, ml, -1); | 1670 | packet_dev_mc(dev, ml, -1); |
1665 | dev_put(dev); | ||
1666 | } | ||
1667 | kfree(ml); | 1671 | kfree(ml); |
1668 | } | 1672 | } |
1669 | rtnl_unlock(); | 1673 | rtnl_unlock(); |
@@ -1687,11 +1691,9 @@ static void packet_flush_mclist(struct sock *sk) | |||
1687 | struct net_device *dev; | 1691 | struct net_device *dev; |
1688 | 1692 | ||
1689 | po->mclist = ml->next; | 1693 | po->mclist = ml->next; |
1690 | dev = dev_get_by_index(sock_net(sk), ml->ifindex); | 1694 | dev = __dev_get_by_index(sock_net(sk), ml->ifindex); |
1691 | if (dev != NULL) { | 1695 | if (dev != NULL) |
1692 | packet_dev_mc(dev, ml, -1); | 1696 | packet_dev_mc(dev, ml, -1); |
1693 | dev_put(dev); | ||
1694 | } | ||
1695 | kfree(ml); | 1697 | kfree(ml); |
1696 | } | 1698 | } |
1697 | rtnl_unlock(); | 1699 | rtnl_unlock(); |
@@ -2360,7 +2362,7 @@ static const struct proto_ops packet_ops = { | |||
2360 | .sendpage = sock_no_sendpage, | 2362 | .sendpage = sock_no_sendpage, |
2361 | }; | 2363 | }; |
2362 | 2364 | ||
2363 | static struct net_proto_family packet_family_ops = { | 2365 | static const struct net_proto_family packet_family_ops = { |
2364 | .family = PF_PACKET, | 2366 | .family = PF_PACKET, |
2365 | .create = packet_create, | 2367 | .create = packet_create, |
2366 | .owner = THIS_MODULE, | 2368 | .owner = THIS_MODULE, |
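
With tp_vlan_tci now taken from vlan_tx_tag_get(), hardware-stripped VLAN tags reach userspace through both the mmap ring and the PACKET_AUXDATA control message. A hedged userspace sketch of the auxdata side, using the standard AF_PACKET interface; fd is assumed to be an AF_PACKET socket, msg a struct msghdr already filled by recvmsg(), and error handling is omitted:

#include <linux/if_packet.h>
#include <sys/socket.h>

	int one = 1;
	struct cmsghdr *cmsg;
	__u16 vlan_tci = 0;

	setsockopt(fd, SOL_PACKET, PACKET_AUXDATA, &one, sizeof(one));
	/* ... recvmsg(fd, &msg, 0) ... */
	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_PACKET &&
		    cmsg->cmsg_type == PACKET_AUXDATA) {
			struct tpacket_auxdata *aux = (void *)CMSG_DATA(cmsg);
			vlan_tci = aux->tp_vlan_tci;
		}
	}
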
diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c index f60c0c2aacba..526d0273991a 100644 --- a/net/phonet/af_phonet.c +++ b/net/phonet/af_phonet.c | |||
@@ -35,7 +35,6 @@ | |||
35 | 35 | ||
36 | /* Transport protocol registration */ | 36 | /* Transport protocol registration */ |
37 | static struct phonet_protocol *proto_tab[PHONET_NPROTO] __read_mostly; | 37 | static struct phonet_protocol *proto_tab[PHONET_NPROTO] __read_mostly; |
38 | static DEFINE_SPINLOCK(proto_tab_lock); | ||
39 | 38 | ||
40 | static struct phonet_protocol *phonet_proto_get(int protocol) | 39 | static struct phonet_protocol *phonet_proto_get(int protocol) |
41 | { | 40 | { |
@@ -44,11 +43,11 @@ static struct phonet_protocol *phonet_proto_get(int protocol) | |||
44 | if (protocol >= PHONET_NPROTO) | 43 | if (protocol >= PHONET_NPROTO) |
45 | return NULL; | 44 | return NULL; |
46 | 45 | ||
47 | spin_lock(&proto_tab_lock); | 46 | rcu_read_lock(); |
48 | pp = proto_tab[protocol]; | 47 | pp = rcu_dereference(proto_tab[protocol]); |
49 | if (pp && !try_module_get(pp->prot->owner)) | 48 | if (pp && !try_module_get(pp->prot->owner)) |
50 | pp = NULL; | 49 | pp = NULL; |
51 | spin_unlock(&proto_tab_lock); | 50 | rcu_read_unlock(); |
52 | 51 | ||
53 | return pp; | 52 | return pp; |
54 | } | 53 | } |
@@ -60,7 +59,8 @@ static inline void phonet_proto_put(struct phonet_protocol *pp) | |||
60 | 59 | ||
61 | /* protocol family functions */ | 60 | /* protocol family functions */ |
62 | 61 | ||
63 | static int pn_socket_create(struct net *net, struct socket *sock, int protocol) | 62 | static int pn_socket_create(struct net *net, struct socket *sock, int protocol, |
63 | int kern) | ||
64 | { | 64 | { |
65 | struct sock *sk; | 65 | struct sock *sk; |
66 | struct pn_sock *pn; | 66 | struct pn_sock *pn; |
@@ -118,7 +118,7 @@ out: | |||
118 | return err; | 118 | return err; |
119 | } | 119 | } |
120 | 120 | ||
121 | static struct net_proto_family phonet_proto_family = { | 121 | static const struct net_proto_family phonet_proto_family = { |
122 | .family = PF_PHONET, | 122 | .family = PF_PHONET, |
123 | .create = pn_socket_create, | 123 | .create = pn_socket_create, |
124 | .owner = THIS_MODULE, | 124 | .owner = THIS_MODULE, |
@@ -190,9 +190,8 @@ static int pn_send(struct sk_buff *skb, struct net_device *dev, | |||
190 | skb->priority = 0; | 190 | skb->priority = 0; |
191 | skb->dev = dev; | 191 | skb->dev = dev; |
192 | 192 | ||
193 | if (pn_addr(src) == pn_addr(dst)) { | 193 | if (skb->pkt_type == PACKET_LOOPBACK) { |
194 | skb_reset_mac_header(skb); | 194 | skb_reset_mac_header(skb); |
195 | skb->pkt_type = PACKET_LOOPBACK; | ||
196 | skb_orphan(skb); | 195 | skb_orphan(skb); |
197 | if (irq) | 196 | if (irq) |
198 | netif_rx(skb); | 197 | netif_rx(skb); |
@@ -222,6 +221,9 @@ static int pn_raw_send(const void *data, int len, struct net_device *dev, | |||
222 | if (skb == NULL) | 221 | if (skb == NULL) |
223 | return -ENOMEM; | 222 | return -ENOMEM; |
224 | 223 | ||
224 | if (phonet_address_lookup(dev_net(dev), pn_addr(dst)) == 0) | ||
225 | skb->pkt_type = PACKET_LOOPBACK; | ||
226 | |||
225 | skb_reserve(skb, MAX_PHONET_HEADER); | 227 | skb_reserve(skb, MAX_PHONET_HEADER); |
226 | __skb_put(skb, len); | 228 | __skb_put(skb, len); |
227 | skb_copy_to_linear_data(skb, data, len); | 229 | skb_copy_to_linear_data(skb, data, len); |
@@ -235,6 +237,7 @@ static int pn_raw_send(const void *data, int len, struct net_device *dev, | |||
235 | int pn_skb_send(struct sock *sk, struct sk_buff *skb, | 237 | int pn_skb_send(struct sock *sk, struct sk_buff *skb, |
236 | const struct sockaddr_pn *target) | 238 | const struct sockaddr_pn *target) |
237 | { | 239 | { |
240 | struct net *net = sock_net(sk); | ||
238 | struct net_device *dev; | 241 | struct net_device *dev; |
239 | struct pn_sock *pn = pn_sk(sk); | 242 | struct pn_sock *pn = pn_sk(sk); |
240 | int err; | 243 | int err; |
@@ -243,9 +246,13 @@ int pn_skb_send(struct sock *sk, struct sk_buff *skb, | |||
243 | 246 | ||
244 | err = -EHOSTUNREACH; | 247 | err = -EHOSTUNREACH; |
245 | if (sk->sk_bound_dev_if) | 248 | if (sk->sk_bound_dev_if) |
246 | dev = dev_get_by_index(sock_net(sk), sk->sk_bound_dev_if); | 249 | dev = dev_get_by_index(net, sk->sk_bound_dev_if); |
247 | else | 250 | else if (phonet_address_lookup(net, daddr) == 0) { |
248 | dev = phonet_device_get(sock_net(sk)); | 251 | dev = phonet_device_get(net); |
252 | skb->pkt_type = PACKET_LOOPBACK; | ||
253 | } else | ||
254 | dev = phonet_route_output(net, daddr); | ||
255 | |||
249 | if (!dev || !(dev->flags & IFF_UP)) | 256 | if (!dev || !(dev->flags & IFF_UP)) |
250 | goto drop; | 257 | goto drop; |
251 | 258 | ||
@@ -369,6 +376,12 @@ static int phonet_rcv(struct sk_buff *skb, struct net_device *dev, | |||
369 | 376 | ||
370 | pn_skb_get_dst_sockaddr(skb, &sa); | 377 | pn_skb_get_dst_sockaddr(skb, &sa); |
371 | 378 | ||
379 | /* check whether this is a broadcast */ | ||
380 | if (pn_sockaddr_get_addr(&sa) == PNADDR_BROADCAST) { | ||
381 | pn_deliver_sock_broadcast(net, skb); | ||
382 | goto out; | ||
383 | } | ||
384 | |||
372 | /* check if we are the destination */ | 385 | /* check if we are the destination */ |
373 | if (phonet_address_lookup(net, pn_sockaddr_get_addr(&sa)) == 0) { | 386 | if (phonet_address_lookup(net, pn_sockaddr_get_addr(&sa)) == 0) { |
374 | /* Phonet packet input */ | 387 | /* Phonet packet input */ |
@@ -381,6 +394,38 @@ static int phonet_rcv(struct sk_buff *skb, struct net_device *dev, | |||
381 | send_obj_unreachable(skb); | 394 | send_obj_unreachable(skb); |
382 | send_reset_indications(skb); | 395 | send_reset_indications(skb); |
383 | } | 396 | } |
397 | } else if (unlikely(skb->pkt_type == PACKET_LOOPBACK)) | ||
398 | goto out; /* Race between address deletion and loopback */ | ||
399 | else { | ||
400 | /* Phonet packet routing */ | ||
401 | struct net_device *out_dev; | ||
402 | |||
403 | out_dev = phonet_route_output(net, pn_sockaddr_get_addr(&sa)); | ||
404 | if (!out_dev) { | ||
405 | LIMIT_NETDEBUG(KERN_WARNING"No Phonet route to %02X\n", | ||
406 | pn_sockaddr_get_addr(&sa)); | ||
407 | goto out; | ||
408 | } | ||
409 | |||
410 | __skb_push(skb, sizeof(struct phonethdr)); | ||
411 | skb->dev = out_dev; | ||
412 | if (out_dev == dev) { | ||
413 | LIMIT_NETDEBUG(KERN_ERR"Phonet loop to %02X on %s\n", | ||
414 | pn_sockaddr_get_addr(&sa), dev->name); | ||
415 | goto out_dev; | ||
416 | } | ||
417 | /* Some drivers (e.g. TUN) do not allocate HW header space */ | ||
418 | if (skb_cow_head(skb, out_dev->hard_header_len)) | ||
419 | goto out_dev; | ||
420 | |||
421 | if (dev_hard_header(skb, out_dev, ETH_P_PHONET, NULL, NULL, | ||
422 | skb->len) < 0) | ||
423 | goto out_dev; | ||
424 | dev_queue_xmit(skb); | ||
425 | dev_put(out_dev); | ||
426 | return NET_RX_SUCCESS; | ||
427 | out_dev: | ||
428 | dev_put(out_dev); | ||
384 | } | 429 | } |
385 | 430 | ||
386 | out: | 431 | out: |
@@ -393,6 +438,8 @@ static struct packet_type phonet_packet_type __read_mostly = { | |||
393 | .func = phonet_rcv, | 438 | .func = phonet_rcv, |
394 | }; | 439 | }; |
395 | 440 | ||
441 | static DEFINE_MUTEX(proto_tab_lock); | ||
442 | |||
396 | int __init_or_module phonet_proto_register(int protocol, | 443 | int __init_or_module phonet_proto_register(int protocol, |
397 | struct phonet_protocol *pp) | 444 | struct phonet_protocol *pp) |
398 | { | 445 | { |
@@ -405,12 +452,12 @@ int __init_or_module phonet_proto_register(int protocol, | |||
405 | if (err) | 452 | if (err) |
406 | return err; | 453 | return err; |
407 | 454 | ||
408 | spin_lock(&proto_tab_lock); | 455 | mutex_lock(&proto_tab_lock); |
409 | if (proto_tab[protocol]) | 456 | if (proto_tab[protocol]) |
410 | err = -EBUSY; | 457 | err = -EBUSY; |
411 | else | 458 | else |
412 | proto_tab[protocol] = pp; | 459 | rcu_assign_pointer(proto_tab[protocol], pp); |
413 | spin_unlock(&proto_tab_lock); | 460 | mutex_unlock(&proto_tab_lock); |
414 | 461 | ||
415 | return err; | 462 | return err; |
416 | } | 463 | } |
@@ -418,10 +465,11 @@ EXPORT_SYMBOL(phonet_proto_register); | |||
418 | 465 | ||
419 | void phonet_proto_unregister(int protocol, struct phonet_protocol *pp) | 466 | void phonet_proto_unregister(int protocol, struct phonet_protocol *pp) |
420 | { | 467 | { |
421 | spin_lock(&proto_tab_lock); | 468 | mutex_lock(&proto_tab_lock); |
422 | BUG_ON(proto_tab[protocol] != pp); | 469 | BUG_ON(proto_tab[protocol] != pp); |
423 | proto_tab[protocol] = NULL; | 470 | rcu_assign_pointer(proto_tab[protocol], NULL); |
424 | spin_unlock(&proto_tab_lock); | 471 | mutex_unlock(&proto_tab_lock); |
472 | synchronize_rcu(); | ||
425 | proto_unregister(pp->prot); | 473 | proto_unregister(pp->prot); |
426 | } | 474 | } |
427 | EXPORT_SYMBOL(phonet_proto_unregister); | 475 | EXPORT_SYMBOL(phonet_proto_unregister); |
@@ -435,6 +483,7 @@ static int __init phonet_init(void) | |||
435 | if (err) | 483 | if (err) |
436 | return err; | 484 | return err; |
437 | 485 | ||
486 | pn_sock_init(); | ||
438 | err = sock_register(&phonet_proto_family); | 487 | err = sock_register(&phonet_proto_family); |
439 | if (err) { | 488 | if (err) { |
440 | printk(KERN_ALERT | 489 | printk(KERN_ALERT |
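
The proto_tab conversion is the canonical RCU-protected-table pattern: readers pair rcu_read_lock() with rcu_dereference(), while the rare register/unregister path serializes on a mutex, publishes with rcu_assign_pointer(), and calls synchronize_rcu() before the old entry may be torn down. Stripped of the Phonet specifics (all names below are placeholders):

static struct foo *table[NPROTO];
static DEFINE_MUTEX(table_lock);

	/* reader */
	rcu_read_lock();
	p = rcu_dereference(table[proto]);
	if (p)
		use(p);			/* p stays valid until rcu_read_unlock() */
	rcu_read_unlock();

	/* writer (unregister) */
	mutex_lock(&table_lock);
	rcu_assign_pointer(table[proto], NULL);
	mutex_unlock(&table_lock);
	synchronize_rcu();		/* wait out readers of the old pointer */
	teardown(p);			/* now safe to free/unregister */
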
diff --git a/net/phonet/datagram.c b/net/phonet/datagram.c index ef5c75c372e4..67f072e94d00 100644 --- a/net/phonet/datagram.c +++ b/net/phonet/datagram.c | |||
@@ -159,11 +159,9 @@ out_nofree: | |||
159 | static int pn_backlog_rcv(struct sock *sk, struct sk_buff *skb) | 159 | static int pn_backlog_rcv(struct sock *sk, struct sk_buff *skb) |
160 | { | 160 | { |
161 | int err = sock_queue_rcv_skb(sk, skb); | 161 | int err = sock_queue_rcv_skb(sk, skb); |
162 | if (err < 0) { | 162 | |
163 | if (err < 0) | ||
163 | kfree_skb(skb); | 164 | kfree_skb(skb); |
164 | if (err == -ENOMEM) | ||
165 | atomic_inc(&sk->sk_drops); | ||
166 | } | ||
167 | return err ? NET_RX_DROP : NET_RX_SUCCESS; | 165 | return err ? NET_RX_DROP : NET_RX_SUCCESS; |
168 | } | 166 | } |
169 | 167 | ||
diff --git a/net/phonet/pep.c b/net/phonet/pep.c index 5f32d217535b..bdc17bdad366 100644 --- a/net/phonet/pep.c +++ b/net/phonet/pep.c | |||
@@ -360,8 +360,6 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb) | |||
360 | err = sock_queue_rcv_skb(sk, skb); | 360 | err = sock_queue_rcv_skb(sk, skb); |
361 | if (!err) | 361 | if (!err) |
362 | return 0; | 362 | return 0; |
363 | if (err == -ENOMEM) | ||
364 | atomic_inc(&sk->sk_drops); | ||
365 | break; | 363 | break; |
366 | } | 364 | } |
367 | 365 | ||
@@ -845,7 +843,7 @@ static int pep_sendmsg(struct kiocb *iocb, struct sock *sk, | |||
845 | struct msghdr *msg, size_t len) | 843 | struct msghdr *msg, size_t len) |
846 | { | 844 | { |
847 | struct pep_sock *pn = pep_sk(sk); | 845 | struct pep_sock *pn = pep_sk(sk); |
848 | struct sk_buff *skb = NULL; | 846 | struct sk_buff *skb; |
849 | long timeo; | 847 | long timeo; |
850 | int flags = msg->msg_flags; | 848 | int flags = msg->msg_flags; |
851 | int err, done; | 849 | int err, done; |
@@ -853,6 +851,16 @@ static int pep_sendmsg(struct kiocb *iocb, struct sock *sk, | |||
853 | if (msg->msg_flags & MSG_OOB || !(msg->msg_flags & MSG_EOR)) | 851 | if (msg->msg_flags & MSG_OOB || !(msg->msg_flags & MSG_EOR)) |
854 | return -EOPNOTSUPP; | 852 | return -EOPNOTSUPP; |
855 | 853 | ||
854 | skb = sock_alloc_send_skb(sk, MAX_PNPIPE_HEADER + len, | ||
855 | flags & MSG_DONTWAIT, &err); | ||
856 | if (!skb) | ||
857 | return -ENOBUFS; | ||
858 | |||
859 | skb_reserve(skb, MAX_PHONET_HEADER + 3); | ||
860 | err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len); | ||
861 | if (err < 0) | ||
862 | goto outfree; | ||
863 | |||
856 | lock_sock(sk); | 864 | lock_sock(sk); |
857 | timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); | 865 | timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); |
858 | if ((1 << sk->sk_state) & (TCPF_LISTEN|TCPF_CLOSE)) { | 866 | if ((1 << sk->sk_state) & (TCPF_LISTEN|TCPF_CLOSE)) { |
@@ -896,28 +904,13 @@ disabled: | |||
896 | goto disabled; | 904 | goto disabled; |
897 | } | 905 | } |
898 | 906 | ||
899 | if (!skb) { | ||
900 | skb = sock_alloc_send_skb(sk, MAX_PNPIPE_HEADER + len, | ||
901 | flags & MSG_DONTWAIT, &err); | ||
902 | if (skb == NULL) | ||
903 | goto out; | ||
904 | skb_reserve(skb, MAX_PHONET_HEADER + 3); | ||
905 | |||
906 | if (sk->sk_state != TCP_ESTABLISHED || | ||
907 | !atomic_read(&pn->tx_credits)) | ||
908 | goto disabled; /* sock_alloc_send_skb might sleep */ | ||
909 | } | ||
910 | |||
911 | err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len); | ||
912 | if (err < 0) | ||
913 | goto out; | ||
914 | |||
915 | err = pipe_skb_send(sk, skb); | 907 | err = pipe_skb_send(sk, skb); |
916 | if (err >= 0) | 908 | if (err >= 0) |
917 | err = len; /* success! */ | 909 | err = len; /* success! */ |
918 | skb = NULL; | 910 | skb = NULL; |
919 | out: | 911 | out: |
920 | release_sock(sk); | 912 | release_sock(sk); |
913 | outfree: | ||
921 | kfree_skb(skb); | 914 | kfree_skb(skb); |
922 | return err; | 915 | return err; |
923 | } | 916 | } |
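The pep_sendmsg() rework moves the sleeping skb allocation and the copy from user space in front of lock_sock(), so the socket lock is no longer held across operations that may block. A trimmed sketch of the resulting ordering is below; pipe_skb_send() is the helper named in the hunk, the state checks between lock and send are omitted, and the function as a whole is an illustration rather than the exact code.

static int example_pipe_sendmsg(struct sock *sk, struct msghdr *msg,
				size_t len, int flags)
{
	struct sk_buff *skb;
	int err;

	/* May sleep: done before any lock is taken. */
	skb = sock_alloc_send_skb(sk, MAX_PNPIPE_HEADER + len,
				  flags & MSG_DONTWAIT, &err);
	if (skb == NULL)
		return -ENOBUFS;
	skb_reserve(skb, MAX_PHONET_HEADER + 3);
	err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
	if (err < 0)
		goto out_free;

	lock_sock(sk);			/* only non-blocking work from here on */
	err = pipe_skb_send(sk, skb);	/* consumes the skb either way */
	if (err >= 0)
		err = len;
	skb = NULL;
	release_sock(sk);
out_free:
	kfree_skb(skb);			/* no-op once skb has been handed off */
	return err;
}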
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c index 5f42f30dd168..d87388c94b00 100644 --- a/net/phonet/pn_dev.c +++ b/net/phonet/pn_dev.c | |||
@@ -33,11 +33,17 @@ | |||
33 | #include <net/netns/generic.h> | 33 | #include <net/netns/generic.h> |
34 | #include <net/phonet/pn_dev.h> | 34 | #include <net/phonet/pn_dev.h> |
35 | 35 | ||
36 | struct phonet_routes { | ||
37 | struct mutex lock; | ||
38 | struct net_device *table[64]; | ||
39 | }; | ||
40 | |||
36 | struct phonet_net { | 41 | struct phonet_net { |
37 | struct phonet_device_list pndevs; | 42 | struct phonet_device_list pndevs; |
43 | struct phonet_routes routes; | ||
38 | }; | 44 | }; |
39 | 45 | ||
40 | int phonet_net_id; | 46 | int phonet_net_id __read_mostly; |
41 | 47 | ||
42 | struct phonet_device_list *phonet_device_list(struct net *net) | 48 | struct phonet_device_list *phonet_device_list(struct net *net) |
43 | { | 49 | { |
@@ -55,7 +61,8 @@ static struct phonet_device *__phonet_device_alloc(struct net_device *dev) | |||
55 | pnd->netdev = dev; | 61 | pnd->netdev = dev; |
56 | bitmap_zero(pnd->addrs, 64); | 62 | bitmap_zero(pnd->addrs, 64); |
57 | 63 | ||
58 | list_add(&pnd->list, &pndevs->list); | 64 | BUG_ON(!mutex_is_locked(&pndevs->lock)); |
65 | list_add_rcu(&pnd->list, &pndevs->list); | ||
59 | return pnd; | 66 | return pnd; |
60 | } | 67 | } |
61 | 68 | ||
@@ -64,6 +71,7 @@ static struct phonet_device *__phonet_get(struct net_device *dev) | |||
64 | struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev)); | 71 | struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev)); |
65 | struct phonet_device *pnd; | 72 | struct phonet_device *pnd; |
66 | 73 | ||
74 | BUG_ON(!mutex_is_locked(&pndevs->lock)); | ||
67 | list_for_each_entry(pnd, &pndevs->list, list) { | 75 | list_for_each_entry(pnd, &pndevs->list, list) { |
68 | if (pnd->netdev == dev) | 76 | if (pnd->netdev == dev) |
69 | return pnd; | 77 | return pnd; |
@@ -71,6 +79,18 @@ static struct phonet_device *__phonet_get(struct net_device *dev) | |||
71 | return NULL; | 79 | return NULL; |
72 | } | 80 | } |
73 | 81 | ||
82 | static struct phonet_device *__phonet_get_rcu(struct net_device *dev) | ||
83 | { | ||
84 | struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev)); | ||
85 | struct phonet_device *pnd; | ||
86 | |||
87 | list_for_each_entry_rcu(pnd, &pndevs->list, list) { | ||
88 | if (pnd->netdev == dev) | ||
89 | return pnd; | ||
90 | } | ||
91 | return NULL; | ||
92 | } | ||
93 | |||
74 | static void phonet_device_destroy(struct net_device *dev) | 94 | static void phonet_device_destroy(struct net_device *dev) |
75 | { | 95 | { |
76 | struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev)); | 96 | struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev)); |
@@ -78,11 +98,11 @@ static void phonet_device_destroy(struct net_device *dev) | |||
78 | 98 | ||
79 | ASSERT_RTNL(); | 99 | ASSERT_RTNL(); |
80 | 100 | ||
81 | spin_lock_bh(&pndevs->lock); | 101 | mutex_lock(&pndevs->lock); |
82 | pnd = __phonet_get(dev); | 102 | pnd = __phonet_get(dev); |
83 | if (pnd) | 103 | if (pnd) |
84 | list_del(&pnd->list); | 104 | list_del_rcu(&pnd->list); |
85 | spin_unlock_bh(&pndevs->lock); | 105 | mutex_unlock(&pndevs->lock); |
86 | 106 | ||
87 | if (pnd) { | 107 | if (pnd) { |
88 | u8 addr; | 108 | u8 addr; |
@@ -100,8 +120,8 @@ struct net_device *phonet_device_get(struct net *net) | |||
100 | struct phonet_device *pnd; | 120 | struct phonet_device *pnd; |
101 | struct net_device *dev = NULL; | 121 | struct net_device *dev = NULL; |
102 | 122 | ||
103 | spin_lock_bh(&pndevs->lock); | 123 | rcu_read_lock(); |
104 | list_for_each_entry(pnd, &pndevs->list, list) { | 124 | list_for_each_entry_rcu(pnd, &pndevs->list, list) { |
105 | dev = pnd->netdev; | 125 | dev = pnd->netdev; |
106 | BUG_ON(!dev); | 126 | BUG_ON(!dev); |
107 | 127 | ||
@@ -112,7 +132,7 @@ struct net_device *phonet_device_get(struct net *net) | |||
112 | } | 132 | } |
113 | if (dev) | 133 | if (dev) |
114 | dev_hold(dev); | 134 | dev_hold(dev); |
115 | spin_unlock_bh(&pndevs->lock); | 135 | rcu_read_unlock(); |
116 | return dev; | 136 | return dev; |
117 | } | 137 | } |
118 | 138 | ||
@@ -122,7 +142,7 @@ int phonet_address_add(struct net_device *dev, u8 addr) | |||
122 | struct phonet_device *pnd; | 142 | struct phonet_device *pnd; |
123 | int err = 0; | 143 | int err = 0; |
124 | 144 | ||
125 | spin_lock_bh(&pndevs->lock); | 145 | mutex_lock(&pndevs->lock); |
126 | /* Find or create Phonet-specific device data */ | 146 | /* Find or create Phonet-specific device data */ |
127 | pnd = __phonet_get(dev); | 147 | pnd = __phonet_get(dev); |
128 | if (pnd == NULL) | 148 | if (pnd == NULL) |
@@ -131,7 +151,7 @@ int phonet_address_add(struct net_device *dev, u8 addr) | |||
131 | err = -ENOMEM; | 151 | err = -ENOMEM; |
132 | else if (test_and_set_bit(addr >> 2, pnd->addrs)) | 152 | else if (test_and_set_bit(addr >> 2, pnd->addrs)) |
133 | err = -EEXIST; | 153 | err = -EEXIST; |
134 | spin_unlock_bh(&pndevs->lock); | 154 | mutex_unlock(&pndevs->lock); |
135 | return err; | 155 | return err; |
136 | } | 156 | } |
137 | 157 | ||
@@ -141,36 +161,56 @@ int phonet_address_del(struct net_device *dev, u8 addr) | |||
141 | struct phonet_device *pnd; | 161 | struct phonet_device *pnd; |
142 | int err = 0; | 162 | int err = 0; |
143 | 163 | ||
144 | spin_lock_bh(&pndevs->lock); | 164 | mutex_lock(&pndevs->lock); |
145 | pnd = __phonet_get(dev); | 165 | pnd = __phonet_get(dev); |
146 | if (!pnd || !test_and_clear_bit(addr >> 2, pnd->addrs)) | 166 | if (!pnd || !test_and_clear_bit(addr >> 2, pnd->addrs)) { |
147 | err = -EADDRNOTAVAIL; | 167 | err = -EADDRNOTAVAIL; |
148 | else if (bitmap_empty(pnd->addrs, 64)) { | 168 | pnd = NULL; |
149 | list_del(&pnd->list); | 169 | } else if (bitmap_empty(pnd->addrs, 64)) |
170 | list_del_rcu(&pnd->list); | ||
171 | else | ||
172 | pnd = NULL; | ||
173 | mutex_unlock(&pndevs->lock); | ||
174 | |||
175 | if (pnd) { | ||
176 | synchronize_rcu(); | ||
150 | kfree(pnd); | 177 | kfree(pnd); |
151 | } | 178 | } |
152 | spin_unlock_bh(&pndevs->lock); | ||
153 | return err; | 179 | return err; |
154 | } | 180 | } |
155 | 181 | ||
156 | /* Gets a source address toward a destination, through a interface. */ | 182 | /* Gets a source address toward a destination, through a interface. */ |
157 | u8 phonet_address_get(struct net_device *dev, u8 addr) | 183 | u8 phonet_address_get(struct net_device *dev, u8 daddr) |
158 | { | 184 | { |
159 | struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev)); | ||
160 | struct phonet_device *pnd; | 185 | struct phonet_device *pnd; |
186 | u8 saddr; | ||
161 | 187 | ||
162 | spin_lock_bh(&pndevs->lock); | 188 | rcu_read_lock(); |
163 | pnd = __phonet_get(dev); | 189 | pnd = __phonet_get_rcu(dev); |
164 | if (pnd) { | 190 | if (pnd) { |
165 | BUG_ON(bitmap_empty(pnd->addrs, 64)); | 191 | BUG_ON(bitmap_empty(pnd->addrs, 64)); |
166 | 192 | ||
167 | /* Use same source address as destination, if possible */ | 193 | /* Use same source address as destination, if possible */ |
168 | if (!test_bit(addr >> 2, pnd->addrs)) | 194 | if (test_bit(daddr >> 2, pnd->addrs)) |
169 | addr = find_first_bit(pnd->addrs, 64) << 2; | 195 | saddr = daddr; |
196 | else | ||
197 | saddr = find_first_bit(pnd->addrs, 64) << 2; | ||
170 | } else | 198 | } else |
171 | addr = PN_NO_ADDR; | 199 | saddr = PN_NO_ADDR; |
172 | spin_unlock_bh(&pndevs->lock); | 200 | rcu_read_unlock(); |
173 | return addr; | 201 | |
202 | if (saddr == PN_NO_ADDR) { | ||
203 | /* Fallback to another device */ | ||
204 | struct net_device *def_dev; | ||
205 | |||
206 | def_dev = phonet_device_get(dev_net(dev)); | ||
207 | if (def_dev) { | ||
208 | if (def_dev != dev) | ||
209 | saddr = phonet_address_get(def_dev, daddr); | ||
210 | dev_put(def_dev); | ||
211 | } | ||
212 | } | ||
213 | return saddr; | ||
174 | } | 214 | } |
175 | 215 | ||
176 | int phonet_address_lookup(struct net *net, u8 addr) | 216 | int phonet_address_lookup(struct net *net, u8 addr) |
@@ -179,8 +219,8 @@ int phonet_address_lookup(struct net *net, u8 addr) | |||
179 | struct phonet_device *pnd; | 219 | struct phonet_device *pnd; |
180 | int err = -EADDRNOTAVAIL; | 220 | int err = -EADDRNOTAVAIL; |
181 | 221 | ||
182 | spin_lock_bh(&pndevs->lock); | 222 | rcu_read_lock(); |
183 | list_for_each_entry(pnd, &pndevs->list, list) { | 223 | list_for_each_entry_rcu(pnd, &pndevs->list, list) { |
184 | /* Don't allow unregistering devices! */ | 224 | /* Don't allow unregistering devices! */ |
185 | if ((pnd->netdev->reg_state != NETREG_REGISTERED) || | 225 | if ((pnd->netdev->reg_state != NETREG_REGISTERED) || |
186 | ((pnd->netdev->flags & IFF_UP)) != IFF_UP) | 226 | ((pnd->netdev->flags & IFF_UP)) != IFF_UP) |
@@ -192,7 +232,7 @@ int phonet_address_lookup(struct net *net, u8 addr) | |||
192 | } | 232 | } |
193 | } | 233 | } |
194 | found: | 234 | found: |
195 | spin_unlock_bh(&pndevs->lock); | 235 | rcu_read_unlock(); |
196 | return err; | 236 | return err; |
197 | } | 237 | } |
198 | 238 | ||
@@ -219,6 +259,32 @@ static int phonet_device_autoconf(struct net_device *dev) | |||
219 | return 0; | 259 | return 0; |
220 | } | 260 | } |
221 | 261 | ||
262 | static void phonet_route_autodel(struct net_device *dev) | ||
263 | { | ||
264 | struct phonet_net *pnn = net_generic(dev_net(dev), phonet_net_id); | ||
265 | unsigned i; | ||
266 | DECLARE_BITMAP(deleted, 64); | ||
267 | |||
268 | /* Remove left-over Phonet routes */ | ||
269 | bitmap_zero(deleted, 64); | ||
270 | mutex_lock(&pnn->routes.lock); | ||
271 | for (i = 0; i < 64; i++) | ||
272 | if (dev == pnn->routes.table[i]) { | ||
273 | rcu_assign_pointer(pnn->routes.table[i], NULL); | ||
274 | set_bit(i, deleted); | ||
275 | } | ||
276 | mutex_unlock(&pnn->routes.lock); | ||
277 | |||
278 | if (bitmap_empty(deleted, 64)) | ||
279 | return; /* short-circuit RCU */ | ||
280 | synchronize_rcu(); | ||
281 | for (i = find_first_bit(deleted, 64); i < 64; | ||
282 | i = find_next_bit(deleted, 64, i + 1)) { | ||
283 | rtm_phonet_notify(RTM_DELROUTE, dev, i); | ||
284 | dev_put(dev); | ||
285 | } | ||
286 | } | ||
287 | |||
222 | /* notify Phonet of device events */ | 288 | /* notify Phonet of device events */ |
223 | static int phonet_device_notify(struct notifier_block *me, unsigned long what, | 289 | static int phonet_device_notify(struct notifier_block *me, unsigned long what, |
224 | void *arg) | 290 | void *arg) |
@@ -232,6 +298,7 @@ static int phonet_device_notify(struct notifier_block *me, unsigned long what, | |||
232 | break; | 298 | break; |
233 | case NETDEV_UNREGISTER: | 299 | case NETDEV_UNREGISTER: |
234 | phonet_device_destroy(dev); | 300 | phonet_device_destroy(dev); |
301 | phonet_route_autodel(dev); | ||
235 | break; | 302 | break; |
236 | } | 303 | } |
237 | return 0; | 304 | return 0; |
@@ -246,7 +313,7 @@ static struct notifier_block phonet_device_notifier = { | |||
246 | /* Per-namespace Phonet devices handling */ | 313 | /* Per-namespace Phonet devices handling */ |
247 | static int phonet_init_net(struct net *net) | 314 | static int phonet_init_net(struct net *net) |
248 | { | 315 | { |
249 | struct phonet_net *pnn = kmalloc(sizeof(*pnn), GFP_KERNEL); | 316 | struct phonet_net *pnn = kzalloc(sizeof(*pnn), GFP_KERNEL); |
250 | if (!pnn) | 317 | if (!pnn) |
251 | return -ENOMEM; | 318 | return -ENOMEM; |
252 | 319 | ||
@@ -256,7 +323,8 @@ static int phonet_init_net(struct net *net) | |||
256 | } | 323 | } |
257 | 324 | ||
258 | INIT_LIST_HEAD(&pnn->pndevs.list); | 325 | INIT_LIST_HEAD(&pnn->pndevs.list); |
259 | spin_lock_init(&pnn->pndevs.lock); | 326 | mutex_init(&pnn->pndevs.lock); |
327 | mutex_init(&pnn->routes.lock); | ||
260 | net_assign_generic(net, phonet_net_id, pnn); | 328 | net_assign_generic(net, phonet_net_id, pnn); |
261 | return 0; | 329 | return 0; |
262 | } | 330 | } |
@@ -265,10 +333,19 @@ static void phonet_exit_net(struct net *net) | |||
265 | { | 333 | { |
266 | struct phonet_net *pnn = net_generic(net, phonet_net_id); | 334 | struct phonet_net *pnn = net_generic(net, phonet_net_id); |
267 | struct net_device *dev; | 335 | struct net_device *dev; |
336 | unsigned i; | ||
268 | 337 | ||
269 | rtnl_lock(); | 338 | rtnl_lock(); |
270 | for_each_netdev(net, dev) | 339 | for_each_netdev(net, dev) |
271 | phonet_device_destroy(dev); | 340 | phonet_device_destroy(dev); |
341 | |||
342 | for (i = 0; i < 64; i++) { | ||
343 | dev = pnn->routes.table[i]; | ||
344 | if (dev) { | ||
345 | rtm_phonet_notify(RTM_DELROUTE, dev, i); | ||
346 | dev_put(dev); | ||
347 | } | ||
348 | } | ||
272 | rtnl_unlock(); | 349 | rtnl_unlock(); |
273 | 350 | ||
274 | proc_net_remove(net, "phonet"); | 351 | proc_net_remove(net, "phonet"); |
@@ -300,3 +377,73 @@ void phonet_device_exit(void) | |||
300 | unregister_netdevice_notifier(&phonet_device_notifier); | 377 | unregister_netdevice_notifier(&phonet_device_notifier); |
301 | unregister_pernet_gen_device(phonet_net_id, &phonet_net_ops); | 378 | unregister_pernet_gen_device(phonet_net_id, &phonet_net_ops); |
302 | } | 379 | } |
380 | |||
381 | int phonet_route_add(struct net_device *dev, u8 daddr) | ||
382 | { | ||
383 | struct phonet_net *pnn = net_generic(dev_net(dev), phonet_net_id); | ||
384 | struct phonet_routes *routes = &pnn->routes; | ||
385 | int err = -EEXIST; | ||
386 | |||
387 | daddr = daddr >> 2; | ||
388 | mutex_lock(&routes->lock); | ||
389 | if (routes->table[daddr] == NULL) { | ||
390 | rcu_assign_pointer(routes->table[daddr], dev); | ||
391 | dev_hold(dev); | ||
392 | err = 0; | ||
393 | } | ||
394 | mutex_unlock(&routes->lock); | ||
395 | return err; | ||
396 | } | ||
397 | |||
398 | int phonet_route_del(struct net_device *dev, u8 daddr) | ||
399 | { | ||
400 | struct phonet_net *pnn = net_generic(dev_net(dev), phonet_net_id); | ||
401 | struct phonet_routes *routes = &pnn->routes; | ||
402 | |||
403 | daddr = daddr >> 2; | ||
404 | mutex_lock(&routes->lock); | ||
405 | if (dev == routes->table[daddr]) | ||
406 | rcu_assign_pointer(routes->table[daddr], NULL); | ||
407 | else | ||
408 | dev = NULL; | ||
409 | mutex_unlock(&routes->lock); | ||
410 | |||
411 | if (!dev) | ||
412 | return -ENOENT; | ||
413 | synchronize_rcu(); | ||
414 | dev_put(dev); | ||
415 | return 0; | ||
416 | } | ||
417 | |||
418 | struct net_device *phonet_route_get(struct net *net, u8 daddr) | ||
419 | { | ||
420 | struct phonet_net *pnn = net_generic(net, phonet_net_id); | ||
421 | struct phonet_routes *routes = &pnn->routes; | ||
422 | struct net_device *dev; | ||
423 | |||
424 | ASSERT_RTNL(); /* no need to hold the device */ | ||
425 | |||
426 | daddr >>= 2; | ||
427 | rcu_read_lock(); | ||
428 | dev = rcu_dereference(routes->table[daddr]); | ||
429 | rcu_read_unlock(); | ||
430 | return dev; | ||
431 | } | ||
432 | |||
433 | struct net_device *phonet_route_output(struct net *net, u8 daddr) | ||
434 | { | ||
435 | struct phonet_net *pnn = net_generic(net, phonet_net_id); | ||
436 | struct phonet_routes *routes = &pnn->routes; | ||
437 | struct net_device *dev; | ||
438 | |||
439 | daddr >>= 2; | ||
440 | rcu_read_lock(); | ||
441 | dev = rcu_dereference(routes->table[daddr]); | ||
442 | if (dev) | ||
443 | dev_hold(dev); | ||
444 | rcu_read_unlock(); | ||
445 | |||
446 | if (!dev) | ||
447 | dev = phonet_device_get(net); /* Default route */ | ||
448 | return dev; | ||
449 | } | ||
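The routes table introduced above is written only under routes->lock (a mutex) and published with rcu_assign_pointer(), while lookups take nothing but rcu_read_lock(); phonet_route_output() additionally grabs a device reference inside the read-side section so the caller may keep using the device after the grace period. A minimal sketch of that reader/writer split, assuming the same 64-entry table layout (example_* names are hypothetical):

struct example_routes {
	struct mutex lock;		/* serializes writers only */
	struct net_device *table[64];
};

static int example_route_add(struct example_routes *r, u8 daddr,
			     struct net_device *dev)
{
	int err = -EEXIST;

	mutex_lock(&r->lock);
	if (r->table[daddr >> 2] == NULL) {
		dev_hold(dev);
		rcu_assign_pointer(r->table[daddr >> 2], dev);
		err = 0;
	}
	mutex_unlock(&r->lock);
	return err;
}

static struct net_device *example_route_output(struct example_routes *r, u8 daddr)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = rcu_dereference(r->table[daddr >> 2]);
	if (dev)
		dev_hold(dev);		/* pin it before leaving the read side */
	rcu_read_unlock();
	return dev;			/* caller must dev_put() when done */
}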
diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c index d21fd3576610..2e6c7eb8e76a 100644 --- a/net/phonet/pn_netlink.c +++ b/net/phonet/pn_netlink.c | |||
@@ -29,6 +29,8 @@ | |||
29 | #include <net/sock.h> | 29 | #include <net/sock.h> |
30 | #include <net/phonet/pn_dev.h> | 30 | #include <net/phonet/pn_dev.h> |
31 | 31 | ||
32 | /* Device address handling */ | ||
33 | |||
32 | static int fill_addr(struct sk_buff *skb, struct net_device *dev, u8 addr, | 34 | static int fill_addr(struct sk_buff *skb, struct net_device *dev, u8 addr, |
33 | u32 pid, u32 seq, int event); | 35 | u32 pid, u32 seq, int event); |
34 | 36 | ||
@@ -51,8 +53,7 @@ void phonet_address_notify(int event, struct net_device *dev, u8 addr) | |||
51 | RTNLGRP_PHONET_IFADDR, NULL, GFP_KERNEL); | 53 | RTNLGRP_PHONET_IFADDR, NULL, GFP_KERNEL); |
52 | return; | 54 | return; |
53 | errout: | 55 | errout: |
54 | if (err < 0) | 56 | rtnl_set_sk_err(dev_net(dev), RTNLGRP_PHONET_IFADDR, err); |
55 | rtnl_set_sk_err(dev_net(dev), RTNLGRP_PHONET_IFADDR, err); | ||
56 | } | 57 | } |
57 | 58 | ||
58 | static const struct nla_policy ifa_phonet_policy[IFA_MAX+1] = { | 59 | static const struct nla_policy ifa_phonet_policy[IFA_MAX+1] = { |
@@ -130,8 +131,8 @@ static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb) | |||
130 | int addr_idx = 0, addr_start_idx = cb->args[1]; | 131 | int addr_idx = 0, addr_start_idx = cb->args[1]; |
131 | 132 | ||
132 | pndevs = phonet_device_list(sock_net(skb->sk)); | 133 | pndevs = phonet_device_list(sock_net(skb->sk)); |
133 | spin_lock_bh(&pndevs->lock); | 134 | rcu_read_lock(); |
134 | list_for_each_entry(pnd, &pndevs->list, list) { | 135 | list_for_each_entry_rcu(pnd, &pndevs->list, list) { |
135 | u8 addr; | 136 | u8 addr; |
136 | 137 | ||
137 | if (dev_idx > dev_start_idx) | 138 | if (dev_idx > dev_start_idx) |
@@ -153,13 +154,137 @@ static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb) | |||
153 | } | 154 | } |
154 | 155 | ||
155 | out: | 156 | out: |
156 | spin_unlock_bh(&pndevs->lock); | 157 | rcu_read_unlock(); |
157 | cb->args[0] = dev_idx; | 158 | cb->args[0] = dev_idx; |
158 | cb->args[1] = addr_idx; | 159 | cb->args[1] = addr_idx; |
159 | 160 | ||
160 | return skb->len; | 161 | return skb->len; |
161 | } | 162 | } |
162 | 163 | ||
164 | /* Routes handling */ | ||
165 | |||
166 | static int fill_route(struct sk_buff *skb, struct net_device *dev, u8 dst, | ||
167 | u32 pid, u32 seq, int event) | ||
168 | { | ||
169 | struct rtmsg *rtm; | ||
170 | struct nlmsghdr *nlh; | ||
171 | |||
172 | nlh = nlmsg_put(skb, pid, seq, event, sizeof(*rtm), 0); | ||
173 | if (nlh == NULL) | ||
174 | return -EMSGSIZE; | ||
175 | |||
176 | rtm = nlmsg_data(nlh); | ||
177 | rtm->rtm_family = AF_PHONET; | ||
178 | rtm->rtm_dst_len = 6; | ||
179 | rtm->rtm_src_len = 0; | ||
180 | rtm->rtm_tos = 0; | ||
181 | rtm->rtm_table = RT_TABLE_MAIN; | ||
182 | rtm->rtm_protocol = RTPROT_STATIC; | ||
183 | rtm->rtm_scope = RT_SCOPE_UNIVERSE; | ||
184 | rtm->rtm_type = RTN_UNICAST; | ||
185 | rtm->rtm_flags = 0; | ||
186 | NLA_PUT_U8(skb, RTA_DST, dst); | ||
187 | NLA_PUT_U32(skb, RTA_OIF, dev->ifindex); | ||
188 | return nlmsg_end(skb, nlh); | ||
189 | |||
190 | nla_put_failure: | ||
191 | nlmsg_cancel(skb, nlh); | ||
192 | return -EMSGSIZE; | ||
193 | } | ||
194 | |||
195 | void rtm_phonet_notify(int event, struct net_device *dev, u8 dst) | ||
196 | { | ||
197 | struct sk_buff *skb; | ||
198 | int err = -ENOBUFS; | ||
199 | |||
200 | skb = nlmsg_new(NLMSG_ALIGN(sizeof(struct ifaddrmsg)) + | ||
201 | nla_total_size(1) + nla_total_size(4), GFP_KERNEL); | ||
202 | if (skb == NULL) | ||
203 | goto errout; | ||
204 | err = fill_route(skb, dev, dst, 0, 0, event); | ||
205 | if (err < 0) { | ||
206 | WARN_ON(err == -EMSGSIZE); | ||
207 | kfree_skb(skb); | ||
208 | goto errout; | ||
209 | } | ||
210 | rtnl_notify(skb, dev_net(dev), 0, | ||
211 | RTNLGRP_PHONET_ROUTE, NULL, GFP_KERNEL); | ||
212 | return; | ||
213 | errout: | ||
214 | rtnl_set_sk_err(dev_net(dev), RTNLGRP_PHONET_ROUTE, err); | ||
215 | } | ||
216 | |||
217 | static const struct nla_policy rtm_phonet_policy[RTA_MAX+1] = { | ||
218 | [RTA_DST] = { .type = NLA_U8 }, | ||
219 | [RTA_OIF] = { .type = NLA_U32 }, | ||
220 | }; | ||
221 | |||
222 | static int route_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *attr) | ||
223 | { | ||
224 | struct net *net = sock_net(skb->sk); | ||
225 | struct nlattr *tb[RTA_MAX+1]; | ||
226 | struct net_device *dev; | ||
227 | struct rtmsg *rtm; | ||
228 | int err; | ||
229 | u8 dst; | ||
230 | |||
231 | if (!capable(CAP_SYS_ADMIN)) | ||
232 | return -EPERM; | ||
233 | |||
234 | ASSERT_RTNL(); | ||
235 | |||
236 | err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_phonet_policy); | ||
237 | if (err < 0) | ||
238 | return err; | ||
239 | |||
240 | rtm = nlmsg_data(nlh); | ||
241 | if (rtm->rtm_table != RT_TABLE_MAIN || rtm->rtm_type != RTN_UNICAST) | ||
242 | return -EINVAL; | ||
243 | if (tb[RTA_DST] == NULL || tb[RTA_OIF] == NULL) | ||
244 | return -EINVAL; | ||
245 | dst = nla_get_u8(tb[RTA_DST]); | ||
246 | if (dst & 3) /* Phonet addresses only have 6 high-order bits */ | ||
247 | return -EINVAL; | ||
248 | |||
249 | dev = __dev_get_by_index(net, nla_get_u32(tb[RTA_OIF])); | ||
250 | if (dev == NULL) | ||
251 | return -ENODEV; | ||
252 | |||
253 | if (nlh->nlmsg_type == RTM_NEWROUTE) | ||
254 | err = phonet_route_add(dev, dst); | ||
255 | else | ||
256 | err = phonet_route_del(dev, dst); | ||
257 | if (!err) | ||
258 | rtm_phonet_notify(nlh->nlmsg_type, dev, dst); | ||
259 | return err; | ||
260 | } | ||
261 | |||
262 | static int route_dumpit(struct sk_buff *skb, struct netlink_callback *cb) | ||
263 | { | ||
264 | struct net *net = sock_net(skb->sk); | ||
265 | u8 addr, addr_idx = 0, addr_start_idx = cb->args[0]; | ||
266 | |||
267 | for (addr = 0; addr < 64; addr++) { | ||
268 | struct net_device *dev; | ||
269 | |||
270 | dev = phonet_route_get(net, addr << 2); | ||
271 | if (!dev) | ||
272 | continue; | ||
273 | |||
274 | if (addr_idx++ < addr_start_idx) | ||
275 | continue; | ||
276 | if (fill_route(skb, dev, addr << 2, NETLINK_CB(cb->skb).pid, | ||
277 | cb->nlh->nlmsg_seq, RTM_NEWROUTE)) | ||
278 | goto out; | ||
279 | } | ||
280 | |||
281 | out: | ||
282 | cb->args[0] = addr_idx; | ||
283 | cb->args[1] = 0; | ||
284 | |||
285 | return skb->len; | ||
286 | } | ||
287 | |||
163 | int __init phonet_netlink_register(void) | 288 | int __init phonet_netlink_register(void) |
164 | { | 289 | { |
165 | int err = __rtnl_register(PF_PHONET, RTM_NEWADDR, addr_doit, NULL); | 290 | int err = __rtnl_register(PF_PHONET, RTM_NEWADDR, addr_doit, NULL); |
@@ -169,5 +294,8 @@ int __init phonet_netlink_register(void) | |||
169 | /* Further __rtnl_register() cannot fail */ | 294 | /* Further __rtnl_register() cannot fail */ |
170 | __rtnl_register(PF_PHONET, RTM_DELADDR, addr_doit, NULL); | 295 | __rtnl_register(PF_PHONET, RTM_DELADDR, addr_doit, NULL); |
171 | __rtnl_register(PF_PHONET, RTM_GETADDR, NULL, getaddr_dumpit); | 296 | __rtnl_register(PF_PHONET, RTM_GETADDR, NULL, getaddr_dumpit); |
297 | __rtnl_register(PF_PHONET, RTM_NEWROUTE, route_doit, NULL); | ||
298 | __rtnl_register(PF_PHONET, RTM_DELROUTE, route_doit, NULL); | ||
299 | __rtnl_register(PF_PHONET, RTM_GETROUTE, NULL, route_dumpit); | ||
172 | return 0; | 300 | return 0; |
173 | } | 301 | } |
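With the three extra __rtnl_register() calls, Phonet routes become reachable through ordinary rtnetlink: an RTM_GETROUTE dump whose rtm_family is AF_PHONET is dispatched to route_dumpit() above. The following is a hedged userspace sketch of issuing such a dump over a raw NETLINK_ROUTE socket; reply parsing is omitted, and AF_PHONET must be visible to the compiler (on older libcs it may have to come from <linux/socket.h>).

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

/* Returns a netlink socket with a pending Phonet route dump, or -1 on error.
 * The caller recv()s the RTM_NEWROUTE replies from it. */
static int request_phonet_route_dump(void)
{
	struct {
		struct nlmsghdr nlh;
		struct rtmsg rtm;
	} req;
	int fd = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_ROUTE);

	if (fd < 0)
		return -1;

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtmsg));
	req.nlh.nlmsg_type = RTM_GETROUTE;
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	req.rtm.rtm_family = AF_PHONET;		/* routed to route_dumpit() */

	if (send(fd, &req, req.nlh.nlmsg_len, 0) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}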
diff --git a/net/phonet/socket.c b/net/phonet/socket.c index aa5b5a972bff..4112b6e1c48a 100644 --- a/net/phonet/socket.c +++ b/net/phonet/socket.c | |||
@@ -45,13 +45,28 @@ static int pn_socket_release(struct socket *sock) | |||
45 | return 0; | 45 | return 0; |
46 | } | 46 | } |
47 | 47 | ||
48 | #define PN_HASHSIZE 16 | ||
49 | #define PN_HASHMASK (PN_HASHSIZE-1) | ||
50 | |||
51 | |||
48 | static struct { | 52 | static struct { |
49 | struct hlist_head hlist; | 53 | struct hlist_head hlist[PN_HASHSIZE]; |
50 | spinlock_t lock; | 54 | spinlock_t lock; |
51 | } pnsocks = { | 55 | } pnsocks; |
52 | .hlist = HLIST_HEAD_INIT, | 56 | |
53 | .lock = __SPIN_LOCK_UNLOCKED(pnsocks.lock), | 57 | void __init pn_sock_init(void) |
54 | }; | 58 | { |
59 | unsigned i; | ||
60 | |||
61 | for (i = 0; i < PN_HASHSIZE; i++) | ||
62 | INIT_HLIST_HEAD(pnsocks.hlist + i); | ||
63 | spin_lock_init(&pnsocks.lock); | ||
64 | } | ||
65 | |||
66 | static struct hlist_head *pn_hash_list(u16 obj) | ||
67 | { | ||
68 | return pnsocks.hlist + (obj & PN_HASHMASK); | ||
69 | } | ||
55 | 70 | ||
56 | /* | 71 | /* |
57 | * Find address based on socket address, match only certain fields. | 72 | * Find address based on socket address, match only certain fields. |
@@ -64,10 +79,11 @@ struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *spn) | |||
64 | struct sock *rval = NULL; | 79 | struct sock *rval = NULL; |
65 | u16 obj = pn_sockaddr_get_object(spn); | 80 | u16 obj = pn_sockaddr_get_object(spn); |
66 | u8 res = spn->spn_resource; | 81 | u8 res = spn->spn_resource; |
82 | struct hlist_head *hlist = pn_hash_list(obj); | ||
67 | 83 | ||
68 | spin_lock_bh(&pnsocks.lock); | 84 | spin_lock_bh(&pnsocks.lock); |
69 | 85 | ||
70 | sk_for_each(sknode, node, &pnsocks.hlist) { | 86 | sk_for_each(sknode, node, hlist) { |
71 | struct pn_sock *pn = pn_sk(sknode); | 87 | struct pn_sock *pn = pn_sk(sknode); |
72 | BUG_ON(!pn->sobject); /* unbound socket */ | 88 | BUG_ON(!pn->sobject); /* unbound socket */ |
73 | 89 | ||
@@ -94,13 +110,44 @@ struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *spn) | |||
94 | spin_unlock_bh(&pnsocks.lock); | 110 | spin_unlock_bh(&pnsocks.lock); |
95 | 111 | ||
96 | return rval; | 112 | return rval; |
113 | } | ||
114 | |||
115 | /* Deliver a broadcast packet (only in bottom-half) */ | ||
116 | void pn_deliver_sock_broadcast(struct net *net, struct sk_buff *skb) | ||
117 | { | ||
118 | struct hlist_head *hlist = pnsocks.hlist; | ||
119 | unsigned h; | ||
120 | |||
121 | spin_lock(&pnsocks.lock); | ||
122 | for (h = 0; h < PN_HASHSIZE; h++) { | ||
123 | struct hlist_node *node; | ||
124 | struct sock *sknode; | ||
125 | |||
126 | sk_for_each(sknode, node, hlist) { | ||
127 | struct sk_buff *clone; | ||
128 | |||
129 | if (!net_eq(sock_net(sknode), net)) | ||
130 | continue; | ||
131 | if (!sock_flag(sknode, SOCK_BROADCAST)) | ||
132 | continue; | ||
97 | 133 | ||
134 | clone = skb_clone(skb, GFP_ATOMIC); | ||
135 | if (clone) { | ||
136 | sock_hold(sknode); | ||
137 | sk_receive_skb(sknode, clone, 0); | ||
138 | } | ||
139 | } | ||
140 | hlist++; | ||
141 | } | ||
142 | spin_unlock(&pnsocks.lock); | ||
98 | } | 143 | } |
99 | 144 | ||
100 | void pn_sock_hash(struct sock *sk) | 145 | void pn_sock_hash(struct sock *sk) |
101 | { | 146 | { |
147 | struct hlist_head *hlist = pn_hash_list(pn_sk(sk)->sobject); | ||
148 | |||
102 | spin_lock_bh(&pnsocks.lock); | 149 | spin_lock_bh(&pnsocks.lock); |
103 | sk_add_node(sk, &pnsocks.hlist); | 150 | sk_add_node(sk, hlist); |
104 | spin_unlock_bh(&pnsocks.lock); | 151 | spin_unlock_bh(&pnsocks.lock); |
105 | } | 152 | } |
106 | EXPORT_SYMBOL(pn_sock_hash); | 153 | EXPORT_SYMBOL(pn_sock_hash); |
@@ -416,15 +463,20 @@ EXPORT_SYMBOL(pn_sock_get_port); | |||
416 | static struct sock *pn_sock_get_idx(struct seq_file *seq, loff_t pos) | 463 | static struct sock *pn_sock_get_idx(struct seq_file *seq, loff_t pos) |
417 | { | 464 | { |
418 | struct net *net = seq_file_net(seq); | 465 | struct net *net = seq_file_net(seq); |
466 | struct hlist_head *hlist = pnsocks.hlist; | ||
419 | struct hlist_node *node; | 467 | struct hlist_node *node; |
420 | struct sock *sknode; | 468 | struct sock *sknode; |
469 | unsigned h; | ||
421 | 470 | ||
422 | sk_for_each(sknode, node, &pnsocks.hlist) { | 471 | for (h = 0; h < PN_HASHSIZE; h++) { |
423 | if (!net_eq(net, sock_net(sknode))) | 472 | sk_for_each(sknode, node, hlist) { |
424 | continue; | 473 | if (!net_eq(net, sock_net(sknode))) |
425 | if (!pos) | 474 | continue; |
426 | return sknode; | 475 | if (!pos) |
427 | pos--; | 476 | return sknode; |
477 | pos--; | ||
478 | } | ||
479 | hlist++; | ||
428 | } | 480 | } |
429 | return NULL; | 481 | return NULL; |
430 | } | 482 | } |
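The single pnsocks list becomes PN_HASHSIZE buckets keyed by the bound Phonet object, so a unicast lookup only scans one chain while broadcast delivery and the /proc iterator walk every bucket. A minimal sketch of the bucket selection and a hashed lookup loop follows; the names mirror the hunk but this is an illustration, not the exact code, and the real lookup runs with pnsocks.lock held.

#define PN_HASHSIZE	16
#define PN_HASHMASK	(PN_HASHSIZE - 1)

static struct hlist_head pn_hash[PN_HASHSIZE];

static inline struct hlist_head *pn_bucket(u16 obj)
{
	/* Low-order bits of the Phonet object ID pick the chain. */
	return pn_hash + (obj & PN_HASHMASK);
}

static struct sock *pn_lookup(u16 obj)
{
	struct hlist_node *node;
	struct sock *sknode;

	/* Caller holds the socket table lock. */
	sk_for_each(sknode, node, pn_bucket(obj))
		if (pn_sk(sknode)->sobject == obj)
			return sknode;
	return NULL;
}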
diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c index 98e05382fd3c..e25d8d5ce8df 100644 --- a/net/rds/af_rds.c +++ b/net/rds/af_rds.c | |||
@@ -265,6 +265,9 @@ static int rds_setsockopt(struct socket *sock, int level, int optname, | |||
265 | case RDS_GET_MR: | 265 | case RDS_GET_MR: |
266 | ret = rds_get_mr(rs, optval, optlen); | 266 | ret = rds_get_mr(rs, optval, optlen); |
267 | break; | 267 | break; |
268 | case RDS_GET_MR_FOR_DEST: | ||
269 | ret = rds_get_mr_for_dest(rs, optval, optlen); | ||
270 | break; | ||
268 | case RDS_FREE_MR: | 271 | case RDS_FREE_MR: |
269 | ret = rds_free_mr(rs, optval, optlen); | 272 | ret = rds_free_mr(rs, optval, optlen); |
270 | break; | 273 | break; |
@@ -407,7 +410,8 @@ static int __rds_create(struct socket *sock, struct sock *sk, int protocol) | |||
407 | return 0; | 410 | return 0; |
408 | } | 411 | } |
409 | 412 | ||
410 | static int rds_create(struct net *net, struct socket *sock, int protocol) | 413 | static int rds_create(struct net *net, struct socket *sock, int protocol, |
414 | int kern) | ||
411 | { | 415 | { |
412 | struct sock *sk; | 416 | struct sock *sk; |
413 | 417 | ||
@@ -431,7 +435,7 @@ void rds_sock_put(struct rds_sock *rs) | |||
431 | sock_put(rds_rs_to_sk(rs)); | 435 | sock_put(rds_rs_to_sk(rs)); |
432 | } | 436 | } |
433 | 437 | ||
434 | static struct net_proto_family rds_family_ops = { | 438 | static const struct net_proto_family rds_family_ops = { |
435 | .family = AF_RDS, | 439 | .family = AF_RDS, |
436 | .create = rds_create, | 440 | .create = rds_create, |
437 | .owner = THIS_MODULE, | 441 | .owner = THIS_MODULE, |
diff --git a/net/rds/cong.c b/net/rds/cong.c index dd2711df640b..6d06cac2649c 100644 --- a/net/rds/cong.c +++ b/net/rds/cong.c | |||
@@ -218,6 +218,8 @@ void rds_cong_queue_updates(struct rds_cong_map *map) | |||
218 | spin_lock_irqsave(&rds_cong_lock, flags); | 218 | spin_lock_irqsave(&rds_cong_lock, flags); |
219 | 219 | ||
220 | list_for_each_entry(conn, &map->m_conn_list, c_map_item) { | 220 | list_for_each_entry(conn, &map->m_conn_list, c_map_item) { |
221 | if (conn->c_loopback) | ||
222 | continue; | ||
221 | if (!test_and_set_bit(0, &conn->c_map_queued)) { | 223 | if (!test_and_set_bit(0, &conn->c_map_queued)) { |
222 | rds_stats_inc(s_cong_update_queued); | 224 | rds_stats_inc(s_cong_update_queued); |
223 | queue_delayed_work(rds_wq, &conn->c_send_w, 0); | 225 | queue_delayed_work(rds_wq, &conn->c_send_w, 0); |
diff --git a/net/rds/ib.h b/net/rds/ib.h index 1378b854cac0..64df4e79b29f 100644 --- a/net/rds/ib.h +++ b/net/rds/ib.h | |||
@@ -98,6 +98,7 @@ struct rds_ib_connection { | |||
98 | struct rds_ib_send_work *i_sends; | 98 | struct rds_ib_send_work *i_sends; |
99 | 99 | ||
100 | /* rx */ | 100 | /* rx */ |
101 | struct tasklet_struct i_recv_tasklet; | ||
101 | struct mutex i_recv_mutex; | 102 | struct mutex i_recv_mutex; |
102 | struct rds_ib_work_ring i_recv_ring; | 103 | struct rds_ib_work_ring i_recv_ring; |
103 | struct rds_ib_incoming *i_ibinc; | 104 | struct rds_ib_incoming *i_ibinc; |
@@ -303,6 +304,7 @@ void rds_ib_inc_free(struct rds_incoming *inc); | |||
303 | int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov, | 304 | int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov, |
304 | size_t size); | 305 | size_t size); |
305 | void rds_ib_recv_cq_comp_handler(struct ib_cq *cq, void *context); | 306 | void rds_ib_recv_cq_comp_handler(struct ib_cq *cq, void *context); |
307 | void rds_ib_recv_tasklet_fn(unsigned long data); | ||
306 | void rds_ib_recv_init_ring(struct rds_ib_connection *ic); | 308 | void rds_ib_recv_init_ring(struct rds_ib_connection *ic); |
307 | void rds_ib_recv_clear_ring(struct rds_ib_connection *ic); | 309 | void rds_ib_recv_clear_ring(struct rds_ib_connection *ic); |
308 | void rds_ib_recv_init_ack(struct rds_ib_connection *ic); | 310 | void rds_ib_recv_init_ack(struct rds_ib_connection *ic); |
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c index c2d372f13dbb..9d320692a4fc 100644 --- a/net/rds/ib_cm.c +++ b/net/rds/ib_cm.c | |||
@@ -694,6 +694,8 @@ int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp) | |||
694 | return -ENOMEM; | 694 | return -ENOMEM; |
695 | 695 | ||
696 | INIT_LIST_HEAD(&ic->ib_node); | 696 | INIT_LIST_HEAD(&ic->ib_node); |
697 | tasklet_init(&ic->i_recv_tasklet, rds_ib_recv_tasklet_fn, | ||
698 | (unsigned long) ic); | ||
697 | mutex_init(&ic->i_recv_mutex); | 699 | mutex_init(&ic->i_recv_mutex); |
698 | #ifndef KERNEL_HAS_ATOMIC64 | 700 | #ifndef KERNEL_HAS_ATOMIC64 |
699 | spin_lock_init(&ic->i_ack_lock); | 701 | spin_lock_init(&ic->i_ack_lock); |
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c index ef3ab5b7283e..c5e916598c14 100644 --- a/net/rds/ib_rdma.c +++ b/net/rds/ib_rdma.c | |||
@@ -187,11 +187,8 @@ void __rds_ib_destroy_conns(struct list_head *list, spinlock_t *list_lock) | |||
187 | INIT_LIST_HEAD(list); | 187 | INIT_LIST_HEAD(list); |
188 | spin_unlock_irq(list_lock); | 188 | spin_unlock_irq(list_lock); |
189 | 189 | ||
190 | list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node) { | 190 | list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node) |
191 | if (ic->conn->c_passive) | ||
192 | rds_conn_destroy(ic->conn->c_passive); | ||
193 | rds_conn_destroy(ic->conn); | 191 | rds_conn_destroy(ic->conn); |
194 | } | ||
195 | } | 192 | } |
196 | 193 | ||
197 | struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev) | 194 | struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev) |
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c index cd7a6cfcab03..fe5ab8c6b964 100644 --- a/net/rds/ib_recv.c +++ b/net/rds/ib_recv.c | |||
@@ -143,15 +143,16 @@ static int rds_ib_recv_refill_one(struct rds_connection *conn, | |||
143 | int ret = -ENOMEM; | 143 | int ret = -ENOMEM; |
144 | 144 | ||
145 | if (recv->r_ibinc == NULL) { | 145 | if (recv->r_ibinc == NULL) { |
146 | if (atomic_read(&rds_ib_allocation) >= rds_ib_sysctl_max_recv_allocation) { | 146 | if (!atomic_add_unless(&rds_ib_allocation, 1, rds_ib_sysctl_max_recv_allocation)) { |
147 | rds_ib_stats_inc(s_ib_rx_alloc_limit); | 147 | rds_ib_stats_inc(s_ib_rx_alloc_limit); |
148 | goto out; | 148 | goto out; |
149 | } | 149 | } |
150 | recv->r_ibinc = kmem_cache_alloc(rds_ib_incoming_slab, | 150 | recv->r_ibinc = kmem_cache_alloc(rds_ib_incoming_slab, |
151 | kptr_gfp); | 151 | kptr_gfp); |
152 | if (recv->r_ibinc == NULL) | 152 | if (recv->r_ibinc == NULL) { |
153 | atomic_dec(&rds_ib_allocation); | ||
153 | goto out; | 154 | goto out; |
154 | atomic_inc(&rds_ib_allocation); | 155 | } |
155 | INIT_LIST_HEAD(&recv->r_ibinc->ii_frags); | 156 | INIT_LIST_HEAD(&recv->r_ibinc->ii_frags); |
156 | rds_inc_init(&recv->r_ibinc->ii_inc, conn, conn->c_faddr); | 157 | rds_inc_init(&recv->r_ibinc->ii_inc, conn, conn->c_faddr); |
157 | } | 158 | } |
@@ -824,17 +825,22 @@ void rds_ib_recv_cq_comp_handler(struct ib_cq *cq, void *context) | |||
824 | { | 825 | { |
825 | struct rds_connection *conn = context; | 826 | struct rds_connection *conn = context; |
826 | struct rds_ib_connection *ic = conn->c_transport_data; | 827 | struct rds_ib_connection *ic = conn->c_transport_data; |
827 | struct ib_wc wc; | ||
828 | struct rds_ib_ack_state state = { 0, }; | ||
829 | struct rds_ib_recv_work *recv; | ||
830 | 828 | ||
831 | rdsdebug("conn %p cq %p\n", conn, cq); | 829 | rdsdebug("conn %p cq %p\n", conn, cq); |
832 | 830 | ||
833 | rds_ib_stats_inc(s_ib_rx_cq_call); | 831 | rds_ib_stats_inc(s_ib_rx_cq_call); |
834 | 832 | ||
835 | ib_req_notify_cq(cq, IB_CQ_SOLICITED); | 833 | tasklet_schedule(&ic->i_recv_tasklet); |
834 | } | ||
835 | |||
836 | static inline void rds_poll_cq(struct rds_ib_connection *ic, | ||
837 | struct rds_ib_ack_state *state) | ||
838 | { | ||
839 | struct rds_connection *conn = ic->conn; | ||
840 | struct ib_wc wc; | ||
841 | struct rds_ib_recv_work *recv; | ||
836 | 842 | ||
837 | while (ib_poll_cq(cq, 1, &wc) > 0) { | 843 | while (ib_poll_cq(ic->i_recv_cq, 1, &wc) > 0) { |
838 | rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n", | 844 | rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n", |
839 | (unsigned long long)wc.wr_id, wc.status, wc.byte_len, | 845 | (unsigned long long)wc.wr_id, wc.status, wc.byte_len, |
840 | be32_to_cpu(wc.ex.imm_data)); | 846 | be32_to_cpu(wc.ex.imm_data)); |
@@ -852,7 +858,7 @@ void rds_ib_recv_cq_comp_handler(struct ib_cq *cq, void *context) | |||
852 | if (rds_conn_up(conn) || rds_conn_connecting(conn)) { | 858 | if (rds_conn_up(conn) || rds_conn_connecting(conn)) { |
853 | /* We expect errors as the qp is drained during shutdown */ | 859 | /* We expect errors as the qp is drained during shutdown */ |
854 | if (wc.status == IB_WC_SUCCESS) { | 860 | if (wc.status == IB_WC_SUCCESS) { |
855 | rds_ib_process_recv(conn, recv, wc.byte_len, &state); | 861 | rds_ib_process_recv(conn, recv, wc.byte_len, state); |
856 | } else { | 862 | } else { |
857 | rds_ib_conn_error(conn, "recv completion on " | 863 | rds_ib_conn_error(conn, "recv completion on " |
858 | "%pI4 had status %u, disconnecting and " | 864 | "%pI4 had status %u, disconnecting and " |
@@ -863,6 +869,17 @@ void rds_ib_recv_cq_comp_handler(struct ib_cq *cq, void *context) | |||
863 | 869 | ||
864 | rds_ib_ring_free(&ic->i_recv_ring, 1); | 870 | rds_ib_ring_free(&ic->i_recv_ring, 1); |
865 | } | 871 | } |
872 | } | ||
873 | |||
874 | void rds_ib_recv_tasklet_fn(unsigned long data) | ||
875 | { | ||
876 | struct rds_ib_connection *ic = (struct rds_ib_connection *) data; | ||
877 | struct rds_connection *conn = ic->conn; | ||
878 | struct rds_ib_ack_state state = { 0, }; | ||
879 | |||
880 | rds_poll_cq(ic, &state); | ||
881 | ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED); | ||
882 | rds_poll_cq(ic, &state); | ||
866 | 883 | ||
867 | if (state.ack_next_valid) | 884 | if (state.ack_next_valid) |
868 | rds_ib_set_ack(ic, state.ack_next, state.ack_required); | 885 | rds_ib_set_ack(ic, state.ack_next, state.ack_required); |
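The receive completion handler now does nothing but schedule a per-connection tasklet; the tasklet drains the CQ, re-arms the notification, then drains once more so a completion that slipped in between the last poll and ib_req_notify_cq() is not lost. Below is a condensed sketch of that poll/arm/poll pattern with the RDS-specific processing elided; the example_* names, including the example_process_recv() helper, are hypothetical.

struct example_conn {
	struct ib_cq		*recv_cq;
	struct tasklet_struct	recv_tasklet;
};

static void example_drain_cq(struct example_conn *ic)
{
	struct ib_wc wc;

	while (ib_poll_cq(ic->recv_cq, 1, &wc) > 0)
		example_process_recv(ic, &wc);	/* hypothetical per-WC handler */
}

/* Runs in hard-IRQ context, so only kick the tasklet. */
static void example_recv_cq_handler(struct ib_cq *cq, void *context)
{
	struct example_conn *ic = context;

	tasklet_schedule(&ic->recv_tasklet);
}

static void example_recv_tasklet(unsigned long data)
{
	struct example_conn *ic = (struct example_conn *)data;

	example_drain_cq(ic);
	ib_req_notify_cq(ic->recv_cq, IB_CQ_SOLICITED);
	example_drain_cq(ic);	/* catch completions that raced the re-arm */
}

/* At connection setup:
 *	tasklet_init(&ic->recv_tasklet, example_recv_tasklet, (unsigned long)ic);
 */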
diff --git a/net/rds/iw.h b/net/rds/iw.h index dd72b62bd506..eef2f0c28476 100644 --- a/net/rds/iw.h +++ b/net/rds/iw.h | |||
@@ -119,6 +119,7 @@ struct rds_iw_connection { | |||
119 | struct rds_iw_send_work *i_sends; | 119 | struct rds_iw_send_work *i_sends; |
120 | 120 | ||
121 | /* rx */ | 121 | /* rx */ |
122 | struct tasklet_struct i_recv_tasklet; | ||
122 | struct mutex i_recv_mutex; | 123 | struct mutex i_recv_mutex; |
123 | struct rds_iw_work_ring i_recv_ring; | 124 | struct rds_iw_work_ring i_recv_ring; |
124 | struct rds_iw_incoming *i_iwinc; | 125 | struct rds_iw_incoming *i_iwinc; |
@@ -330,6 +331,7 @@ void rds_iw_inc_free(struct rds_incoming *inc); | |||
330 | int rds_iw_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov, | 331 | int rds_iw_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov, |
331 | size_t size); | 332 | size_t size); |
332 | void rds_iw_recv_cq_comp_handler(struct ib_cq *cq, void *context); | 333 | void rds_iw_recv_cq_comp_handler(struct ib_cq *cq, void *context); |
334 | void rds_iw_recv_tasklet_fn(unsigned long data); | ||
333 | void rds_iw_recv_init_ring(struct rds_iw_connection *ic); | 335 | void rds_iw_recv_init_ring(struct rds_iw_connection *ic); |
334 | void rds_iw_recv_clear_ring(struct rds_iw_connection *ic); | 336 | void rds_iw_recv_clear_ring(struct rds_iw_connection *ic); |
335 | void rds_iw_recv_init_ack(struct rds_iw_connection *ic); | 337 | void rds_iw_recv_init_ack(struct rds_iw_connection *ic); |
diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c index a416b0d492b1..394cf6b4d0aa 100644 --- a/net/rds/iw_cm.c +++ b/net/rds/iw_cm.c | |||
@@ -696,6 +696,8 @@ int rds_iw_conn_alloc(struct rds_connection *conn, gfp_t gfp) | |||
696 | return -ENOMEM; | 696 | return -ENOMEM; |
697 | 697 | ||
698 | INIT_LIST_HEAD(&ic->iw_node); | 698 | INIT_LIST_HEAD(&ic->iw_node); |
699 | tasklet_init(&ic->i_recv_tasklet, rds_iw_recv_tasklet_fn, | ||
700 | (unsigned long) ic); | ||
699 | mutex_init(&ic->i_recv_mutex); | 701 | mutex_init(&ic->i_recv_mutex); |
700 | #ifndef KERNEL_HAS_ATOMIC64 | 702 | #ifndef KERNEL_HAS_ATOMIC64 |
701 | spin_lock_init(&ic->i_ack_lock); | 703 | spin_lock_init(&ic->i_ack_lock); |
diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c index de4a1b16bf7b..b25d785e49fc 100644 --- a/net/rds/iw_rdma.c +++ b/net/rds/iw_rdma.c | |||
@@ -245,11 +245,8 @@ void __rds_iw_destroy_conns(struct list_head *list, spinlock_t *list_lock) | |||
245 | INIT_LIST_HEAD(list); | 245 | INIT_LIST_HEAD(list); |
246 | spin_unlock_irq(list_lock); | 246 | spin_unlock_irq(list_lock); |
247 | 247 | ||
248 | list_for_each_entry_safe(ic, _ic, &tmp_list, iw_node) { | 248 | list_for_each_entry_safe(ic, _ic, &tmp_list, iw_node) |
249 | if (ic->conn->c_passive) | ||
250 | rds_conn_destroy(ic->conn->c_passive); | ||
251 | rds_conn_destroy(ic->conn); | 249 | rds_conn_destroy(ic->conn); |
252 | } | ||
253 | } | 250 | } |
254 | 251 | ||
255 | static void rds_iw_set_scatterlist(struct rds_iw_scatterlist *sg, | 252 | static void rds_iw_set_scatterlist(struct rds_iw_scatterlist *sg, |
diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c index 8683f5f66c4b..24fc53f03833 100644 --- a/net/rds/iw_recv.c +++ b/net/rds/iw_recv.c | |||
@@ -143,15 +143,16 @@ static int rds_iw_recv_refill_one(struct rds_connection *conn, | |||
143 | int ret = -ENOMEM; | 143 | int ret = -ENOMEM; |
144 | 144 | ||
145 | if (recv->r_iwinc == NULL) { | 145 | if (recv->r_iwinc == NULL) { |
146 | if (atomic_read(&rds_iw_allocation) >= rds_iw_sysctl_max_recv_allocation) { | 146 | if (!atomic_add_unless(&rds_iw_allocation, 1, rds_iw_sysctl_max_recv_allocation)) { |
147 | rds_iw_stats_inc(s_iw_rx_alloc_limit); | 147 | rds_iw_stats_inc(s_iw_rx_alloc_limit); |
148 | goto out; | 148 | goto out; |
149 | } | 149 | } |
150 | recv->r_iwinc = kmem_cache_alloc(rds_iw_incoming_slab, | 150 | recv->r_iwinc = kmem_cache_alloc(rds_iw_incoming_slab, |
151 | kptr_gfp); | 151 | kptr_gfp); |
152 | if (recv->r_iwinc == NULL) | 152 | if (recv->r_iwinc == NULL) { |
153 | atomic_dec(&rds_iw_allocation); | ||
153 | goto out; | 154 | goto out; |
154 | atomic_inc(&rds_iw_allocation); | 155 | } |
155 | INIT_LIST_HEAD(&recv->r_iwinc->ii_frags); | 156 | INIT_LIST_HEAD(&recv->r_iwinc->ii_frags); |
156 | rds_inc_init(&recv->r_iwinc->ii_inc, conn, conn->c_faddr); | 157 | rds_inc_init(&recv->r_iwinc->ii_inc, conn, conn->c_faddr); |
157 | } | 158 | } |
@@ -783,17 +784,22 @@ void rds_iw_recv_cq_comp_handler(struct ib_cq *cq, void *context) | |||
783 | { | 784 | { |
784 | struct rds_connection *conn = context; | 785 | struct rds_connection *conn = context; |
785 | struct rds_iw_connection *ic = conn->c_transport_data; | 786 | struct rds_iw_connection *ic = conn->c_transport_data; |
786 | struct ib_wc wc; | ||
787 | struct rds_iw_ack_state state = { 0, }; | ||
788 | struct rds_iw_recv_work *recv; | ||
789 | 787 | ||
790 | rdsdebug("conn %p cq %p\n", conn, cq); | 788 | rdsdebug("conn %p cq %p\n", conn, cq); |
791 | 789 | ||
792 | rds_iw_stats_inc(s_iw_rx_cq_call); | 790 | rds_iw_stats_inc(s_iw_rx_cq_call); |
793 | 791 | ||
794 | ib_req_notify_cq(cq, IB_CQ_SOLICITED); | 792 | tasklet_schedule(&ic->i_recv_tasklet); |
793 | } | ||
794 | |||
795 | static inline void rds_poll_cq(struct rds_iw_connection *ic, | ||
796 | struct rds_iw_ack_state *state) | ||
797 | { | ||
798 | struct rds_connection *conn = ic->conn; | ||
799 | struct ib_wc wc; | ||
800 | struct rds_iw_recv_work *recv; | ||
795 | 801 | ||
796 | while (ib_poll_cq(cq, 1, &wc) > 0) { | 802 | while (ib_poll_cq(ic->i_recv_cq, 1, &wc) > 0) { |
797 | rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n", | 803 | rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n", |
798 | (unsigned long long)wc.wr_id, wc.status, wc.byte_len, | 804 | (unsigned long long)wc.wr_id, wc.status, wc.byte_len, |
799 | be32_to_cpu(wc.ex.imm_data)); | 805 | be32_to_cpu(wc.ex.imm_data)); |
@@ -811,7 +817,7 @@ void rds_iw_recv_cq_comp_handler(struct ib_cq *cq, void *context) | |||
811 | if (rds_conn_up(conn) || rds_conn_connecting(conn)) { | 817 | if (rds_conn_up(conn) || rds_conn_connecting(conn)) { |
812 | /* We expect errors as the qp is drained during shutdown */ | 818 | /* We expect errors as the qp is drained during shutdown */ |
813 | if (wc.status == IB_WC_SUCCESS) { | 819 | if (wc.status == IB_WC_SUCCESS) { |
814 | rds_iw_process_recv(conn, recv, wc.byte_len, &state); | 820 | rds_iw_process_recv(conn, recv, wc.byte_len, state); |
815 | } else { | 821 | } else { |
816 | rds_iw_conn_error(conn, "recv completion on " | 822 | rds_iw_conn_error(conn, "recv completion on " |
817 | "%pI4 had status %u, disconnecting and " | 823 | "%pI4 had status %u, disconnecting and " |
@@ -822,6 +828,17 @@ void rds_iw_recv_cq_comp_handler(struct ib_cq *cq, void *context) | |||
822 | 828 | ||
823 | rds_iw_ring_free(&ic->i_recv_ring, 1); | 829 | rds_iw_ring_free(&ic->i_recv_ring, 1); |
824 | } | 830 | } |
831 | } | ||
832 | |||
833 | void rds_iw_recv_tasklet_fn(unsigned long data) | ||
834 | { | ||
835 | struct rds_iw_connection *ic = (struct rds_iw_connection *) data; | ||
836 | struct rds_connection *conn = ic->conn; | ||
837 | struct rds_iw_ack_state state = { 0, }; | ||
838 | |||
839 | rds_poll_cq(ic, &state); | ||
840 | ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED); | ||
841 | rds_poll_cq(ic, &state); | ||
825 | 842 | ||
826 | if (state.ack_next_valid) | 843 | if (state.ack_next_valid) |
827 | rds_iw_set_ack(ic, state.ack_next, state.ack_required); | 844 | rds_iw_set_ack(ic, state.ack_next, state.ack_required); |
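Both the IB and iWARP refill paths also replace the racy "read the counter, then increment it later" pair with atomic_add_unless(), which bumps the allocation counter atomically only while it is still below the sysctl limit, and gives the slot back when the slab allocation fails. A small sketch of that capped-allocation idiom with hypothetical names:

static atomic_t example_allocation = ATOMIC_INIT(0);

static void *example_alloc_capped(struct kmem_cache *cache, gfp_t gfp, int limit)
{
	void *obj;

	/* Reserve a slot only if the cap has not been reached yet. */
	if (!atomic_add_unless(&example_allocation, 1, limit))
		return NULL;

	obj = kmem_cache_alloc(cache, gfp);
	if (obj == NULL)
		atomic_dec(&example_allocation);	/* give the slot back */
	return obj;
}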
diff --git a/net/rds/rdma.c b/net/rds/rdma.c index 8dc83d2caa58..971b5a668458 100644 --- a/net/rds/rdma.c +++ b/net/rds/rdma.c | |||
@@ -317,6 +317,30 @@ int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen) | |||
317 | return __rds_rdma_map(rs, &args, NULL, NULL); | 317 | return __rds_rdma_map(rs, &args, NULL, NULL); |
318 | } | 318 | } |
319 | 319 | ||
320 | int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen) | ||
321 | { | ||
322 | struct rds_get_mr_for_dest_args args; | ||
323 | struct rds_get_mr_args new_args; | ||
324 | |||
325 | if (optlen != sizeof(struct rds_get_mr_for_dest_args)) | ||
326 | return -EINVAL; | ||
327 | |||
328 | if (copy_from_user(&args, (struct rds_get_mr_for_dest_args __user *)optval, | ||
329 | sizeof(struct rds_get_mr_for_dest_args))) | ||
330 | return -EFAULT; | ||
331 | |||
332 | /* | ||
333 | * Initially, just behave like get_mr(). | ||
334 | * TODO: Implement get_mr as wrapper around this | ||
335 | * and deprecate it. | ||
336 | */ | ||
337 | new_args.vec = args.vec; | ||
338 | new_args.cookie_addr = args.cookie_addr; | ||
339 | new_args.flags = args.flags; | ||
340 | |||
341 | return __rds_rdma_map(rs, &new_args, NULL, NULL); | ||
342 | } | ||
343 | |||
320 | /* | 344 | /* |
321 | * Free the MR indicated by the given R_Key | 345 | * Free the MR indicated by the given R_Key |
322 | */ | 346 | */ |
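As the comment in the hunk notes, RDS_GET_MR_FOR_DEST currently just repackages its arguments into a struct rds_get_mr_args and reuses the existing mapping path, so from user space it behaves like RDS_GET_MR while leaving room for a destination address later. A hedged usage sketch follows; only vec, cookie_addr and flags are visible in the hunk, so the header location and any remaining fields of struct rds_get_mr_for_dest_args are assumptions.

#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/rds.h>	/* assumed location of the RDS UAPI definitions */

static int get_mr_for_dest(int fd, void *buf, size_t len, uint64_t *cookie)
{
	struct rds_get_mr_for_dest_args args;

	memset(&args, 0, sizeof(args));	/* leave any destination field zeroed */
	args.vec.addr = (uint64_t)(unsigned long)buf;
	args.vec.bytes = len;
	args.cookie_addr = (uint64_t)(unsigned long)cookie;
	args.flags = 0;

	return setsockopt(fd, SOL_RDS, RDS_GET_MR_FOR_DEST,
			  &args, sizeof(args));
}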
diff --git a/net/rds/rdma.h b/net/rds/rdma.h index 425512098b0b..909c39835a5d 100644 --- a/net/rds/rdma.h +++ b/net/rds/rdma.h | |||
@@ -61,6 +61,7 @@ static inline u32 rds_rdma_cookie_offset(rds_rdma_cookie_t cookie) | |||
61 | } | 61 | } |
62 | 62 | ||
63 | int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen); | 63 | int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen); |
64 | int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen); | ||
64 | int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen); | 65 | int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen); |
65 | void rds_rdma_drop_keys(struct rds_sock *rs); | 66 | void rds_rdma_drop_keys(struct rds_sock *rs); |
66 | int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm, | 67 | int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm, |
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c index 24b743eb0b1b..45474a436862 100644 --- a/net/rds/tcp_listen.c +++ b/net/rds/tcp_listen.c | |||
@@ -67,11 +67,11 @@ static int rds_tcp_accept_one(struct socket *sock) | |||
67 | inet = inet_sk(new_sock->sk); | 67 | inet = inet_sk(new_sock->sk); |
68 | 68 | ||
69 | rdsdebug("accepted tcp %u.%u.%u.%u:%u -> %u.%u.%u.%u:%u\n", | 69 | rdsdebug("accepted tcp %u.%u.%u.%u:%u -> %u.%u.%u.%u:%u\n", |
70 | NIPQUAD(inet->saddr), ntohs(inet->sport), | 70 | NIPQUAD(inet->inet_saddr), ntohs(inet->inet_sport), |
71 | NIPQUAD(inet->daddr), ntohs(inet->dport)); | 71 | NIPQUAD(inet->inet_daddr), ntohs(inet->inet_dport)); |
72 | 72 | ||
73 | conn = rds_conn_create(inet->saddr, inet->daddr, &rds_tcp_transport, | 73 | conn = rds_conn_create(inet->inet_saddr, inet->inet_daddr, |
74 | GFP_KERNEL); | 74 | &rds_tcp_transport, GFP_KERNEL); |
75 | if (IS_ERR(conn)) { | 75 | if (IS_ERR(conn)) { |
76 | ret = PTR_ERR(conn); | 76 | ret = PTR_ERR(conn); |
77 | goto out; | 77 | goto out; |
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index 502cce76621d..4de4287fec37 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c | |||
@@ -512,7 +512,8 @@ static struct proto rose_proto = { | |||
512 | .obj_size = sizeof(struct rose_sock), | 512 | .obj_size = sizeof(struct rose_sock), |
513 | }; | 513 | }; |
514 | 514 | ||
515 | static int rose_create(struct net *net, struct socket *sock, int protocol) | 515 | static int rose_create(struct net *net, struct socket *sock, int protocol, |
516 | int kern) | ||
516 | { | 517 | { |
517 | struct sock *sk; | 518 | struct sock *sk; |
518 | struct rose_sock *rose; | 519 | struct rose_sock *rose; |
@@ -1509,7 +1510,7 @@ static const struct file_operations rose_info_fops = { | |||
1509 | }; | 1510 | }; |
1510 | #endif /* CONFIG_PROC_FS */ | 1511 | #endif /* CONFIG_PROC_FS */ |
1511 | 1512 | ||
1512 | static struct net_proto_family rose_family_ops = { | 1513 | static const struct net_proto_family rose_family_ops = { |
1513 | .family = PF_ROSE, | 1514 | .family = PF_ROSE, |
1514 | .create = rose_create, | 1515 | .create = rose_create, |
1515 | .owner = THIS_MODULE, | 1516 | .owner = THIS_MODULE, |
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c index f3e21989b88c..ea2e72337e2f 100644 --- a/net/rose/rose_route.c +++ b/net/rose/rose_route.c | |||
@@ -600,13 +600,13 @@ struct net_device *rose_dev_first(void) | |||
600 | { | 600 | { |
601 | struct net_device *dev, *first = NULL; | 601 | struct net_device *dev, *first = NULL; |
602 | 602 | ||
603 | read_lock(&dev_base_lock); | 603 | rcu_read_lock(); |
604 | for_each_netdev(&init_net, dev) { | 604 | for_each_netdev_rcu(&init_net, dev) { |
605 | if ((dev->flags & IFF_UP) && dev->type == ARPHRD_ROSE) | 605 | if ((dev->flags & IFF_UP) && dev->type == ARPHRD_ROSE) |
606 | if (first == NULL || strncmp(dev->name, first->name, 3) < 0) | 606 | if (first == NULL || strncmp(dev->name, first->name, 3) < 0) |
607 | first = dev; | 607 | first = dev; |
608 | } | 608 | } |
609 | read_unlock(&dev_base_lock); | 609 | rcu_read_unlock(); |
610 | 610 | ||
611 | return first; | 611 | return first; |
612 | } | 612 | } |
@@ -618,8 +618,8 @@ struct net_device *rose_dev_get(rose_address *addr) | |||
618 | { | 618 | { |
619 | struct net_device *dev; | 619 | struct net_device *dev; |
620 | 620 | ||
621 | read_lock(&dev_base_lock); | 621 | rcu_read_lock(); |
622 | for_each_netdev(&init_net, dev) { | 622 | for_each_netdev_rcu(&init_net, dev) { |
623 | if ((dev->flags & IFF_UP) && dev->type == ARPHRD_ROSE && rosecmp(addr, (rose_address *)dev->dev_addr) == 0) { | 623 | if ((dev->flags & IFF_UP) && dev->type == ARPHRD_ROSE && rosecmp(addr, (rose_address *)dev->dev_addr) == 0) { |
624 | dev_hold(dev); | 624 | dev_hold(dev); |
625 | goto out; | 625 | goto out; |
@@ -627,7 +627,7 @@ struct net_device *rose_dev_get(rose_address *addr) | |||
627 | } | 627 | } |
628 | dev = NULL; | 628 | dev = NULL; |
629 | out: | 629 | out: |
630 | read_unlock(&dev_base_lock); | 630 | rcu_read_unlock(); |
631 | return dev; | 631 | return dev; |
632 | } | 632 | } |
633 | 633 | ||
@@ -635,14 +635,14 @@ static int rose_dev_exists(rose_address *addr) | |||
635 | { | 635 | { |
636 | struct net_device *dev; | 636 | struct net_device *dev; |
637 | 637 | ||
638 | read_lock(&dev_base_lock); | 638 | rcu_read_lock(); |
639 | for_each_netdev(&init_net, dev) { | 639 | for_each_netdev_rcu(&init_net, dev) { |
640 | if ((dev->flags & IFF_UP) && dev->type == ARPHRD_ROSE && rosecmp(addr, (rose_address *)dev->dev_addr) == 0) | 640 | if ((dev->flags & IFF_UP) && dev->type == ARPHRD_ROSE && rosecmp(addr, (rose_address *)dev->dev_addr) == 0) |
641 | goto out; | 641 | goto out; |
642 | } | 642 | } |
643 | dev = NULL; | 643 | dev = NULL; |
644 | out: | 644 | out: |
645 | read_unlock(&dev_base_lock); | 645 | rcu_read_unlock(); |
646 | return dev != NULL; | 646 | return dev != NULL; |
647 | } | 647 | } |
648 | 648 | ||
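The ROSE device scans above drop dev_base_lock in favour of the RCU netdev iterator: for_each_netdev_rcu() must run under rcu_read_lock(), and any device that will be used after the unlock needs dev_hold() taken inside the critical section, as rose_dev_get() does. A minimal sketch of the pattern (find_first_up_dev() is an illustrative helper, not part of the patch):

static struct net_device *find_first_up_dev(struct net *net, unsigned short type)
{
	struct net_device *dev, *found = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		if ((dev->flags & IFF_UP) && dev->type == type) {
			dev_hold(dev);	/* keep it alive past the unlock */
			found = dev;
			break;
		}
	}
	rcu_read_unlock();
	return found;	/* caller must dev_put() when done */
}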
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index a86afceaa94f..f978d02a248a 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c | |||
@@ -608,7 +608,8 @@ static unsigned int rxrpc_poll(struct file *file, struct socket *sock, | |||
608 | /* | 608 | /* |
609 | * create an RxRPC socket | 609 | * create an RxRPC socket |
610 | */ | 610 | */ |
611 | static int rxrpc_create(struct net *net, struct socket *sock, int protocol) | 611 | static int rxrpc_create(struct net *net, struct socket *sock, int protocol, |
612 | int kern) | ||
612 | { | 613 | { |
613 | struct rxrpc_sock *rx; | 614 | struct rxrpc_sock *rx; |
614 | struct sock *sk; | 615 | struct sock *sk; |
@@ -777,7 +778,7 @@ static struct proto rxrpc_proto = { | |||
777 | .max_header = sizeof(struct rxrpc_header), | 778 | .max_header = sizeof(struct rxrpc_header), |
778 | }; | 779 | }; |
779 | 780 | ||
780 | static struct net_proto_family rxrpc_family_ops = { | 781 | static const struct net_proto_family rxrpc_family_ops = { |
781 | .family = PF_RXRPC, | 782 | .family = PF_RXRPC, |
782 | .create = rxrpc_create, | 783 | .create = rxrpc_create, |
783 | .owner = THIS_MODULE, | 784 | .owner = THIS_MODULE, |
diff --git a/net/rxrpc/ar-recvmsg.c b/net/rxrpc/ar-recvmsg.c index a39bf97f8830..60c2b94e6b54 100644 --- a/net/rxrpc/ar-recvmsg.c +++ b/net/rxrpc/ar-recvmsg.c | |||
@@ -146,7 +146,7 @@ int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
146 | memcpy(msg->msg_name, | 146 | memcpy(msg->msg_name, |
147 | &call->conn->trans->peer->srx, | 147 | &call->conn->trans->peer->srx, |
148 | sizeof(call->conn->trans->peer->srx)); | 148 | sizeof(call->conn->trans->peer->srx)); |
149 | sock_recv_timestamp(msg, &rx->sk, skb); | 149 | sock_recv_ts_and_drops(msg, &rx->sk, skb); |
150 | } | 150 | } |
151 | 151 | ||
152 | /* receive the message */ | 152 | /* receive the message */ |
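Note: sock_recv_ts_and_drops() (added in net/socket.c further down in this series) wraps the old timestamp cmsg and additionally emits an SO_RXQ_OVFL control message carrying the socket's drop count when that option is enabled. A hedged user-space sketch of reading that counter on an ordinary UDP socket; the fallback option value and the port number are assumptions, and error handling is minimal:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <arpa/inet.h>

#ifndef SO_RXQ_OVFL
#define SO_RXQ_OVFL 40			/* assumed value if libc headers lack it */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int one = 1;
	struct sockaddr_in sa = { 0 };
	char buf[2048], cbuf[CMSG_SPACE(sizeof(unsigned int))];
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
			      .msg_control = cbuf,
			      .msg_controllen = sizeof(cbuf) };
	struct cmsghdr *cm;

	sa.sin_family = AF_INET;
	sa.sin_port = htons(9000);		/* arbitrary test port */
	sa.sin_addr.s_addr = htonl(INADDR_ANY);

	setsockopt(fd, SOL_SOCKET, SO_RXQ_OVFL, &one, sizeof(one));
	bind(fd, (struct sockaddr *)&sa, sizeof(sa));

	if (recvmsg(fd, &msg, 0) < 0)
		return 1;

	/* The drop counter, if any, arrives as ancillary data. */
	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm))
		if (cm->cmsg_level == SOL_SOCKET && cm->cmsg_type == SO_RXQ_OVFL)
			printf("datagrams dropped so far: %u\n",
			       *(unsigned int *)CMSG_DATA(cm));
	return 0;
}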
diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 2dfb3e7a040d..ca2e1fd2bf69 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c | |||
@@ -618,7 +618,8 @@ int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a, | |||
618 | goto errout; | 618 | goto errout; |
619 | 619 | ||
620 | if (gnet_stats_copy_basic(&d, &h->tcf_bstats) < 0 || | 620 | if (gnet_stats_copy_basic(&d, &h->tcf_bstats) < 0 || |
621 | gnet_stats_copy_rate_est(&d, &h->tcf_rate_est) < 0 || | 621 | gnet_stats_copy_rate_est(&d, &h->tcf_bstats, |
622 | &h->tcf_rate_est) < 0 || | ||
622 | gnet_stats_copy_queue(&d, &h->tcf_qstats) < 0) | 623 | gnet_stats_copy_queue(&d, &h->tcf_qstats) < 0) |
623 | goto errout; | 624 | goto errout; |
624 | 625 | ||
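Note: gnet_stats_copy_rate_est() now also receives the byte/packet counters the estimator was attached to; the same mechanical change repeats below for sch_api, cbq, drr and hfsc, while htb passes NULL. Presumably this lets the stats core check whether a rate estimator is really active on those counters before reporting it. An illustrative dump helper with the new call sequence (the container struct is hypothetical):

#include <net/gen_stats.h>

struct example_class {				/* hypothetical container */
	struct gnet_stats_basic_packed	bstats;
	struct gnet_stats_rate_est	rate_est;
	struct gnet_stats_queue		qstats;
};

static int example_dump_stats(struct example_class *cl, struct gnet_dump *d)
{
	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, &cl->qstats) < 0)
		return -1;
	return 0;
}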
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index b9aaab4e0354..797479369881 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c | |||
@@ -65,48 +65,53 @@ static int tcf_mirred_init(struct nlattr *nla, struct nlattr *est, | |||
65 | struct tc_mirred *parm; | 65 | struct tc_mirred *parm; |
66 | struct tcf_mirred *m; | 66 | struct tcf_mirred *m; |
67 | struct tcf_common *pc; | 67 | struct tcf_common *pc; |
68 | struct net_device *dev = NULL; | 68 | struct net_device *dev; |
69 | int ret = 0, err; | 69 | int ret, ok_push = 0; |
70 | int ok_push = 0; | ||
71 | 70 | ||
72 | if (nla == NULL) | 71 | if (nla == NULL) |
73 | return -EINVAL; | 72 | return -EINVAL; |
74 | 73 | ret = nla_parse_nested(tb, TCA_MIRRED_MAX, nla, mirred_policy); | |
75 | err = nla_parse_nested(tb, TCA_MIRRED_MAX, nla, mirred_policy); | 74 | if (ret < 0) |
76 | if (err < 0) | 75 | return ret; |
77 | return err; | ||
78 | |||
79 | if (tb[TCA_MIRRED_PARMS] == NULL) | 76 | if (tb[TCA_MIRRED_PARMS] == NULL) |
80 | return -EINVAL; | 77 | return -EINVAL; |
81 | parm = nla_data(tb[TCA_MIRRED_PARMS]); | 78 | parm = nla_data(tb[TCA_MIRRED_PARMS]); |
82 | 79 | switch (parm->eaction) { | |
80 | case TCA_EGRESS_MIRROR: | ||
81 | case TCA_EGRESS_REDIR: | ||
82 | break; | ||
83 | default: | ||
84 | return -EINVAL; | ||
85 | } | ||
83 | if (parm->ifindex) { | 86 | if (parm->ifindex) { |
84 | dev = __dev_get_by_index(&init_net, parm->ifindex); | 87 | dev = __dev_get_by_index(&init_net, parm->ifindex); |
85 | if (dev == NULL) | 88 | if (dev == NULL) |
86 | return -ENODEV; | 89 | return -ENODEV; |
87 | switch (dev->type) { | 90 | switch (dev->type) { |
88 | case ARPHRD_TUNNEL: | 91 | case ARPHRD_TUNNEL: |
89 | case ARPHRD_TUNNEL6: | 92 | case ARPHRD_TUNNEL6: |
90 | case ARPHRD_SIT: | 93 | case ARPHRD_SIT: |
91 | case ARPHRD_IPGRE: | 94 | case ARPHRD_IPGRE: |
92 | case ARPHRD_VOID: | 95 | case ARPHRD_VOID: |
93 | case ARPHRD_NONE: | 96 | case ARPHRD_NONE: |
94 | ok_push = 0; | 97 | ok_push = 0; |
95 | break; | 98 | break; |
96 | default: | 99 | default: |
97 | ok_push = 1; | 100 | ok_push = 1; |
98 | break; | 101 | break; |
99 | } | 102 | } |
103 | } else { | ||
104 | dev = NULL; | ||
100 | } | 105 | } |
101 | 106 | ||
102 | pc = tcf_hash_check(parm->index, a, bind, &mirred_hash_info); | 107 | pc = tcf_hash_check(parm->index, a, bind, &mirred_hash_info); |
103 | if (!pc) { | 108 | if (!pc) { |
104 | if (!parm->ifindex) | 109 | if (dev == NULL) |
105 | return -EINVAL; | 110 | return -EINVAL; |
106 | pc = tcf_hash_create(parm->index, est, a, sizeof(*m), bind, | 111 | pc = tcf_hash_create(parm->index, est, a, sizeof(*m), bind, |
107 | &mirred_idx_gen, &mirred_hash_info); | 112 | &mirred_idx_gen, &mirred_hash_info); |
108 | if (IS_ERR(pc)) | 113 | if (IS_ERR(pc)) |
109 | return PTR_ERR(pc); | 114 | return PTR_ERR(pc); |
110 | ret = ACT_P_CREATED; | 115 | ret = ACT_P_CREATED; |
111 | } else { | 116 | } else { |
112 | if (!ovr) { | 117 | if (!ovr) { |
@@ -119,12 +124,12 @@ static int tcf_mirred_init(struct nlattr *nla, struct nlattr *est, | |||
119 | spin_lock_bh(&m->tcf_lock); | 124 | spin_lock_bh(&m->tcf_lock); |
120 | m->tcf_action = parm->action; | 125 | m->tcf_action = parm->action; |
121 | m->tcfm_eaction = parm->eaction; | 126 | m->tcfm_eaction = parm->eaction; |
122 | if (parm->ifindex) { | 127 | if (dev != NULL) { |
123 | m->tcfm_ifindex = parm->ifindex; | 128 | m->tcfm_ifindex = parm->ifindex; |
124 | if (ret != ACT_P_CREATED) | 129 | if (ret != ACT_P_CREATED) |
125 | dev_put(m->tcfm_dev); | 130 | dev_put(m->tcfm_dev); |
126 | m->tcfm_dev = dev; | ||
127 | dev_hold(dev); | 131 | dev_hold(dev); |
132 | m->tcfm_dev = dev; | ||
128 | m->tcfm_ok_push = ok_push; | 133 | m->tcfm_ok_push = ok_push; |
129 | } | 134 | } |
130 | spin_unlock_bh(&m->tcf_lock); | 135 | spin_unlock_bh(&m->tcf_lock); |
@@ -148,47 +153,32 @@ static int tcf_mirred(struct sk_buff *skb, struct tc_action *a, | |||
148 | { | 153 | { |
149 | struct tcf_mirred *m = a->priv; | 154 | struct tcf_mirred *m = a->priv; |
150 | struct net_device *dev; | 155 | struct net_device *dev; |
151 | struct sk_buff *skb2 = NULL; | 156 | struct sk_buff *skb2; |
152 | u32 at = G_TC_AT(skb->tc_verd); | 157 | u32 at; |
158 | int retval, err = 1; | ||
153 | 159 | ||
154 | spin_lock(&m->tcf_lock); | 160 | spin_lock(&m->tcf_lock); |
155 | |||
156 | dev = m->tcfm_dev; | ||
157 | m->tcf_tm.lastuse = jiffies; | 161 | m->tcf_tm.lastuse = jiffies; |
158 | 162 | ||
159 | if (!(dev->flags&IFF_UP) ) { | 163 | dev = m->tcfm_dev; |
164 | if (!(dev->flags & IFF_UP)) { | ||
160 | if (net_ratelimit()) | 165 | if (net_ratelimit()) |
161 | printk("mirred to Houston: device %s is gone!\n", | 166 | printk("mirred to Houston: device %s is gone!\n", |
162 | dev->name); | 167 | dev->name); |
163 | bad_mirred: | 168 | goto out; |
164 | if (skb2 != NULL) | ||
165 | kfree_skb(skb2); | ||
166 | m->tcf_qstats.overlimits++; | ||
167 | m->tcf_bstats.bytes += qdisc_pkt_len(skb); | ||
168 | m->tcf_bstats.packets++; | ||
169 | spin_unlock(&m->tcf_lock); | ||
170 | /* should we be asking for packet to be dropped? | ||
171 | * may make sense for redirect case only | ||
172 | */ | ||
173 | return TC_ACT_SHOT; | ||
174 | } | 169 | } |
175 | 170 | ||
176 | skb2 = skb_act_clone(skb, GFP_ATOMIC); | 171 | skb2 = skb_act_clone(skb, GFP_ATOMIC); |
177 | if (skb2 == NULL) | 172 | if (skb2 == NULL) |
178 | goto bad_mirred; | 173 | goto out; |
179 | if (m->tcfm_eaction != TCA_EGRESS_MIRROR && | ||
180 | m->tcfm_eaction != TCA_EGRESS_REDIR) { | ||
181 | if (net_ratelimit()) | ||
182 | printk("tcf_mirred unknown action %d\n", | ||
183 | m->tcfm_eaction); | ||
184 | goto bad_mirred; | ||
185 | } | ||
186 | 174 | ||
187 | m->tcf_bstats.bytes += qdisc_pkt_len(skb2); | 175 | m->tcf_bstats.bytes += qdisc_pkt_len(skb2); |
188 | m->tcf_bstats.packets++; | 176 | m->tcf_bstats.packets++; |
189 | if (!(at & AT_EGRESS)) | 177 | at = G_TC_AT(skb->tc_verd); |
178 | if (!(at & AT_EGRESS)) { | ||
190 | if (m->tcfm_ok_push) | 179 | if (m->tcfm_ok_push) |
191 | skb_push(skb2, skb2->dev->hard_header_len); | 180 | skb_push(skb2, skb2->dev->hard_header_len); |
181 | } | ||
192 | 182 | ||
193 | /* mirror is always swallowed */ | 183 | /* mirror is always swallowed */ |
194 | if (m->tcfm_eaction != TCA_EGRESS_MIRROR) | 184 | if (m->tcfm_eaction != TCA_EGRESS_MIRROR) |
@@ -197,8 +187,23 @@ bad_mirred: | |||
197 | skb2->dev = dev; | 187 | skb2->dev = dev; |
198 | skb2->iif = skb->dev->ifindex; | 188 | skb2->iif = skb->dev->ifindex; |
199 | dev_queue_xmit(skb2); | 189 | dev_queue_xmit(skb2); |
190 | err = 0; | ||
191 | |||
192 | out: | ||
193 | if (err) { | ||
194 | m->tcf_qstats.overlimits++; | ||
195 | m->tcf_bstats.bytes += qdisc_pkt_len(skb); | ||
196 | m->tcf_bstats.packets++; | ||
197 | /* should we be asking for packet to be dropped? | ||
198 | * may make sense for redirect case only | ||
199 | */ | ||
200 | retval = TC_ACT_SHOT; | ||
201 | } else { | ||
202 | retval = m->tcf_action; | ||
203 | } | ||
200 | spin_unlock(&m->tcf_lock); | 204 | spin_unlock(&m->tcf_lock); |
201 | return m->tcf_action; | 205 | |
206 | return retval; | ||
202 | } | 207 | } |
203 | 208 | ||
204 | static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) | 209 | static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) |
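Note: tcf_mirred_init() now rejects unknown eaction values up front, and tcf_mirred() replaces the bad_mirred label with a single exit path: an err flag decides once, just before the unlock, whether to account a failure and return TC_ACT_SHOT or to return the configured action. A self-contained, non-kernel sketch of that single-unlock idiom (all names are illustrative, not the kernel types):

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct mirror {
	pthread_mutex_t lock;
	unsigned long   overlimits;
	unsigned long   bytes, packets;
	int             action;		/* value to return on success */
};

#define ACT_SHOT  (-1)

static int mirror_one(struct mirror *m, const char *data, size_t len)
{
	int retval, err = 1;
	char *copy;

	pthread_mutex_lock(&m->lock);

	copy = malloc(len);
	if (copy == NULL)
		goto out;		/* failure is accounted below, once */
	memcpy(copy, data, len);
	/* ... hand "copy" off for transmission here ... */
	free(copy);
	err = 0;
out:
	if (err) {
		m->overlimits++;
		retval = ACT_SHOT;
	} else {
		m->bytes += len;
		m->packets++;
		retval = m->action;
	}
	pthread_mutex_unlock(&m->lock);
	return retval;
}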
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c index 4ab916b8074b..e9607fe55b58 100644 --- a/net/sched/act_skbedit.c +++ b/net/sched/act_skbedit.c | |||
@@ -54,6 +54,8 @@ static int tcf_skbedit(struct sk_buff *skb, struct tc_action *a, | |||
54 | if (d->flags & SKBEDIT_F_QUEUE_MAPPING && | 54 | if (d->flags & SKBEDIT_F_QUEUE_MAPPING && |
55 | skb->dev->real_num_tx_queues > d->queue_mapping) | 55 | skb->dev->real_num_tx_queues > d->queue_mapping) |
56 | skb_set_queue_mapping(skb, d->queue_mapping); | 56 | skb_set_queue_mapping(skb, d->queue_mapping); |
57 | if (d->flags & SKBEDIT_F_MARK) | ||
58 | skb->mark = d->mark; | ||
57 | 59 | ||
58 | spin_unlock(&d->tcf_lock); | 60 | spin_unlock(&d->tcf_lock); |
59 | return d->tcf_action; | 61 | return d->tcf_action; |
@@ -63,6 +65,7 @@ static const struct nla_policy skbedit_policy[TCA_SKBEDIT_MAX + 1] = { | |||
63 | [TCA_SKBEDIT_PARMS] = { .len = sizeof(struct tc_skbedit) }, | 65 | [TCA_SKBEDIT_PARMS] = { .len = sizeof(struct tc_skbedit) }, |
64 | [TCA_SKBEDIT_PRIORITY] = { .len = sizeof(u32) }, | 66 | [TCA_SKBEDIT_PRIORITY] = { .len = sizeof(u32) }, |
65 | [TCA_SKBEDIT_QUEUE_MAPPING] = { .len = sizeof(u16) }, | 67 | [TCA_SKBEDIT_QUEUE_MAPPING] = { .len = sizeof(u16) }, |
68 | [TCA_SKBEDIT_MARK] = { .len = sizeof(u32) }, | ||
66 | }; | 69 | }; |
67 | 70 | ||
68 | static int tcf_skbedit_init(struct nlattr *nla, struct nlattr *est, | 71 | static int tcf_skbedit_init(struct nlattr *nla, struct nlattr *est, |
@@ -72,7 +75,7 @@ static int tcf_skbedit_init(struct nlattr *nla, struct nlattr *est, | |||
72 | struct tc_skbedit *parm; | 75 | struct tc_skbedit *parm; |
73 | struct tcf_skbedit *d; | 76 | struct tcf_skbedit *d; |
74 | struct tcf_common *pc; | 77 | struct tcf_common *pc; |
75 | u32 flags = 0, *priority = NULL; | 78 | u32 flags = 0, *priority = NULL, *mark = NULL; |
76 | u16 *queue_mapping = NULL; | 79 | u16 *queue_mapping = NULL; |
77 | int ret = 0, err; | 80 | int ret = 0, err; |
78 | 81 | ||
@@ -95,6 +98,12 @@ static int tcf_skbedit_init(struct nlattr *nla, struct nlattr *est, | |||
95 | flags |= SKBEDIT_F_QUEUE_MAPPING; | 98 | flags |= SKBEDIT_F_QUEUE_MAPPING; |
96 | queue_mapping = nla_data(tb[TCA_SKBEDIT_QUEUE_MAPPING]); | 99 | queue_mapping = nla_data(tb[TCA_SKBEDIT_QUEUE_MAPPING]); |
97 | } | 100 | } |
101 | |||
102 | if (tb[TCA_SKBEDIT_MARK] != NULL) { | ||
103 | flags |= SKBEDIT_F_MARK; | ||
104 | mark = nla_data(tb[TCA_SKBEDIT_MARK]); | ||
105 | } | ||
106 | |||
98 | if (!flags) | 107 | if (!flags) |
99 | return -EINVAL; | 108 | return -EINVAL; |
100 | 109 | ||
@@ -124,6 +133,9 @@ static int tcf_skbedit_init(struct nlattr *nla, struct nlattr *est, | |||
124 | d->priority = *priority; | 133 | d->priority = *priority; |
125 | if (flags & SKBEDIT_F_QUEUE_MAPPING) | 134 | if (flags & SKBEDIT_F_QUEUE_MAPPING) |
126 | d->queue_mapping = *queue_mapping; | 135 | d->queue_mapping = *queue_mapping; |
136 | if (flags & SKBEDIT_F_MARK) | ||
137 | d->mark = *mark; | ||
138 | |||
127 | d->tcf_action = parm->action; | 139 | d->tcf_action = parm->action; |
128 | 140 | ||
129 | spin_unlock_bh(&d->tcf_lock); | 141 | spin_unlock_bh(&d->tcf_lock); |
@@ -161,6 +173,9 @@ static inline int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a, | |||
161 | if (d->flags & SKBEDIT_F_QUEUE_MAPPING) | 173 | if (d->flags & SKBEDIT_F_QUEUE_MAPPING) |
162 | NLA_PUT(skb, TCA_SKBEDIT_QUEUE_MAPPING, | 174 | NLA_PUT(skb, TCA_SKBEDIT_QUEUE_MAPPING, |
163 | sizeof(d->queue_mapping), &d->queue_mapping); | 175 | sizeof(d->queue_mapping), &d->queue_mapping); |
176 | if (d->flags & SKBEDIT_F_MARK) | ||
177 | NLA_PUT(skb, TCA_SKBEDIT_MARK, sizeof(d->mark), | ||
178 | &d->mark); | ||
164 | t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install); | 179 | t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install); |
165 | t.lastuse = jiffies_to_clock_t(jiffies - d->tcf_tm.lastuse); | 180 | t.lastuse = jiffies_to_clock_t(jiffies - d->tcf_tm.lastuse); |
166 | t.expires = jiffies_to_clock_t(d->tcf_tm.expires); | 181 | t.expires = jiffies_to_clock_t(d->tcf_tm.expires); |
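Note: act_skbedit grows an optional TCA_SKBEDIT_MARK attribute: a u32 that, when present, sets a flag and is later written to skb->mark under the action lock, and is dumped back symmetrically. A hedged kernel-style sketch of the optional-u32-attribute idiom in isolation (the enum, policy and flag value are hypothetical):

#include <net/netlink.h>

enum { EX_ATTR_UNSPEC, EX_ATTR_MARK, __EX_ATTR_MAX };
#define EX_ATTR_MAX (__EX_ATTR_MAX - 1)

static const struct nla_policy ex_policy[EX_ATTR_MAX + 1] = {
	[EX_ATTR_MARK] = { .type = NLA_U32 },
};

/* Parse a nested attribute block; the mark is optional, so only set the
 * corresponding flag when it is actually present. */
static int ex_parse_mark(struct nlattr *nla, u32 *mark, u32 *flags)
{
	struct nlattr *tb[EX_ATTR_MAX + 1];
	int err = nla_parse_nested(tb, EX_ATTR_MAX, nla, ex_policy);

	if (err < 0)
		return err;
	if (tb[EX_ATTR_MARK]) {
		*mark = nla_get_u32(tb[EX_ATTR_MARK]);
		*flags |= 0x4;		/* e.g. an SKBEDIT_F_MARK-style flag */
	}
	return 0;
}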
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 7cf6c0fbc7a6..c024da77824f 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c | |||
@@ -404,6 +404,7 @@ static int tcf_node_dump(struct tcf_proto *tp, unsigned long n, | |||
404 | a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTFILTER); | 404 | a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTFILTER); |
405 | } | 405 | } |
406 | 406 | ||
407 | /* called with RTNL */ | ||
407 | static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) | 408 | static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) |
408 | { | 409 | { |
409 | struct net *net = sock_net(skb->sk); | 410 | struct net *net = sock_net(skb->sk); |
@@ -422,7 +423,7 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) | |||
422 | 423 | ||
423 | if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm))) | 424 | if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm))) |
424 | return skb->len; | 425 | return skb->len; |
425 | if ((dev = dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL) | 426 | if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL) |
426 | return skb->len; | 427 | return skb->len; |
427 | 428 | ||
428 | if (!tcm->tcm_parent) | 429 | if (!tcm->tcm_parent) |
@@ -484,7 +485,6 @@ errout: | |||
484 | if (cl) | 485 | if (cl) |
485 | cops->put(q, cl); | 486 | cops->put(q, cl); |
486 | out: | 487 | out: |
487 | dev_put(dev); | ||
488 | return skb->len; | 488 | return skb->len; |
489 | } | 489 | } |
490 | 490 | ||
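Note: tc_dump_tfilter() runs with the RTNL held (hence the new comment), so it can use __dev_get_by_index(), which takes no reference; the dev_put() on the way out disappears with it. A short sketch contrasting the two lookup disciplines (the wrapper functions are illustrative):

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

/* Under RTNL: no reference is taken, none must be dropped. */
static bool ifindex_exists_rtnl(struct net *net, int ifindex)
{
	ASSERT_RTNL();
	return __dev_get_by_index(net, ifindex) != NULL;
}

/* Without RTNL: the lookup grabs a reference that must be released. */
static bool ifindex_exists_refcounted(struct net *net, int ifindex)
{
	struct net_device *dev = dev_get_by_index(net, ifindex);

	if (!dev)
		return false;
	dev_put(dev);
	return true;
}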
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c index 18d85d259104..8e8d836f00c0 100644 --- a/net/sched/em_meta.c +++ b/net/sched/em_meta.c | |||
@@ -303,17 +303,17 @@ META_COLLECTOR(var_sk_bound_if) | |||
303 | { | 303 | { |
304 | SKIP_NONLOCAL(skb); | 304 | SKIP_NONLOCAL(skb); |
305 | 305 | ||
306 | if (skb->sk->sk_bound_dev_if == 0) { | 306 | if (skb->sk->sk_bound_dev_if == 0) { |
307 | dst->value = (unsigned long) "any"; | 307 | dst->value = (unsigned long) "any"; |
308 | dst->len = 3; | 308 | dst->len = 3; |
309 | } else { | 309 | } else { |
310 | struct net_device *dev; | 310 | struct net_device *dev; |
311 | 311 | ||
312 | dev = dev_get_by_index(&init_net, skb->sk->sk_bound_dev_if); | 312 | rcu_read_lock(); |
313 | dev = dev_get_by_index_rcu(&init_net, skb->sk->sk_bound_dev_if); | ||
313 | *err = var_dev(dev, dst); | 314 | *err = var_dev(dev, dst); |
314 | if (dev) | 315 | rcu_read_unlock(); |
315 | dev_put(dev); | 316 | } |
316 | } | ||
317 | } | 317 | } |
318 | 318 | ||
319 | META_COLLECTOR(int_sk_refcnt) | 319 | META_COLLECTOR(int_sk_refcnt) |
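Note: the bound-interface collector only needs the device long enough to copy its name, so the refcounted lookup becomes dev_get_by_index_rcu() inside an RCU read-side section. An illustrative sketch of that lookup shape, copying out what is needed before unlocking:

#include <linux/netdevice.h>
#include <linux/string.h>

/* The device pointer is only valid between rcu_read_lock() and
 * rcu_read_unlock(), so everything needed from it is copied out first. */
static int copy_ifname_rcu(struct net *net, int ifindex,
			   char *name, size_t len)
{
	struct net_device *dev;
	int err = -ENODEV;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev) {
		strlcpy(name, dev->name, len);
		err = 0;
	}
	rcu_read_unlock();
	return err;
}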
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 903e4188b6ca..876ba4bb6ae9 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c | |||
@@ -1179,7 +1179,7 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid, | |||
1179 | goto nla_put_failure; | 1179 | goto nla_put_failure; |
1180 | 1180 | ||
1181 | if (gnet_stats_copy_basic(&d, &q->bstats) < 0 || | 1181 | if (gnet_stats_copy_basic(&d, &q->bstats) < 0 || |
1182 | gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 || | 1182 | gnet_stats_copy_rate_est(&d, &q->bstats, &q->rate_est) < 0 || |
1183 | gnet_stats_copy_queue(&d, &q->qstats) < 0) | 1183 | gnet_stats_copy_queue(&d, &q->qstats) < 0) |
1184 | goto nla_put_failure; | 1184 | goto nla_put_failure; |
1185 | 1185 | ||
@@ -1279,9 +1279,10 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb) | |||
1279 | 1279 | ||
1280 | s_idx = cb->args[0]; | 1280 | s_idx = cb->args[0]; |
1281 | s_q_idx = q_idx = cb->args[1]; | 1281 | s_q_idx = q_idx = cb->args[1]; |
1282 | read_lock(&dev_base_lock); | 1282 | |
1283 | rcu_read_lock(); | ||
1283 | idx = 0; | 1284 | idx = 0; |
1284 | for_each_netdev(&init_net, dev) { | 1285 | for_each_netdev_rcu(&init_net, dev) { |
1285 | struct netdev_queue *dev_queue; | 1286 | struct netdev_queue *dev_queue; |
1286 | 1287 | ||
1287 | if (idx < s_idx) | 1288 | if (idx < s_idx) |
@@ -1302,7 +1303,7 @@ cont: | |||
1302 | } | 1303 | } |
1303 | 1304 | ||
1304 | done: | 1305 | done: |
1305 | read_unlock(&dev_base_lock); | 1306 | rcu_read_unlock(); |
1306 | 1307 | ||
1307 | cb->args[0] = idx; | 1308 | cb->args[0] = idx; |
1308 | cb->args[1] = q_idx; | 1309 | cb->args[1] = q_idx; |
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index 5b132c473264..3846d65bc03e 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c | |||
@@ -1609,7 +1609,7 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg, | |||
1609 | cl->xstats.undertime = cl->undertime - q->now; | 1609 | cl->xstats.undertime = cl->undertime - q->now; |
1610 | 1610 | ||
1611 | if (gnet_stats_copy_basic(d, &cl->bstats) < 0 || | 1611 | if (gnet_stats_copy_basic(d, &cl->bstats) < 0 || |
1612 | gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 || | 1612 | gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 || |
1613 | gnet_stats_copy_queue(d, &cl->qstats) < 0) | 1613 | gnet_stats_copy_queue(d, &cl->qstats) < 0) |
1614 | return -1; | 1614 | return -1; |
1615 | 1615 | ||
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c index 5a888af7e5da..a65604f8f2b8 100644 --- a/net/sched/sch_drr.c +++ b/net/sched/sch_drr.c | |||
@@ -280,7 +280,7 @@ static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg, | |||
280 | } | 280 | } |
281 | 281 | ||
282 | if (gnet_stats_copy_basic(d, &cl->bstats) < 0 || | 282 | if (gnet_stats_copy_basic(d, &cl->bstats) < 0 || |
283 | gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 || | 283 | gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 || |
284 | gnet_stats_copy_queue(d, &cl->qdisc->qstats) < 0) | 284 | gnet_stats_copy_queue(d, &cl->qdisc->qstats) < 0) |
285 | return -1; | 285 | return -1; |
286 | 286 | ||
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 4ae6aa562f2b..5173c1e1b19c 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c | |||
@@ -119,32 +119,26 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q, | |||
119 | spin_unlock(root_lock); | 119 | spin_unlock(root_lock); |
120 | 120 | ||
121 | HARD_TX_LOCK(dev, txq, smp_processor_id()); | 121 | HARD_TX_LOCK(dev, txq, smp_processor_id()); |
122 | if (!netif_tx_queue_stopped(txq) && | 122 | if (!netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq)) |
123 | !netif_tx_queue_frozen(txq)) | ||
124 | ret = dev_hard_start_xmit(skb, dev, txq); | 123 | ret = dev_hard_start_xmit(skb, dev, txq); |
124 | |||
125 | HARD_TX_UNLOCK(dev, txq); | 125 | HARD_TX_UNLOCK(dev, txq); |
126 | 126 | ||
127 | spin_lock(root_lock); | 127 | spin_lock(root_lock); |
128 | 128 | ||
129 | switch (ret) { | 129 | if (dev_xmit_complete(ret)) { |
130 | case NETDEV_TX_OK: | 130 | /* Driver sent out skb successfully or skb was consumed */ |
131 | /* Driver sent out skb successfully */ | ||
132 | ret = qdisc_qlen(q); | 131 | ret = qdisc_qlen(q); |
133 | break; | 132 | } else if (ret == NETDEV_TX_LOCKED) { |
134 | |||
135 | case NETDEV_TX_LOCKED: | ||
136 | /* Driver try lock failed */ | 133 | /* Driver try lock failed */ |
137 | ret = handle_dev_cpu_collision(skb, txq, q); | 134 | ret = handle_dev_cpu_collision(skb, txq, q); |
138 | break; | 135 | } else { |
139 | |||
140 | default: | ||
141 | /* Driver returned NETDEV_TX_BUSY - requeue skb */ | 136 | /* Driver returned NETDEV_TX_BUSY - requeue skb */ |
142 | if (unlikely (ret != NETDEV_TX_BUSY && net_ratelimit())) | 137 | if (unlikely (ret != NETDEV_TX_BUSY && net_ratelimit())) |
143 | printk(KERN_WARNING "BUG %s code %d qlen %d\n", | 138 | printk(KERN_WARNING "BUG %s code %d qlen %d\n", |
144 | dev->name, ret, q->q.qlen); | 139 | dev->name, ret, q->q.qlen); |
145 | 140 | ||
146 | ret = dev_requeue_skb(skb, q); | 141 | ret = dev_requeue_skb(skb, q); |
147 | break; | ||
148 | } | 142 | } |
149 | 143 | ||
150 | if (ret && (netif_tx_queue_stopped(txq) || | 144 | if (ret && (netif_tx_queue_stopped(txq) || |
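Note: sch_direct_xmit() stops switching on every NETDEV_TX_* value: dev_xmit_complete() folds NETDEV_TX_OK together with the cases where the driver already consumed the skb, leaving only the LOCKED and BUSY branches explicit. A rough, hedged approximation of the decision that predicate expresses (not the kernel's actual definition):

#include <linux/netdevice.h>

/* Illustration only: "the skb is no longer the queue's problem".
 * The real helper lives in netdevice.h; this just mirrors the caller's
 * reading of it in the hunk above. */
static inline bool xmit_consumed_skb(int rc)
{
	if (rc == NETDEV_TX_OK)
		return true;			/* transmitted */
	if (rc == NETDEV_TX_LOCKED || rc == NETDEV_TX_BUSY)
		return false;			/* must be retried or requeued */
	return true;				/* driver dropped/consumed it */
}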
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index 2c5c76be18f8..b38b39c60752 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c | |||
@@ -1375,7 +1375,7 @@ hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg, | |||
1375 | xstats.rtwork = cl->cl_cumul; | 1375 | xstats.rtwork = cl->cl_cumul; |
1376 | 1376 | ||
1377 | if (gnet_stats_copy_basic(d, &cl->bstats) < 0 || | 1377 | if (gnet_stats_copy_basic(d, &cl->bstats) < 0 || |
1378 | gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 || | 1378 | gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 || |
1379 | gnet_stats_copy_queue(d, &cl->qstats) < 0) | 1379 | gnet_stats_copy_queue(d, &cl->qstats) < 0) |
1380 | return -1; | 1380 | return -1; |
1381 | 1381 | ||
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 85acab9dc6fd..2e38d1abd830 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c | |||
@@ -1105,7 +1105,7 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d) | |||
1105 | cl->xstats.ctokens = cl->ctokens; | 1105 | cl->xstats.ctokens = cl->ctokens; |
1106 | 1106 | ||
1107 | if (gnet_stats_copy_basic(d, &cl->bstats) < 0 || | 1107 | if (gnet_stats_copy_basic(d, &cl->bstats) < 0 || |
1108 | gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 || | 1108 | gnet_stats_copy_rate_est(d, NULL, &cl->rate_est) < 0 || |
1109 | gnet_stats_copy_queue(d, &cl->qstats) < 0) | 1109 | gnet_stats_copy_queue(d, &cl->qstats) < 0) |
1110 | return -1; | 1110 | return -1; |
1111 | 1111 | ||
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index bb280e60e00a..cc50fbe99291 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c | |||
@@ -837,15 +837,16 @@ static int sctp_inet6_bind_verify(struct sctp_sock *opt, union sctp_addr *addr) | |||
837 | if (type & IPV6_ADDR_LINKLOCAL) { | 837 | if (type & IPV6_ADDR_LINKLOCAL) { |
838 | if (!addr->v6.sin6_scope_id) | 838 | if (!addr->v6.sin6_scope_id) |
839 | return 0; | 839 | return 0; |
840 | dev = dev_get_by_index(&init_net, addr->v6.sin6_scope_id); | 840 | rcu_read_lock(); |
841 | if (!dev) | 841 | dev = dev_get_by_index_rcu(&init_net, |
842 | return 0; | 842 | addr->v6.sin6_scope_id); |
843 | if (!ipv6_chk_addr(&init_net, &addr->v6.sin6_addr, | 843 | if (!dev || |
844 | !ipv6_chk_addr(&init_net, &addr->v6.sin6_addr, | ||
844 | dev, 0)) { | 845 | dev, 0)) { |
845 | dev_put(dev); | 846 | rcu_read_unlock(); |
846 | return 0; | 847 | return 0; |
847 | } | 848 | } |
848 | dev_put(dev); | 849 | rcu_read_unlock(); |
849 | } else if (type == IPV6_ADDR_MAPPED) { | 850 | } else if (type == IPV6_ADDR_MAPPED) { |
850 | if (!opt->v4mapped) | 851 | if (!opt->v4mapped) |
851 | return 0; | 852 | return 0; |
@@ -873,10 +874,12 @@ static int sctp_inet6_send_verify(struct sctp_sock *opt, union sctp_addr *addr) | |||
873 | if (type & IPV6_ADDR_LINKLOCAL) { | 874 | if (type & IPV6_ADDR_LINKLOCAL) { |
874 | if (!addr->v6.sin6_scope_id) | 875 | if (!addr->v6.sin6_scope_id) |
875 | return 0; | 876 | return 0; |
876 | dev = dev_get_by_index(&init_net, addr->v6.sin6_scope_id); | 877 | rcu_read_lock(); |
878 | dev = dev_get_by_index_rcu(&init_net, | ||
879 | addr->v6.sin6_scope_id); | ||
880 | rcu_read_unlock(); | ||
877 | if (!dev) | 881 | if (!dev) |
878 | return 0; | 882 | return 0; |
879 | dev_put(dev); | ||
880 | } | 883 | } |
881 | af = opt->pf->af; | 884 | af = opt->pf->af; |
882 | } | 885 | } |
@@ -930,7 +933,6 @@ static struct inet_protosw sctpv6_seqpacket_protosw = { | |||
930 | .protocol = IPPROTO_SCTP, | 933 | .protocol = IPPROTO_SCTP, |
931 | .prot = &sctpv6_prot, | 934 | .prot = &sctpv6_prot, |
932 | .ops = &inet6_seqpacket_ops, | 935 | .ops = &inet6_seqpacket_ops, |
933 | .capability = -1, | ||
934 | .no_check = 0, | 936 | .no_check = 0, |
935 | .flags = SCTP_PROTOSW_FLAG | 937 | .flags = SCTP_PROTOSW_FLAG |
936 | }; | 938 | }; |
@@ -939,7 +941,6 @@ static struct inet_protosw sctpv6_stream_protosw = { | |||
939 | .protocol = IPPROTO_SCTP, | 941 | .protocol = IPPROTO_SCTP, |
940 | .prot = &sctpv6_prot, | 942 | .prot = &sctpv6_prot, |
941 | .ops = &inet6_seqpacket_ops, | 943 | .ops = &inet6_seqpacket_ops, |
942 | .capability = -1, | ||
943 | .no_check = 0, | 944 | .no_check = 0, |
944 | .flags = SCTP_PROTOSW_FLAG, | 945 | .flags = SCTP_PROTOSW_FLAG, |
945 | }; | 946 | }; |
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 612dc878e05c..08ef203d36ac 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c | |||
@@ -205,14 +205,14 @@ static void sctp_get_local_addr_list(void) | |||
205 | struct list_head *pos; | 205 | struct list_head *pos; |
206 | struct sctp_af *af; | 206 | struct sctp_af *af; |
207 | 207 | ||
208 | read_lock(&dev_base_lock); | 208 | rcu_read_lock(); |
209 | for_each_netdev(&init_net, dev) { | 209 | for_each_netdev_rcu(&init_net, dev) { |
210 | __list_for_each(pos, &sctp_address_families) { | 210 | __list_for_each(pos, &sctp_address_families) { |
211 | af = list_entry(pos, struct sctp_af, list); | 211 | af = list_entry(pos, struct sctp_af, list); |
212 | af->copy_addrlist(&sctp_local_addr_list, dev); | 212 | af->copy_addrlist(&sctp_local_addr_list, dev); |
213 | } | 213 | } |
214 | } | 214 | } |
215 | read_unlock(&dev_base_lock); | 215 | rcu_read_unlock(); |
216 | } | 216 | } |
217 | 217 | ||
218 | /* Free the existing local addresses. */ | 218 | /* Free the existing local addresses. */ |
@@ -296,19 +296,19 @@ static void sctp_v4_from_sk(union sctp_addr *addr, struct sock *sk) | |||
296 | { | 296 | { |
297 | addr->v4.sin_family = AF_INET; | 297 | addr->v4.sin_family = AF_INET; |
298 | addr->v4.sin_port = 0; | 298 | addr->v4.sin_port = 0; |
299 | addr->v4.sin_addr.s_addr = inet_sk(sk)->rcv_saddr; | 299 | addr->v4.sin_addr.s_addr = inet_sk(sk)->inet_rcv_saddr; |
300 | } | 300 | } |
301 | 301 | ||
302 | /* Initialize sk->sk_rcv_saddr from sctp_addr. */ | 302 | /* Initialize sk->sk_rcv_saddr from sctp_addr. */ |
303 | static void sctp_v4_to_sk_saddr(union sctp_addr *addr, struct sock *sk) | 303 | static void sctp_v4_to_sk_saddr(union sctp_addr *addr, struct sock *sk) |
304 | { | 304 | { |
305 | inet_sk(sk)->rcv_saddr = addr->v4.sin_addr.s_addr; | 305 | inet_sk(sk)->inet_rcv_saddr = addr->v4.sin_addr.s_addr; |
306 | } | 306 | } |
307 | 307 | ||
308 | /* Initialize sk->sk_daddr from sctp_addr. */ | 308 | /* Initialize sk->sk_daddr from sctp_addr. */ |
309 | static void sctp_v4_to_sk_daddr(union sctp_addr *addr, struct sock *sk) | 309 | static void sctp_v4_to_sk_daddr(union sctp_addr *addr, struct sock *sk) |
310 | { | 310 | { |
311 | inet_sk(sk)->daddr = addr->v4.sin_addr.s_addr; | 311 | inet_sk(sk)->inet_daddr = addr->v4.sin_addr.s_addr; |
312 | } | 312 | } |
313 | 313 | ||
314 | /* Initialize a sctp_addr from an address parameter. */ | 314 | /* Initialize a sctp_addr from an address parameter. */ |
@@ -598,7 +598,7 @@ static struct sock *sctp_v4_create_accept_sk(struct sock *sk, | |||
598 | 598 | ||
599 | newinet = inet_sk(newsk); | 599 | newinet = inet_sk(newsk); |
600 | 600 | ||
601 | newinet->daddr = asoc->peer.primary_addr.v4.sin_addr.s_addr; | 601 | newinet->inet_daddr = asoc->peer.primary_addr.v4.sin_addr.s_addr; |
602 | 602 | ||
603 | sk_refcnt_debug_inc(newsk); | 603 | sk_refcnt_debug_inc(newsk); |
604 | 604 | ||
@@ -909,7 +909,6 @@ static struct inet_protosw sctp_seqpacket_protosw = { | |||
909 | .protocol = IPPROTO_SCTP, | 909 | .protocol = IPPROTO_SCTP, |
910 | .prot = &sctp_prot, | 910 | .prot = &sctp_prot, |
911 | .ops = &inet_seqpacket_ops, | 911 | .ops = &inet_seqpacket_ops, |
912 | .capability = -1, | ||
913 | .no_check = 0, | 912 | .no_check = 0, |
914 | .flags = SCTP_PROTOSW_FLAG | 913 | .flags = SCTP_PROTOSW_FLAG |
915 | }; | 914 | }; |
@@ -918,7 +917,6 @@ static struct inet_protosw sctp_stream_protosw = { | |||
918 | .protocol = IPPROTO_SCTP, | 917 | .protocol = IPPROTO_SCTP, |
919 | .prot = &sctp_prot, | 918 | .prot = &sctp_prot, |
920 | .ops = &inet_seqpacket_ops, | 919 | .ops = &inet_seqpacket_ops, |
921 | .capability = -1, | ||
922 | .no_check = 0, | 920 | .no_check = 0, |
923 | .flags = SCTP_PROTOSW_FLAG | 921 | .flags = SCTP_PROTOSW_FLAG |
924 | }; | 922 | }; |
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 3a95fcb17a9e..66b1f02b17ba 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
@@ -394,7 +394,7 @@ SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len) | |||
394 | 394 | ||
395 | /* Refresh ephemeral port. */ | 395 | /* Refresh ephemeral port. */ |
396 | if (!bp->port) | 396 | if (!bp->port) |
397 | bp->port = inet_sk(sk)->num; | 397 | bp->port = inet_sk(sk)->inet_num; |
398 | 398 | ||
399 | /* Add the address to the bind address list. | 399 | /* Add the address to the bind address list. |
400 | * Use GFP_ATOMIC since BHs will be disabled. | 400 | * Use GFP_ATOMIC since BHs will be disabled. |
@@ -403,7 +403,7 @@ SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len) | |||
403 | 403 | ||
404 | /* Copy back into socket for getsockname() use. */ | 404 | /* Copy back into socket for getsockname() use. */ |
405 | if (!ret) { | 405 | if (!ret) { |
406 | inet_sk(sk)->sport = htons(inet_sk(sk)->num); | 406 | inet_sk(sk)->inet_sport = htons(inet_sk(sk)->inet_num); |
407 | af->to_sk_saddr(addr, sk); | 407 | af->to_sk_saddr(addr, sk); |
408 | } | 408 | } |
409 | 409 | ||
@@ -1117,7 +1117,7 @@ static int __sctp_connect(struct sock* sk, | |||
1117 | } | 1117 | } |
1118 | 1118 | ||
1119 | /* Initialize sk's dport and daddr for getpeername() */ | 1119 | /* Initialize sk's dport and daddr for getpeername() */ |
1120 | inet_sk(sk)->dport = htons(asoc->peer.port); | 1120 | inet_sk(sk)->inet_dport = htons(asoc->peer.port); |
1121 | af = sctp_get_af_specific(sa_addr->sa.sa_family); | 1121 | af = sctp_get_af_specific(sa_addr->sa.sa_family); |
1122 | af->to_sk_daddr(sa_addr, sk); | 1122 | af->to_sk_daddr(sa_addr, sk); |
1123 | sk->sk_err = 0; | 1123 | sk->sk_err = 0; |
@@ -1968,7 +1968,7 @@ SCTP_STATIC int sctp_recvmsg(struct kiocb *iocb, struct sock *sk, | |||
1968 | if (err) | 1968 | if (err) |
1969 | goto out_free; | 1969 | goto out_free; |
1970 | 1970 | ||
1971 | sock_recv_timestamp(msg, sk, skb); | 1971 | sock_recv_ts_and_drops(msg, sk, skb); |
1972 | if (sctp_ulpevent_is_notification(event)) { | 1972 | if (sctp_ulpevent_is_notification(event)) { |
1973 | msg->msg_flags |= MSG_NOTIFICATION; | 1973 | msg->msg_flags |= MSG_NOTIFICATION; |
1974 | sp->pf->event_msgname(event, msg->msg_name, addr_len); | 1974 | sp->pf->event_msgname(event, msg->msg_name, addr_len); |
@@ -5861,7 +5861,7 @@ pp_not_found: | |||
5861 | */ | 5861 | */ |
5862 | success: | 5862 | success: |
5863 | if (!sctp_sk(sk)->bind_hash) { | 5863 | if (!sctp_sk(sk)->bind_hash) { |
5864 | inet_sk(sk)->num = snum; | 5864 | inet_sk(sk)->inet_num = snum; |
5865 | sk_add_bind_node(sk, &pp->owner); | 5865 | sk_add_bind_node(sk, &pp->owner); |
5866 | sctp_sk(sk)->bind_hash = pp; | 5866 | sctp_sk(sk)->bind_hash = pp; |
5867 | } | 5867 | } |
@@ -5933,7 +5933,7 @@ SCTP_STATIC int sctp_listen_start(struct sock *sk, int backlog) | |||
5933 | if (sctp_autobind(sk)) | 5933 | if (sctp_autobind(sk)) |
5934 | return -EAGAIN; | 5934 | return -EAGAIN; |
5935 | } else { | 5935 | } else { |
5936 | if (sctp_get_port(sk, inet_sk(sk)->num)) { | 5936 | if (sctp_get_port(sk, inet_sk(sk)->inet_num)) { |
5937 | sk->sk_state = SCTP_SS_CLOSED; | 5937 | sk->sk_state = SCTP_SS_CLOSED; |
5938 | return -EADDRINUSE; | 5938 | return -EADDRINUSE; |
5939 | } | 5939 | } |
@@ -6104,14 +6104,14 @@ static void sctp_bucket_destroy(struct sctp_bind_bucket *pp) | |||
6104 | static inline void __sctp_put_port(struct sock *sk) | 6104 | static inline void __sctp_put_port(struct sock *sk) |
6105 | { | 6105 | { |
6106 | struct sctp_bind_hashbucket *head = | 6106 | struct sctp_bind_hashbucket *head = |
6107 | &sctp_port_hashtable[sctp_phashfn(inet_sk(sk)->num)]; | 6107 | &sctp_port_hashtable[sctp_phashfn(inet_sk(sk)->inet_num)]; |
6108 | struct sctp_bind_bucket *pp; | 6108 | struct sctp_bind_bucket *pp; |
6109 | 6109 | ||
6110 | sctp_spin_lock(&head->lock); | 6110 | sctp_spin_lock(&head->lock); |
6111 | pp = sctp_sk(sk)->bind_hash; | 6111 | pp = sctp_sk(sk)->bind_hash; |
6112 | __sk_del_bind_node(sk); | 6112 | __sk_del_bind_node(sk); |
6113 | sctp_sk(sk)->bind_hash = NULL; | 6113 | sctp_sk(sk)->bind_hash = NULL; |
6114 | inet_sk(sk)->num = 0; | 6114 | inet_sk(sk)->inet_num = 0; |
6115 | sctp_bucket_destroy(pp); | 6115 | sctp_bucket_destroy(pp); |
6116 | sctp_spin_unlock(&head->lock); | 6116 | sctp_spin_unlock(&head->lock); |
6117 | } | 6117 | } |
@@ -6138,7 +6138,7 @@ static int sctp_autobind(struct sock *sk) | |||
6138 | /* Initialize a local sockaddr structure to INADDR_ANY. */ | 6138 | /* Initialize a local sockaddr structure to INADDR_ANY. */ |
6139 | af = sctp_sk(sk)->pf->af; | 6139 | af = sctp_sk(sk)->pf->af; |
6140 | 6140 | ||
6141 | port = htons(inet_sk(sk)->num); | 6141 | port = htons(inet_sk(sk)->inet_num); |
6142 | af->inaddr_any(&autoaddr, port); | 6142 | af->inaddr_any(&autoaddr, port); |
6143 | 6143 | ||
6144 | return sctp_do_bind(sk, &autoaddr, af->sockaddr_len); | 6144 | return sctp_do_bind(sk, &autoaddr, af->sockaddr_len); |
@@ -6707,12 +6707,12 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk, | |||
6707 | /* Initialize sk's sport, dport, rcv_saddr and daddr for | 6707 | /* Initialize sk's sport, dport, rcv_saddr and daddr for |
6708 | * getsockname() and getpeername() | 6708 | * getsockname() and getpeername() |
6709 | */ | 6709 | */ |
6710 | newinet->sport = inet->sport; | 6710 | newinet->inet_sport = inet->inet_sport; |
6711 | newinet->saddr = inet->saddr; | 6711 | newinet->inet_saddr = inet->inet_saddr; |
6712 | newinet->rcv_saddr = inet->rcv_saddr; | 6712 | newinet->inet_rcv_saddr = inet->inet_rcv_saddr; |
6713 | newinet->dport = htons(asoc->peer.port); | 6713 | newinet->inet_dport = htons(asoc->peer.port); |
6714 | newinet->pmtudisc = inet->pmtudisc; | 6714 | newinet->pmtudisc = inet->pmtudisc; |
6715 | newinet->id = asoc->next_tsn ^ jiffies; | 6715 | newinet->inet_id = asoc->next_tsn ^ jiffies; |
6716 | 6716 | ||
6717 | newinet->uc_ttl = inet->uc_ttl; | 6717 | newinet->uc_ttl = inet->uc_ttl; |
6718 | newinet->mc_loop = 1; | 6718 | newinet->mc_loop = 1; |
@@ -6751,13 +6751,13 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk, | |||
6751 | newsp->hmac = NULL; | 6751 | newsp->hmac = NULL; |
6752 | 6752 | ||
6753 | /* Hook this new socket in to the bind_hash list. */ | 6753 | /* Hook this new socket in to the bind_hash list. */ |
6754 | head = &sctp_port_hashtable[sctp_phashfn(inet_sk(oldsk)->num)]; | 6754 | head = &sctp_port_hashtable[sctp_phashfn(inet_sk(oldsk)->inet_num)]; |
6755 | sctp_local_bh_disable(); | 6755 | sctp_local_bh_disable(); |
6756 | sctp_spin_lock(&head->lock); | 6756 | sctp_spin_lock(&head->lock); |
6757 | pp = sctp_sk(oldsk)->bind_hash; | 6757 | pp = sctp_sk(oldsk)->bind_hash; |
6758 | sk_add_bind_node(newsk, &pp->owner); | 6758 | sk_add_bind_node(newsk, &pp->owner); |
6759 | sctp_sk(newsk)->bind_hash = pp; | 6759 | sctp_sk(newsk)->bind_hash = pp; |
6760 | inet_sk(newsk)->num = inet_sk(oldsk)->num; | 6760 | inet_sk(newsk)->inet_num = inet_sk(oldsk)->inet_num; |
6761 | sctp_spin_unlock(&head->lock); | 6761 | sctp_spin_unlock(&head->lock); |
6762 | sctp_local_bh_enable(); | 6762 | sctp_local_bh_enable(); |
6763 | 6763 | ||
diff --git a/net/socket.c b/net/socket.c index 75655365b5fd..402abb39cbfe 100644 --- a/net/socket.c +++ b/net/socket.c | |||
@@ -97,6 +97,12 @@ | |||
97 | #include <net/sock.h> | 97 | #include <net/sock.h> |
98 | #include <linux/netfilter.h> | 98 | #include <linux/netfilter.h> |
99 | 99 | ||
100 | #include <linux/if_tun.h> | ||
101 | #include <linux/ipv6_route.h> | ||
102 | #include <linux/route.h> | ||
103 | #include <linux/sockios.h> | ||
104 | #include <linux/atalk.h> | ||
105 | |||
100 | static int sock_no_open(struct inode *irrelevant, struct file *dontcare); | 106 | static int sock_no_open(struct inode *irrelevant, struct file *dontcare); |
101 | static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov, | 107 | static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov, |
102 | unsigned long nr_segs, loff_t pos); | 108 | unsigned long nr_segs, loff_t pos); |
@@ -668,10 +674,24 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk, | |||
668 | 674 | ||
669 | EXPORT_SYMBOL_GPL(__sock_recv_timestamp); | 675 | EXPORT_SYMBOL_GPL(__sock_recv_timestamp); |
670 | 676 | ||
671 | static inline int __sock_recvmsg(struct kiocb *iocb, struct socket *sock, | 677 | inline void sock_recv_drops(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) |
672 | struct msghdr *msg, size_t size, int flags) | 678 | { |
679 | if (sock_flag(sk, SOCK_RXQ_OVFL) && skb && skb->dropcount) | ||
680 | put_cmsg(msg, SOL_SOCKET, SO_RXQ_OVFL, | ||
681 | sizeof(__u32), &skb->dropcount); | ||
682 | } | ||
683 | |||
684 | void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk, | ||
685 | struct sk_buff *skb) | ||
686 | { | ||
687 | sock_recv_timestamp(msg, sk, skb); | ||
688 | sock_recv_drops(msg, sk, skb); | ||
689 | } | ||
690 | EXPORT_SYMBOL_GPL(sock_recv_ts_and_drops); | ||
691 | |||
692 | static inline int __sock_recvmsg_nosec(struct kiocb *iocb, struct socket *sock, | ||
693 | struct msghdr *msg, size_t size, int flags) | ||
673 | { | 694 | { |
674 | int err; | ||
675 | struct sock_iocb *si = kiocb_to_siocb(iocb); | 695 | struct sock_iocb *si = kiocb_to_siocb(iocb); |
676 | 696 | ||
677 | si->sock = sock; | 697 | si->sock = sock; |
@@ -680,13 +700,17 @@ static inline int __sock_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
680 | si->size = size; | 700 | si->size = size; |
681 | si->flags = flags; | 701 | si->flags = flags; |
682 | 702 | ||
683 | err = security_socket_recvmsg(sock, msg, size, flags); | ||
684 | if (err) | ||
685 | return err; | ||
686 | |||
687 | return sock->ops->recvmsg(iocb, sock, msg, size, flags); | 703 | return sock->ops->recvmsg(iocb, sock, msg, size, flags); |
688 | } | 704 | } |
689 | 705 | ||
706 | static inline int __sock_recvmsg(struct kiocb *iocb, struct socket *sock, | ||
707 | struct msghdr *msg, size_t size, int flags) | ||
708 | { | ||
709 | int err = security_socket_recvmsg(sock, msg, size, flags); | ||
710 | |||
711 | return err ?: __sock_recvmsg_nosec(iocb, sock, msg, size, flags); | ||
712 | } | ||
713 | |||
690 | int sock_recvmsg(struct socket *sock, struct msghdr *msg, | 714 | int sock_recvmsg(struct socket *sock, struct msghdr *msg, |
691 | size_t size, int flags) | 715 | size_t size, int flags) |
692 | { | 716 | { |
@@ -702,6 +726,21 @@ int sock_recvmsg(struct socket *sock, struct msghdr *msg, | |||
702 | return ret; | 726 | return ret; |
703 | } | 727 | } |
704 | 728 | ||
729 | static int sock_recvmsg_nosec(struct socket *sock, struct msghdr *msg, | ||
730 | size_t size, int flags) | ||
731 | { | ||
732 | struct kiocb iocb; | ||
733 | struct sock_iocb siocb; | ||
734 | int ret; | ||
735 | |||
736 | init_sync_kiocb(&iocb, NULL); | ||
737 | iocb.private = &siocb; | ||
738 | ret = __sock_recvmsg_nosec(&iocb, sock, msg, size, flags); | ||
739 | if (-EIOCBQUEUED == ret) | ||
740 | ret = wait_on_sync_kiocb(&iocb); | ||
741 | return ret; | ||
742 | } | ||
743 | |||
705 | int kernel_recvmsg(struct socket *sock, struct msghdr *msg, | 744 | int kernel_recvmsg(struct socket *sock, struct msghdr *msg, |
706 | struct kvec *vec, size_t num, size_t size, int flags) | 745 | struct kvec *vec, size_t num, size_t size, int flags) |
707 | { | 746 | { |
@@ -886,6 +925,24 @@ void dlci_ioctl_set(int (*hook) (unsigned int, void __user *)) | |||
886 | 925 | ||
887 | EXPORT_SYMBOL(dlci_ioctl_set); | 926 | EXPORT_SYMBOL(dlci_ioctl_set); |
888 | 927 | ||
928 | static long sock_do_ioctl(struct net *net, struct socket *sock, | ||
929 | unsigned int cmd, unsigned long arg) | ||
930 | { | ||
931 | int err; | ||
932 | void __user *argp = (void __user *)arg; | ||
933 | |||
934 | err = sock->ops->ioctl(sock, cmd, arg); | ||
935 | |||
936 | /* | ||
937 | * If this ioctl is unknown try to hand it down | ||
938 | * to the NIC driver. | ||
939 | */ | ||
940 | if (err == -ENOIOCTLCMD) | ||
941 | err = dev_ioctl(net, cmd, argp); | ||
942 | |||
943 | return err; | ||
944 | } | ||
945 | |||
889 | /* | 946 | /* |
890 | * With an ioctl, arg may well be a user mode pointer, but we don't know | 947 | * With an ioctl, arg may well be a user mode pointer, but we don't know |
891 | * what to do with it - that's up to the protocol still. | 948 | * what to do with it - that's up to the protocol still. |
@@ -905,11 +962,11 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg) | |||
905 | if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15)) { | 962 | if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15)) { |
906 | err = dev_ioctl(net, cmd, argp); | 963 | err = dev_ioctl(net, cmd, argp); |
907 | } else | 964 | } else |
908 | #ifdef CONFIG_WIRELESS_EXT | 965 | #ifdef CONFIG_WEXT_CORE |
909 | if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) { | 966 | if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) { |
910 | err = dev_ioctl(net, cmd, argp); | 967 | err = dev_ioctl(net, cmd, argp); |
911 | } else | 968 | } else |
912 | #endif /* CONFIG_WIRELESS_EXT */ | 969 | #endif |
913 | switch (cmd) { | 970 | switch (cmd) { |
914 | case FIOSETOWN: | 971 | case FIOSETOWN: |
915 | case SIOCSPGRP: | 972 | case SIOCSPGRP: |
@@ -959,14 +1016,7 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg) | |||
959 | mutex_unlock(&dlci_ioctl_mutex); | 1016 | mutex_unlock(&dlci_ioctl_mutex); |
960 | break; | 1017 | break; |
961 | default: | 1018 | default: |
962 | err = sock->ops->ioctl(sock, cmd, arg); | 1019 | err = sock_do_ioctl(net, sock, cmd, arg); |
963 | |||
964 | /* | ||
965 | * If this ioctl is unknown try to hand it down | ||
966 | * to the NIC driver. | ||
967 | */ | ||
968 | if (err == -ENOIOCTLCMD) | ||
969 | err = dev_ioctl(net, cmd, argp); | ||
970 | break; | 1020 | break; |
971 | } | 1021 | } |
972 | return err; | 1022 | return err; |
@@ -1100,11 +1150,14 @@ static int sock_fasync(int fd, struct file *filp, int on) | |||
1100 | fna->fa_next = sock->fasync_list; | 1150 | fna->fa_next = sock->fasync_list; |
1101 | write_lock_bh(&sk->sk_callback_lock); | 1151 | write_lock_bh(&sk->sk_callback_lock); |
1102 | sock->fasync_list = fna; | 1152 | sock->fasync_list = fna; |
1153 | sock_set_flag(sk, SOCK_FASYNC); | ||
1103 | write_unlock_bh(&sk->sk_callback_lock); | 1154 | write_unlock_bh(&sk->sk_callback_lock); |
1104 | } else { | 1155 | } else { |
1105 | if (fa != NULL) { | 1156 | if (fa != NULL) { |
1106 | write_lock_bh(&sk->sk_callback_lock); | 1157 | write_lock_bh(&sk->sk_callback_lock); |
1107 | *prev = fa->fa_next; | 1158 | *prev = fa->fa_next; |
1159 | if (!sock->fasync_list) | ||
1160 | sock_reset_flag(sk, SOCK_FASYNC); | ||
1108 | write_unlock_bh(&sk->sk_callback_lock); | 1161 | write_unlock_bh(&sk->sk_callback_lock); |
1109 | kfree(fa); | 1162 | kfree(fa); |
1110 | } | 1163 | } |
@@ -1216,7 +1269,7 @@ static int __sock_create(struct net *net, int family, int type, int protocol, | |||
1216 | /* Now protected by module ref count */ | 1269 | /* Now protected by module ref count */ |
1217 | rcu_read_unlock(); | 1270 | rcu_read_unlock(); |
1218 | 1271 | ||
1219 | err = pf->create(net, sock, protocol); | 1272 | err = pf->create(net, sock, protocol, kern); |
1220 | if (err < 0) | 1273 | if (err < 0) |
1221 | goto out_module_put; | 1274 | goto out_module_put; |
1222 | 1275 | ||
@@ -1965,22 +2018,15 @@ out: | |||
1965 | return err; | 2018 | return err; |
1966 | } | 2019 | } |
1967 | 2020 | ||
1968 | /* | 2021 | static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg, |
1969 | * BSD recvmsg interface | 2022 | struct msghdr *msg_sys, unsigned flags, int nosec) |
1970 | */ | ||
1971 | |||
1972 | SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg, | ||
1973 | unsigned int, flags) | ||
1974 | { | 2023 | { |
1975 | struct compat_msghdr __user *msg_compat = | 2024 | struct compat_msghdr __user *msg_compat = |
1976 | (struct compat_msghdr __user *)msg; | 2025 | (struct compat_msghdr __user *)msg; |
1977 | struct socket *sock; | ||
1978 | struct iovec iovstack[UIO_FASTIOV]; | 2026 | struct iovec iovstack[UIO_FASTIOV]; |
1979 | struct iovec *iov = iovstack; | 2027 | struct iovec *iov = iovstack; |
1980 | struct msghdr msg_sys; | ||
1981 | unsigned long cmsg_ptr; | 2028 | unsigned long cmsg_ptr; |
1982 | int err, iov_size, total_len, len; | 2029 | int err, iov_size, total_len, len; |
1983 | int fput_needed; | ||
1984 | 2030 | ||
1985 | /* kernel mode address */ | 2031 | /* kernel mode address */ |
1986 | struct sockaddr_storage addr; | 2032 | struct sockaddr_storage addr; |
@@ -1990,27 +2036,23 @@ SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg, | |||
1990 | int __user *uaddr_len; | 2036 | int __user *uaddr_len; |
1991 | 2037 | ||
1992 | if (MSG_CMSG_COMPAT & flags) { | 2038 | if (MSG_CMSG_COMPAT & flags) { |
1993 | if (get_compat_msghdr(&msg_sys, msg_compat)) | 2039 | if (get_compat_msghdr(msg_sys, msg_compat)) |
1994 | return -EFAULT; | 2040 | return -EFAULT; |
1995 | } | 2041 | } |
1996 | else if (copy_from_user(&msg_sys, msg, sizeof(struct msghdr))) | 2042 | else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr))) |
1997 | return -EFAULT; | 2043 | return -EFAULT; |
1998 | 2044 | ||
1999 | sock = sockfd_lookup_light(fd, &err, &fput_needed); | ||
2000 | if (!sock) | ||
2001 | goto out; | ||
2002 | |||
2003 | err = -EMSGSIZE; | 2045 | err = -EMSGSIZE; |
2004 | if (msg_sys.msg_iovlen > UIO_MAXIOV) | 2046 | if (msg_sys->msg_iovlen > UIO_MAXIOV) |
2005 | goto out_put; | 2047 | goto out; |
2006 | 2048 | ||
2007 | /* Check whether to allocate the iovec area */ | 2049 | /* Check whether to allocate the iovec area */ |
2008 | err = -ENOMEM; | 2050 | err = -ENOMEM; |
2009 | iov_size = msg_sys.msg_iovlen * sizeof(struct iovec); | 2051 | iov_size = msg_sys->msg_iovlen * sizeof(struct iovec); |
2010 | if (msg_sys.msg_iovlen > UIO_FASTIOV) { | 2052 | if (msg_sys->msg_iovlen > UIO_FASTIOV) { |
2011 | iov = sock_kmalloc(sock->sk, iov_size, GFP_KERNEL); | 2053 | iov = sock_kmalloc(sock->sk, iov_size, GFP_KERNEL); |
2012 | if (!iov) | 2054 | if (!iov) |
2013 | goto out_put; | 2055 | goto out; |
2014 | } | 2056 | } |
2015 | 2057 | ||
2016 | /* | 2058 | /* |
@@ -2018,46 +2060,47 @@ SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg, | |||
2018 | * kernel msghdr to use the kernel address space) | 2060 | * kernel msghdr to use the kernel address space) |
2019 | */ | 2061 | */ |
2020 | 2062 | ||
2021 | uaddr = (__force void __user *)msg_sys.msg_name; | 2063 | uaddr = (__force void __user *)msg_sys->msg_name; |
2022 | uaddr_len = COMPAT_NAMELEN(msg); | 2064 | uaddr_len = COMPAT_NAMELEN(msg); |
2023 | if (MSG_CMSG_COMPAT & flags) { | 2065 | if (MSG_CMSG_COMPAT & flags) { |
2024 | err = verify_compat_iovec(&msg_sys, iov, | 2066 | err = verify_compat_iovec(msg_sys, iov, |
2025 | (struct sockaddr *)&addr, | 2067 | (struct sockaddr *)&addr, |
2026 | VERIFY_WRITE); | 2068 | VERIFY_WRITE); |
2027 | } else | 2069 | } else |
2028 | err = verify_iovec(&msg_sys, iov, | 2070 | err = verify_iovec(msg_sys, iov, |
2029 | (struct sockaddr *)&addr, | 2071 | (struct sockaddr *)&addr, |
2030 | VERIFY_WRITE); | 2072 | VERIFY_WRITE); |
2031 | if (err < 0) | 2073 | if (err < 0) |
2032 | goto out_freeiov; | 2074 | goto out_freeiov; |
2033 | total_len = err; | 2075 | total_len = err; |
2034 | 2076 | ||
2035 | cmsg_ptr = (unsigned long)msg_sys.msg_control; | 2077 | cmsg_ptr = (unsigned long)msg_sys->msg_control; |
2036 | msg_sys.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT); | 2078 | msg_sys->msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT); |
2037 | 2079 | ||
2038 | if (sock->file->f_flags & O_NONBLOCK) | 2080 | if (sock->file->f_flags & O_NONBLOCK) |
2039 | flags |= MSG_DONTWAIT; | 2081 | flags |= MSG_DONTWAIT; |
2040 | err = sock_recvmsg(sock, &msg_sys, total_len, flags); | 2082 | err = (nosec ? sock_recvmsg_nosec : sock_recvmsg)(sock, msg_sys, |
2083 | total_len, flags); | ||
2041 | if (err < 0) | 2084 | if (err < 0) |
2042 | goto out_freeiov; | 2085 | goto out_freeiov; |
2043 | len = err; | 2086 | len = err; |
2044 | 2087 | ||
2045 | if (uaddr != NULL) { | 2088 | if (uaddr != NULL) { |
2046 | err = move_addr_to_user((struct sockaddr *)&addr, | 2089 | err = move_addr_to_user((struct sockaddr *)&addr, |
2047 | msg_sys.msg_namelen, uaddr, | 2090 | msg_sys->msg_namelen, uaddr, |
2048 | uaddr_len); | 2091 | uaddr_len); |
2049 | if (err < 0) | 2092 | if (err < 0) |
2050 | goto out_freeiov; | 2093 | goto out_freeiov; |
2051 | } | 2094 | } |
2052 | err = __put_user((msg_sys.msg_flags & ~MSG_CMSG_COMPAT), | 2095 | err = __put_user((msg_sys->msg_flags & ~MSG_CMSG_COMPAT), |
2053 | COMPAT_FLAGS(msg)); | 2096 | COMPAT_FLAGS(msg)); |
2054 | if (err) | 2097 | if (err) |
2055 | goto out_freeiov; | 2098 | goto out_freeiov; |
2056 | if (MSG_CMSG_COMPAT & flags) | 2099 | if (MSG_CMSG_COMPAT & flags) |
2057 | err = __put_user((unsigned long)msg_sys.msg_control - cmsg_ptr, | 2100 | err = __put_user((unsigned long)msg_sys->msg_control - cmsg_ptr, |
2058 | &msg_compat->msg_controllen); | 2101 | &msg_compat->msg_controllen); |
2059 | else | 2102 | else |
2060 | err = __put_user((unsigned long)msg_sys.msg_control - cmsg_ptr, | 2103 | err = __put_user((unsigned long)msg_sys->msg_control - cmsg_ptr, |
2061 | &msg->msg_controllen); | 2104 | &msg->msg_controllen); |
2062 | if (err) | 2105 | if (err) |
2063 | goto out_freeiov; | 2106 | goto out_freeiov; |
@@ -2066,21 +2109,150 @@ SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg, | |||
2066 | out_freeiov: | 2109 | out_freeiov: |
2067 | if (iov != iovstack) | 2110 | if (iov != iovstack) |
2068 | sock_kfree_s(sock->sk, iov, iov_size); | 2111 | sock_kfree_s(sock->sk, iov, iov_size); |
2069 | out_put: | 2112 | out: |
2113 | return err; | ||
2114 | } | ||
2115 | |||
2116 | /* | ||
2117 | * BSD recvmsg interface | ||
2118 | */ | ||
2119 | |||
2120 | SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg, | ||
2121 | unsigned int, flags) | ||
2122 | { | ||
2123 | int fput_needed, err; | ||
2124 | struct msghdr msg_sys; | ||
2125 | struct socket *sock = sockfd_lookup_light(fd, &err, &fput_needed); | ||
2126 | |||
2127 | if (!sock) | ||
2128 | goto out; | ||
2129 | |||
2130 | err = __sys_recvmsg(sock, msg, &msg_sys, flags, 0); | ||
2131 | |||
2070 | fput_light(sock->file, fput_needed); | 2132 | fput_light(sock->file, fput_needed); |
2071 | out: | 2133 | out: |
2072 | return err; | 2134 | return err; |
2073 | } | 2135 | } |
2074 | 2136 | ||
2075 | #ifdef __ARCH_WANT_SYS_SOCKETCALL | 2137 | /* |
2138 | * Linux recvmmsg interface | ||
2139 | */ | ||
2140 | |||
2141 | int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, | ||
2142 | unsigned int flags, struct timespec *timeout) | ||
2143 | { | ||
2144 | int fput_needed, err, datagrams; | ||
2145 | struct socket *sock; | ||
2146 | struct mmsghdr __user *entry; | ||
2147 | struct msghdr msg_sys; | ||
2148 | struct timespec end_time; | ||
2149 | |||
2150 | if (timeout && | ||
2151 | poll_select_set_timeout(&end_time, timeout->tv_sec, | ||
2152 | timeout->tv_nsec)) | ||
2153 | return -EINVAL; | ||
2154 | |||
2155 | datagrams = 0; | ||
2156 | |||
2157 | sock = sockfd_lookup_light(fd, &err, &fput_needed); | ||
2158 | if (!sock) | ||
2159 | return err; | ||
2160 | |||
2161 | err = sock_error(sock->sk); | ||
2162 | if (err) | ||
2163 | goto out_put; | ||
2164 | |||
2165 | entry = mmsg; | ||
2166 | |||
2167 | while (datagrams < vlen) { | ||
2168 | /* | ||
2169 | * No need to ask LSM for more than the first datagram. | ||
2170 | */ | ||
2171 | err = __sys_recvmsg(sock, (struct msghdr __user *)entry, | ||
2172 | &msg_sys, flags, datagrams); | ||
2173 | if (err < 0) | ||
2174 | break; | ||
2175 | err = put_user(err, &entry->msg_len); | ||
2176 | if (err) | ||
2177 | break; | ||
2178 | ++entry; | ||
2179 | ++datagrams; | ||
2180 | |||
2181 | if (timeout) { | ||
2182 | ktime_get_ts(timeout); | ||
2183 | *timeout = timespec_sub(end_time, *timeout); | ||
2184 | if (timeout->tv_sec < 0) { | ||
2185 | timeout->tv_sec = timeout->tv_nsec = 0; | ||
2186 | break; | ||
2187 | } | ||
2188 | |||
2189 | /* Timeout, return less than vlen datagrams */ | ||
2190 | if (timeout->tv_nsec == 0 && timeout->tv_sec == 0) | ||
2191 | break; | ||
2192 | } | ||
2076 | 2193 | ||
2194 | /* Out of band data, return right away */ | ||
2195 | if (msg_sys.msg_flags & MSG_OOB) | ||
2196 | break; | ||
2197 | } | ||
2198 | |||
2199 | out_put: | ||
2200 | fput_light(sock->file, fput_needed); | ||
2201 | |||
2202 | if (err == 0) | ||
2203 | return datagrams; | ||
2204 | |||
2205 | if (datagrams != 0) { | ||
2206 | /* | ||
2207 | * We may return less entries than requested (vlen) if the | ||
2208 | * sock is non block and there aren't enough datagrams... | ||
2209 | */ | ||
2210 | if (err != -EAGAIN) { | ||
2211 | /* | ||
2212 | * ... or if recvmsg returns an error after we | ||
2213 | * received some datagrams, where we record the | ||
2214 | * error to return on the next call or if the | ||
2215 | * app asks about it using getsockopt(SO_ERROR). | ||
2216 | */ | ||
2217 | sock->sk->sk_err = -err; | ||
2218 | } | ||
2219 | |||
2220 | return datagrams; | ||
2221 | } | ||
2222 | |||
2223 | return err; | ||
2224 | } | ||
2225 | |||
2226 | SYSCALL_DEFINE5(recvmmsg, int, fd, struct mmsghdr __user *, mmsg, | ||
2227 | unsigned int, vlen, unsigned int, flags, | ||
2228 | struct timespec __user *, timeout) | ||
2229 | { | ||
2230 | int datagrams; | ||
2231 | struct timespec timeout_sys; | ||
2232 | |||
2233 | if (!timeout) | ||
2234 | return __sys_recvmmsg(fd, mmsg, vlen, flags, NULL); | ||
2235 | |||
2236 | if (copy_from_user(&timeout_sys, timeout, sizeof(timeout_sys))) | ||
2237 | return -EFAULT; | ||
2238 | |||
2239 | datagrams = __sys_recvmmsg(fd, mmsg, vlen, flags, &timeout_sys); | ||
2240 | |||
2241 | if (datagrams > 0 && | ||
2242 | copy_to_user(timeout, &timeout_sys, sizeof(timeout_sys))) | ||
2243 | datagrams = -EFAULT; | ||
2244 | |||
2245 | return datagrams; | ||
2246 | } | ||
2247 | |||
2248 | #ifdef __ARCH_WANT_SYS_SOCKETCALL | ||
2077 | /* Argument list sizes for sys_socketcall */ | 2249 | /* Argument list sizes for sys_socketcall */ |
2078 | #define AL(x) ((x) * sizeof(unsigned long)) | 2250 | #define AL(x) ((x) * sizeof(unsigned long)) |
2079 | static const unsigned char nargs[19]={ | 2251 | static const unsigned char nargs[20] = { |
2080 | AL(0),AL(3),AL(3),AL(3),AL(2),AL(3), | 2252 | AL(0),AL(3),AL(3),AL(3),AL(2),AL(3), |
2081 | AL(3),AL(3),AL(4),AL(4),AL(4),AL(6), | 2253 | AL(3),AL(3),AL(4),AL(4),AL(4),AL(6), |
2082 | AL(6),AL(2),AL(5),AL(5),AL(3),AL(3), | 2254 | AL(6),AL(2),AL(5),AL(5),AL(3),AL(3), |
2083 | AL(4) | 2255 | AL(4),AL(5) |
2084 | }; | 2256 | }; |
2085 | 2257 | ||
2086 | #undef AL | 2258 | #undef AL |
@@ -2100,7 +2272,7 @@ SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args) | |||
2100 | int err; | 2272 | int err; |
2101 | unsigned int len; | 2273 | unsigned int len; |
2102 | 2274 | ||
2103 | if (call < 1 || call > SYS_ACCEPT4) | 2275 | if (call < 1 || call > SYS_RECVMMSG) |
2104 | return -EINVAL; | 2276 | return -EINVAL; |
2105 | 2277 | ||
2106 | len = nargs[call]; | 2278 | len = nargs[call]; |
@@ -2178,6 +2350,10 @@ SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args) | |||
2178 | case SYS_RECVMSG: | 2350 | case SYS_RECVMSG: |
2179 | err = sys_recvmsg(a0, (struct msghdr __user *)a1, a[2]); | 2351 | err = sys_recvmsg(a0, (struct msghdr __user *)a1, a[2]); |
2180 | break; | 2352 | break; |
2353 | case SYS_RECVMMSG: | ||
2354 | err = sys_recvmmsg(a0, (struct mmsghdr __user *)a1, a[2], a[3], | ||
2355 | (struct timespec __user *)a[4]); | ||
2356 | break; | ||
2181 | case SYS_ACCEPT4: | 2357 | case SYS_ACCEPT4: |
2182 | err = sys_accept4(a0, (struct sockaddr __user *)a1, | 2358 | err = sys_accept4(a0, (struct sockaddr __user *)a1, |
2183 | (int __user *)a[2], a[3]); | 2359 | (int __user *)a[2], a[3]); |
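Note: the socketcall table grows a 20th entry (AL(5)) and dispatches SYS_RECVMMSG to the new batched receive path added above. A hedged user-space usage sketch; glibc only gained a recvmmsg() wrapper later, so older libcs may need syscall(__NR_recvmmsg, ...) instead, and the port number is arbitrary:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <arpa/inet.h>

#define VLEN 8

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct sockaddr_in sa = { 0 };
	char bufs[VLEN][1500];
	struct iovec iov[VLEN];
	struct mmsghdr msgs[VLEN];
	struct timespec timeout = { .tv_sec = 1, .tv_nsec = 0 };
	int i, n;

	sa.sin_family = AF_INET;
	sa.sin_port = htons(9000);		/* arbitrary test port */
	sa.sin_addr.s_addr = htonl(INADDR_ANY);
	bind(fd, (struct sockaddr *)&sa, sizeof(sa));

	memset(msgs, 0, sizeof(msgs));
	for (i = 0; i < VLEN; i++) {
		iov[i].iov_base = bufs[i];
		iov[i].iov_len  = sizeof(bufs[i]);
		msgs[i].msg_hdr.msg_iov    = &iov[i];
		msgs[i].msg_hdr.msg_iovlen = 1;
	}

	/* Up to VLEN datagrams in one system call; each entry's msg_len is
	 * filled in by the kernel and the timeout is updated in place. */
	n = recvmmsg(fd, msgs, VLEN, 0, &timeout);
	if (n < 0) {
		perror("recvmmsg");
		return 1;
	}
	for (i = 0; i < n; i++)
		printf("datagram %d: %u bytes\n", i, msgs[i].msg_len);
	return 0;
}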
@@ -2300,6 +2476,552 @@ void socket_seq_show(struct seq_file *seq) | |||
2300 | #endif /* CONFIG_PROC_FS */ | 2476 | #endif /* CONFIG_PROC_FS */ |
2301 | 2477 | ||
2302 | #ifdef CONFIG_COMPAT | 2478 | #ifdef CONFIG_COMPAT |
2479 | static int do_siocgstamp(struct net *net, struct socket *sock, | ||
2480 | unsigned int cmd, struct compat_timeval __user *up) | ||
2481 | { | ||
2482 | mm_segment_t old_fs = get_fs(); | ||
2483 | struct timeval ktv; | ||
2484 | int err; | ||
2485 | |||
2486 | set_fs(KERNEL_DS); | ||
2487 | err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv); | ||
2488 | set_fs(old_fs); | ||
2489 | if (!err) { | ||
2490 | err = put_user(ktv.tv_sec, &up->tv_sec); | ||
2491 | err |= __put_user(ktv.tv_usec, &up->tv_usec); | ||
2492 | } | ||
2493 | return err; | ||
2494 | } | ||
2495 | |||
2496 | static int do_siocgstampns(struct net *net, struct socket *sock, | ||
2497 | unsigned int cmd, struct compat_timespec __user *up) | ||
2498 | { | ||
2499 | mm_segment_t old_fs = get_fs(); | ||
2500 | struct timespec kts; | ||
2501 | int err; | ||
2502 | |||
2503 | set_fs(KERNEL_DS); | ||
2504 | err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts); | ||
2505 | set_fs(old_fs); | ||
2506 | if (!err) { | ||
2507 | err = put_user(kts.tv_sec, &up->tv_sec); | ||
2508 | err |= __put_user(kts.tv_nsec, &up->tv_nsec); | ||
2509 | } | ||
2510 | return err; | ||
2511 | } | ||
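
Both helpers run the native ioctl on a kernel-stack buffer under set_fs(KERNEL_DS) and then copy the result field by field into the 32-bit layout. The mismatch they bridge looks roughly like this; the type names below are hypothetical and serve only as an illustration, not as code from the patch.

#include <stdint.h>

/* What SIOCGSTAMP fills natively on a 64-bit kernel: two longs. */
struct native_timeval_example {
	long    tv_sec;
	long    tv_usec;		/* 16 bytes total */
};

/* What a 32-bit task expects back: two 32-bit fields. */
struct compat_timeval_example {
	int32_t tv_sec;
	int32_t tv_usec;		/* 8 bytes total */
};
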
2512 | |||
2513 | static int dev_ifname32(struct net *net, struct compat_ifreq __user *uifr32) | ||
2514 | { | ||
2515 | struct ifreq __user *uifr; | ||
2516 | int err; | ||
2517 | |||
2518 | uifr = compat_alloc_user_space(sizeof(struct ifreq)); | ||
2519 | if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq))) | ||
2520 | return -EFAULT; | ||
2521 | |||
2522 | err = dev_ioctl(net, SIOCGIFNAME, uifr); | ||
2523 | if (err) | ||
2524 | return err; | ||
2525 | |||
2526 | if (copy_in_user(uifr32, uifr, sizeof(struct compat_ifreq))) | ||
2527 | return -EFAULT; | ||
2528 | |||
2529 | return 0; | ||
2530 | } | ||
2531 | |||
2532 | static int dev_ifconf(struct net *net, struct compat_ifconf __user *uifc32) | ||
2533 | { | ||
2534 | struct compat_ifconf ifc32; | ||
2535 | struct ifconf ifc; | ||
2536 | struct ifconf __user *uifc; | ||
2537 | struct compat_ifreq __user *ifr32; | ||
2538 | struct ifreq __user *ifr; | ||
2539 | unsigned int i, j; | ||
2540 | int err; | ||
2541 | |||
2542 | if (copy_from_user(&ifc32, uifc32, sizeof(struct compat_ifconf))) | ||
2543 | return -EFAULT; | ||
2544 | |||
2545 | if (ifc32.ifcbuf == 0) { | ||
2546 | ifc32.ifc_len = 0; | ||
2547 | ifc.ifc_len = 0; | ||
2548 | ifc.ifc_req = NULL; | ||
2549 | uifc = compat_alloc_user_space(sizeof(struct ifconf)); | ||
2550 | } else { | ||
2551 | size_t len =((ifc32.ifc_len / sizeof (struct compat_ifreq)) + 1) * | ||
2552 | sizeof (struct ifreq); | ||
2553 | uifc = compat_alloc_user_space(sizeof(struct ifconf) + len); | ||
2554 | ifc.ifc_len = len; | ||
2555 | ifr = ifc.ifc_req = (void __user *)(uifc + 1); | ||
2556 | ifr32 = compat_ptr(ifc32.ifcbuf); | ||
2557 | for (i = 0; i < ifc32.ifc_len; i += sizeof (struct compat_ifreq)) { | ||
2558 | if (copy_in_user(ifr, ifr32, sizeof(struct compat_ifreq))) | ||
2559 | return -EFAULT; | ||
2560 | ifr++; | ||
2561 | ifr32++; | ||
2562 | } | ||
2563 | } | ||
2564 | if (copy_to_user(uifc, &ifc, sizeof(struct ifconf))) | ||
2565 | return -EFAULT; | ||
2566 | |||
2567 | err = dev_ioctl(net, SIOCGIFCONF, uifc); | ||
2568 | if (err) | ||
2569 | return err; | ||
2570 | |||
2571 | if (copy_from_user(&ifc, uifc, sizeof(struct ifconf))) | ||
2572 | return -EFAULT; | ||
2573 | |||
2574 | ifr = ifc.ifc_req; | ||
2575 | ifr32 = compat_ptr(ifc32.ifcbuf); | ||
2576 | for (i = 0, j = 0; | ||
2577 | i + sizeof (struct compat_ifreq) <= ifc32.ifc_len && j < ifc.ifc_len; | ||
2578 | i += sizeof (struct compat_ifreq), j += sizeof (struct ifreq)) { | ||
2579 | if (copy_in_user(ifr32, ifr, sizeof (struct compat_ifreq))) | ||
2580 | return -EFAULT; | ||
2581 | ifr32++; | ||
2582 | ifr++; | ||
2583 | } | ||
2584 | |||
2585 | if (ifc32.ifcbuf == 0) { | ||
2586 | /* Translate the length from a multiple of the | ||
2587 | * 64-bit structure size to the 32-bit one. | ||
2588 | */ | ||
2589 | i = ifc.ifc_len; | ||
2590 | i = ((i / sizeof(struct ifreq)) * sizeof(struct compat_ifreq)); | ||
2591 | ifc32.ifc_len = i; | ||
2592 | } else { | ||
2593 | ifc32.ifc_len = i; | ||
2594 | } | ||
2595 | if (copy_to_user(uifc32, &ifc32, sizeof(struct compat_ifconf))) | ||
2596 | return -EFAULT; | ||
2597 | |||
2598 | return 0; | ||
2599 | } | ||
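
For reference, the caller this translation serves is the classic SIOCGIFCONF enumeration loop. A hedged userspace sketch (standard usage, not code from the patch):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <netinet/in.h>
#include <arpa/inet.h>

int main(void)
{
	static char buf[4096];
	struct ifconf ifc;
	struct ifreq *ifr;
	int fd, i, n;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return 1;

	ifc.ifc_len = sizeof(buf);
	ifc.ifc_buf = buf;
	if (ioctl(fd, SIOCGIFCONF, &ifc) < 0)
		return 1;

	/* The kernel returns an array of struct ifreq; when the caller is a
	 * 32-bit task on a 64-bit kernel, dev_ifconf() above converts each
	 * entry to and from struct compat_ifreq. */
	ifr = ifc.ifc_req;
	n = ifc.ifc_len / sizeof(struct ifreq);
	for (i = 0; i < n; i++) {
		struct sockaddr_in *sin = (struct sockaddr_in *)&ifr[i].ifr_addr;
		printf("%-8s %s\n", ifr[i].ifr_name, inet_ntoa(sin->sin_addr));
	}
	close(fd);
	return 0;
}
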
2600 | |||
2601 | static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32) | ||
2602 | { | ||
2603 | struct ifreq __user *ifr; | ||
2604 | u32 data; | ||
2605 | void __user *datap; | ||
2606 | |||
2607 | ifr = compat_alloc_user_space(sizeof(*ifr)); | ||
2608 | |||
2609 | if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ)) | ||
2610 | return -EFAULT; | ||
2611 | |||
2612 | if (get_user(data, &ifr32->ifr_ifru.ifru_data)) | ||
2613 | return -EFAULT; | ||
2614 | |||
2615 | datap = compat_ptr(data); | ||
2616 | if (put_user(datap, &ifr->ifr_ifru.ifru_data)) | ||
2617 | return -EFAULT; | ||
2618 | |||
2619 | return dev_ioctl(net, SIOCETHTOOL, ifr); | ||
2620 | } | ||
2621 | |||
2622 | static int compat_siocwandev(struct net *net, struct compat_ifreq __user *uifr32) | ||
2623 | { | ||
2624 | void __user *uptr; | ||
2625 | compat_uptr_t uptr32; | ||
2626 | struct ifreq __user *uifr; | ||
2627 | |||
2628 | uifr = compat_alloc_user_space(sizeof (*uifr)); | ||
2629 | if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq))) | ||
2630 | return -EFAULT; | ||
2631 | |||
2632 | if (get_user(uptr32, &uifr32->ifr_settings.ifs_ifsu)) | ||
2633 | return -EFAULT; | ||
2634 | |||
2635 | uptr = compat_ptr(uptr32); | ||
2636 | |||
2637 | if (put_user(uptr, &uifr->ifr_settings.ifs_ifsu.raw_hdlc)) | ||
2638 | return -EFAULT; | ||
2639 | |||
2640 | return dev_ioctl(net, SIOCWANDEV, uifr); | ||
2641 | } | ||
2642 | |||
2643 | static int bond_ioctl(struct net *net, unsigned int cmd, | ||
2644 | struct compat_ifreq __user *ifr32) | ||
2645 | { | ||
2646 | struct ifreq kifr; | ||
2647 | struct ifreq __user *uifr; | ||
2648 | mm_segment_t old_fs; | ||
2649 | int err; | ||
2650 | u32 data; | ||
2651 | void __user *datap; | ||
2652 | |||
2653 | switch (cmd) { | ||
2654 | case SIOCBONDENSLAVE: | ||
2655 | case SIOCBONDRELEASE: | ||
2656 | case SIOCBONDSETHWADDR: | ||
2657 | case SIOCBONDCHANGEACTIVE: | ||
2658 | if (copy_from_user(&kifr, ifr32, sizeof(struct compat_ifreq))) | ||
2659 | return -EFAULT; | ||
2660 | |||
2661 | old_fs = get_fs(); | ||
2662 | set_fs (KERNEL_DS); | ||
2663 | err = dev_ioctl(net, cmd, &kifr); | ||
2664 | set_fs (old_fs); | ||
2665 | |||
2666 | return err; | ||
2667 | case SIOCBONDSLAVEINFOQUERY: | ||
2668 | case SIOCBONDINFOQUERY: | ||
2669 | uifr = compat_alloc_user_space(sizeof(*uifr)); | ||
2670 | if (copy_in_user(&uifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ)) | ||
2671 | return -EFAULT; | ||
2672 | |||
2673 | if (get_user(data, &ifr32->ifr_ifru.ifru_data)) | ||
2674 | return -EFAULT; | ||
2675 | |||
2676 | datap = compat_ptr(data); | ||
2677 | if (put_user(datap, &uifr->ifr_ifru.ifru_data)) | ||
2678 | return -EFAULT; | ||
2679 | |||
2680 | return dev_ioctl(net, cmd, uifr); | ||
2681 | default: | ||
2682 | return -EINVAL; | ||
2683 | }; | ||
2684 | } | ||
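
The first group of bonding commands passes the whole struct ifreq by value (an interface name plus a slave name), so a straight copy executed under KERNEL_DS is enough; the query commands carry a user pointer in ifru_data and therefore need the compat_ptr() widening. A hypothetical userspace sketch of the simple case, roughly what ifenslave(8) issues and what this handler translates for 32-bit tasks:

#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>

/* Enslave "slave" under bonding master "master" (assumes fd is an
 * AF_INET control socket); illustration only, not part of the patch. */
static int bond_enslave(int fd, const char *master, const char *slave)
{
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, master, IFNAMSIZ - 1);
	strncpy(ifr.ifr_slave, slave, IFNAMSIZ - 1);
	return ioctl(fd, SIOCBONDENSLAVE, &ifr);
}
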
2685 | |||
2686 | static int siocdevprivate_ioctl(struct net *net, unsigned int cmd, | ||
2687 | struct compat_ifreq __user *u_ifreq32) | ||
2688 | { | ||
2689 | struct ifreq __user *u_ifreq64; | ||
2690 | char tmp_buf[IFNAMSIZ]; | ||
2691 | void __user *data64; | ||
2692 | u32 data32; | ||
2693 | |||
2694 | if (copy_from_user(&tmp_buf[0], &(u_ifreq32->ifr_ifrn.ifrn_name[0]), | ||
2695 | IFNAMSIZ)) | ||
2696 | return -EFAULT; | ||
2697 | if (__get_user(data32, &u_ifreq32->ifr_ifru.ifru_data)) | ||
2698 | return -EFAULT; | ||
2699 | data64 = compat_ptr(data32); | ||
2700 | |||
2701 | u_ifreq64 = compat_alloc_user_space(sizeof(*u_ifreq64)); | ||
2702 | |||
2703 | /* Don't check these user accesses, just let that get trapped | ||
2704 | * in the ioctl handler instead. | ||
2705 | */ | ||
2706 | if (copy_to_user(&u_ifreq64->ifr_ifrn.ifrn_name[0], &tmp_buf[0], | ||
2707 | IFNAMSIZ)) | ||
2708 | return -EFAULT; | ||
2709 | if (__put_user(data64, &u_ifreq64->ifr_ifru.ifru_data)) | ||
2710 | return -EFAULT; | ||
2711 | |||
2712 | return dev_ioctl(net, cmd, u_ifreq64); | ||
2713 | } | ||
2714 | |||
2715 | static int dev_ifsioc(struct net *net, struct socket *sock, | ||
2716 | unsigned int cmd, struct compat_ifreq __user *uifr32) | ||
2717 | { | ||
2718 | struct ifreq __user *uifr; | ||
2719 | int err; | ||
2720 | |||
2721 | uifr = compat_alloc_user_space(sizeof(*uifr)); | ||
2722 | if (copy_in_user(uifr, uifr32, sizeof(*uifr32))) | ||
2723 | return -EFAULT; | ||
2724 | |||
2725 | err = sock_do_ioctl(net, sock, cmd, (unsigned long)uifr); | ||
2726 | |||
2727 | if (!err) { | ||
2728 | switch (cmd) { | ||
2729 | case SIOCGIFFLAGS: | ||
2730 | case SIOCGIFMETRIC: | ||
2731 | case SIOCGIFMTU: | ||
2732 | case SIOCGIFMEM: | ||
2733 | case SIOCGIFHWADDR: | ||
2734 | case SIOCGIFINDEX: | ||
2735 | case SIOCGIFADDR: | ||
2736 | case SIOCGIFBRDADDR: | ||
2737 | case SIOCGIFDSTADDR: | ||
2738 | case SIOCGIFNETMASK: | ||
2739 | case SIOCGIFPFLAGS: | ||
2740 | case SIOCGIFTXQLEN: | ||
2741 | case SIOCGMIIPHY: | ||
2742 | case SIOCGMIIREG: | ||
2743 | if (copy_in_user(uifr32, uifr, sizeof(*uifr32))) | ||
2744 | err = -EFAULT; | ||
2745 | break; | ||
2746 | } | ||
2747 | } | ||
2748 | return err; | ||
2749 | } | ||
2750 | |||
2751 | static int compat_sioc_ifmap(struct net *net, unsigned int cmd, | ||
2752 | struct compat_ifreq __user *uifr32) | ||
2753 | { | ||
2754 | struct ifreq ifr; | ||
2755 | struct compat_ifmap __user *uifmap32; | ||
2756 | mm_segment_t old_fs; | ||
2757 | int err; | ||
2758 | |||
2759 | uifmap32 = &uifr32->ifr_ifru.ifru_map; | ||
2760 | err = copy_from_user(&ifr, uifr32, sizeof(ifr.ifr_name)); | ||
2761 | err |= __get_user(ifr.ifr_map.mem_start, &uifmap32->mem_start); | ||
2762 | err |= __get_user(ifr.ifr_map.mem_end, &uifmap32->mem_end); | ||
2763 | err |= __get_user(ifr.ifr_map.base_addr, &uifmap32->base_addr); | ||
2764 | err |= __get_user(ifr.ifr_map.irq, &uifmap32->irq); | ||
2765 | err |= __get_user(ifr.ifr_map.dma, &uifmap32->dma); | ||
2766 | err |= __get_user(ifr.ifr_map.port, &uifmap32->port); | ||
2767 | if (err) | ||
2768 | return -EFAULT; | ||
2769 | |||
2770 | old_fs = get_fs(); | ||
2771 | set_fs (KERNEL_DS); | ||
2772 | err = dev_ioctl(net, cmd, (void __user *)&ifr); | ||
2773 | set_fs (old_fs); | ||
2774 | |||
2775 | if (cmd == SIOCGIFMAP && !err) { | ||
2776 | err = copy_to_user(uifr32, &ifr, sizeof(ifr.ifr_name)); | ||
2777 | err |= __put_user(ifr.ifr_map.mem_start, &uifmap32->mem_start); | ||
2778 | err |= __put_user(ifr.ifr_map.mem_end, &uifmap32->mem_end); | ||
2779 | err |= __put_user(ifr.ifr_map.base_addr, &uifmap32->base_addr); | ||
2780 | err |= __put_user(ifr.ifr_map.irq, &uifmap32->irq); | ||
2781 | err |= __put_user(ifr.ifr_map.dma, &uifmap32->dma); | ||
2782 | err |= __put_user(ifr.ifr_map.port, &uifmap32->port); | ||
2783 | if (err) | ||
2784 | err = -EFAULT; | ||
2785 | } | ||
2786 | return err; | ||
2787 | } | ||
2788 | |||
2789 | static int compat_siocshwtstamp(struct net *net, struct compat_ifreq __user *uifr32) | ||
2790 | { | ||
2791 | void __user *uptr; | ||
2792 | compat_uptr_t uptr32; | ||
2793 | struct ifreq __user *uifr; | ||
2794 | |||
2795 | uifr = compat_alloc_user_space(sizeof (*uifr)); | ||
2796 | if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq))) | ||
2797 | return -EFAULT; | ||
2798 | |||
2799 | if (get_user(uptr32, &uifr32->ifr_data)) | ||
2800 | return -EFAULT; | ||
2801 | |||
2802 | uptr = compat_ptr(uptr32); | ||
2803 | |||
2804 | if (put_user(uptr, &uifr->ifr_data)) | ||
2805 | return -EFAULT; | ||
2806 | |||
2807 | return dev_ioctl(net, SIOCSHWTSTAMP, uifr); | ||
2808 | } | ||
2809 | |||
2810 | struct rtentry32 { | ||
2811 | u32 rt_pad1; | ||
2812 | struct sockaddr rt_dst; /* target address */ | ||
2813 | struct sockaddr rt_gateway; /* gateway addr (RTF_GATEWAY) */ | ||
2814 | struct sockaddr rt_genmask; /* target network mask (IP) */ | ||
2815 | unsigned short rt_flags; | ||
2816 | short rt_pad2; | ||
2817 | u32 rt_pad3; | ||
2818 | unsigned char rt_tos; | ||
2819 | unsigned char rt_class; | ||
2820 | short rt_pad4; | ||
2821 | short rt_metric; /* +1 for binary compatibility! */ | ||
2822 | /* char * */ u32 rt_dev; /* forcing the device at add */ | ||
2823 | u32 rt_mtu; /* per route MTU/Window */ | ||
2824 | u32 rt_window; /* Window clamping */ | ||
2825 | unsigned short rt_irtt; /* Initial RTT */ | ||
2826 | }; | ||
2827 | |||
2828 | struct in6_rtmsg32 { | ||
2829 | struct in6_addr rtmsg_dst; | ||
2830 | struct in6_addr rtmsg_src; | ||
2831 | struct in6_addr rtmsg_gateway; | ||
2832 | u32 rtmsg_type; | ||
2833 | u16 rtmsg_dst_len; | ||
2834 | u16 rtmsg_src_len; | ||
2835 | u32 rtmsg_metric; | ||
2836 | u32 rtmsg_info; | ||
2837 | u32 rtmsg_flags; | ||
2838 | s32 rtmsg_ifindex; | ||
2839 | }; | ||
2840 | |||
2841 | static int routing_ioctl(struct net *net, struct socket *sock, | ||
2842 | unsigned int cmd, void __user *argp) | ||
2843 | { | ||
2844 | int ret; | ||
2845 | void *r = NULL; | ||
2846 | struct in6_rtmsg r6; | ||
2847 | struct rtentry r4; | ||
2848 | char devname[16]; | ||
2849 | u32 rtdev; | ||
2850 | mm_segment_t old_fs = get_fs(); | ||
2851 | |||
2852 | if (sock && sock->sk && sock->sk->sk_family == AF_INET6) { /* ipv6 */ | ||
2853 | struct in6_rtmsg32 __user *ur6 = argp; | ||
2854 | ret = copy_from_user (&r6.rtmsg_dst, &(ur6->rtmsg_dst), | ||
2855 | 3 * sizeof(struct in6_addr)); | ||
2856 | ret |= __get_user (r6.rtmsg_type, &(ur6->rtmsg_type)); | ||
2857 | ret |= __get_user (r6.rtmsg_dst_len, &(ur6->rtmsg_dst_len)); | ||
2858 | ret |= __get_user (r6.rtmsg_src_len, &(ur6->rtmsg_src_len)); | ||
2859 | ret |= __get_user (r6.rtmsg_metric, &(ur6->rtmsg_metric)); | ||
2860 | ret |= __get_user (r6.rtmsg_info, &(ur6->rtmsg_info)); | ||
2861 | ret |= __get_user (r6.rtmsg_flags, &(ur6->rtmsg_flags)); | ||
2862 | ret |= __get_user (r6.rtmsg_ifindex, &(ur6->rtmsg_ifindex)); | ||
2863 | |||
2864 | r = (void *) &r6; | ||
2865 | } else { /* ipv4 */ | ||
2866 | struct rtentry32 __user *ur4 = argp; | ||
2867 | ret = copy_from_user (&r4.rt_dst, &(ur4->rt_dst), | ||
2868 | 3 * sizeof(struct sockaddr)); | ||
2869 | ret |= __get_user (r4.rt_flags, &(ur4->rt_flags)); | ||
2870 | ret |= __get_user (r4.rt_metric, &(ur4->rt_metric)); | ||
2871 | ret |= __get_user (r4.rt_mtu, &(ur4->rt_mtu)); | ||
2872 | ret |= __get_user (r4.rt_window, &(ur4->rt_window)); | ||
2873 | ret |= __get_user (r4.rt_irtt, &(ur4->rt_irtt)); | ||
2874 | ret |= __get_user (rtdev, &(ur4->rt_dev)); | ||
2875 | if (rtdev) { | ||
2876 | ret |= copy_from_user (devname, compat_ptr(rtdev), 15); | ||
2877 | r4.rt_dev = devname; devname[15] = 0; | ||
2878 | } else | ||
2879 | r4.rt_dev = NULL; | ||
2880 | |||
2881 | r = (void *) &r4; | ||
2882 | } | ||
2883 | |||
2884 | if (ret) { | ||
2885 | ret = -EFAULT; | ||
2886 | goto out; | ||
2887 | } | ||
2888 | |||
2889 | set_fs (KERNEL_DS); | ||
2890 | ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r); | ||
2891 | set_fs (old_fs); | ||
2892 | |||
2893 | out: | ||
2894 | return ret; | ||
2895 | } | ||
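
The IPv4 and IPv6 routing structures differ only in pointer and long widths, so the handler rebuilds a native struct rtentry or struct in6_rtmsg from the 32-bit layout and issues the ioctl under KERNEL_DS. A hedged sketch of the 32-bit caller being served here (standard SIOCADDRT usage, not code from the patch):

#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <net/route.h>

/* Add a host route to dst via gw on an AF_INET socket fd; when built as
 * a 32-bit program on a 64-bit kernel, routing_ioctl() above converts
 * the rtentry layout before handing it to the native handler. */
static int add_host_route(int fd, in_addr_t dst, in_addr_t gw)
{
	struct rtentry rt;
	struct sockaddr_in *sin;

	memset(&rt, 0, sizeof(rt));
	sin = (struct sockaddr_in *)&rt.rt_dst;
	sin->sin_family = AF_INET;
	sin->sin_addr.s_addr = dst;
	sin = (struct sockaddr_in *)&rt.rt_gateway;
	sin->sin_family = AF_INET;
	sin->sin_addr.s_addr = gw;
	rt.rt_flags = RTF_UP | RTF_HOST | RTF_GATEWAY;
	return ioctl(fd, SIOCADDRT, &rt);
}
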
2896 | |||
2897 | /* Since old-style bridge ioctls end up using SIOCDEVPRIVATE | ||
2898 | * for some operations, this forces use of the newer bridge-utils that | ||
2899 | * use compatible ioctls | ||
2900 | */ | ||
2901 | static int old_bridge_ioctl(compat_ulong_t __user *argp) | ||
2902 | { | ||
2903 | compat_ulong_t tmp; | ||
2904 | |||
2905 | if (get_user(tmp, argp)) | ||
2906 | return -EFAULT; | ||
2907 | if (tmp == BRCTL_GET_VERSION) | ||
2908 | return BRCTL_VERSION + 1; | ||
2909 | return -EINVAL; | ||
2910 | } | ||
2911 | |||
2912 | static int compat_sock_ioctl_trans(struct file *file, struct socket *sock, | ||
2913 | unsigned int cmd, unsigned long arg) | ||
2914 | { | ||
2915 | void __user *argp = compat_ptr(arg); | ||
2916 | struct sock *sk = sock->sk; | ||
2917 | struct net *net = sock_net(sk); | ||
2918 | |||
2919 | if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15)) | ||
2920 | return siocdevprivate_ioctl(net, cmd, argp); | ||
2921 | |||
2922 | switch (cmd) { | ||
2923 | case SIOCSIFBR: | ||
2924 | case SIOCGIFBR: | ||
2925 | return old_bridge_ioctl(argp); | ||
2926 | case SIOCGIFNAME: | ||
2927 | return dev_ifname32(net, argp); | ||
2928 | case SIOCGIFCONF: | ||
2929 | return dev_ifconf(net, argp); | ||
2930 | case SIOCETHTOOL: | ||
2931 | return ethtool_ioctl(net, argp); | ||
2932 | case SIOCWANDEV: | ||
2933 | return compat_siocwandev(net, argp); | ||
2934 | case SIOCGIFMAP: | ||
2935 | case SIOCSIFMAP: | ||
2936 | return compat_sioc_ifmap(net, cmd, argp); | ||
2937 | case SIOCBONDENSLAVE: | ||
2938 | case SIOCBONDRELEASE: | ||
2939 | case SIOCBONDSETHWADDR: | ||
2940 | case SIOCBONDSLAVEINFOQUERY: | ||
2941 | case SIOCBONDINFOQUERY: | ||
2942 | case SIOCBONDCHANGEACTIVE: | ||
2943 | return bond_ioctl(net, cmd, argp); | ||
2944 | case SIOCADDRT: | ||
2945 | case SIOCDELRT: | ||
2946 | return routing_ioctl(net, sock, cmd, argp); | ||
2947 | case SIOCGSTAMP: | ||
2948 | return do_siocgstamp(net, sock, cmd, argp); | ||
2949 | case SIOCGSTAMPNS: | ||
2950 | return do_siocgstampns(net, sock, cmd, argp); | ||
2951 | case SIOCSHWTSTAMP: | ||
2952 | return compat_siocshwtstamp(net, argp); | ||
2953 | |||
2954 | case FIOSETOWN: | ||
2955 | case SIOCSPGRP: | ||
2956 | case FIOGETOWN: | ||
2957 | case SIOCGPGRP: | ||
2958 | case SIOCBRADDBR: | ||
2959 | case SIOCBRDELBR: | ||
2960 | case SIOCGIFVLAN: | ||
2961 | case SIOCSIFVLAN: | ||
2962 | case SIOCADDDLCI: | ||
2963 | case SIOCDELDLCI: | ||
2964 | return sock_ioctl(file, cmd, arg); | ||
2965 | |||
2966 | case SIOCGIFFLAGS: | ||
2967 | case SIOCSIFFLAGS: | ||
2968 | case SIOCGIFMETRIC: | ||
2969 | case SIOCSIFMETRIC: | ||
2970 | case SIOCGIFMTU: | ||
2971 | case SIOCSIFMTU: | ||
2972 | case SIOCGIFMEM: | ||
2973 | case SIOCSIFMEM: | ||
2974 | case SIOCGIFHWADDR: | ||
2975 | case SIOCSIFHWADDR: | ||
2976 | case SIOCADDMULTI: | ||
2977 | case SIOCDELMULTI: | ||
2978 | case SIOCGIFINDEX: | ||
2979 | case SIOCGIFADDR: | ||
2980 | case SIOCSIFADDR: | ||
2981 | case SIOCSIFHWBROADCAST: | ||
2982 | case SIOCDIFADDR: | ||
2983 | case SIOCGIFBRDADDR: | ||
2984 | case SIOCSIFBRDADDR: | ||
2985 | case SIOCGIFDSTADDR: | ||
2986 | case SIOCSIFDSTADDR: | ||
2987 | case SIOCGIFNETMASK: | ||
2988 | case SIOCSIFNETMASK: | ||
2989 | case SIOCSIFPFLAGS: | ||
2990 | case SIOCGIFPFLAGS: | ||
2991 | case SIOCGIFTXQLEN: | ||
2992 | case SIOCSIFTXQLEN: | ||
2993 | case SIOCBRADDIF: | ||
2994 | case SIOCBRDELIF: | ||
2995 | case SIOCSIFNAME: | ||
2996 | case SIOCGMIIPHY: | ||
2997 | case SIOCGMIIREG: | ||
2998 | case SIOCSMIIREG: | ||
2999 | return dev_ifsioc(net, sock, cmd, argp); | ||
3000 | |||
3001 | case SIOCSARP: | ||
3002 | case SIOCGARP: | ||
3003 | case SIOCDARP: | ||
3004 | case SIOCATMARK: | ||
3005 | return sock_do_ioctl(net, sock, cmd, arg); | ||
3006 | } | ||
3007 | |||
3008 | /* Prevent warning from compat_sys_ioctl, these always | ||
3009 | * result in -EINVAL in the native case anyway. */ | ||
3010 | switch (cmd) { | ||
3011 | case SIOCRTMSG: | ||
3012 | case SIOCGIFCOUNT: | ||
3013 | case SIOCSRARP: | ||
3014 | case SIOCGRARP: | ||
3015 | case SIOCDRARP: | ||
3016 | case SIOCSIFLINK: | ||
3017 | case SIOCGIFSLAVE: | ||
3018 | case SIOCSIFSLAVE: | ||
3019 | return -EINVAL; | ||
3020 | } | ||
3021 | |||
3022 | return -ENOIOCTLCMD; | ||
3023 | } | ||
3024 | |||
2303 | static long compat_sock_ioctl(struct file *file, unsigned cmd, | 3025 | static long compat_sock_ioctl(struct file *file, unsigned cmd, |
2304 | unsigned long arg) | 3026 | unsigned long arg) |
2305 | { | 3027 | { |
@@ -2318,6 +3040,9 @@ static long compat_sock_ioctl(struct file *file, unsigned cmd, | |||
2318 | (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)) | 3040 | (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)) |
2319 | ret = compat_wext_handle_ioctl(net, cmd, arg); | 3041 | ret = compat_wext_handle_ioctl(net, cmd, arg); |
2320 | 3042 | ||
3043 | if (ret == -ENOIOCTLCMD) | ||
3044 | ret = compat_sock_ioctl_trans(file, sock, cmd, arg); | ||
3045 | |||
2321 | return ret; | 3046 | return ret; |
2322 | } | 3047 | } |
2323 | #endif | 3048 | #endif |
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c index 117f68a8aa40..f4c7ff3a53e6 100644 --- a/net/sunrpc/svcauth_unix.c +++ b/net/sunrpc/svcauth_unix.c | |||
@@ -686,8 +686,7 @@ svcauth_unix_set_client(struct svc_rqst *rqstp) | |||
686 | case AF_INET: | 686 | case AF_INET: |
687 | sin = svc_addr_in(rqstp); | 687 | sin = svc_addr_in(rqstp); |
688 | sin6 = &sin6_storage; | 688 | sin6 = &sin6_storage; |
689 | ipv6_addr_set(&sin6->sin6_addr, 0, 0, | 689 | ipv6_addr_set_v4mapped(sin->sin_addr.s_addr, &sin6->sin6_addr); |
690 | htonl(0x0000FFFF), sin->sin_addr.s_addr); | ||
691 | break; | 690 | break; |
692 | case AF_INET6: | 691 | case AF_INET6: |
693 | sin6 = svc_addr_in6(rqstp); | 692 | sin6 = svc_addr_in6(rqstp); |
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 1c246a4f491e..870929e08e5d 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c | |||
@@ -272,14 +272,14 @@ static int svc_one_sock_name(struct svc_sock *svsk, char *buf, int remaining) | |||
272 | case PF_INET: | 272 | case PF_INET: |
273 | len = snprintf(buf, remaining, "ipv4 %s %pI4 %d\n", | 273 | len = snprintf(buf, remaining, "ipv4 %s %pI4 %d\n", |
274 | proto_name, | 274 | proto_name, |
275 | &inet_sk(sk)->rcv_saddr, | 275 | &inet_sk(sk)->inet_rcv_saddr, |
276 | inet_sk(sk)->num); | 276 | inet_sk(sk)->inet_num); |
277 | break; | 277 | break; |
278 | case PF_INET6: | 278 | case PF_INET6: |
279 | len = snprintf(buf, remaining, "ipv6 %s %pI6 %d\n", | 279 | len = snprintf(buf, remaining, "ipv6 %s %pI6 %d\n", |
280 | proto_name, | 280 | proto_name, |
281 | &inet6_sk(sk)->rcv_saddr, | 281 | &inet6_sk(sk)->rcv_saddr, |
282 | inet_sk(sk)->num); | 282 | inet_sk(sk)->inet_num); |
283 | break; | 283 | break; |
284 | default: | 284 | default: |
285 | len = snprintf(buf, remaining, "*unknown-%d*\n", | 285 | len = snprintf(buf, remaining, "*unknown-%d*\n", |
@@ -1311,7 +1311,7 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv, | |||
1311 | /* Register socket with portmapper */ | 1311 | /* Register socket with portmapper */ |
1312 | if (*errp >= 0 && pmap_register) | 1312 | if (*errp >= 0 && pmap_register) |
1313 | *errp = svc_register(serv, inet->sk_family, inet->sk_protocol, | 1313 | *errp = svc_register(serv, inet->sk_family, inet->sk_protocol, |
1314 | ntohs(inet_sk(inet)->sport)); | 1314 | ntohs(inet_sk(inet)->inet_sport)); |
1315 | 1315 | ||
1316 | if (*errp < 0) { | 1316 | if (*errp < 0) { |
1317 | kfree(svsk); | 1317 | kfree(svsk); |
diff --git a/net/tipc/socket.c b/net/tipc/socket.c index e6d9abf7440e..d00c2119faf3 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c | |||
@@ -177,6 +177,7 @@ static void reject_rx_queue(struct sock *sk) | |||
177 | * @net: network namespace (must be default network) | 177 | * @net: network namespace (must be default network) |
178 | * @sock: pre-allocated socket structure | 178 | * @sock: pre-allocated socket structure |
179 | * @protocol: protocol indicator (must be 0) | 179 | * @protocol: protocol indicator (must be 0) |
180 | * @kern: caused by kernel or by userspace? | ||
180 | * | 181 | * |
181 | * This routine creates additional data structures used by the TIPC socket, | 182 | * This routine creates additional data structures used by the TIPC socket, |
182 | * initializes them, and links them together. | 183 | * initializes them, and links them together. |
@@ -184,7 +185,8 @@ static void reject_rx_queue(struct sock *sk) | |||
184 | * Returns 0 on success, errno otherwise | 185 | * Returns 0 on success, errno otherwise |
185 | */ | 186 | */ |
186 | 187 | ||
187 | static int tipc_create(struct net *net, struct socket *sock, int protocol) | 188 | static int tipc_create(struct net *net, struct socket *sock, int protocol, |
189 | int kern) | ||
188 | { | 190 | { |
189 | const struct proto_ops *ops; | 191 | const struct proto_ops *ops; |
190 | socket_state state; | 192 | socket_state state; |
@@ -1528,7 +1530,7 @@ static int accept(struct socket *sock, struct socket *new_sock, int flags) | |||
1528 | 1530 | ||
1529 | buf = skb_peek(&sk->sk_receive_queue); | 1531 | buf = skb_peek(&sk->sk_receive_queue); |
1530 | 1532 | ||
1531 | res = tipc_create(sock_net(sock->sk), new_sock, 0); | 1533 | res = tipc_create(sock_net(sock->sk), new_sock, 0, 0); |
1532 | if (!res) { | 1534 | if (!res) { |
1533 | struct sock *new_sk = new_sock->sk; | 1535 | struct sock *new_sk = new_sock->sk; |
1534 | struct tipc_sock *new_tsock = tipc_sk(new_sk); | 1536 | struct tipc_sock *new_tsock = tipc_sk(new_sk); |
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index fc820cd75453..7553ea6edd8f 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c | |||
@@ -621,7 +621,8 @@ out: | |||
621 | return sk; | 621 | return sk; |
622 | } | 622 | } |
623 | 623 | ||
624 | static int unix_create(struct net *net, struct socket *sock, int protocol) | 624 | static int unix_create(struct net *net, struct socket *sock, int protocol, |
625 | int kern) | ||
625 | { | 626 | { |
626 | if (protocol && protocol != PF_UNIX) | 627 | if (protocol && protocol != PF_UNIX) |
627 | return -EPROTONOSUPPORT; | 628 | return -EPROTONOSUPPORT; |
@@ -1258,7 +1259,7 @@ static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_ | |||
1258 | { | 1259 | { |
1259 | struct sock *sk = sock->sk; | 1260 | struct sock *sk = sock->sk; |
1260 | struct unix_sock *u; | 1261 | struct unix_sock *u; |
1261 | struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr; | 1262 | DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr); |
1262 | int err = 0; | 1263 | int err = 0; |
1263 | 1264 | ||
1264 | if (peer) { | 1265 | if (peer) { |
@@ -2216,7 +2217,7 @@ static const struct file_operations unix_seq_fops = { | |||
2216 | 2217 | ||
2217 | #endif | 2218 | #endif |
2218 | 2219 | ||
2219 | static struct net_proto_family unix_family_ops = { | 2220 | static const struct net_proto_family unix_family_ops = { |
2220 | .family = PF_UNIX, | 2221 | .family = PF_UNIX, |
2221 | .create = unix_create, | 2222 | .create = unix_create, |
2222 | .owner = THIS_MODULE, | 2223 | .owner = THIS_MODULE, |
diff --git a/net/wimax/op-msg.c b/net/wimax/op-msg.c index d631a17186bc..d3bfb6ef13ae 100644 --- a/net/wimax/op-msg.c +++ b/net/wimax/op-msg.c | |||
@@ -388,6 +388,8 @@ int wimax_gnl_doit_msg_from_user(struct sk_buff *skb, struct genl_info *info) | |||
388 | } | 388 | } |
389 | mutex_lock(&wimax_dev->mutex); | 389 | mutex_lock(&wimax_dev->mutex); |
390 | result = wimax_dev_is_ready(wimax_dev); | 390 | result = wimax_dev_is_ready(wimax_dev); |
391 | if (result == -ENOMEDIUM) | ||
392 | result = 0; | ||
391 | if (result < 0) | 393 | if (result < 0) |
392 | goto error_not_ready; | 394 | goto error_not_ready; |
393 | result = -ENOSYS; | 395 | result = -ENOSYS; |
diff --git a/net/wimax/op-rfkill.c b/net/wimax/op-rfkill.c index 70ef4df863b9..94d339c345d2 100644 --- a/net/wimax/op-rfkill.c +++ b/net/wimax/op-rfkill.c | |||
@@ -305,8 +305,15 @@ int wimax_rfkill(struct wimax_dev *wimax_dev, enum wimax_rf_state state) | |||
305 | d_fnstart(3, dev, "(wimax_dev %p state %u)\n", wimax_dev, state); | 305 | d_fnstart(3, dev, "(wimax_dev %p state %u)\n", wimax_dev, state); |
306 | mutex_lock(&wimax_dev->mutex); | 306 | mutex_lock(&wimax_dev->mutex); |
307 | result = wimax_dev_is_ready(wimax_dev); | 307 | result = wimax_dev_is_ready(wimax_dev); |
308 | if (result < 0) | 308 | if (result < 0) { |
309 | /* While initializing, < 1.4.3 wimax-tools versions use | ||
310 | * this call to check if the device is a valid WiMAX | ||
311 | * device; so we allow it to proceed always, | ||
312 | * considering the radios are all off. */ | ||
313 | if (result == -ENOMEDIUM && state == WIMAX_RF_QUERY) | ||
314 | result = WIMAX_RF_OFF << 1 | WIMAX_RF_OFF; | ||
309 | goto error_not_ready; | 315 | goto error_not_ready; |
316 | } | ||
310 | switch (state) { | 317 | switch (state) { |
311 | case WIMAX_RF_ON: | 318 | case WIMAX_RF_ON: |
312 | case WIMAX_RF_OFF: | 319 | case WIMAX_RF_OFF: |
@@ -355,6 +362,7 @@ int wimax_rfkill_add(struct wimax_dev *wimax_dev) | |||
355 | 362 | ||
356 | wimax_dev->rfkill = rfkill; | 363 | wimax_dev->rfkill = rfkill; |
357 | 364 | ||
365 | rfkill_init_sw_state(rfkill, 1); | ||
358 | result = rfkill_register(wimax_dev->rfkill); | 366 | result = rfkill_register(wimax_dev->rfkill); |
359 | if (result < 0) | 367 | if (result < 0) |
360 | goto error_rfkill_register; | 368 | goto error_rfkill_register; |
diff --git a/net/wimax/stack.c b/net/wimax/stack.c index 79fb7d7c640f..c8866412f830 100644 --- a/net/wimax/stack.c +++ b/net/wimax/stack.c | |||
@@ -60,6 +60,14 @@ | |||
60 | #define D_SUBMODULE stack | 60 | #define D_SUBMODULE stack |
61 | #include "debug-levels.h" | 61 | #include "debug-levels.h" |
62 | 62 | ||
63 | static char wimax_debug_params[128]; | ||
64 | module_param_string(debug, wimax_debug_params, sizeof(wimax_debug_params), | ||
65 | 0644); | ||
66 | MODULE_PARM_DESC(debug, | ||
67 | "String of space-separated NAME:VALUE pairs, where NAMEs " | ||
68 | "are the different debug submodules and VALUE are the " | ||
69 | "initial debug value to set."); | ||
70 | |||
63 | /* | 71 | /* |
64 | * Authoritative source for the RE_STATE_CHANGE attribute policy | 72 | * Authoritative source for the RE_STATE_CHANGE attribute policy |
65 | * | 73 | * |
@@ -562,6 +570,9 @@ int __init wimax_subsys_init(void) | |||
562 | int result, cnt; | 570 | int result, cnt; |
563 | 571 | ||
564 | d_fnstart(4, NULL, "()\n"); | 572 | d_fnstart(4, NULL, "()\n"); |
573 | d_parse_params(D_LEVEL, D_LEVEL_SIZE, wimax_debug_params, | ||
574 | "wimax.debug"); | ||
575 | |||
565 | snprintf(wimax_gnl_family.name, sizeof(wimax_gnl_family.name), | 576 | snprintf(wimax_gnl_family.name, sizeof(wimax_gnl_family.name), |
566 | "WiMAX"); | 577 | "WiMAX"); |
567 | result = genl_register_family(&wimax_gnl_family); | 578 | result = genl_register_family(&wimax_gnl_family); |
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig index abf7ca3f9ff9..614bdcec1c80 100644 --- a/net/wireless/Kconfig +++ b/net/wireless/Kconfig | |||
@@ -1,3 +1,21 @@ | |||
1 | config WIRELESS_EXT | ||
2 | bool | ||
3 | |||
4 | config WEXT_CORE | ||
5 | def_bool y | ||
6 | depends on CFG80211_WEXT || WIRELESS_EXT | ||
7 | |||
8 | config WEXT_PROC | ||
9 | def_bool y | ||
10 | depends on PROC_FS | ||
11 | depends on WEXT_CORE | ||
12 | |||
13 | config WEXT_SPY | ||
14 | bool | ||
15 | |||
16 | config WEXT_PRIV | ||
17 | bool | ||
18 | |||
1 | config CFG80211 | 19 | config CFG80211 |
2 | tristate "cfg80211 - wireless configuration API" | 20 | tristate "cfg80211 - wireless configuration API" |
3 | depends on RFKILL || !RFKILL | 21 | depends on RFKILL || !RFKILL |
@@ -56,6 +74,12 @@ config CFG80211_REG_DEBUG | |||
56 | 74 | ||
57 | If unsure, say N. | 75 | If unsure, say N. |
58 | 76 | ||
77 | config CFG80211_DEFAULT_PS_VALUE | ||
78 | int | ||
79 | default 1 if CFG80211_DEFAULT_PS | ||
80 | default 0 | ||
81 | depends on CFG80211 | ||
82 | |||
59 | config CFG80211_DEFAULT_PS | 83 | config CFG80211_DEFAULT_PS |
60 | bool "enable powersave by default" | 84 | bool "enable powersave by default" |
61 | depends on CFG80211 | 85 | depends on CFG80211 |
@@ -67,14 +91,10 @@ config CFG80211_DEFAULT_PS | |||
67 | applications instead -- they need to register their network | 91 | applications instead -- they need to register their network |
68 | latency requirement, see Documentation/power/pm_qos_interface.txt. | 92 | latency requirement, see Documentation/power/pm_qos_interface.txt. |
69 | 93 | ||
70 | config CFG80211_DEFAULT_PS_VALUE | ||
71 | int | ||
72 | default 1 if CFG80211_DEFAULT_PS | ||
73 | default 0 | ||
74 | |||
75 | config CFG80211_DEBUGFS | 94 | config CFG80211_DEBUGFS |
76 | bool "cfg80211 DebugFS entries" | 95 | bool "cfg80211 DebugFS entries" |
77 | depends on CFG80211 && DEBUG_FS | 96 | depends on CFG80211 |
97 | depends on DEBUG_FS | ||
78 | ---help--- | 98 | ---help--- |
79 | You can enable this if you want to debugfs entries for cfg80211. | 99 | You can enable this if you want to debugfs entries for cfg80211. |
80 | 100 | ||
@@ -83,6 +103,7 @@ config CFG80211_DEBUGFS | |||
83 | config WIRELESS_OLD_REGULATORY | 103 | config WIRELESS_OLD_REGULATORY |
84 | bool "Old wireless static regulatory definitions" | 104 | bool "Old wireless static regulatory definitions" |
85 | default n | 105 | default n |
106 | depends on CFG80211 | ||
86 | ---help--- | 107 | ---help--- |
87 | This option enables the old static regulatory information | 108 | This option enables the old static regulatory information |
88 | and uses it within the new framework. This option is available | 109 | and uses it within the new framework. This option is available |
@@ -94,20 +115,19 @@ config WIRELESS_OLD_REGULATORY | |||
94 | 115 | ||
95 | Say N and if you say Y, please tell us why. The default is N. | 116 | Say N and if you say Y, please tell us why. The default is N. |
96 | 117 | ||
97 | config WIRELESS_EXT | 118 | config CFG80211_WEXT |
98 | bool "Wireless extensions" | 119 | bool "cfg80211 wireless extensions compatibility" |
120 | depends on CFG80211 | ||
121 | select WEXT_CORE | ||
99 | default y | 122 | default y |
100 | ---help--- | 123 | help |
101 | This option enables the legacy wireless extensions | 124 | Enable this option if you need old userspace for wireless |
102 | (wireless network interface configuration via ioctls.) | 125 | extensions with cfg80211-based drivers. |
103 | |||
104 | Say Y unless you've upgraded all your userspace to use | ||
105 | nl80211 instead of wireless extensions. | ||
106 | 126 | ||
107 | config WIRELESS_EXT_SYSFS | 127 | config WIRELESS_EXT_SYSFS |
108 | bool "Wireless extensions sysfs files" | 128 | bool "Wireless extensions sysfs files" |
109 | default y | 129 | default y |
110 | depends on WIRELESS_EXT && SYSFS | 130 | depends on WEXT_CORE && SYSFS |
111 | help | 131 | help |
112 | This option enables the deprecated wireless statistics | 132 | This option enables the deprecated wireless statistics |
113 | files in /sys/class/net/*/wireless/. The same information | 133 | files in /sys/class/net/*/wireless/. The same information |
diff --git a/net/wireless/Makefile b/net/wireless/Makefile index 3ecaa9179977..f07c8dc7aab2 100644 --- a/net/wireless/Makefile +++ b/net/wireless/Makefile | |||
@@ -1,13 +1,17 @@ | |||
1 | obj-$(CONFIG_WIRELESS_EXT) += wext.o | ||
2 | obj-$(CONFIG_CFG80211) += cfg80211.o | 1 | obj-$(CONFIG_CFG80211) += cfg80211.o |
3 | obj-$(CONFIG_LIB80211) += lib80211.o | 2 | obj-$(CONFIG_LIB80211) += lib80211.o |
4 | obj-$(CONFIG_LIB80211_CRYPT_WEP) += lib80211_crypt_wep.o | 3 | obj-$(CONFIG_LIB80211_CRYPT_WEP) += lib80211_crypt_wep.o |
5 | obj-$(CONFIG_LIB80211_CRYPT_CCMP) += lib80211_crypt_ccmp.o | 4 | obj-$(CONFIG_LIB80211_CRYPT_CCMP) += lib80211_crypt_ccmp.o |
6 | obj-$(CONFIG_LIB80211_CRYPT_TKIP) += lib80211_crypt_tkip.o | 5 | obj-$(CONFIG_LIB80211_CRYPT_TKIP) += lib80211_crypt_tkip.o |
7 | 6 | ||
7 | obj-$(CONFIG_WEXT_CORE) += wext-core.o | ||
8 | obj-$(CONFIG_WEXT_PROC) += wext-proc.o | ||
9 | obj-$(CONFIG_WEXT_SPY) += wext-spy.o | ||
10 | obj-$(CONFIG_WEXT_PRIV) += wext-priv.o | ||
11 | |||
8 | cfg80211-y += core.o sysfs.o radiotap.o util.o reg.o scan.o nl80211.o | 12 | cfg80211-y += core.o sysfs.o radiotap.o util.o reg.o scan.o nl80211.o |
9 | cfg80211-y += mlme.o ibss.o sme.o chan.o | 13 | cfg80211-y += mlme.o ibss.o sme.o chan.o ethtool.o |
10 | cfg80211-$(CONFIG_CFG80211_DEBUGFS) += debugfs.o | 14 | cfg80211-$(CONFIG_CFG80211_DEBUGFS) += debugfs.o |
11 | cfg80211-$(CONFIG_WIRELESS_EXT) += wext-compat.o wext-sme.o | 15 | cfg80211-$(CONFIG_CFG80211_WEXT) += wext-compat.o wext-sme.o |
12 | 16 | ||
13 | ccflags-y += -D__CHECK_ENDIAN__ | 17 | ccflags-y += -D__CHECK_ENDIAN__ |
diff --git a/net/wireless/core.c b/net/wireless/core.c index a595f712b5bf..45bd63ad2eb2 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include "sysfs.h" | 22 | #include "sysfs.h" |
23 | #include "debugfs.h" | 23 | #include "debugfs.h" |
24 | #include "wext-compat.h" | 24 | #include "wext-compat.h" |
25 | #include "ethtool.h" | ||
25 | 26 | ||
26 | /* name for sysfs, %d is appended */ | 27 | /* name for sysfs, %d is appended */ |
27 | #define PHY_NAME "phy" | 28 | #define PHY_NAME "phy" |
@@ -359,6 +360,10 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv) | |||
359 | INIT_LIST_HEAD(&rdev->bss_list); | 360 | INIT_LIST_HEAD(&rdev->bss_list); |
360 | INIT_WORK(&rdev->scan_done_wk, __cfg80211_scan_done); | 361 | INIT_WORK(&rdev->scan_done_wk, __cfg80211_scan_done); |
361 | 362 | ||
363 | #ifdef CONFIG_CFG80211_WEXT | ||
364 | rdev->wiphy.wext = &cfg80211_wext_handler; | ||
365 | #endif | ||
366 | |||
362 | device_initialize(&rdev->wiphy.dev); | 367 | device_initialize(&rdev->wiphy.dev); |
363 | rdev->wiphy.dev.class = &ieee80211_class; | 368 | rdev->wiphy.dev.class = &ieee80211_class; |
364 | rdev->wiphy.dev.platform_data = rdev; | 369 | rdev->wiphy.dev.platform_data = rdev; |
@@ -542,7 +547,7 @@ void wiphy_unregister(struct wiphy *wiphy) | |||
542 | * First remove the hardware from everywhere, this makes | 547 | * First remove the hardware from everywhere, this makes |
543 | * it impossible to find from userspace. | 548 | * it impossible to find from userspace. |
544 | */ | 549 | */ |
545 | cfg80211_debugfs_rdev_del(rdev); | 550 | debugfs_remove_recursive(rdev->wiphy.debugfsdir); |
546 | list_del(&rdev->list); | 551 | list_del(&rdev->list); |
547 | 552 | ||
548 | /* | 553 | /* |
@@ -565,7 +570,6 @@ void wiphy_unregister(struct wiphy *wiphy) | |||
565 | 570 | ||
566 | cfg80211_rdev_list_generation++; | 571 | cfg80211_rdev_list_generation++; |
567 | device_del(&rdev->wiphy.dev); | 572 | device_del(&rdev->wiphy.dev); |
568 | debugfs_remove(rdev->wiphy.debugfsdir); | ||
569 | 573 | ||
570 | mutex_unlock(&cfg80211_mutex); | 574 | mutex_unlock(&cfg80211_mutex); |
571 | 575 | ||
@@ -626,6 +630,10 @@ static void wdev_cleanup_work(struct work_struct *work) | |||
626 | dev_put(wdev->netdev); | 630 | dev_put(wdev->netdev); |
627 | } | 631 | } |
628 | 632 | ||
633 | static struct device_type wiphy_type = { | ||
634 | .name = "wlan", | ||
635 | }; | ||
636 | |||
629 | static int cfg80211_netdev_notifier_call(struct notifier_block * nb, | 637 | static int cfg80211_netdev_notifier_call(struct notifier_block * nb, |
630 | unsigned long state, | 638 | unsigned long state, |
631 | void *ndev) | 639 | void *ndev) |
@@ -642,6 +650,9 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb, | |||
642 | WARN_ON(wdev->iftype == NL80211_IFTYPE_UNSPECIFIED); | 650 | WARN_ON(wdev->iftype == NL80211_IFTYPE_UNSPECIFIED); |
643 | 651 | ||
644 | switch (state) { | 652 | switch (state) { |
653 | case NETDEV_POST_INIT: | ||
654 | SET_NETDEV_DEVTYPE(dev, &wiphy_type); | ||
655 | break; | ||
645 | case NETDEV_REGISTER: | 656 | case NETDEV_REGISTER: |
646 | /* | 657 | /* |
647 | * NB: cannot take rdev->mtx here because this may be | 658 | * NB: cannot take rdev->mtx here because this may be |
@@ -666,9 +677,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb, | |||
666 | wdev->netdev = dev; | 677 | wdev->netdev = dev; |
667 | wdev->sme_state = CFG80211_SME_IDLE; | 678 | wdev->sme_state = CFG80211_SME_IDLE; |
668 | mutex_unlock(&rdev->devlist_mtx); | 679 | mutex_unlock(&rdev->devlist_mtx); |
669 | #ifdef CONFIG_WIRELESS_EXT | 680 | #ifdef CONFIG_CFG80211_WEXT |
670 | if (!dev->wireless_handlers) | ||
671 | dev->wireless_handlers = &cfg80211_wext_handler; | ||
672 | wdev->wext.default_key = -1; | 681 | wdev->wext.default_key = -1; |
673 | wdev->wext.default_mgmt_key = -1; | 682 | wdev->wext.default_mgmt_key = -1; |
674 | wdev->wext.connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC; | 683 | wdev->wext.connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC; |
@@ -682,6 +691,8 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb, | |||
682 | wdev->wext.ps = false; | 691 | wdev->wext.ps = false; |
683 | } | 692 | } |
684 | #endif | 693 | #endif |
694 | if (!dev->ethtool_ops) | ||
695 | dev->ethtool_ops = &cfg80211_ethtool_ops; | ||
685 | break; | 696 | break; |
686 | case NETDEV_GOING_DOWN: | 697 | case NETDEV_GOING_DOWN: |
687 | switch (wdev->iftype) { | 698 | switch (wdev->iftype) { |
@@ -690,7 +701,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb, | |||
690 | break; | 701 | break; |
691 | case NL80211_IFTYPE_STATION: | 702 | case NL80211_IFTYPE_STATION: |
692 | wdev_lock(wdev); | 703 | wdev_lock(wdev); |
693 | #ifdef CONFIG_WIRELESS_EXT | 704 | #ifdef CONFIG_CFG80211_WEXT |
694 | kfree(wdev->wext.ie); | 705 | kfree(wdev->wext.ie); |
695 | wdev->wext.ie = NULL; | 706 | wdev->wext.ie = NULL; |
696 | wdev->wext.ie_len = 0; | 707 | wdev->wext.ie_len = 0; |
@@ -722,7 +733,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb, | |||
722 | mutex_unlock(&rdev->devlist_mtx); | 733 | mutex_unlock(&rdev->devlist_mtx); |
723 | dev_put(dev); | 734 | dev_put(dev); |
724 | } | 735 | } |
725 | #ifdef CONFIG_WIRELESS_EXT | 736 | #ifdef CONFIG_CFG80211_WEXT |
726 | cfg80211_lock_rdev(rdev); | 737 | cfg80211_lock_rdev(rdev); |
727 | mutex_lock(&rdev->devlist_mtx); | 738 | mutex_lock(&rdev->devlist_mtx); |
728 | wdev_lock(wdev); | 739 | wdev_lock(wdev); |
@@ -760,7 +771,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb, | |||
760 | sysfs_remove_link(&dev->dev.kobj, "phy80211"); | 771 | sysfs_remove_link(&dev->dev.kobj, "phy80211"); |
761 | list_del_init(&wdev->list); | 772 | list_del_init(&wdev->list); |
762 | rdev->devlist_generation++; | 773 | rdev->devlist_generation++; |
763 | #ifdef CONFIG_WIRELESS_EXT | 774 | #ifdef CONFIG_CFG80211_WEXT |
764 | kfree(wdev->wext.keys); | 775 | kfree(wdev->wext.keys); |
765 | #endif | 776 | #endif |
766 | } | 777 | } |
diff --git a/net/wireless/core.h b/net/wireless/core.h index 68b321997d4c..5aeebb9085f8 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h | |||
@@ -72,17 +72,6 @@ struct cfg80211_registered_device { | |||
72 | /* current channel */ | 72 | /* current channel */ |
73 | struct ieee80211_channel *channel; | 73 | struct ieee80211_channel *channel; |
74 | 74 | ||
75 | #ifdef CONFIG_CFG80211_DEBUGFS | ||
76 | /* Debugfs entries */ | ||
77 | struct wiphy_debugfsdentries { | ||
78 | struct dentry *rts_threshold; | ||
79 | struct dentry *fragmentation_threshold; | ||
80 | struct dentry *short_retry_limit; | ||
81 | struct dentry *long_retry_limit; | ||
82 | struct dentry *ht40allow_map; | ||
83 | } debugfs; | ||
84 | #endif | ||
85 | |||
86 | /* must be last because of the way we do wiphy_priv(), | 75 | /* must be last because of the way we do wiphy_priv(), |
87 | * and it should at least be aligned to NETDEV_ALIGN */ | 76 | * and it should at least be aligned to NETDEV_ALIGN */ |
88 | struct wiphy wiphy __attribute__((__aligned__(NETDEV_ALIGN))); | 77 | struct wiphy wiphy __attribute__((__aligned__(NETDEV_ALIGN))); |
diff --git a/net/wireless/debugfs.c b/net/wireless/debugfs.c index 13d93d84f902..2e4895615037 100644 --- a/net/wireless/debugfs.c +++ b/net/wireless/debugfs.c | |||
@@ -104,11 +104,7 @@ static const struct file_operations ht40allow_map_ops = { | |||
104 | }; | 104 | }; |
105 | 105 | ||
106 | #define DEBUGFS_ADD(name) \ | 106 | #define DEBUGFS_ADD(name) \ |
107 | rdev->debugfs.name = debugfs_create_file(#name, S_IRUGO, phyd, \ | 107 | debugfs_create_file(#name, S_IRUGO, phyd, &rdev->wiphy, &name## _ops); |
108 | &rdev->wiphy, &name## _ops); | ||
109 | #define DEBUGFS_DEL(name) \ | ||
110 | debugfs_remove(rdev->debugfs.name); \ | ||
111 | rdev->debugfs.name = NULL; | ||
112 | 108 | ||
113 | void cfg80211_debugfs_rdev_add(struct cfg80211_registered_device *rdev) | 109 | void cfg80211_debugfs_rdev_add(struct cfg80211_registered_device *rdev) |
114 | { | 110 | { |
@@ -120,12 +116,3 @@ void cfg80211_debugfs_rdev_add(struct cfg80211_registered_device *rdev) | |||
120 | DEBUGFS_ADD(long_retry_limit); | 116 | DEBUGFS_ADD(long_retry_limit); |
121 | DEBUGFS_ADD(ht40allow_map); | 117 | DEBUGFS_ADD(ht40allow_map); |
122 | } | 118 | } |
123 | |||
124 | void cfg80211_debugfs_rdev_del(struct cfg80211_registered_device *rdev) | ||
125 | { | ||
126 | DEBUGFS_DEL(rts_threshold); | ||
127 | DEBUGFS_DEL(fragmentation_threshold); | ||
128 | DEBUGFS_DEL(short_retry_limit); | ||
129 | DEBUGFS_DEL(long_retry_limit); | ||
130 | DEBUGFS_DEL(ht40allow_map); | ||
131 | } | ||
diff --git a/net/wireless/debugfs.h b/net/wireless/debugfs.h index 6419b6d6ce3e..74fdd3811427 100644 --- a/net/wireless/debugfs.h +++ b/net/wireless/debugfs.h | |||
@@ -3,12 +3,9 @@ | |||
3 | 3 | ||
4 | #ifdef CONFIG_CFG80211_DEBUGFS | 4 | #ifdef CONFIG_CFG80211_DEBUGFS |
5 | void cfg80211_debugfs_rdev_add(struct cfg80211_registered_device *rdev); | 5 | void cfg80211_debugfs_rdev_add(struct cfg80211_registered_device *rdev); |
6 | void cfg80211_debugfs_rdev_del(struct cfg80211_registered_device *rdev); | ||
7 | #else | 6 | #else |
8 | static inline | 7 | static inline |
9 | void cfg80211_debugfs_rdev_add(struct cfg80211_registered_device *rdev) {} | 8 | void cfg80211_debugfs_rdev_add(struct cfg80211_registered_device *rdev) {} |
10 | static inline | ||
11 | void cfg80211_debugfs_rdev_del(struct cfg80211_registered_device *rdev) {} | ||
12 | #endif | 9 | #endif |
13 | 10 | ||
14 | #endif /* __CFG80211_DEBUGFS_H */ | 11 | #endif /* __CFG80211_DEBUGFS_H */ |
diff --git a/net/wireless/ethtool.c b/net/wireless/ethtool.c new file mode 100644 index 000000000000..ca4c825be93d --- /dev/null +++ b/net/wireless/ethtool.c | |||
@@ -0,0 +1,45 @@ | |||
1 | #include <linux/utsname.h> | ||
2 | #include <net/cfg80211.h> | ||
3 | #include "ethtool.h" | ||
4 | |||
5 | static void cfg80211_get_drvinfo(struct net_device *dev, | ||
6 | struct ethtool_drvinfo *info) | ||
7 | { | ||
8 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
9 | |||
10 | strlcpy(info->driver, wiphy_dev(wdev->wiphy)->driver->name, | ||
11 | sizeof(info->driver)); | ||
12 | |||
13 | strlcpy(info->version, init_utsname()->release, sizeof(info->version)); | ||
14 | |||
15 | if (wdev->wiphy->fw_version[0]) | ||
16 | strncpy(info->fw_version, wdev->wiphy->fw_version, | ||
17 | sizeof(info->fw_version)); | ||
18 | else | ||
19 | strncpy(info->fw_version, "N/A", sizeof(info->fw_version)); | ||
20 | |||
21 | strlcpy(info->bus_info, dev_name(wiphy_dev(wdev->wiphy)), | ||
22 | sizeof(info->bus_info)); | ||
23 | } | ||
24 | |||
25 | static int cfg80211_get_regs_len(struct net_device *dev) | ||
26 | { | ||
27 | /* For now, return 0... */ | ||
28 | return 0; | ||
29 | } | ||
30 | |||
31 | static void cfg80211_get_regs(struct net_device *dev, struct ethtool_regs *regs, | ||
32 | void *data) | ||
33 | { | ||
34 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
35 | |||
36 | regs->version = wdev->wiphy->hw_version; | ||
37 | regs->len = 0; | ||
38 | } | ||
39 | |||
40 | const struct ethtool_ops cfg80211_ethtool_ops = { | ||
41 | .get_drvinfo = cfg80211_get_drvinfo, | ||
42 | .get_regs_len = cfg80211_get_regs_len, | ||
43 | .get_regs = cfg80211_get_regs, | ||
44 | .get_link = ethtool_op_get_link, | ||
45 | }; | ||
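
These generic operations are hooked up for every cfg80211 netdev at registration time (see the core.c hunk above), so drivers only need to populate the wiphy fields being reported. A hypothetical driver-side snippet, purely illustrative and not part of this patch:

#include <linux/kernel.h>
#include <net/cfg80211.h>

/* Fill in the fields that cfg80211_get_drvinfo()/cfg80211_get_regs()
 * report, so "ethtool -i wlan0" shows a firmware version and
 * "ethtool -d wlan0" carries a hardware revision. */
static void example_fill_ethtool_info(struct wiphy *wiphy)
{
	snprintf(wiphy->fw_version, sizeof(wiphy->fw_version), "1.2.3");
	wiphy->hw_version = 0x0100;
}
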
diff --git a/net/wireless/ethtool.h b/net/wireless/ethtool.h new file mode 100644 index 000000000000..695ecad20bd6 --- /dev/null +++ b/net/wireless/ethtool.h | |||
@@ -0,0 +1,6 @@ | |||
1 | #ifndef __CFG80211_ETHTOOL__ | ||
2 | #define __CFG80211_ETHTOOL__ | ||
3 | |||
4 | extern const struct ethtool_ops cfg80211_ethtool_ops; | ||
5 | |||
6 | #endif /* __CFG80211_ETHTOOL__ */ | ||
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c index c88338911979..39b6d92e2828 100644 --- a/net/wireless/ibss.c +++ b/net/wireless/ibss.c | |||
@@ -15,7 +15,7 @@ void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid) | |||
15 | { | 15 | { |
16 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 16 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
17 | struct cfg80211_bss *bss; | 17 | struct cfg80211_bss *bss; |
18 | #ifdef CONFIG_WIRELESS_EXT | 18 | #ifdef CONFIG_CFG80211_WEXT |
19 | union iwreq_data wrqu; | 19 | union iwreq_data wrqu; |
20 | #endif | 20 | #endif |
21 | 21 | ||
@@ -44,7 +44,7 @@ void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid) | |||
44 | 44 | ||
45 | nl80211_send_ibss_bssid(wiphy_to_dev(wdev->wiphy), dev, bssid, | 45 | nl80211_send_ibss_bssid(wiphy_to_dev(wdev->wiphy), dev, bssid, |
46 | GFP_KERNEL); | 46 | GFP_KERNEL); |
47 | #ifdef CONFIG_WIRELESS_EXT | 47 | #ifdef CONFIG_CFG80211_WEXT |
48 | memset(&wrqu, 0, sizeof(wrqu)); | 48 | memset(&wrqu, 0, sizeof(wrqu)); |
49 | memcpy(wrqu.ap_addr.sa_data, bssid, ETH_ALEN); | 49 | memcpy(wrqu.ap_addr.sa_data, bssid, ETH_ALEN); |
50 | wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL); | 50 | wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL); |
@@ -96,7 +96,7 @@ int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev, | |||
96 | kfree(wdev->connect_keys); | 96 | kfree(wdev->connect_keys); |
97 | wdev->connect_keys = connkeys; | 97 | wdev->connect_keys = connkeys; |
98 | 98 | ||
99 | #ifdef CONFIG_WIRELESS_EXT | 99 | #ifdef CONFIG_CFG80211_WEXT |
100 | wdev->wext.ibss.channel = params->channel; | 100 | wdev->wext.ibss.channel = params->channel; |
101 | #endif | 101 | #endif |
102 | err = rdev->ops->join_ibss(&rdev->wiphy, dev, params); | 102 | err = rdev->ops->join_ibss(&rdev->wiphy, dev, params); |
@@ -154,7 +154,7 @@ static void __cfg80211_clear_ibss(struct net_device *dev, bool nowext) | |||
154 | 154 | ||
155 | wdev->current_bss = NULL; | 155 | wdev->current_bss = NULL; |
156 | wdev->ssid_len = 0; | 156 | wdev->ssid_len = 0; |
157 | #ifdef CONFIG_WIRELESS_EXT | 157 | #ifdef CONFIG_CFG80211_WEXT |
158 | if (!nowext) | 158 | if (!nowext) |
159 | wdev->wext.ibss.ssid_len = 0; | 159 | wdev->wext.ibss.ssid_len = 0; |
160 | #endif | 160 | #endif |
@@ -203,7 +203,7 @@ int cfg80211_leave_ibss(struct cfg80211_registered_device *rdev, | |||
203 | return err; | 203 | return err; |
204 | } | 204 | } |
205 | 205 | ||
206 | #ifdef CONFIG_WIRELESS_EXT | 206 | #ifdef CONFIG_CFG80211_WEXT |
207 | int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev, | 207 | int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev, |
208 | struct wireless_dev *wdev) | 208 | struct wireless_dev *wdev) |
209 | { | 209 | { |
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c index 0a6b7a0eca6b..2610b746effa 100644 --- a/net/wireless/mlme.c +++ b/net/wireless/mlme.c | |||
@@ -62,7 +62,6 @@ void cfg80211_send_rx_assoc(struct net_device *dev, const u8 *buf, size_t len) | |||
62 | u8 *ie = mgmt->u.assoc_resp.variable; | 62 | u8 *ie = mgmt->u.assoc_resp.variable; |
63 | int i, ieoffs = offsetof(struct ieee80211_mgmt, u.assoc_resp.variable); | 63 | int i, ieoffs = offsetof(struct ieee80211_mgmt, u.assoc_resp.variable); |
64 | struct cfg80211_internal_bss *bss = NULL; | 64 | struct cfg80211_internal_bss *bss = NULL; |
65 | bool need_connect_result = true; | ||
66 | 65 | ||
67 | wdev_lock(wdev); | 66 | wdev_lock(wdev); |
68 | 67 | ||
@@ -97,7 +96,6 @@ void cfg80211_send_rx_assoc(struct net_device *dev, const u8 *buf, size_t len) | |||
97 | WARN_ON(!bss); | 96 | WARN_ON(!bss); |
98 | } else if (wdev->conn) { | 97 | } else if (wdev->conn) { |
99 | cfg80211_sme_failed_assoc(wdev); | 98 | cfg80211_sme_failed_assoc(wdev); |
100 | need_connect_result = false; | ||
101 | /* | 99 | /* |
102 | * do not call connect_result() now because the | 100 | * do not call connect_result() now because the |
103 | * sme will schedule work that does it later. | 101 | * sme will schedule work that does it later. |
@@ -130,7 +128,7 @@ void cfg80211_send_rx_assoc(struct net_device *dev, const u8 *buf, size_t len) | |||
130 | } | 128 | } |
131 | EXPORT_SYMBOL(cfg80211_send_rx_assoc); | 129 | EXPORT_SYMBOL(cfg80211_send_rx_assoc); |
132 | 130 | ||
133 | static void __cfg80211_send_deauth(struct net_device *dev, | 131 | void __cfg80211_send_deauth(struct net_device *dev, |
134 | const u8 *buf, size_t len) | 132 | const u8 *buf, size_t len) |
135 | { | 133 | { |
136 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 134 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
@@ -139,7 +137,6 @@ static void __cfg80211_send_deauth(struct net_device *dev, | |||
139 | struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf; | 137 | struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf; |
140 | const u8 *bssid = mgmt->bssid; | 138 | const u8 *bssid = mgmt->bssid; |
141 | int i; | 139 | int i; |
142 | bool done = false; | ||
143 | 140 | ||
144 | ASSERT_WDEV_LOCK(wdev); | 141 | ASSERT_WDEV_LOCK(wdev); |
145 | 142 | ||
@@ -147,7 +144,6 @@ static void __cfg80211_send_deauth(struct net_device *dev, | |||
147 | 144 | ||
148 | if (wdev->current_bss && | 145 | if (wdev->current_bss && |
149 | memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0) { | 146 | memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0) { |
150 | done = true; | ||
151 | cfg80211_unhold_bss(wdev->current_bss); | 147 | cfg80211_unhold_bss(wdev->current_bss); |
152 | cfg80211_put_bss(&wdev->current_bss->pub); | 148 | cfg80211_put_bss(&wdev->current_bss->pub); |
153 | wdev->current_bss = NULL; | 149 | wdev->current_bss = NULL; |
@@ -157,7 +153,6 @@ static void __cfg80211_send_deauth(struct net_device *dev, | |||
157 | cfg80211_unhold_bss(wdev->auth_bsses[i]); | 153 | cfg80211_unhold_bss(wdev->auth_bsses[i]); |
158 | cfg80211_put_bss(&wdev->auth_bsses[i]->pub); | 154 | cfg80211_put_bss(&wdev->auth_bsses[i]->pub); |
159 | wdev->auth_bsses[i] = NULL; | 155 | wdev->auth_bsses[i] = NULL; |
160 | done = true; | ||
161 | break; | 156 | break; |
162 | } | 157 | } |
163 | if (wdev->authtry_bsses[i] && | 158 | if (wdev->authtry_bsses[i] && |
@@ -165,13 +160,10 @@ static void __cfg80211_send_deauth(struct net_device *dev, | |||
165 | cfg80211_unhold_bss(wdev->authtry_bsses[i]); | 160 | cfg80211_unhold_bss(wdev->authtry_bsses[i]); |
166 | cfg80211_put_bss(&wdev->authtry_bsses[i]->pub); | 161 | cfg80211_put_bss(&wdev->authtry_bsses[i]->pub); |
167 | wdev->authtry_bsses[i] = NULL; | 162 | wdev->authtry_bsses[i] = NULL; |
168 | done = true; | ||
169 | break; | 163 | break; |
170 | } | 164 | } |
171 | } | 165 | } |
172 | 166 | ||
173 | WARN_ON(!done); | ||
174 | |||
175 | if (wdev->sme_state == CFG80211_SME_CONNECTED) { | 167 | if (wdev->sme_state == CFG80211_SME_CONNECTED) { |
176 | u16 reason_code; | 168 | u16 reason_code; |
177 | bool from_ap; | 169 | bool from_ap; |
@@ -186,27 +178,19 @@ static void __cfg80211_send_deauth(struct net_device *dev, | |||
186 | false, NULL); | 178 | false, NULL); |
187 | } | 179 | } |
188 | } | 180 | } |
181 | EXPORT_SYMBOL(__cfg80211_send_deauth); | ||
189 | 182 | ||
190 | 183 | void cfg80211_send_deauth(struct net_device *dev, const u8 *buf, size_t len) | |
191 | void cfg80211_send_deauth(struct net_device *dev, const u8 *buf, size_t len, | ||
192 | void *cookie) | ||
193 | { | 184 | { |
194 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 185 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
195 | 186 | ||
196 | BUG_ON(cookie && wdev != cookie); | 187 | wdev_lock(wdev); |
197 | 188 | __cfg80211_send_deauth(dev, buf, len); | |
198 | if (cookie) { | 189 | wdev_unlock(wdev); |
199 | /* called within callback */ | ||
200 | __cfg80211_send_deauth(dev, buf, len); | ||
201 | } else { | ||
202 | wdev_lock(wdev); | ||
203 | __cfg80211_send_deauth(dev, buf, len); | ||
204 | wdev_unlock(wdev); | ||
205 | } | ||
206 | } | 190 | } |
207 | EXPORT_SYMBOL(cfg80211_send_deauth); | 191 | EXPORT_SYMBOL(cfg80211_send_deauth); |
208 | 192 | ||
209 | static void __cfg80211_send_disassoc(struct net_device *dev, | 193 | void __cfg80211_send_disassoc(struct net_device *dev, |
210 | const u8 *buf, size_t len) | 194 | const u8 *buf, size_t len) |
211 | { | 195 | { |
212 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 196 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
@@ -247,22 +231,15 @@ static void __cfg80211_send_disassoc(struct net_device *dev, | |||
247 | from_ap = memcmp(mgmt->sa, dev->dev_addr, ETH_ALEN) != 0; | 231 | from_ap = memcmp(mgmt->sa, dev->dev_addr, ETH_ALEN) != 0; |
248 | __cfg80211_disconnected(dev, NULL, 0, reason_code, from_ap); | 232 | __cfg80211_disconnected(dev, NULL, 0, reason_code, from_ap); |
249 | } | 233 | } |
234 | EXPORT_SYMBOL(__cfg80211_send_disassoc); | ||
250 | 235 | ||
251 | void cfg80211_send_disassoc(struct net_device *dev, const u8 *buf, size_t len, | 236 | void cfg80211_send_disassoc(struct net_device *dev, const u8 *buf, size_t len) |
252 | void *cookie) | ||
253 | { | 237 | { |
254 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 238 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
255 | 239 | ||
256 | BUG_ON(cookie && wdev != cookie); | 240 | wdev_lock(wdev); |
257 | 241 | __cfg80211_send_disassoc(dev, buf, len); | |
258 | if (cookie) { | 242 | wdev_unlock(wdev); |
259 | /* called within callback */ | ||
260 | __cfg80211_send_disassoc(dev, buf, len); | ||
261 | } else { | ||
262 | wdev_lock(wdev); | ||
263 | __cfg80211_send_disassoc(dev, buf, len); | ||
264 | wdev_unlock(wdev); | ||
265 | } | ||
266 | } | 243 | } |
267 | EXPORT_SYMBOL(cfg80211_send_disassoc); | 244 | EXPORT_SYMBOL(cfg80211_send_disassoc); |
268 | 245 | ||
@@ -340,7 +317,7 @@ void cfg80211_michael_mic_failure(struct net_device *dev, const u8 *addr, | |||
340 | { | 317 | { |
341 | struct wiphy *wiphy = dev->ieee80211_ptr->wiphy; | 318 | struct wiphy *wiphy = dev->ieee80211_ptr->wiphy; |
342 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); | 319 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); |
343 | #ifdef CONFIG_WIRELESS_EXT | 320 | #ifdef CONFIG_CFG80211_WEXT |
344 | union iwreq_data wrqu; | 321 | union iwreq_data wrqu; |
345 | char *buf = kmalloc(128, gfp); | 322 | char *buf = kmalloc(128, gfp); |
346 | 323 | ||
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index ca3c92a0a14f..37264d56bace 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -138,6 +138,7 @@ static struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] __read_mostly = { | |||
138 | [NL80211_ATTR_CIPHER_SUITE_GROUP] = { .type = NLA_U32 }, | 138 | [NL80211_ATTR_CIPHER_SUITE_GROUP] = { .type = NLA_U32 }, |
139 | [NL80211_ATTR_WPA_VERSIONS] = { .type = NLA_U32 }, | 139 | [NL80211_ATTR_WPA_VERSIONS] = { .type = NLA_U32 }, |
140 | [NL80211_ATTR_PID] = { .type = NLA_U32 }, | 140 | [NL80211_ATTR_PID] = { .type = NLA_U32 }, |
141 | [NL80211_ATTR_4ADDR] = { .type = NLA_U8 }, | ||
141 | }; | 142 | }; |
142 | 143 | ||
143 | /* policy for the attributes */ | 144 | /* policy for the attributes */ |
@@ -151,6 +152,26 @@ nl80211_key_policy[NL80211_KEY_MAX + 1] __read_mostly = { | |||
151 | [NL80211_KEY_DEFAULT_MGMT] = { .type = NLA_FLAG }, | 152 | [NL80211_KEY_DEFAULT_MGMT] = { .type = NLA_FLAG }, |
152 | }; | 153 | }; |
153 | 154 | ||
155 | /* ifidx get helper */ | ||
156 | static int nl80211_get_ifidx(struct netlink_callback *cb) | ||
157 | { | ||
158 | int res; | ||
159 | |||
160 | res = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize, | ||
161 | nl80211_fam.attrbuf, nl80211_fam.maxattr, | ||
162 | nl80211_policy); | ||
163 | if (res) | ||
164 | return res; | ||
165 | |||
166 | if (!nl80211_fam.attrbuf[NL80211_ATTR_IFINDEX]) | ||
167 | return -EINVAL; | ||
168 | |||
169 | res = nla_get_u32(nl80211_fam.attrbuf[NL80211_ATTR_IFINDEX]); | ||
170 | if (!res) | ||
171 | return -EINVAL; | ||
172 | return res; | ||
173 | } | ||
174 | |||
154 | /* IE validation */ | 175 | /* IE validation */ |
155 | static bool is_valid_ie_attr(const struct nlattr *attr) | 176 | static bool is_valid_ie_attr(const struct nlattr *attr) |
156 | { | 177 | { |
@@ -987,6 +1008,13 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info) | |||
987 | change = true; | 1008 | change = true; |
988 | } | 1009 | } |
989 | 1010 | ||
1011 | if (info->attrs[NL80211_ATTR_4ADDR]) { | ||
1012 | params.use_4addr = !!nla_get_u8(info->attrs[NL80211_ATTR_4ADDR]); | ||
1013 | change = true; | ||
1014 | } else { | ||
1015 | params.use_4addr = -1; | ||
1016 | } | ||
1017 | |||
990 | if (info->attrs[NL80211_ATTR_MNTR_FLAGS]) { | 1018 | if (info->attrs[NL80211_ATTR_MNTR_FLAGS]) { |
991 | if (ntype != NL80211_IFTYPE_MONITOR) { | 1019 | if (ntype != NL80211_IFTYPE_MONITOR) { |
992 | err = -EINVAL; | 1020 | err = -EINVAL; |
@@ -1053,6 +1081,9 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info) | |||
1053 | params.mesh_id_len = nla_len(info->attrs[NL80211_ATTR_MESH_ID]); | 1081 | params.mesh_id_len = nla_len(info->attrs[NL80211_ATTR_MESH_ID]); |
1054 | } | 1082 | } |
1055 | 1083 | ||
1084 | if (info->attrs[NL80211_ATTR_4ADDR]) | ||
1085 | params.use_4addr = !!nla_get_u8(info->attrs[NL80211_ATTR_4ADDR]); | ||
1086 | |||
1056 | err = parse_monitor_flags(type == NL80211_IFTYPE_MONITOR ? | 1087 | err = parse_monitor_flags(type == NL80211_IFTYPE_MONITOR ? |
1057 | info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL, | 1088 | info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL, |
1058 | &flags); | 1089 | &flags); |
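The new NL80211_ATTR_4ADDR attribute is a plain u8 flag carried alongside NL80211_CMD_SET_INTERFACE or NL80211_CMD_NEW_INTERFACE; note how the set-interface path above falls back to -1 as a "no change" sentinel when the attribute is absent. As a rough illustration only, not part of this patch, a libnl-3 user-space sketch for flipping 4-address (WDS-style) mode could look like the following; 'sock', 'nl80211_id' and the generic-netlink family resolution are assumed to be set up elsewhere, and error handling is kept minimal:

	#include <errno.h>
	#include <netlink/netlink.h>
	#include <netlink/genl/genl.h>
	#include <linux/nl80211.h>

	/* Hedged sketch: enable or disable 4-address frames on an existing
	 * interface.  'sock' and 'nl80211_id' come from setup code elsewhere. */
	static int set_4addr(struct nl_sock *sock, int nl80211_id,
			     int ifindex, int enable)
	{
		struct nl_msg *msg = nlmsg_alloc();
		int err;

		if (!msg)
			return -ENOMEM;

		genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, nl80211_id, 0, 0,
			    NL80211_CMD_SET_INTERFACE, 0);
		nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);
		nla_put_u8(msg, NL80211_ATTR_4ADDR, enable ? 1 : 0);

		err = nl_send_auto(sock, msg);
		nlmsg_free(msg);
		return err < 0 ? err : 0;
	}
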
@@ -1264,7 +1295,7 @@ static int nl80211_set_key(struct sk_buff *skb, struct genl_info *info) | |||
1264 | if (!err) | 1295 | if (!err) |
1265 | err = func(&rdev->wiphy, dev, key.idx); | 1296 | err = func(&rdev->wiphy, dev, key.idx); |
1266 | 1297 | ||
1267 | #ifdef CONFIG_WIRELESS_EXT | 1298 | #ifdef CONFIG_CFG80211_WEXT |
1268 | if (!err) { | 1299 | if (!err) { |
1269 | if (func == rdev->ops->set_default_key) | 1300 | if (func == rdev->ops->set_default_key) |
1270 | dev->ieee80211_ptr->wext.default_key = key.idx; | 1301 | dev->ieee80211_ptr->wext.default_key = key.idx; |
@@ -1365,7 +1396,7 @@ static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info) | |||
1365 | if (!err) | 1396 | if (!err) |
1366 | err = rdev->ops->del_key(&rdev->wiphy, dev, key.idx, mac_addr); | 1397 | err = rdev->ops->del_key(&rdev->wiphy, dev, key.idx, mac_addr); |
1367 | 1398 | ||
1368 | #ifdef CONFIG_WIRELESS_EXT | 1399 | #ifdef CONFIG_CFG80211_WEXT |
1369 | if (!err) { | 1400 | if (!err) { |
1370 | if (key.idx == dev->ieee80211_ptr->wext.default_key) | 1401 | if (key.idx == dev->ieee80211_ptr->wext.default_key) |
1371 | dev->ieee80211_ptr->wext.default_key = -1; | 1402 | dev->ieee80211_ptr->wext.default_key = -1; |
@@ -1682,20 +1713,10 @@ static int nl80211_dump_station(struct sk_buff *skb, | |||
1682 | int sta_idx = cb->args[1]; | 1713 | int sta_idx = cb->args[1]; |
1683 | int err; | 1714 | int err; |
1684 | 1715 | ||
1685 | if (!ifidx) { | 1716 | if (!ifidx) |
1686 | err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize, | 1717 | ifidx = nl80211_get_ifidx(cb); |
1687 | nl80211_fam.attrbuf, nl80211_fam.maxattr, | 1718 | if (ifidx < 0) |
1688 | nl80211_policy); | 1719 | return ifidx; |
1689 | if (err) | ||
1690 | return err; | ||
1691 | |||
1692 | if (!nl80211_fam.attrbuf[NL80211_ATTR_IFINDEX]) | ||
1693 | return -EINVAL; | ||
1694 | |||
1695 | ifidx = nla_get_u32(nl80211_fam.attrbuf[NL80211_ATTR_IFINDEX]); | ||
1696 | if (!ifidx) | ||
1697 | return -EINVAL; | ||
1698 | } | ||
1699 | 1720 | ||
1700 | rtnl_lock(); | 1721 | rtnl_lock(); |
1701 | 1722 | ||
@@ -1800,7 +1821,7 @@ static int nl80211_get_station(struct sk_buff *skb, struct genl_info *info) | |||
1800 | } | 1821 | } |
1801 | 1822 | ||
1802 | /* | 1823 | /* |
1803 | * Get vlan interface making sure it is on the right wiphy. | 1824 | * Get vlan interface making sure it is running and on the right wiphy. |
1804 | */ | 1825 | */ |
1805 | static int get_vlan(struct genl_info *info, | 1826 | static int get_vlan(struct genl_info *info, |
1806 | struct cfg80211_registered_device *rdev, | 1827 | struct cfg80211_registered_device *rdev, |
@@ -1818,6 +1839,8 @@ static int get_vlan(struct genl_info *info, | |||
1818 | return -EINVAL; | 1839 | return -EINVAL; |
1819 | if ((*vlan)->ieee80211_ptr->wiphy != &rdev->wiphy) | 1840 | if ((*vlan)->ieee80211_ptr->wiphy != &rdev->wiphy) |
1820 | return -EINVAL; | 1841 | return -EINVAL; |
1842 | if (!netif_running(*vlan)) | ||
1843 | return -ENETDOWN; | ||
1821 | } | 1844 | } |
1822 | return 0; | 1845 | return 0; |
1823 | } | 1846 | } |
@@ -2105,9 +2128,9 @@ static int nl80211_send_mpath(struct sk_buff *msg, u32 pid, u32 seq, | |||
2105 | if (pinfo->filled & MPATH_INFO_FRAME_QLEN) | 2128 | if (pinfo->filled & MPATH_INFO_FRAME_QLEN) |
2106 | NLA_PUT_U32(msg, NL80211_MPATH_INFO_FRAME_QLEN, | 2129 | NLA_PUT_U32(msg, NL80211_MPATH_INFO_FRAME_QLEN, |
2107 | pinfo->frame_qlen); | 2130 | pinfo->frame_qlen); |
2108 | if (pinfo->filled & MPATH_INFO_DSN) | 2131 | if (pinfo->filled & MPATH_INFO_SN) |
2109 | NLA_PUT_U32(msg, NL80211_MPATH_INFO_DSN, | 2132 | NLA_PUT_U32(msg, NL80211_MPATH_INFO_SN, |
2110 | pinfo->dsn); | 2133 | pinfo->sn); |
2111 | if (pinfo->filled & MPATH_INFO_METRIC) | 2134 | if (pinfo->filled & MPATH_INFO_METRIC) |
2112 | NLA_PUT_U32(msg, NL80211_MPATH_INFO_METRIC, | 2135 | NLA_PUT_U32(msg, NL80211_MPATH_INFO_METRIC, |
2113 | pinfo->metric); | 2136 | pinfo->metric); |
@@ -2145,20 +2168,10 @@ static int nl80211_dump_mpath(struct sk_buff *skb, | |||
2145 | int path_idx = cb->args[1]; | 2168 | int path_idx = cb->args[1]; |
2146 | int err; | 2169 | int err; |
2147 | 2170 | ||
2148 | if (!ifidx) { | 2171 | if (!ifidx) |
2149 | err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize, | 2172 | ifidx = nl80211_get_ifidx(cb); |
2150 | nl80211_fam.attrbuf, nl80211_fam.maxattr, | 2173 | if (ifidx < 0) |
2151 | nl80211_policy); | 2174 | return ifidx; |
2152 | if (err) | ||
2153 | return err; | ||
2154 | |||
2155 | if (!nl80211_fam.attrbuf[NL80211_ATTR_IFINDEX]) | ||
2156 | return -EINVAL; | ||
2157 | |||
2158 | ifidx = nla_get_u32(nl80211_fam.attrbuf[NL80211_ATTR_IFINDEX]); | ||
2159 | if (!ifidx) | ||
2160 | return -EINVAL; | ||
2161 | } | ||
2162 | 2175 | ||
2163 | rtnl_lock(); | 2176 | rtnl_lock(); |
2164 | 2177 | ||
@@ -2605,6 +2618,8 @@ static int nl80211_get_mesh_params(struct sk_buff *skb, | |||
2605 | cur_params.dot11MeshHWMPpreqMinInterval); | 2618 | cur_params.dot11MeshHWMPpreqMinInterval); |
2606 | NLA_PUT_U16(msg, NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME, | 2619 | NLA_PUT_U16(msg, NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME, |
2607 | cur_params.dot11MeshHWMPnetDiameterTraversalTime); | 2620 | cur_params.dot11MeshHWMPnetDiameterTraversalTime); |
2621 | NLA_PUT_U8(msg, NL80211_MESHCONF_HWMP_ROOTMODE, | ||
2622 | cur_params.dot11MeshHWMPRootMode); | ||
2608 | nla_nest_end(msg, pinfoattr); | 2623 | nla_nest_end(msg, pinfoattr); |
2609 | genlmsg_end(msg, hdr); | 2624 | genlmsg_end(msg, hdr); |
2610 | err = genlmsg_reply(msg, info); | 2625 | err = genlmsg_reply(msg, info); |
@@ -2715,6 +2730,10 @@ static int nl80211_set_mesh_params(struct sk_buff *skb, struct genl_info *info) | |||
2715 | dot11MeshHWMPnetDiameterTraversalTime, | 2730 | dot11MeshHWMPnetDiameterTraversalTime, |
2716 | mask, NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME, | 2731 | mask, NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME, |
2717 | nla_get_u16); | 2732 | nla_get_u16); |
2733 | FILL_IN_MESH_PARAM_IF_SET(tb, cfg, | ||
2734 | dot11MeshHWMPRootMode, mask, | ||
2735 | NL80211_MESHCONF_HWMP_ROOTMODE, | ||
2736 | nla_get_u8); | ||
2718 | 2737 | ||
2719 | /* Apply changes */ | 2738 | /* Apply changes */ |
2720 | err = rdev->ops->set_mesh_params(&rdev->wiphy, dev, &cfg, mask); | 2739 | err = rdev->ops->set_mesh_params(&rdev->wiphy, dev, &cfg, mask); |
@@ -2988,7 +3007,6 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) | |||
2988 | goto out; | 3007 | goto out; |
2989 | } | 3008 | } |
2990 | 3009 | ||
2991 | request->n_channels = n_channels; | ||
2992 | if (n_ssids) | 3010 | if (n_ssids) |
2993 | request->ssids = (void *)&request->channels[n_channels]; | 3011 | request->ssids = (void *)&request->channels[n_channels]; |
2994 | request->n_ssids = n_ssids; | 3012 | request->n_ssids = n_ssids; |
@@ -2999,32 +3017,53 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) | |||
2999 | request->ie = (void *)(request->channels + n_channels); | 3017 | request->ie = (void *)(request->channels + n_channels); |
3000 | } | 3018 | } |
3001 | 3019 | ||
3020 | i = 0; | ||
3002 | if (info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]) { | 3021 | if (info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]) { |
3003 | /* user specified, bail out if channel not found */ | 3022 | /* user specified, bail out if channel not found */ |
3004 | request->n_channels = n_channels; | ||
3005 | i = 0; | ||
3006 | nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_FREQUENCIES], tmp) { | 3023 | nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_FREQUENCIES], tmp) { |
3007 | request->channels[i] = ieee80211_get_channel(wiphy, nla_get_u32(attr)); | 3024 | struct ieee80211_channel *chan; |
3008 | if (!request->channels[i]) { | 3025 | |
3026 | chan = ieee80211_get_channel(wiphy, nla_get_u32(attr)); | ||
3027 | |||
3028 | if (!chan) { | ||
3009 | err = -EINVAL; | 3029 | err = -EINVAL; |
3010 | goto out_free; | 3030 | goto out_free; |
3011 | } | 3031 | } |
3032 | |||
3033 | /* ignore disabled channels */ | ||
3034 | if (chan->flags & IEEE80211_CHAN_DISABLED) | ||
3035 | continue; | ||
3036 | |||
3037 | request->channels[i] = chan; | ||
3012 | i++; | 3038 | i++; |
3013 | } | 3039 | } |
3014 | } else { | 3040 | } else { |
3015 | /* all channels */ | 3041 | /* all channels */ |
3016 | i = 0; | ||
3017 | for (band = 0; band < IEEE80211_NUM_BANDS; band++) { | 3042 | for (band = 0; band < IEEE80211_NUM_BANDS; band++) { |
3018 | int j; | 3043 | int j; |
3019 | if (!wiphy->bands[band]) | 3044 | if (!wiphy->bands[band]) |
3020 | continue; | 3045 | continue; |
3021 | for (j = 0; j < wiphy->bands[band]->n_channels; j++) { | 3046 | for (j = 0; j < wiphy->bands[band]->n_channels; j++) { |
3022 | request->channels[i] = &wiphy->bands[band]->channels[j]; | 3047 | struct ieee80211_channel *chan; |
3048 | |||
3049 | chan = &wiphy->bands[band]->channels[j]; | ||
3050 | |||
3051 | if (chan->flags & IEEE80211_CHAN_DISABLED) | ||
3052 | continue; | ||
3053 | |||
3054 | request->channels[i] = chan; | ||
3023 | i++; | 3055 | i++; |
3024 | } | 3056 | } |
3025 | } | 3057 | } |
3026 | } | 3058 | } |
3027 | 3059 | ||
3060 | if (!i) { | ||
3061 | err = -EINVAL; | ||
3062 | goto out_free; | ||
3063 | } | ||
3064 | |||
3065 | request->n_channels = i; | ||
3066 | |||
3028 | i = 0; | 3067 | i = 0; |
3029 | if (info->attrs[NL80211_ATTR_SCAN_SSIDS]) { | 3068 | if (info->attrs[NL80211_ATTR_SCAN_SSIDS]) { |
3030 | nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS], tmp) { | 3069 | nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS], tmp) { |
@@ -3105,6 +3144,8 @@ static int nl80211_send_bss(struct sk_buff *msg, u32 pid, u32 seq, int flags, | |||
3105 | NLA_PUT_U16(msg, NL80211_BSS_BEACON_INTERVAL, res->beacon_interval); | 3144 | NLA_PUT_U16(msg, NL80211_BSS_BEACON_INTERVAL, res->beacon_interval); |
3106 | NLA_PUT_U16(msg, NL80211_BSS_CAPABILITY, res->capability); | 3145 | NLA_PUT_U16(msg, NL80211_BSS_CAPABILITY, res->capability); |
3107 | NLA_PUT_U32(msg, NL80211_BSS_FREQUENCY, res->channel->center_freq); | 3146 | NLA_PUT_U32(msg, NL80211_BSS_FREQUENCY, res->channel->center_freq); |
3147 | NLA_PUT_U32(msg, NL80211_BSS_SEEN_MS_AGO, | ||
3148 | jiffies_to_msecs(jiffies - intbss->ts)); | ||
3108 | 3149 | ||
3109 | switch (rdev->wiphy.signal_type) { | 3150 | switch (rdev->wiphy.signal_type) { |
3110 | case CFG80211_SIGNAL_TYPE_MBM: | 3151 | case CFG80211_SIGNAL_TYPE_MBM: |
@@ -3159,21 +3200,11 @@ static int nl80211_dump_scan(struct sk_buff *skb, | |||
3159 | int start = cb->args[1], idx = 0; | 3200 | int start = cb->args[1], idx = 0; |
3160 | int err; | 3201 | int err; |
3161 | 3202 | ||
3162 | if (!ifidx) { | 3203 | if (!ifidx) |
3163 | err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize, | 3204 | ifidx = nl80211_get_ifidx(cb); |
3164 | nl80211_fam.attrbuf, nl80211_fam.maxattr, | 3205 | if (ifidx < 0) |
3165 | nl80211_policy); | 3206 | return ifidx; |
3166 | if (err) | 3207 | cb->args[0] = ifidx; |
3167 | return err; | ||
3168 | |||
3169 | if (!nl80211_fam.attrbuf[NL80211_ATTR_IFINDEX]) | ||
3170 | return -EINVAL; | ||
3171 | |||
3172 | ifidx = nla_get_u32(nl80211_fam.attrbuf[NL80211_ATTR_IFINDEX]); | ||
3173 | if (!ifidx) | ||
3174 | return -EINVAL; | ||
3175 | cb->args[0] = ifidx; | ||
3176 | } | ||
3177 | 3208 | ||
3178 | dev = dev_get_by_index(sock_net(skb->sk), ifidx); | 3209 | dev = dev_get_by_index(sock_net(skb->sk), ifidx); |
3179 | if (!dev) | 3210 | if (!dev) |
@@ -3216,6 +3247,106 @@ static int nl80211_dump_scan(struct sk_buff *skb, | |||
3216 | return err; | 3247 | return err; |
3217 | } | 3248 | } |
3218 | 3249 | ||
3250 | static int nl80211_send_survey(struct sk_buff *msg, u32 pid, u32 seq, | ||
3251 | int flags, struct net_device *dev, | ||
3252 | struct survey_info *survey) | ||
3253 | { | ||
3254 | void *hdr; | ||
3255 | struct nlattr *infoattr; | ||
3256 | |||
3257 | /* Survey without a channel doesn't make sense */ | ||
3258 | if (!survey->channel) | ||
3259 | return -EINVAL; | ||
3260 | |||
3261 | hdr = nl80211hdr_put(msg, pid, seq, flags, | ||
3262 | NL80211_CMD_NEW_SURVEY_RESULTS); | ||
3263 | if (!hdr) | ||
3264 | return -ENOMEM; | ||
3265 | |||
3266 | NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); | ||
3267 | |||
3268 | infoattr = nla_nest_start(msg, NL80211_ATTR_SURVEY_INFO); | ||
3269 | if (!infoattr) | ||
3270 | goto nla_put_failure; | ||
3271 | |||
3272 | NLA_PUT_U32(msg, NL80211_SURVEY_INFO_FREQUENCY, | ||
3273 | survey->channel->center_freq); | ||
3274 | if (survey->filled & SURVEY_INFO_NOISE_DBM) | ||
3275 | NLA_PUT_U8(msg, NL80211_SURVEY_INFO_NOISE, | ||
3276 | survey->noise); | ||
3277 | |||
3278 | nla_nest_end(msg, infoattr); | ||
3279 | |||
3280 | return genlmsg_end(msg, hdr); | ||
3281 | |||
3282 | nla_put_failure: | ||
3283 | genlmsg_cancel(msg, hdr); | ||
3284 | return -EMSGSIZE; | ||
3285 | } | ||
3286 | |||
3287 | static int nl80211_dump_survey(struct sk_buff *skb, | ||
3288 | struct netlink_callback *cb) | ||
3289 | { | ||
3290 | struct survey_info survey; | ||
3291 | struct cfg80211_registered_device *dev; | ||
3292 | struct net_device *netdev; | ||
3293 | int ifidx = cb->args[0]; | ||
3294 | int survey_idx = cb->args[1]; | ||
3295 | int res; | ||
3296 | |||
3297 | if (!ifidx) | ||
3298 | ifidx = nl80211_get_ifidx(cb); | ||
3299 | if (ifidx < 0) | ||
3300 | return ifidx; | ||
3301 | cb->args[0] = ifidx; | ||
3302 | |||
3303 | rtnl_lock(); | ||
3304 | |||
3305 | netdev = __dev_get_by_index(sock_net(skb->sk), ifidx); | ||
3306 | if (!netdev) { | ||
3307 | res = -ENODEV; | ||
3308 | goto out_rtnl; | ||
3309 | } | ||
3310 | |||
3311 | dev = cfg80211_get_dev_from_ifindex(sock_net(skb->sk), ifidx); | ||
3312 | if (IS_ERR(dev)) { | ||
3313 | res = PTR_ERR(dev); | ||
3314 | goto out_rtnl; | ||
3315 | } | ||
3316 | |||
3317 | if (!dev->ops->dump_survey) { | ||
3318 | res = -EOPNOTSUPP; | ||
3319 | goto out_err; | ||
3320 | } | ||
3321 | |||
3322 | while (1) { | ||
3323 | res = dev->ops->dump_survey(&dev->wiphy, netdev, survey_idx, | ||
3324 | &survey); | ||
3325 | if (res == -ENOENT) | ||
3326 | break; | ||
3327 | if (res) | ||
3328 | goto out_err; | ||
3329 | |||
3330 | if (nl80211_send_survey(skb, | ||
3331 | NETLINK_CB(cb->skb).pid, | ||
3332 | cb->nlh->nlmsg_seq, NLM_F_MULTI, | ||
3333 | netdev, | ||
3334 | &survey) < 0) | ||
3335 | goto out; | ||
3336 | survey_idx++; | ||
3337 | } | ||
3338 | |||
3339 | out: | ||
3340 | cb->args[1] = survey_idx; | ||
3341 | res = skb->len; | ||
3342 | out_err: | ||
3343 | cfg80211_unlock_rdev(dev); | ||
3344 | out_rtnl: | ||
3345 | rtnl_unlock(); | ||
3346 | |||
3347 | return res; | ||
3348 | } | ||
3349 | |||
3219 | static bool nl80211_valid_auth_type(enum nl80211_auth_type auth_type) | 3350 | static bool nl80211_valid_auth_type(enum nl80211_auth_type auth_type) |
3220 | { | 3351 | { |
3221 | return auth_type <= NL80211_AUTHTYPE_MAX; | 3352 | return auth_type <= NL80211_AUTHTYPE_MAX; |
@@ -4293,6 +4424,11 @@ static struct genl_ops nl80211_ops[] = { | |||
4293 | .policy = nl80211_policy, | 4424 | .policy = nl80211_policy, |
4294 | .flags = GENL_ADMIN_PERM, | 4425 | .flags = GENL_ADMIN_PERM, |
4295 | }, | 4426 | }, |
4427 | { | ||
4428 | .cmd = NL80211_CMD_GET_SURVEY, | ||
4429 | .policy = nl80211_policy, | ||
4430 | .dumpit = nl80211_dump_survey, | ||
4431 | }, | ||
4296 | }; | 4432 | }; |
4297 | static struct genl_multicast_group nl80211_mlme_mcgrp = { | 4433 | static struct genl_multicast_group nl80211_mlme_mcgrp = { |
4298 | .name = "mlme", | 4434 | .name = "mlme", |
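With NL80211_CMD_GET_SURVEY wired up as a dumpit operation, user space fetches the table through an ordinary generic-netlink dump. A hedged libnl-3 fragment, reusing the same socket and family setup assumptions as the 4-addr sketch earlier (a suitable receive callback installed on 'sock' is also assumed):

	/* Hedged sketch: request the survey dump for one interface. */
	static int dump_survey_for(struct nl_sock *sock, int nl80211_id,
				   int ifindex)
	{
		struct nl_msg *msg = nlmsg_alloc();

		if (!msg)
			return -ENOMEM;

		genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, nl80211_id, 0,
			    NLM_F_DUMP, NL80211_CMD_GET_SURVEY, 0);
		nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);

		nl_send_auto(sock, msg);
		nlmsg_free(msg);
		/* Survey entries arrive as NLM_F_MULTI messages. */
		return nl_recvmsgs_default(sock);
	}
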
diff --git a/net/wireless/scan.c b/net/wireless/scan.c index e5f92ee758f4..e2d344ff6745 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c | |||
@@ -22,7 +22,7 @@ void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak) | |||
22 | { | 22 | { |
23 | struct cfg80211_scan_request *request; | 23 | struct cfg80211_scan_request *request; |
24 | struct net_device *dev; | 24 | struct net_device *dev; |
25 | #ifdef CONFIG_WIRELESS_EXT | 25 | #ifdef CONFIG_CFG80211_WEXT |
26 | union iwreq_data wrqu; | 26 | union iwreq_data wrqu; |
27 | #endif | 27 | #endif |
28 | 28 | ||
@@ -47,7 +47,7 @@ void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak) | |||
47 | else | 47 | else |
48 | nl80211_send_scan_done(rdev, dev); | 48 | nl80211_send_scan_done(rdev, dev); |
49 | 49 | ||
50 | #ifdef CONFIG_WIRELESS_EXT | 50 | #ifdef CONFIG_CFG80211_WEXT |
51 | if (!request->aborted) { | 51 | if (!request->aborted) { |
52 | memset(&wrqu, 0, sizeof(wrqu)); | 52 | memset(&wrqu, 0, sizeof(wrqu)); |
53 | 53 | ||
@@ -592,7 +592,7 @@ void cfg80211_unlink_bss(struct wiphy *wiphy, struct cfg80211_bss *pub) | |||
592 | } | 592 | } |
593 | EXPORT_SYMBOL(cfg80211_unlink_bss); | 593 | EXPORT_SYMBOL(cfg80211_unlink_bss); |
594 | 594 | ||
595 | #ifdef CONFIG_WIRELESS_EXT | 595 | #ifdef CONFIG_CFG80211_WEXT |
596 | int cfg80211_wext_siwscan(struct net_device *dev, | 596 | int cfg80211_wext_siwscan(struct net_device *dev, |
597 | struct iw_request_info *info, | 597 | struct iw_request_info *info, |
598 | union iwreq_data *wrqu, char *extra) | 598 | union iwreq_data *wrqu, char *extra) |
@@ -650,9 +650,15 @@ int cfg80211_wext_siwscan(struct net_device *dev, | |||
650 | i = 0; | 650 | i = 0; |
651 | for (band = 0; band < IEEE80211_NUM_BANDS; band++) { | 651 | for (band = 0; band < IEEE80211_NUM_BANDS; band++) { |
652 | int j; | 652 | int j; |
653 | |||
653 | if (!wiphy->bands[band]) | 654 | if (!wiphy->bands[band]) |
654 | continue; | 655 | continue; |
656 | |||
655 | for (j = 0; j < wiphy->bands[band]->n_channels; j++) { | 657 | for (j = 0; j < wiphy->bands[band]->n_channels; j++) { |
658 | /* ignore disabled channels */ | ||
659 | if (wiphy->bands[band]->channels[j].flags & | ||
660 | IEEE80211_CHAN_DISABLED) | ||
661 | continue; | ||
656 | 662 | ||
657 | /* If we have a wireless request structure and the | 663 | /* If we have a wireless request structure and the |
658 | * wireless request specifies frequencies, then search | 664 | * wireless request specifies frequencies, then search |
diff --git a/net/wireless/sme.c b/net/wireless/sme.c index 9f0b2800a9d7..0115d07d2c1a 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c | |||
@@ -365,7 +365,7 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid, | |||
365 | { | 365 | { |
366 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 366 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
367 | u8 *country_ie; | 367 | u8 *country_ie; |
368 | #ifdef CONFIG_WIRELESS_EXT | 368 | #ifdef CONFIG_CFG80211_WEXT |
369 | union iwreq_data wrqu; | 369 | union iwreq_data wrqu; |
370 | #endif | 370 | #endif |
371 | 371 | ||
@@ -382,7 +382,7 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid, | |||
382 | resp_ie, resp_ie_len, | 382 | resp_ie, resp_ie_len, |
383 | status, GFP_KERNEL); | 383 | status, GFP_KERNEL); |
384 | 384 | ||
385 | #ifdef CONFIG_WIRELESS_EXT | 385 | #ifdef CONFIG_CFG80211_WEXT |
386 | if (wextev) { | 386 | if (wextev) { |
387 | if (req_ie && status == WLAN_STATUS_SUCCESS) { | 387 | if (req_ie && status == WLAN_STATUS_SUCCESS) { |
388 | memset(&wrqu, 0, sizeof(wrqu)); | 388 | memset(&wrqu, 0, sizeof(wrqu)); |
@@ -497,7 +497,7 @@ void __cfg80211_roamed(struct wireless_dev *wdev, const u8 *bssid, | |||
497 | const u8 *resp_ie, size_t resp_ie_len) | 497 | const u8 *resp_ie, size_t resp_ie_len) |
498 | { | 498 | { |
499 | struct cfg80211_bss *bss; | 499 | struct cfg80211_bss *bss; |
500 | #ifdef CONFIG_WIRELESS_EXT | 500 | #ifdef CONFIG_CFG80211_WEXT |
501 | union iwreq_data wrqu; | 501 | union iwreq_data wrqu; |
502 | #endif | 502 | #endif |
503 | 503 | ||
@@ -532,7 +532,7 @@ void __cfg80211_roamed(struct wireless_dev *wdev, const u8 *bssid, | |||
532 | req_ie, req_ie_len, resp_ie, resp_ie_len, | 532 | req_ie, req_ie_len, resp_ie, resp_ie_len, |
533 | GFP_KERNEL); | 533 | GFP_KERNEL); |
534 | 534 | ||
535 | #ifdef CONFIG_WIRELESS_EXT | 535 | #ifdef CONFIG_CFG80211_WEXT |
536 | if (req_ie) { | 536 | if (req_ie) { |
537 | memset(&wrqu, 0, sizeof(wrqu)); | 537 | memset(&wrqu, 0, sizeof(wrqu)); |
538 | wrqu.data.length = req_ie_len; | 538 | wrqu.data.length = req_ie_len; |
@@ -593,7 +593,7 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie, | |||
593 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 593 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
594 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); | 594 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); |
595 | int i; | 595 | int i; |
596 | #ifdef CONFIG_WIRELESS_EXT | 596 | #ifdef CONFIG_CFG80211_WEXT |
597 | union iwreq_data wrqu; | 597 | union iwreq_data wrqu; |
598 | #endif | 598 | #endif |
599 | 599 | ||
@@ -651,7 +651,7 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie, | |||
651 | for (i = 0; i < 6; i++) | 651 | for (i = 0; i < 6; i++) |
652 | rdev->ops->del_key(wdev->wiphy, dev, i, NULL); | 652 | rdev->ops->del_key(wdev->wiphy, dev, i, NULL); |
653 | 653 | ||
654 | #ifdef CONFIG_WIRELESS_EXT | 654 | #ifdef CONFIG_CFG80211_WEXT |
655 | memset(&wrqu, 0, sizeof(wrqu)); | 655 | memset(&wrqu, 0, sizeof(wrqu)); |
656 | wrqu.ap_addr.sa_family = ARPHRD_ETHER; | 656 | wrqu.ap_addr.sa_family = ARPHRD_ETHER; |
657 | wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL); | 657 | wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL); |
diff --git a/net/wireless/util.c b/net/wireless/util.c index 3fc2df86278f..5aa39f7cf9b9 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c | |||
@@ -320,7 +320,9 @@ int ieee80211_data_to_8023(struct sk_buff *skb, u8 *addr, | |||
320 | break; | 320 | break; |
321 | case cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS): | 321 | case cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS): |
322 | if (unlikely(iftype != NL80211_IFTYPE_WDS && | 322 | if (unlikely(iftype != NL80211_IFTYPE_WDS && |
323 | iftype != NL80211_IFTYPE_MESH_POINT)) | 323 | iftype != NL80211_IFTYPE_MESH_POINT && |
324 | iftype != NL80211_IFTYPE_AP_VLAN && | ||
325 | iftype != NL80211_IFTYPE_STATION)) | ||
324 | return -1; | 326 | return -1; |
325 | if (iftype == NL80211_IFTYPE_MESH_POINT) { | 327 | if (iftype == NL80211_IFTYPE_MESH_POINT) { |
326 | struct ieee80211s_hdr *meshdr = | 328 | struct ieee80211s_hdr *meshdr = |
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c index 561a45cf2a6a..41abcbdc5fb9 100644 --- a/net/wireless/wext-compat.c +++ b/net/wireless/wext-compat.c | |||
@@ -904,8 +904,6 @@ static int cfg80211_set_auth_alg(struct wireless_dev *wdev, | |||
904 | 904 | ||
905 | static int cfg80211_set_wpa_version(struct wireless_dev *wdev, u32 wpa_versions) | 905 | static int cfg80211_set_wpa_version(struct wireless_dev *wdev, u32 wpa_versions) |
906 | { | 906 | { |
907 | wdev->wext.connect.crypto.wpa_versions = 0; | ||
908 | |||
909 | if (wpa_versions & ~(IW_AUTH_WPA_VERSION_WPA | | 907 | if (wpa_versions & ~(IW_AUTH_WPA_VERSION_WPA | |
910 | IW_AUTH_WPA_VERSION_WPA2| | 908 | IW_AUTH_WPA_VERSION_WPA2| |
911 | IW_AUTH_WPA_VERSION_DISABLED)) | 909 | IW_AUTH_WPA_VERSION_DISABLED)) |
@@ -933,8 +931,6 @@ static int cfg80211_set_wpa_version(struct wireless_dev *wdev, u32 wpa_versions) | |||
933 | 931 | ||
934 | static int cfg80211_set_cipher_group(struct wireless_dev *wdev, u32 cipher) | 932 | static int cfg80211_set_cipher_group(struct wireless_dev *wdev, u32 cipher) |
935 | { | 933 | { |
936 | wdev->wext.connect.crypto.cipher_group = 0; | ||
937 | |||
938 | if (cipher & IW_AUTH_CIPHER_WEP40) | 934 | if (cipher & IW_AUTH_CIPHER_WEP40) |
939 | wdev->wext.connect.crypto.cipher_group = | 935 | wdev->wext.connect.crypto.cipher_group = |
940 | WLAN_CIPHER_SUITE_WEP40; | 936 | WLAN_CIPHER_SUITE_WEP40; |
@@ -950,6 +946,8 @@ static int cfg80211_set_cipher_group(struct wireless_dev *wdev, u32 cipher) | |||
950 | else if (cipher & IW_AUTH_CIPHER_AES_CMAC) | 946 | else if (cipher & IW_AUTH_CIPHER_AES_CMAC) |
951 | wdev->wext.connect.crypto.cipher_group = | 947 | wdev->wext.connect.crypto.cipher_group = |
952 | WLAN_CIPHER_SUITE_AES_CMAC; | 948 | WLAN_CIPHER_SUITE_AES_CMAC; |
949 | else if (cipher & IW_AUTH_CIPHER_NONE) | ||
950 | wdev->wext.connect.crypto.cipher_group = 0; | ||
953 | else | 951 | else |
954 | return -EINVAL; | 952 | return -EINVAL; |
955 | 953 | ||
diff --git a/net/wireless/wext.c b/net/wireless/wext-core.c index 60fe57761ca9..a4e5ddc8d4f5 100644 --- a/net/wireless/wext.c +++ b/net/wireless/wext-core.c | |||
@@ -1,112 +1,28 @@ | |||
1 | /* | 1 | /* |
2 | * This file implements the Wireless Extensions APIs. | 2 | * This file implements the Wireless Extensions core API. |
3 | * | 3 | * |
4 | * Authors : Jean Tourrilhes - HPL - <jt@hpl.hp.com> | 4 | * Authors : Jean Tourrilhes - HPL - <jt@hpl.hp.com> |
5 | * Copyright (c) 1997-2007 Jean Tourrilhes, All Rights Reserved. | 5 | * Copyright (c) 1997-2007 Jean Tourrilhes, All Rights Reserved. |
6 | * Copyright 2009 Johannes Berg <johannes@sipsolutions.net> | ||
6 | * | 7 | * |
7 | * (As all part of the Linux kernel, this file is GPL) | 8 | * (As all part of the Linux kernel, this file is GPL) |
8 | */ | 9 | */ |
9 | 10 | #include <linux/kernel.h> | |
10 | /************************** DOCUMENTATION **************************/ | 11 | #include <linux/netdevice.h> |
11 | /* | 12 | #include <linux/rtnetlink.h> |
12 | * API definition : | 13 | #include <linux/wireless.h> |
13 | * -------------- | 14 | #include <linux/uaccess.h> |
14 | * See <linux/wireless.h> for details of the APIs and the rest. | 15 | #include <net/cfg80211.h> |
15 | * | 16 | #include <net/iw_handler.h> |
16 | * History : | ||
17 | * ------- | ||
18 | * | ||
19 | * v1 - 5.12.01 - Jean II | ||
20 | * o Created this file. | ||
21 | * | ||
22 | * v2 - 13.12.01 - Jean II | ||
23 | * o Move /proc/net/wireless stuff from net/core/dev.c to here | ||
24 | * o Make Wireless Extension IOCTLs go through here | ||
25 | * o Added iw_handler handling ;-) | ||
26 | * o Added standard ioctl description | ||
27 | * o Initial dumb commit strategy based on orinoco.c | ||
28 | * | ||
29 | * v3 - 19.12.01 - Jean II | ||
30 | * o Make sure we don't go out of standard_ioctl[] in ioctl_standard_call | ||
31 | * o Add event dispatcher function | ||
32 | * o Add event description | ||
33 | * o Propagate events as rtnetlink IFLA_WIRELESS option | ||
34 | * o Generate event on selected SET requests | ||
35 | * | ||
36 | * v4 - 18.04.02 - Jean II | ||
37 | * o Fix stupid off by one in iw_ioctl_description : IW_ESSID_MAX_SIZE + 1 | ||
38 | * | ||
39 | * v5 - 21.06.02 - Jean II | ||
40 | * o Add IW_PRIV_TYPE_ADDR in priv_type_size (+cleanup) | ||
41 | * o Reshuffle IW_HEADER_TYPE_XXX to map IW_PRIV_TYPE_XXX changes | ||
42 | * o Add IWEVCUSTOM for driver specific event/scanning token | ||
43 | * o Turn on WE_STRICT_WRITE by default + kernel warning | ||
44 | * o Fix WE_STRICT_WRITE in ioctl_export_private() (32 => iw_num) | ||
45 | * o Fix off-by-one in test (extra_size <= IFNAMSIZ) | ||
46 | * | ||
47 | * v6 - 9.01.03 - Jean II | ||
48 | * o Add common spy support : iw_handler_set_spy(), wireless_spy_update() | ||
49 | * o Add enhanced spy support : iw_handler_set_thrspy() and event. | ||
50 | * o Add WIRELESS_EXT version display in /proc/net/wireless | ||
51 | * | ||
52 | * v6 - 18.06.04 - Jean II | ||
53 | * o Change get_spydata() method for added safety | ||
54 | * o Remove spy #ifdef, they are always on -> cleaner code | ||
55 | * o Allow any size GET request if user specifies length > max | ||
56 | * and if request has IW_DESCR_FLAG_NOMAX flag or is SIOCGIWPRIV | ||
57 | * o Start migrating get_wireless_stats to struct iw_handler_def | ||
58 | * o Add wmb() in iw_handler_set_spy() for non-coherent archs/cpus | ||
59 | * Based on patch from Pavel Roskin <proski@gnu.org> : | ||
60 | * o Fix kernel data leak to user space in private handler handling | ||
61 | * | ||
62 | * v7 - 18.3.05 - Jean II | ||
63 | * o Remove (struct iw_point *)->pointer from events and streams | ||
64 | * o Remove spy_offset from struct iw_handler_def | ||
65 | * o Start deprecating dev->get_wireless_stats, output a warning | ||
66 | * o If IW_QUAL_DBM is set, show dBm values in /proc/net/wireless | ||
67 | * o Don't lose INVALID/DBM flags when clearing UPDATED flags (iwstats) | ||
68 | * | ||
69 | * v8 - 17.02.06 - Jean II | ||
70 | * o RtNetlink requests support (SET/GET) | ||
71 | * | ||
72 | * v8b - 03.08.06 - Herbert Xu | ||
73 | * o Fix Wireless Event locking issues. | ||
74 | * | ||
75 | * v9 - 14.3.06 - Jean II | ||
76 | * o Change length in ESSID and NICK to strlen() instead of strlen()+1 | ||
77 | * o Make standard_ioctl_num and standard_event_num unsigned | ||
78 | * o Remove (struct net_device *)->get_wireless_stats() | ||
79 | * | ||
80 | * v10 - 16.3.07 - Jean II | ||
81 | * o Prevent leaking of kernel space in stream on 64 bits. | ||
82 | */ | ||
83 | |||
84 | /***************************** INCLUDES *****************************/ | ||
85 | |||
86 | #include <linux/module.h> | ||
87 | #include <linux/types.h> /* off_t */ | ||
88 | #include <linux/netdevice.h> /* struct ifreq, dev_get_by_name() */ | ||
89 | #include <linux/proc_fs.h> | ||
90 | #include <linux/rtnetlink.h> /* rtnetlink stuff */ | ||
91 | #include <linux/seq_file.h> | ||
92 | #include <linux/init.h> /* for __init */ | ||
93 | #include <linux/if_arp.h> /* ARPHRD_ETHER */ | ||
94 | #include <linux/etherdevice.h> /* compare_ether_addr */ | ||
95 | #include <linux/interrupt.h> | ||
96 | #include <net/net_namespace.h> | ||
97 | |||
98 | #include <linux/wireless.h> /* Pretty obvious */ | ||
99 | #include <net/iw_handler.h> /* New driver API */ | ||
100 | #include <net/netlink.h> | 17 | #include <net/netlink.h> |
101 | #include <net/wext.h> | 18 | #include <net/wext.h> |
19 | #include <net/net_namespace.h> | ||
20 | |||
21 | typedef int (*wext_ioctl_func)(struct net_device *, struct iwreq *, | ||
22 | unsigned int, struct iw_request_info *, | ||
23 | iw_handler); | ||
102 | 24 | ||
103 | #include <asm/uaccess.h> /* copy_to_user() */ | ||
104 | 25 | ||
105 | /************************* GLOBAL VARIABLES *************************/ | ||
106 | /* | ||
107 | * You should not use global variables, because of re-entrancy. | ||
108 | * On our case, it's only const, so it's OK... | ||
109 | */ | ||
110 | /* | 26 | /* |
111 | * Meta-data about all the standard Wireless Extension requests we | 27 | * Meta-data about all the standard Wireless Extension requests we |
112 | * know about. | 28 | * know about. |
@@ -390,18 +306,6 @@ static const struct iw_ioctl_description standard_event[] = { | |||
390 | }; | 306 | }; |
391 | static const unsigned standard_event_num = ARRAY_SIZE(standard_event); | 307 | static const unsigned standard_event_num = ARRAY_SIZE(standard_event); |
392 | 308 | ||
393 | /* Size (in bytes) of the various private data types */ | ||
394 | static const char iw_priv_type_size[] = { | ||
395 | 0, /* IW_PRIV_TYPE_NONE */ | ||
396 | 1, /* IW_PRIV_TYPE_BYTE */ | ||
397 | 1, /* IW_PRIV_TYPE_CHAR */ | ||
398 | 0, /* Not defined */ | ||
399 | sizeof(__u32), /* IW_PRIV_TYPE_INT */ | ||
400 | sizeof(struct iw_freq), /* IW_PRIV_TYPE_FLOAT */ | ||
401 | sizeof(struct sockaddr), /* IW_PRIV_TYPE_ADDR */ | ||
402 | 0, /* Not defined */ | ||
403 | }; | ||
404 | |||
405 | /* Size (in bytes) of various events */ | 309 | /* Size (in bytes) of various events */ |
406 | static const int event_type_size[] = { | 310 | static const int event_type_size[] = { |
407 | IW_EV_LCP_LEN, /* IW_HEADER_TYPE_NULL */ | 311 | IW_EV_LCP_LEN, /* IW_HEADER_TYPE_NULL */ |
@@ -433,323 +337,346 @@ static const int compat_event_type_size[] = { | |||
433 | }; | 337 | }; |
434 | #endif | 338 | #endif |
435 | 339 | ||
436 | /************************ COMMON SUBROUTINES ************************/ | ||
437 | /* | ||
438 | * Stuff that may be used in various place or doesn't fit in one | ||
439 | * of the section below. | ||
440 | */ | ||
441 | |||
442 | /* ---------------------------------------------------------------- */ | ||
443 | /* | ||
444 | * Return the driver handler associated with a specific Wireless Extension. | ||
445 | */ | ||
446 | static iw_handler get_handler(struct net_device *dev, unsigned int cmd) | ||
447 | { | ||
448 | /* Don't "optimise" the following variable, it will crash */ | ||
449 | unsigned int index; /* *MUST* be unsigned */ | ||
450 | 340 | ||
451 | /* Check if we have some wireless handlers defined */ | 341 | /* IW event code */ |
452 | if (dev->wireless_handlers == NULL) | ||
453 | return NULL; | ||
454 | |||
455 | /* Try as a standard command */ | ||
456 | index = cmd - SIOCIWFIRST; | ||
457 | if (index < dev->wireless_handlers->num_standard) | ||
458 | return dev->wireless_handlers->standard[index]; | ||
459 | |||
460 | /* Try as a private command */ | ||
461 | index = cmd - SIOCIWFIRSTPRIV; | ||
462 | if (index < dev->wireless_handlers->num_private) | ||
463 | return dev->wireless_handlers->private[index]; | ||
464 | 342 | ||
465 | /* Not found */ | 343 | static int __net_init wext_pernet_init(struct net *net) |
466 | return NULL; | ||
467 | } | ||
468 | |||
469 | /* ---------------------------------------------------------------- */ | ||
470 | /* | ||
471 | * Get statistics out of the driver | ||
472 | */ | ||
473 | struct iw_statistics *get_wireless_stats(struct net_device *dev) | ||
474 | { | 344 | { |
475 | /* New location */ | 345 | skb_queue_head_init(&net->wext_nlevents); |
476 | if ((dev->wireless_handlers != NULL) && | 346 | return 0; |
477 | (dev->wireless_handlers->get_wireless_stats != NULL)) | ||
478 | return dev->wireless_handlers->get_wireless_stats(dev); | ||
479 | |||
480 | /* Not found */ | ||
481 | return NULL; | ||
482 | } | 347 | } |
483 | 348 | ||
484 | /* ---------------------------------------------------------------- */ | 349 | static void __net_exit wext_pernet_exit(struct net *net) |
485 | /* | ||
486 | * Call the commit handler in the driver | ||
487 | * (if exist and if conditions are right) | ||
488 | * | ||
489 | * Note : our current commit strategy is currently pretty dumb, | ||
490 | * but we will be able to improve on that... | ||
491 | * The goal is to try to agreagate as many changes as possible | ||
492 | * before doing the commit. Drivers that will define a commit handler | ||
493 | * are usually those that need a reset after changing parameters, so | ||
494 | * we want to minimise the number of reset. | ||
495 | * A cool idea is to use a timer : at each "set" command, we re-set the | ||
496 | * timer, when the timer eventually fires, we call the driver. | ||
497 | * Hopefully, more on that later. | ||
498 | * | ||
499 | * Also, I'm waiting to see how many people will complain about the | ||
500 | * netif_running(dev) test. I'm open on that one... | ||
501 | * Hopefully, the driver will remember to do a commit in "open()" ;-) | ||
502 | */ | ||
503 | static int call_commit_handler(struct net_device *dev) | ||
504 | { | 350 | { |
505 | if ((netif_running(dev)) && | 351 | skb_queue_purge(&net->wext_nlevents); |
506 | (dev->wireless_handlers->standard[0] != NULL)) | ||
507 | /* Call the commit handler on the driver */ | ||
508 | return dev->wireless_handlers->standard[0](dev, NULL, | ||
509 | NULL, NULL); | ||
510 | else | ||
511 | return 0; /* Command completed successfully */ | ||
512 | } | 352 | } |
513 | 353 | ||
514 | /* ---------------------------------------------------------------- */ | 354 | static struct pernet_operations wext_pernet_ops = { |
515 | /* | 355 | .init = wext_pernet_init, |
516 | * Calculate size of private arguments | 356 | .exit = wext_pernet_exit, |
517 | */ | 357 | }; |
518 | static int get_priv_size(__u16 args) | ||
519 | { | ||
520 | int num = args & IW_PRIV_SIZE_MASK; | ||
521 | int type = (args & IW_PRIV_TYPE_MASK) >> 12; | ||
522 | 358 | ||
523 | return num * iw_priv_type_size[type]; | 359 | static int __init wireless_nlevent_init(void) |
360 | { | ||
361 | return register_pernet_subsys(&wext_pernet_ops); | ||
524 | } | 362 | } |
525 | 363 | ||
526 | /* ---------------------------------------------------------------- */ | 364 | subsys_initcall(wireless_nlevent_init); |
527 | /* | 365 | |
528 | * Re-calculate the size of private arguments | 366 | /* Process events generated by the wireless layer or the driver. */ |
529 | */ | 367 | static void wireless_nlevent_process(struct work_struct *work) |
530 | static int adjust_priv_size(__u16 args, struct iw_point *iwp) | ||
531 | { | 368 | { |
532 | int num = iwp->length; | 369 | struct sk_buff *skb; |
533 | int max = args & IW_PRIV_SIZE_MASK; | 370 | struct net *net; |
534 | int type = (args & IW_PRIV_TYPE_MASK) >> 12; | ||
535 | 371 | ||
536 | /* Make sure the driver doesn't goof up */ | 372 | rtnl_lock(); |
537 | if (max < num) | 373 | |
538 | num = max; | 374 | for_each_net(net) { |
375 | while ((skb = skb_dequeue(&net->wext_nlevents))) | ||
376 | rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, | ||
377 | GFP_KERNEL); | ||
378 | } | ||
539 | 379 | ||
540 | return num * iw_priv_type_size[type]; | 380 | rtnl_unlock(); |
541 | } | 381 | } |
542 | 382 | ||
543 | /* ---------------------------------------------------------------- */ | 383 | static DECLARE_WORK(wireless_nlevent_work, wireless_nlevent_process); |
544 | /* | 384 | |
545 | * Standard Wireless Handler : get wireless stats | 385 | static struct nlmsghdr *rtnetlink_ifinfo_prep(struct net_device *dev, |
546 | * Allow programatic access to /proc/net/wireless even if /proc | 386 | struct sk_buff *skb) |
547 | * doesn't exist... Also more efficient... | ||
548 | */ | ||
549 | static int iw_handler_get_iwstats(struct net_device * dev, | ||
550 | struct iw_request_info * info, | ||
551 | union iwreq_data * wrqu, | ||
552 | char * extra) | ||
553 | { | 387 | { |
554 | /* Get stats from the driver */ | 388 | struct ifinfomsg *r; |
555 | struct iw_statistics *stats; | 389 | struct nlmsghdr *nlh; |
556 | 390 | ||
557 | stats = get_wireless_stats(dev); | 391 | nlh = nlmsg_put(skb, 0, 0, RTM_NEWLINK, sizeof(*r), 0); |
558 | if (stats) { | 392 | if (!nlh) |
559 | /* Copy statistics to extra */ | 393 | return NULL; |
560 | memcpy(extra, stats, sizeof(struct iw_statistics)); | ||
561 | wrqu->data.length = sizeof(struct iw_statistics); | ||
562 | 394 | ||
563 | /* Check if we need to clear the updated flag */ | 395 | r = nlmsg_data(nlh); |
564 | if (wrqu->data.flags != 0) | 396 | r->ifi_family = AF_UNSPEC; |
565 | stats->qual.updated &= ~IW_QUAL_ALL_UPDATED; | 397 | r->__ifi_pad = 0; |
566 | return 0; | 398 | r->ifi_type = dev->type; |
567 | } else | 399 | r->ifi_index = dev->ifindex; |
568 | return -EOPNOTSUPP; | 400 | r->ifi_flags = dev_get_flags(dev); |
401 | r->ifi_change = 0; /* Wireless changes don't affect those flags */ | ||
402 | |||
403 | NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name); | ||
404 | |||
405 | return nlh; | ||
406 | nla_put_failure: | ||
407 | nlmsg_cancel(skb, nlh); | ||
408 | return NULL; | ||
569 | } | 409 | } |
570 | 410 | ||
571 | /* ---------------------------------------------------------------- */ | 411 | |
572 | /* | 412 | /* |
573 | * Standard Wireless Handler : get iwpriv definitions | 413 | * Main event dispatcher. Called from other parts and drivers. |
574 | * Export the driver private handler definition | 414 | * Send the event on the appropriate channels. |
575 | * They will be picked up by tools like iwpriv... | 415 | * May be called from interrupt context. |
576 | */ | 416 | */ |
577 | static int iw_handler_get_private(struct net_device * dev, | 417 | void wireless_send_event(struct net_device * dev, |
578 | struct iw_request_info * info, | 418 | unsigned int cmd, |
579 | union iwreq_data * wrqu, | 419 | union iwreq_data * wrqu, |
580 | char * extra) | 420 | const char * extra) |
581 | { | 421 | { |
582 | /* Check if the driver has something to export */ | 422 | const struct iw_ioctl_description * descr = NULL; |
583 | if ((dev->wireless_handlers->num_private_args == 0) || | 423 | int extra_len = 0; |
584 | (dev->wireless_handlers->private_args == NULL)) | 424 | struct iw_event *event; /* Mallocated whole event */ |
585 | return -EOPNOTSUPP; | 425 | int event_len; /* Its size */ |
426 | int hdr_len; /* Size of the event header */ | ||
427 | int wrqu_off = 0; /* Offset in wrqu */ | ||
428 | /* Don't "optimise" the following variable, it will crash */ | ||
429 | unsigned cmd_index; /* *MUST* be unsigned */ | ||
430 | struct sk_buff *skb; | ||
431 | struct nlmsghdr *nlh; | ||
432 | struct nlattr *nla; | ||
433 | #ifdef CONFIG_COMPAT | ||
434 | struct __compat_iw_event *compat_event; | ||
435 | struct compat_iw_point compat_wrqu; | ||
436 | struct sk_buff *compskb; | ||
437 | #endif | ||
586 | 438 | ||
587 | /* Check if there is enough buffer up there */ | 439 | /* |
588 | if (wrqu->data.length < dev->wireless_handlers->num_private_args) { | 440 | * Nothing in the kernel sends scan events with data, be safe. |
589 | /* User space can't know in advance how large the buffer | 441 | * This is necessary because we cannot fix up scan event data |
590 | * needs to be. Give it a hint, so that we can support | 442 | * for compat, due to being contained in 'extra', but normally |
591 | * any size buffer we want somewhat efficiently... */ | 443 | * applications are required to retrieve the scan data anyway |
592 | wrqu->data.length = dev->wireless_handlers->num_private_args; | 444 | * and no data is included in the event, this codifies that |
593 | return -E2BIG; | 445 | * practice. |
446 | */ | ||
447 | if (WARN_ON(cmd == SIOCGIWSCAN && extra)) | ||
448 | extra = NULL; | ||
449 | |||
450 | /* Get the description of the Event */ | ||
451 | if (cmd <= SIOCIWLAST) { | ||
452 | cmd_index = cmd - SIOCIWFIRST; | ||
453 | if (cmd_index < standard_ioctl_num) | ||
454 | descr = &(standard_ioctl[cmd_index]); | ||
455 | } else { | ||
456 | cmd_index = cmd - IWEVFIRST; | ||
457 | if (cmd_index < standard_event_num) | ||
458 | descr = &(standard_event[cmd_index]); | ||
459 | } | ||
460 | /* Don't accept unknown events */ | ||
461 | if (descr == NULL) { | ||
462 | /* Note : we don't return an error to the driver, because | ||
463 | * the driver would not know what to do about it. It can't | ||
464 | * return an error to the user, because the event is not | ||
465 | * initiated by a user request. | ||
466 | * The best the driver could do is to log an error message. | ||
467 | * We will do it ourselves instead... | ||
468 | */ | ||
469 | printk(KERN_ERR "%s (WE) : Invalid/Unknown Wireless Event (0x%04X)\n", | ||
470 | dev->name, cmd); | ||
471 | return; | ||
594 | } | 472 | } |
595 | 473 | ||
596 | /* Set the number of available ioctls. */ | 474 | /* Check extra parameters and set extra_len */ |
597 | wrqu->data.length = dev->wireless_handlers->num_private_args; | 475 | if (descr->header_type == IW_HEADER_TYPE_POINT) { |
476 | /* Check if number of token fits within bounds */ | ||
477 | if (wrqu->data.length > descr->max_tokens) { | ||
478 | printk(KERN_ERR "%s (WE) : Wireless Event too big (%d)\n", dev->name, wrqu->data.length); | ||
479 | return; | ||
480 | } | ||
481 | if (wrqu->data.length < descr->min_tokens) { | ||
482 | printk(KERN_ERR "%s (WE) : Wireless Event too small (%d)\n", dev->name, wrqu->data.length); | ||
483 | return; | ||
484 | } | ||
485 | /* Calculate extra_len - extra is NULL for restricted events */ | ||
486 | if (extra != NULL) | ||
487 | extra_len = wrqu->data.length * descr->token_size; | ||
488 | /* Always at an offset in wrqu */ | ||
489 | wrqu_off = IW_EV_POINT_OFF; | ||
490 | } | ||
598 | 491 | ||
599 | /* Copy structure to the user buffer. */ | 492 | /* Total length of the event */ |
600 | memcpy(extra, dev->wireless_handlers->private_args, | 493 | hdr_len = event_type_size[descr->header_type]; |
601 | sizeof(struct iw_priv_args) * wrqu->data.length); | 494 | event_len = hdr_len + extra_len; |
602 | 495 | ||
603 | return 0; | 496 | /* |
604 | } | 497 | * The problem for 64/32 bit. |
498 | * | ||
499 | * On 64-bit, a regular event is laid out as follows: | ||
500 | * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | | ||
501 | * | event.len | event.cmd | p a d d i n g | | ||
502 | * | wrqu data ... (with the correct size) | | ||
503 | * | ||
504 | * This padding exists because we manipulate event->u, | ||
505 | * and 'event' is not packed. | ||
506 | * | ||
507 | * An iw_point event is laid out like this instead: | ||
508 | * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | | ||
509 | * | event.len | event.cmd | p a d d i n g | | ||
510 | * | iwpnt.len | iwpnt.flg | p a d d i n g | | ||
511 | * | extra data ... | ||
512 | * | ||
513 | * The second padding exists because struct iw_point is extended, | ||
514 | * but this depends on the platform... | ||
515 | * | ||
516 | * On 32-bit, all the padding shouldn't be there. | ||
517 | */ | ||
605 | 518 | ||
519 | skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); | ||
520 | if (!skb) | ||
521 | return; | ||
606 | 522 | ||
607 | /******************** /proc/net/wireless SUPPORT ********************/ | 523 | /* Send via the RtNetlink event channel */ |
608 | /* | 524 | nlh = rtnetlink_ifinfo_prep(dev, skb); |
609 | * The /proc/net/wireless file is a human readable user-space interface | 525 | if (WARN_ON(!nlh)) { |
610 | * exporting various wireless specific statistics from the wireless devices. | 526 | kfree_skb(skb); |
611 | * This is the most popular part of the Wireless Extensions ;-) | 527 | return; |
612 | * | 528 | } |
613 | * This interface is a pure clone of /proc/net/dev (in net/core/dev.c). | ||
614 | * The content of the file is basically the content of "struct iw_statistics". | ||
615 | */ | ||
616 | 529 | ||
617 | #ifdef CONFIG_PROC_FS | 530 | /* Add the wireless events in the netlink packet */ |
531 | nla = nla_reserve(skb, IFLA_WIRELESS, event_len); | ||
532 | if (!nla) { | ||
533 | kfree_skb(skb); | ||
534 | return; | ||
535 | } | ||
536 | event = nla_data(nla); | ||
618 | 537 | ||
619 | /* ---------------------------------------------------------------- */ | 538 | /* Fill event - first clear to avoid data leaking */ |
620 | /* | 539 | memset(event, 0, hdr_len); |
621 | * Print one entry (line) of /proc/net/wireless | 540 | event->len = event_len; |
622 | */ | 541 | event->cmd = cmd; |
623 | static void wireless_seq_printf_stats(struct seq_file *seq, | 542 | memcpy(&event->u, ((char *) wrqu) + wrqu_off, hdr_len - IW_EV_LCP_LEN); |
624 | struct net_device *dev) | 543 | if (extra_len) |
625 | { | 544 | memcpy(((char *) event) + hdr_len, extra, extra_len); |
626 | /* Get stats from the driver */ | ||
627 | struct iw_statistics *stats = get_wireless_stats(dev); | ||
628 | static struct iw_statistics nullstats = {}; | ||
629 | 545 | ||
630 | /* show device if it's wireless regardless of current stats */ | 546 | nlmsg_end(skb, nlh); |
631 | if (!stats && dev->wireless_handlers) | 547 | #ifdef CONFIG_COMPAT |
632 | stats = &nullstats; | 548 | hdr_len = compat_event_type_size[descr->header_type]; |
549 | event_len = hdr_len + extra_len; | ||
633 | 550 | ||
634 | if (stats) { | 551 | compskb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); |
635 | seq_printf(seq, "%6s: %04x %3d%c %3d%c %3d%c %6d %6d %6d " | 552 | if (!compskb) { |
636 | "%6d %6d %6d\n", | 553 | kfree_skb(skb); |
637 | dev->name, stats->status, stats->qual.qual, | 554 | return; |
638 | stats->qual.updated & IW_QUAL_QUAL_UPDATED | ||
639 | ? '.' : ' ', | ||
640 | ((__s32) stats->qual.level) - | ||
641 | ((stats->qual.updated & IW_QUAL_DBM) ? 0x100 : 0), | ||
642 | stats->qual.updated & IW_QUAL_LEVEL_UPDATED | ||
643 | ? '.' : ' ', | ||
644 | ((__s32) stats->qual.noise) - | ||
645 | ((stats->qual.updated & IW_QUAL_DBM) ? 0x100 : 0), | ||
646 | stats->qual.updated & IW_QUAL_NOISE_UPDATED | ||
647 | ? '.' : ' ', | ||
648 | stats->discard.nwid, stats->discard.code, | ||
649 | stats->discard.fragment, stats->discard.retries, | ||
650 | stats->discard.misc, stats->miss.beacon); | ||
651 | |||
652 | if (stats != &nullstats) | ||
653 | stats->qual.updated &= ~IW_QUAL_ALL_UPDATED; | ||
654 | } | 555 | } |
655 | } | ||
656 | 556 | ||
657 | /* ---------------------------------------------------------------- */ | 557 | /* Send via the RtNetlink event channel */ |
658 | /* | 558 | nlh = rtnetlink_ifinfo_prep(dev, compskb); |
659 | * Print info for /proc/net/wireless (print all entries) | 559 | if (WARN_ON(!nlh)) { |
660 | */ | 560 | kfree_skb(skb); |
661 | static int wireless_dev_seq_show(struct seq_file *seq, void *v) | 561 | kfree_skb(compskb); |
662 | { | 562 | return; |
663 | might_sleep(); | 563 | } |
664 | 564 | ||
665 | if (v == SEQ_START_TOKEN) | 565 | /* Add the wireless events in the netlink packet */ |
666 | seq_printf(seq, "Inter-| sta-| Quality | Discarded " | 566 | nla = nla_reserve(compskb, IFLA_WIRELESS, event_len); |
667 | "packets | Missed | WE\n" | 567 | if (!nla) { |
668 | " face | tus | link level noise | nwid " | 568 | kfree_skb(skb); |
669 | "crypt frag retry misc | beacon | %d\n", | 569 | kfree_skb(compskb); |
670 | WIRELESS_EXT); | 570 | return; |
671 | else | 571 | } |
672 | wireless_seq_printf_stats(seq, v); | 572 | compat_event = nla_data(nla); |
673 | return 0; | 573 | |
574 | compat_event->len = event_len; | ||
575 | compat_event->cmd = cmd; | ||
576 | if (descr->header_type == IW_HEADER_TYPE_POINT) { | ||
577 | compat_wrqu.length = wrqu->data.length; | ||
578 | compat_wrqu.flags = wrqu->data.flags; | ||
579 | memcpy(&compat_event->pointer, | ||
580 | ((char *) &compat_wrqu) + IW_EV_COMPAT_POINT_OFF, | ||
581 | hdr_len - IW_EV_COMPAT_LCP_LEN); | ||
582 | if (extra_len) | ||
583 | memcpy(((char *) compat_event) + hdr_len, | ||
584 | extra, extra_len); | ||
585 | } else { | ||
586 | /* extra_len must be zero, so no if (extra) needed */ | ||
587 | memcpy(&compat_event->pointer, wrqu, | ||
588 | hdr_len - IW_EV_COMPAT_LCP_LEN); | ||
589 | } | ||
590 | |||
591 | nlmsg_end(compskb, nlh); | ||
592 | |||
593 | skb_shinfo(skb)->frag_list = compskb; | ||
594 | #endif | ||
595 | skb_queue_tail(&dev_net(dev)->wext_nlevents, skb); | ||
596 | schedule_work(&wireless_nlevent_work); | ||
674 | } | 597 | } |
598 | EXPORT_SYMBOL(wireless_send_event); | ||
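For drivers, the calling convention of wireless_send_event() does not change with this move to wext-core.c: fill a union iwreq_data, pick the matching SIOC*/IWEV* code, and pass any variable-length payload through 'extra'. A typical association notification, mirroring the SIOCGIWAP pattern already visible in the sme.c hunk above ('bssid' being a hypothetical 6-byte MAC address of the new AP):

	union iwreq_data wrqu;

	memset(&wrqu, 0, sizeof(wrqu));
	wrqu.ap_addr.sa_family = ARPHRD_ETHER;
	memcpy(wrqu.ap_addr.sa_data, bssid, ETH_ALEN);
	wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL);

Because the event skb is allocated with GFP_ATOMIC, queued on the per-net list and flushed from a work item, this stays safe to call from interrupt context, as the dispatcher comment above states.
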
599 | |||
600 | |||
601 | |||
602 | /* IW handlers */ | ||
675 | 603 | ||
676 | static void *wireless_dev_seq_start(struct seq_file *seq, loff_t *pos) | 604 | struct iw_statistics *get_wireless_stats(struct net_device *dev) |
677 | { | 605 | { |
678 | struct net *net = seq_file_net(seq); | 606 | #ifdef CONFIG_WIRELESS_EXT |
679 | loff_t off; | 607 | if ((dev->wireless_handlers != NULL) && |
680 | struct net_device *dev; | 608 | (dev->wireless_handlers->get_wireless_stats != NULL)) |
609 | return dev->wireless_handlers->get_wireless_stats(dev); | ||
610 | #endif | ||
681 | 611 | ||
682 | rtnl_lock(); | 612 | #ifdef CONFIG_CFG80211_WEXT |
683 | if (!*pos) | 613 | if (dev->ieee80211_ptr && |
684 | return SEQ_START_TOKEN; | 614 | dev->ieee80211_ptr->wiphy && |
615 | dev->ieee80211_ptr->wiphy->wext && | ||
616 | dev->ieee80211_ptr->wiphy->wext->get_wireless_stats) | ||
617 | return dev->ieee80211_ptr->wiphy->wext->get_wireless_stats(dev); | ||
618 | #endif | ||
685 | 619 | ||
686 | off = 1; | 620 | /* not found */ |
687 | for_each_netdev(net, dev) | ||
688 | if (off++ == *pos) | ||
689 | return dev; | ||
690 | return NULL; | 621 | return NULL; |
691 | } | 622 | } |
692 | 623 | ||
693 | static void *wireless_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos) | 624 | static int iw_handler_get_iwstats(struct net_device * dev, |
625 | struct iw_request_info * info, | ||
626 | union iwreq_data * wrqu, | ||
627 | char * extra) | ||
694 | { | 628 | { |
695 | struct net *net = seq_file_net(seq); | 629 | /* Get stats from the driver */ |
630 | struct iw_statistics *stats; | ||
696 | 631 | ||
697 | ++*pos; | 632 | stats = get_wireless_stats(dev); |
633 | if (stats) { | ||
634 | /* Copy statistics to extra */ | ||
635 | memcpy(extra, stats, sizeof(struct iw_statistics)); | ||
636 | wrqu->data.length = sizeof(struct iw_statistics); | ||
698 | 637 | ||
699 | return v == SEQ_START_TOKEN ? | 638 | /* Check if we need to clear the updated flag */ |
700 | first_net_device(net) : next_net_device(v); | 639 | if (wrqu->data.flags != 0) |
640 | stats->qual.updated &= ~IW_QUAL_ALL_UPDATED; | ||
641 | return 0; | ||
642 | } else | ||
643 | return -EOPNOTSUPP; | ||
701 | } | 644 | } |
702 | 645 | ||
703 | static void wireless_dev_seq_stop(struct seq_file *seq, void *v) | 646 | static iw_handler get_handler(struct net_device *dev, unsigned int cmd) |
704 | { | 647 | { |
705 | rtnl_unlock(); | 648 | /* Don't "optimise" the following variable, it will crash */ |
706 | } | 649 | unsigned int index; /* *MUST* be unsigned */ |
707 | 650 | const struct iw_handler_def *handlers = NULL; | |
708 | static const struct seq_operations wireless_seq_ops = { | ||
709 | .start = wireless_dev_seq_start, | ||
710 | .next = wireless_dev_seq_next, | ||
711 | .stop = wireless_dev_seq_stop, | ||
712 | .show = wireless_dev_seq_show, | ||
713 | }; | ||
714 | 651 | ||
715 | static int seq_open_wireless(struct inode *inode, struct file *file) | 652 | #ifdef CONFIG_CFG80211_WEXT |
716 | { | 653 | if (dev->ieee80211_ptr && dev->ieee80211_ptr->wiphy) |
717 | return seq_open_net(inode, file, &wireless_seq_ops, | 654 | handlers = dev->ieee80211_ptr->wiphy->wext; |
718 | sizeof(struct seq_net_private)); | 655 | #endif |
719 | } | 656 | #ifdef CONFIG_WIRELESS_EXT |
657 | if (dev->wireless_handlers) | ||
658 | handlers = dev->wireless_handlers; | ||
659 | #endif | ||
720 | 660 | ||
721 | static const struct file_operations wireless_seq_fops = { | 661 | if (!handlers) |
722 | .owner = THIS_MODULE, | 662 | return NULL; |
723 | .open = seq_open_wireless, | ||
724 | .read = seq_read, | ||
725 | .llseek = seq_lseek, | ||
726 | .release = seq_release_net, | ||
727 | }; | ||
728 | 663 | ||
729 | int wext_proc_init(struct net *net) | 664 | /* Try as a standard command */ |
730 | { | 665 | index = cmd - SIOCIWFIRST; |
731 | /* Create /proc/net/wireless entry */ | 666 | if (index < handlers->num_standard) |
732 | if (!proc_net_fops_create(net, "wireless", S_IRUGO, &wireless_seq_fops)) | 667 | return handlers->standard[index]; |
733 | return -ENOMEM; | ||
734 | 668 | ||
735 | return 0; | 669 | #ifdef CONFIG_WEXT_PRIV |
736 | } | 670 | /* Try as a private command */ |
671 | index = cmd - SIOCIWFIRSTPRIV; | ||
672 | if (index < handlers->num_private) | ||
673 | return handlers->private[index]; | ||
674 | #endif | ||
737 | 675 | ||
738 | void wext_proc_exit(struct net *net) | 676 | /* Not found */ |
739 | { | 677 | return NULL; |
740 | proc_net_remove(net, "wireless"); | ||
741 | } | 678 | } |
742 | #endif /* CONFIG_PROC_FS */ | ||
743 | 679 | ||
744 | /************************** IOCTL SUPPORT **************************/ | ||
745 | /* | ||
746 | * The original user space API to configure all those Wireless Extensions | ||
747 | * is through IOCTLs. | ||
748 | * In there, we check if we need to call the new driver API (iw_handler) | ||
749 | * or just call the driver ioctl handler. | ||
750 | */ | ||
751 | |||
752 | /* ---------------------------------------------------------------- */ | ||
753 | static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd, | 680 | static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd, |
754 | const struct iw_ioctl_description *descr, | 681 | const struct iw_ioctl_description *descr, |
755 | iw_handler handler, struct net_device *dev, | 682 | iw_handler handler, struct net_device *dev, |
@@ -893,188 +820,39 @@ out: | |||
893 | } | 820 | } |
894 | 821 | ||
895 | /* | 822 | /* |
896 | * Wrapper to call a standard Wireless Extension handler. | 823 | * Call the commit handler in the driver |
897 | * We do various checks and also take care of moving data between | 824 | * (if it exists and if conditions are right) |
898 | * user space and kernel space. | 825 | * |
899 | */ | 826 | * Note : our current commit strategy is pretty dumb, |
900 | static int ioctl_standard_call(struct net_device * dev, | 827 | * but we will be able to improve on that... |
901 | struct iwreq *iwr, | 828 | * The goal is to try to aggregate as many changes as possible |
902 | unsigned int cmd, | 829 | * before doing the commit. Drivers that will define a commit handler |
903 | struct iw_request_info *info, | 830 | * are usually those that need a reset after changing parameters, so |
904 | iw_handler handler) | 831 | * we want to minimise the number of resets. |
905 | { | 832 | * A cool idea is to use a timer : at each "set" command, we re-set the |
906 | const struct iw_ioctl_description * descr; | 833 | * timer, when the timer eventually fires, we call the driver. |
907 | int ret = -EINVAL; | 834 | * Hopefully, more on that later. |
908 | |||
909 | /* Get the description of the IOCTL */ | ||
910 | if ((cmd - SIOCIWFIRST) >= standard_ioctl_num) | ||
911 | return -EOPNOTSUPP; | ||
912 | descr = &(standard_ioctl[cmd - SIOCIWFIRST]); | ||
913 | |||
914 | /* Check if we have a pointer to user space data or not */ | ||
915 | if (descr->header_type != IW_HEADER_TYPE_POINT) { | ||
916 | |||
917 | /* No extra arguments. Trivial to handle */ | ||
918 | ret = handler(dev, info, &(iwr->u), NULL); | ||
919 | |||
920 | /* Generate an event to notify listeners of the change */ | ||
921 | if ((descr->flags & IW_DESCR_FLAG_EVENT) && | ||
922 | ((ret == 0) || (ret == -EIWCOMMIT))) | ||
923 | wireless_send_event(dev, cmd, &(iwr->u), NULL); | ||
924 | } else { | ||
925 | ret = ioctl_standard_iw_point(&iwr->u.data, cmd, descr, | ||
926 | handler, dev, info); | ||
927 | } | ||
928 | |||
929 | /* Call commit handler if needed and defined */ | ||
930 | if (ret == -EIWCOMMIT) | ||
931 | ret = call_commit_handler(dev); | ||
932 | |||
933 | /* Here, we will generate the appropriate event if needed */ | ||
934 | |||
935 | return ret; | ||
936 | } | ||
937 | |||
938 | /* ---------------------------------------------------------------- */ | ||
939 | /* | ||
940 | * Wrapper to call a private Wireless Extension handler. | ||
941 | * We do various checks and also take care of moving data between | ||
942 | * user space and kernel space. | ||
943 | * It's not as nice and slimline as the standard wrapper. The cause | ||
944 | * is struct iw_priv_args, which was not really designed for the | ||
945 | * job we are going here. | ||
946 | * | 835 | * |
947 | * IMPORTANT : This function prevent to set and get data on the same | 836 | * Also, I'm waiting to see how many people will complain about the |
948 | * IOCTL and enforce the SET/GET convention. Not doing it would be | 837 | * netif_running(dev) test. I'm open on that one... |
949 | * far too hairy... | 838 | * Hopefully, the driver will remember to do a commit in "open()" ;-) |
950 | * If you need to set and get data at the same time, please don't use | ||
951 | * a iw_handler but process it in your ioctl handler (i.e. use the | ||
952 | * old driver API). | ||
953 | */ | 839 | */ |
954 | static int get_priv_descr_and_size(struct net_device *dev, unsigned int cmd, | 840 | int call_commit_handler(struct net_device *dev) |
955 | const struct iw_priv_args **descrp) | ||
956 | { | ||
957 | const struct iw_priv_args *descr; | ||
958 | int i, extra_size; | ||
959 | |||
960 | descr = NULL; | ||
961 | for (i = 0; i < dev->wireless_handlers->num_private_args; i++) { | ||
962 | if (cmd == dev->wireless_handlers->private_args[i].cmd) { | ||
963 | descr = &dev->wireless_handlers->private_args[i]; | ||
964 | break; | ||
965 | } | ||
966 | } | ||
967 | |||
968 | extra_size = 0; | ||
969 | if (descr) { | ||
970 | if (IW_IS_SET(cmd)) { | ||
971 | int offset = 0; /* For sub-ioctls */ | ||
972 | /* Check for sub-ioctl handler */ | ||
973 | if (descr->name[0] == '\0') | ||
974 | /* Reserve one int for sub-ioctl index */ | ||
975 | offset = sizeof(__u32); | ||
976 | |||
977 | /* Size of set arguments */ | ||
978 | extra_size = get_priv_size(descr->set_args); | ||
979 | |||
980 | /* Does it fits in iwr ? */ | ||
981 | if ((descr->set_args & IW_PRIV_SIZE_FIXED) && | ||
982 | ((extra_size + offset) <= IFNAMSIZ)) | ||
983 | extra_size = 0; | ||
984 | } else { | ||
985 | /* Size of get arguments */ | ||
986 | extra_size = get_priv_size(descr->get_args); | ||
987 | |||
988 | /* Does it fits in iwr ? */ | ||
989 | if ((descr->get_args & IW_PRIV_SIZE_FIXED) && | ||
990 | (extra_size <= IFNAMSIZ)) | ||
991 | extra_size = 0; | ||
992 | } | ||
993 | } | ||
994 | *descrp = descr; | ||
995 | return extra_size; | ||
996 | } | ||
997 | |||
998 | static int ioctl_private_iw_point(struct iw_point *iwp, unsigned int cmd, | ||
999 | const struct iw_priv_args *descr, | ||
1000 | iw_handler handler, struct net_device *dev, | ||
1001 | struct iw_request_info *info, int extra_size) | ||
1002 | { | ||
1003 | char *extra; | ||
1004 | int err; | ||
1005 | |||
1006 | /* Check what user space is giving us */ | ||
1007 | if (IW_IS_SET(cmd)) { | ||
1008 | if (!iwp->pointer && iwp->length != 0) | ||
1009 | return -EFAULT; | ||
1010 | |||
1011 | if (iwp->length > (descr->set_args & IW_PRIV_SIZE_MASK)) | ||
1012 | return -E2BIG; | ||
1013 | } else if (!iwp->pointer) | ||
1014 | return -EFAULT; | ||
1015 | |||
1016 | extra = kmalloc(extra_size, GFP_KERNEL); | ||
1017 | if (!extra) | ||
1018 | return -ENOMEM; | ||
1019 | |||
1020 | /* If it is a SET, get all the extra data in here */ | ||
1021 | if (IW_IS_SET(cmd) && (iwp->length != 0)) { | ||
1022 | if (copy_from_user(extra, iwp->pointer, extra_size)) { | ||
1023 | err = -EFAULT; | ||
1024 | goto out; | ||
1025 | } | ||
1026 | } | ||
1027 | |||
1028 | /* Call the handler */ | ||
1029 | err = handler(dev, info, (union iwreq_data *) iwp, extra); | ||
1030 | |||
1031 | /* If we have something to return to the user */ | ||
1032 | if (!err && IW_IS_GET(cmd)) { | ||
1033 | /* Adjust for the actual length if it's variable, | ||
1034 | * avoid leaking kernel bits outside. | ||
1035 | */ | ||
1036 | if (!(descr->get_args & IW_PRIV_SIZE_FIXED)) | ||
1037 | extra_size = adjust_priv_size(descr->get_args, iwp); | ||
1038 | |||
1039 | if (copy_to_user(iwp->pointer, extra, extra_size)) | ||
1040 | err = -EFAULT; | ||
1041 | } | ||
1042 | |||
1043 | out: | ||
1044 | kfree(extra); | ||
1045 | return err; | ||
1046 | } | ||
1047 | |||
1048 | static int ioctl_private_call(struct net_device *dev, struct iwreq *iwr, | ||
1049 | unsigned int cmd, struct iw_request_info *info, | ||
1050 | iw_handler handler) | ||
1051 | { | 841 | { |
1052 | int extra_size = 0, ret = -EINVAL; | 842 | #ifdef CONFIG_WIRELESS_EXT |
1053 | const struct iw_priv_args *descr; | 843 | if ((netif_running(dev)) && |
1054 | 844 | (dev->wireless_handlers->standard[0] != NULL)) | |
1055 | extra_size = get_priv_descr_and_size(dev, cmd, &descr); | 845 | /* Call the commit handler on the driver */ |
1056 | 846 | return dev->wireless_handlers->standard[0](dev, NULL, | |
1057 | /* Check if we have a pointer to user space data or not. */ | 847 | NULL, NULL); |
1058 | if (extra_size == 0) { | 848 | else |
1059 | /* No extra arguments. Trivial to handle */ | 849 | return 0; /* Command completed successfully */ |
1060 | ret = handler(dev, info, &(iwr->u), (char *) &(iwr->u)); | 850 | #else |
1061 | } else { | 851 | /* cfg80211 has no commit */ |
1062 | ret = ioctl_private_iw_point(&iwr->u.data, cmd, descr, | 852 | return 0; |
1063 | handler, dev, info, extra_size); | 853 | #endif |
1064 | } | ||
1065 | |||
1066 | /* Call commit handler if needed and defined */ | ||
1067 | if (ret == -EIWCOMMIT) | ||
1068 | ret = call_commit_handler(dev); | ||
1069 | |||
1070 | return ret; | ||
1071 | } | 854 | } |
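The comment above floats the idea of aggregating several set operations behind a timer before the driver commits them. Purely as an illustration of that idea (none of this exists in the tree; my_priv, my_commit_work and my_schedule_commit are hypothetical names), a driver could do something like:

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct my_priv {
	struct delayed_work commit_work;	/* INIT_DELAYED_WORK() at probe */
	/* ... accumulated parameter changes ... */
};

static void my_commit_work(struct work_struct *work)
{
	struct my_priv *priv = container_of(to_delayed_work(work),
					    struct my_priv, commit_work);

	/* one hardware reset applies everything queued since the last run */
	(void)priv;
}

/* called at the end of every "set" iw_handler instead of resetting now */
static void my_schedule_commit(struct my_priv *priv)
{
	/* push the deadline back; the commit fires once settings settle */
	cancel_delayed_work(&priv->commit_work);
	schedule_delayed_work(&priv->commit_work, HZ / 10);
}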
1072 | 855 | ||
1073 | /* ---------------------------------------------------------------- */ | ||
1074 | typedef int (*wext_ioctl_func)(struct net_device *, struct iwreq *, | ||
1075 | unsigned int, struct iw_request_info *, | ||
1076 | iw_handler); | ||
1077 | |||
1078 | /* | 856 | /* |
1079 | * Main IOCTl dispatcher. | 857 | * Main IOCTl dispatcher. |
1080 | * Check the type of IOCTL and call the appropriate wrapper... | 858 | * Check the type of IOCTL and call the appropriate wrapper... |
@@ -1103,9 +881,11 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr, | |||
1103 | return standard(dev, iwr, cmd, info, | 881 | return standard(dev, iwr, cmd, info, |
1104 | &iw_handler_get_iwstats); | 882 | &iw_handler_get_iwstats); |
1105 | 883 | ||
884 | #ifdef CONFIG_WEXT_PRIV | ||
1106 | if (cmd == SIOCGIWPRIV && dev->wireless_handlers) | 885 | if (cmd == SIOCGIWPRIV && dev->wireless_handlers) |
1107 | return standard(dev, iwr, cmd, info, | 886 | return standard(dev, iwr, cmd, info, |
1108 | &iw_handler_get_private); | 887 | iw_handler_get_private); |
888 | #endif | ||
1109 | 889 | ||
1110 | /* Basic check */ | 890 | /* Basic check */ |
1111 | if (!netif_device_present(dev)) | 891 | if (!netif_device_present(dev)) |
@@ -1117,7 +897,7 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr, | |||
1117 | /* Standard and private are not the same */ | 897 | /* Standard and private are not the same */ |
1118 | if (cmd < SIOCIWFIRSTPRIV) | 898 | if (cmd < SIOCIWFIRSTPRIV) |
1119 | return standard(dev, iwr, cmd, info, handler); | 899 | return standard(dev, iwr, cmd, info, handler); |
1120 | else | 900 | else if (private) |
1121 | return private(dev, iwr, cmd, info, handler); | 901 | return private(dev, iwr, cmd, info, handler); |
1122 | } | 902 | } |
1123 | /* Old driver API : call driver ioctl handler */ | 903 | /* Old driver API : call driver ioctl handler */ |
@@ -1157,6 +937,50 @@ static int wext_ioctl_dispatch(struct net *net, struct ifreq *ifr, | |||
1157 | return ret; | 937 | return ret; |
1158 | } | 938 | } |
1159 | 939 | ||
940 | /* | ||
941 | * Wrapper to call a standard Wireless Extension handler. | ||
942 | * We do various checks and also take care of moving data between | ||
943 | * user space and kernel space. | ||
944 | */ | ||
945 | static int ioctl_standard_call(struct net_device * dev, | ||
946 | struct iwreq *iwr, | ||
947 | unsigned int cmd, | ||
948 | struct iw_request_info *info, | ||
949 | iw_handler handler) | ||
950 | { | ||
951 | const struct iw_ioctl_description * descr; | ||
952 | int ret = -EINVAL; | ||
953 | |||
954 | /* Get the description of the IOCTL */ | ||
955 | if ((cmd - SIOCIWFIRST) >= standard_ioctl_num) | ||
956 | return -EOPNOTSUPP; | ||
957 | descr = &(standard_ioctl[cmd - SIOCIWFIRST]); | ||
958 | |||
959 | /* Check if we have a pointer to user space data or not */ | ||
960 | if (descr->header_type != IW_HEADER_TYPE_POINT) { | ||
961 | |||
962 | /* No extra arguments. Trivial to handle */ | ||
963 | ret = handler(dev, info, &(iwr->u), NULL); | ||
964 | |||
965 | /* Generate an event to notify listeners of the change */ | ||
966 | if ((descr->flags & IW_DESCR_FLAG_EVENT) && | ||
967 | ((ret == 0) || (ret == -EIWCOMMIT))) | ||
968 | wireless_send_event(dev, cmd, &(iwr->u), NULL); | ||
969 | } else { | ||
970 | ret = ioctl_standard_iw_point(&iwr->u.data, cmd, descr, | ||
971 | handler, dev, info); | ||
972 | } | ||
973 | |||
974 | /* Call commit handler if needed and defined */ | ||
975 | if (ret == -EIWCOMMIT) | ||
976 | ret = call_commit_handler(dev); | ||
977 | |||
978 | /* Here, we will generate the appropriate event if needed */ | ||
979 | |||
980 | return ret; | ||
981 | } | ||
982 | |||
983 | |||
1160 | int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd, | 984 | int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd, |
1161 | void __user *arg) | 985 | void __user *arg) |
1162 | { | 986 | { |
@@ -1205,43 +1029,6 @@ static int compat_standard_call(struct net_device *dev, | |||
1205 | return err; | 1029 | return err; |
1206 | } | 1030 | } |
1207 | 1031 | ||
1208 | static int compat_private_call(struct net_device *dev, struct iwreq *iwr, | ||
1209 | unsigned int cmd, struct iw_request_info *info, | ||
1210 | iw_handler handler) | ||
1211 | { | ||
1212 | const struct iw_priv_args *descr; | ||
1213 | int ret, extra_size; | ||
1214 | |||
1215 | extra_size = get_priv_descr_and_size(dev, cmd, &descr); | ||
1216 | |||
1217 | /* Check if we have a pointer to user space data or not. */ | ||
1218 | if (extra_size == 0) { | ||
1219 | /* No extra arguments. Trivial to handle */ | ||
1220 | ret = handler(dev, info, &(iwr->u), (char *) &(iwr->u)); | ||
1221 | } else { | ||
1222 | struct compat_iw_point *iwp_compat; | ||
1223 | struct iw_point iwp; | ||
1224 | |||
1225 | iwp_compat = (struct compat_iw_point *) &iwr->u.data; | ||
1226 | iwp.pointer = compat_ptr(iwp_compat->pointer); | ||
1227 | iwp.length = iwp_compat->length; | ||
1228 | iwp.flags = iwp_compat->flags; | ||
1229 | |||
1230 | ret = ioctl_private_iw_point(&iwp, cmd, descr, | ||
1231 | handler, dev, info, extra_size); | ||
1232 | |||
1233 | iwp_compat->pointer = ptr_to_compat(iwp.pointer); | ||
1234 | iwp_compat->length = iwp.length; | ||
1235 | iwp_compat->flags = iwp.flags; | ||
1236 | } | ||
1237 | |||
1238 | /* Call commit handler if needed and defined */ | ||
1239 | if (ret == -EIWCOMMIT) | ||
1240 | ret = call_commit_handler(dev); | ||
1241 | |||
1242 | return ret; | ||
1243 | } | ||
1244 | |||
1245 | int compat_wext_handle_ioctl(struct net *net, unsigned int cmd, | 1032 | int compat_wext_handle_ioctl(struct net *net, unsigned int cmd, |
1246 | unsigned long arg) | 1033 | unsigned long arg) |
1247 | { | 1034 | { |
@@ -1274,502 +1061,3 @@ int compat_wext_handle_ioctl(struct net *net, unsigned int cmd, | |||
1274 | return ret; | 1061 | return ret; |
1275 | } | 1062 | } |
1276 | #endif | 1063 | #endif |
1277 | |||
1278 | static int __net_init wext_pernet_init(struct net *net) | ||
1279 | { | ||
1280 | skb_queue_head_init(&net->wext_nlevents); | ||
1281 | return 0; | ||
1282 | } | ||
1283 | |||
1284 | static void __net_exit wext_pernet_exit(struct net *net) | ||
1285 | { | ||
1286 | skb_queue_purge(&net->wext_nlevents); | ||
1287 | } | ||
1288 | |||
1289 | static struct pernet_operations wext_pernet_ops = { | ||
1290 | .init = wext_pernet_init, | ||
1291 | .exit = wext_pernet_exit, | ||
1292 | }; | ||
1293 | |||
1294 | static int __init wireless_nlevent_init(void) | ||
1295 | { | ||
1296 | return register_pernet_subsys(&wext_pernet_ops); | ||
1297 | } | ||
1298 | |||
1299 | subsys_initcall(wireless_nlevent_init); | ||
1300 | |||
1301 | /* Process events generated by the wireless layer or the driver. */ | ||
1302 | static void wireless_nlevent_process(struct work_struct *work) | ||
1303 | { | ||
1304 | struct sk_buff *skb; | ||
1305 | struct net *net; | ||
1306 | |||
1307 | rtnl_lock(); | ||
1308 | |||
1309 | for_each_net(net) { | ||
1310 | while ((skb = skb_dequeue(&net->wext_nlevents))) | ||
1311 | rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, | ||
1312 | GFP_KERNEL); | ||
1313 | } | ||
1314 | |||
1315 | rtnl_unlock(); | ||
1316 | } | ||
1317 | |||
1318 | static DECLARE_WORK(wireless_nlevent_work, wireless_nlevent_process); | ||
1319 | |||
1320 | static struct nlmsghdr *rtnetlink_ifinfo_prep(struct net_device *dev, | ||
1321 | struct sk_buff *skb) | ||
1322 | { | ||
1323 | struct ifinfomsg *r; | ||
1324 | struct nlmsghdr *nlh; | ||
1325 | |||
1326 | nlh = nlmsg_put(skb, 0, 0, RTM_NEWLINK, sizeof(*r), 0); | ||
1327 | if (!nlh) | ||
1328 | return NULL; | ||
1329 | |||
1330 | r = nlmsg_data(nlh); | ||
1331 | r->ifi_family = AF_UNSPEC; | ||
1332 | r->__ifi_pad = 0; | ||
1333 | r->ifi_type = dev->type; | ||
1334 | r->ifi_index = dev->ifindex; | ||
1335 | r->ifi_flags = dev_get_flags(dev); | ||
1336 | r->ifi_change = 0; /* Wireless changes don't affect those flags */ | ||
1337 | |||
1338 | NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name); | ||
1339 | |||
1340 | return nlh; | ||
1341 | nla_put_failure: | ||
1342 | nlmsg_cancel(skb, nlh); | ||
1343 | return NULL; | ||
1344 | } | ||
1345 | |||
1346 | |||
1347 | /* | ||
1348 | * Main event dispatcher. Called from other parts and drivers. | ||
1349 | * Send the event on the appropriate channels. | ||
1350 | * May be called from interrupt context. | ||
1351 | */ | ||
1352 | void wireless_send_event(struct net_device * dev, | ||
1353 | unsigned int cmd, | ||
1354 | union iwreq_data * wrqu, | ||
1355 | const char * extra) | ||
1356 | { | ||
1357 | const struct iw_ioctl_description * descr = NULL; | ||
1358 | int extra_len = 0; | ||
1359 | struct iw_event *event; /* Mallocated whole event */ | ||
1360 | int event_len; /* Its size */ | ||
1361 | int hdr_len; /* Size of the event header */ | ||
1362 | int wrqu_off = 0; /* Offset in wrqu */ | ||
1363 | /* Don't "optimise" the following variable, it will crash */ | ||
1364 | unsigned cmd_index; /* *MUST* be unsigned */ | ||
1365 | struct sk_buff *skb; | ||
1366 | struct nlmsghdr *nlh; | ||
1367 | struct nlattr *nla; | ||
1368 | #ifdef CONFIG_COMPAT | ||
1369 | struct __compat_iw_event *compat_event; | ||
1370 | struct compat_iw_point compat_wrqu; | ||
1371 | struct sk_buff *compskb; | ||
1372 | #endif | ||
1373 | |||
1374 | /* | ||
1375 | * Nothing in the kernel sends scan events with data, be safe. | ||
1376 | * This is necessary because we cannot fix up scan event data | ||
1377 | * for compat, due to being contained in 'extra', but normally | ||
1378 | * applications are required to retrieve the scan data anyway | ||
1379 | * and no data is included in the event, this codifies that | ||
1380 | * practice. | ||
1381 | */ | ||
1382 | if (WARN_ON(cmd == SIOCGIWSCAN && extra)) | ||
1383 | extra = NULL; | ||
1384 | |||
1385 | /* Get the description of the Event */ | ||
1386 | if (cmd <= SIOCIWLAST) { | ||
1387 | cmd_index = cmd - SIOCIWFIRST; | ||
1388 | if (cmd_index < standard_ioctl_num) | ||
1389 | descr = &(standard_ioctl[cmd_index]); | ||
1390 | } else { | ||
1391 | cmd_index = cmd - IWEVFIRST; | ||
1392 | if (cmd_index < standard_event_num) | ||
1393 | descr = &(standard_event[cmd_index]); | ||
1394 | } | ||
1395 | /* Don't accept unknown events */ | ||
1396 | if (descr == NULL) { | ||
1397 | /* Note : we don't return an error to the driver, because | ||
1398 | * the driver would not know what to do about it. It can't | ||
1399 | * return an error to the user, because the event is not | ||
1400 | * initiated by a user request. | ||
1401 | * The best the driver could do is to log an error message. | ||
1402 | * We will do it ourselves instead... | ||
1403 | */ | ||
1404 | printk(KERN_ERR "%s (WE) : Invalid/Unknown Wireless Event (0x%04X)\n", | ||
1405 | dev->name, cmd); | ||
1406 | return; | ||
1407 | } | ||
1408 | |||
1409 | /* Check extra parameters and set extra_len */ | ||
1410 | if (descr->header_type == IW_HEADER_TYPE_POINT) { | ||
1411 | /* Check if number of token fits within bounds */ | ||
1412 | if (wrqu->data.length > descr->max_tokens) { | ||
1413 | printk(KERN_ERR "%s (WE) : Wireless Event too big (%d)\n", dev->name, wrqu->data.length); | ||
1414 | return; | ||
1415 | } | ||
1416 | if (wrqu->data.length < descr->min_tokens) { | ||
1417 | printk(KERN_ERR "%s (WE) : Wireless Event too small (%d)\n", dev->name, wrqu->data.length); | ||
1418 | return; | ||
1419 | } | ||
1420 | /* Calculate extra_len - extra is NULL for restricted events */ | ||
1421 | if (extra != NULL) | ||
1422 | extra_len = wrqu->data.length * descr->token_size; | ||
1423 | /* Always at an offset in wrqu */ | ||
1424 | wrqu_off = IW_EV_POINT_OFF; | ||
1425 | } | ||
1426 | |||
1427 | /* Total length of the event */ | ||
1428 | hdr_len = event_type_size[descr->header_type]; | ||
1429 | event_len = hdr_len + extra_len; | ||
1430 | |||
1431 | /* | ||
1432 | * The problem for 64/32 bit. | ||
1433 | * | ||
1434 | * On 64-bit, a regular event is laid out as follows: | ||
1435 | * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | | ||
1436 | * | event.len | event.cmd | p a d d i n g | | ||
1437 | * | wrqu data ... (with the correct size) | | ||
1438 | * | ||
1439 | * This padding exists because we manipulate event->u, | ||
1440 | * and 'event' is not packed. | ||
1441 | * | ||
1442 | * An iw_point event is laid out like this instead: | ||
1443 | * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | | ||
1444 | * | event.len | event.cmd | p a d d i n g | | ||
1445 | * | iwpnt.len | iwpnt.flg | p a d d i n g | | ||
1446 | * | extra data ... | ||
1447 | * | ||
1448 | * The second padding exists because struct iw_point is extended, | ||
1449 | * but this depends on the platform... | ||
1450 | * | ||
1451 | * On 32-bit, all the padding shouldn't be there. | ||
1452 | */ | ||
1453 | |||
1454 | skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); | ||
1455 | if (!skb) | ||
1456 | return; | ||
1457 | |||
1458 | /* Send via the RtNetlink event channel */ | ||
1459 | nlh = rtnetlink_ifinfo_prep(dev, skb); | ||
1460 | if (WARN_ON(!nlh)) { | ||
1461 | kfree_skb(skb); | ||
1462 | return; | ||
1463 | } | ||
1464 | |||
1465 | /* Add the wireless events in the netlink packet */ | ||
1466 | nla = nla_reserve(skb, IFLA_WIRELESS, event_len); | ||
1467 | if (!nla) { | ||
1468 | kfree_skb(skb); | ||
1469 | return; | ||
1470 | } | ||
1471 | event = nla_data(nla); | ||
1472 | |||
1473 | /* Fill event - first clear to avoid data leaking */ | ||
1474 | memset(event, 0, hdr_len); | ||
1475 | event->len = event_len; | ||
1476 | event->cmd = cmd; | ||
1477 | memcpy(&event->u, ((char *) wrqu) + wrqu_off, hdr_len - IW_EV_LCP_LEN); | ||
1478 | if (extra_len) | ||
1479 | memcpy(((char *) event) + hdr_len, extra, extra_len); | ||
1480 | |||
1481 | nlmsg_end(skb, nlh); | ||
1482 | #ifdef CONFIG_COMPAT | ||
1483 | hdr_len = compat_event_type_size[descr->header_type]; | ||
1484 | event_len = hdr_len + extra_len; | ||
1485 | |||
1486 | compskb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); | ||
1487 | if (!compskb) { | ||
1488 | kfree_skb(skb); | ||
1489 | return; | ||
1490 | } | ||
1491 | |||
1492 | /* Send via the RtNetlink event channel */ | ||
1493 | nlh = rtnetlink_ifinfo_prep(dev, compskb); | ||
1494 | if (WARN_ON(!nlh)) { | ||
1495 | kfree_skb(skb); | ||
1496 | kfree_skb(compskb); | ||
1497 | return; | ||
1498 | } | ||
1499 | |||
1500 | /* Add the wireless events in the netlink packet */ | ||
1501 | nla = nla_reserve(compskb, IFLA_WIRELESS, event_len); | ||
1502 | if (!nla) { | ||
1503 | kfree_skb(skb); | ||
1504 | kfree_skb(compskb); | ||
1505 | return; | ||
1506 | } | ||
1507 | compat_event = nla_data(nla); | ||
1508 | |||
1509 | compat_event->len = event_len; | ||
1510 | compat_event->cmd = cmd; | ||
1511 | if (descr->header_type == IW_HEADER_TYPE_POINT) { | ||
1512 | compat_wrqu.length = wrqu->data.length; | ||
1513 | compat_wrqu.flags = wrqu->data.flags; | ||
1514 | memcpy(&compat_event->pointer, | ||
1515 | ((char *) &compat_wrqu) + IW_EV_COMPAT_POINT_OFF, | ||
1516 | hdr_len - IW_EV_COMPAT_LCP_LEN); | ||
1517 | if (extra_len) | ||
1518 | memcpy(((char *) compat_event) + hdr_len, | ||
1519 | extra, extra_len); | ||
1520 | } else { | ||
1521 | /* extra_len must be zero, so no if (extra) needed */ | ||
1522 | memcpy(&compat_event->pointer, wrqu, | ||
1523 | hdr_len - IW_EV_COMPAT_LCP_LEN); | ||
1524 | } | ||
1525 | |||
1526 | nlmsg_end(compskb, nlh); | ||
1527 | |||
1528 | skb_shinfo(skb)->frag_list = compskb; | ||
1529 | #endif | ||
1530 | skb_queue_tail(&dev_net(dev)->wext_nlevents, skb); | ||
1531 | schedule_work(&wireless_nlevent_work); | ||
1532 | } | ||
1533 | EXPORT_SYMBOL(wireless_send_event); | ||
1534 | |||
1535 | /********************** ENHANCED IWSPY SUPPORT **********************/ | ||
1536 | /* | ||
1537 | * In the old days, the driver was handling spy support all by itself. | ||
1538 | * Now, the driver can delegate this task to Wireless Extensions. | ||
1539 | * It needs to use those standard spy iw_handler in struct iw_handler_def, | ||
1540 | * push data to us via wireless_spy_update() and include struct iw_spy_data | ||
1541 | * in its private part (and export it in net_device->wireless_data->spy_data). | ||
1542 | * One of the main advantage of centralising spy support here is that | ||
1543 | * it becomes much easier to improve and extend it without having to touch | ||
1544 | * the drivers. One example is the addition of the Spy-Threshold events. | ||
1545 | */ | ||
1546 | |||
1547 | /* ---------------------------------------------------------------- */ | ||
1548 | /* | ||
1549 | * Return the pointer to the spy data in the driver. | ||
1550 | * Because this is called on the Rx path via wireless_spy_update(), | ||
1551 | * we want it to be efficient... | ||
1552 | */ | ||
1553 | static inline struct iw_spy_data *get_spydata(struct net_device *dev) | ||
1554 | { | ||
1555 | /* This is the new way */ | ||
1556 | if (dev->wireless_data) | ||
1557 | return dev->wireless_data->spy_data; | ||
1558 | return NULL; | ||
1559 | } | ||
1560 | |||
1561 | /*------------------------------------------------------------------*/ | ||
1562 | /* | ||
1563 | * Standard Wireless Handler : set Spy List | ||
1564 | */ | ||
1565 | int iw_handler_set_spy(struct net_device * dev, | ||
1566 | struct iw_request_info * info, | ||
1567 | union iwreq_data * wrqu, | ||
1568 | char * extra) | ||
1569 | { | ||
1570 | struct iw_spy_data * spydata = get_spydata(dev); | ||
1571 | struct sockaddr * address = (struct sockaddr *) extra; | ||
1572 | |||
1573 | /* Make sure driver is not buggy or using the old API */ | ||
1574 | if (!spydata) | ||
1575 | return -EOPNOTSUPP; | ||
1576 | |||
1577 | /* Disable spy collection while we copy the addresses. | ||
1578 | * While we copy addresses, any call to wireless_spy_update() | ||
1579 | * will NOP. This is OK, as anyway the addresses are changing. */ | ||
1580 | spydata->spy_number = 0; | ||
1581 | |||
1582 | /* We want to operate without locking, because wireless_spy_update() | ||
1583 | * most likely will happen in the interrupt handler, and therefore | ||
1584 | * have its own locking constraints and needs performance. | ||
1585 | * The rtnl_lock() make sure we don't race with the other iw_handlers. | ||
1586 | * This make sure wireless_spy_update() "see" that the spy list | ||
1587 | * is temporarily disabled. */ | ||
1588 | smp_wmb(); | ||
1589 | |||
1590 | /* Are there are addresses to copy? */ | ||
1591 | if (wrqu->data.length > 0) { | ||
1592 | int i; | ||
1593 | |||
1594 | /* Copy addresses */ | ||
1595 | for (i = 0; i < wrqu->data.length; i++) | ||
1596 | memcpy(spydata->spy_address[i], address[i].sa_data, | ||
1597 | ETH_ALEN); | ||
1598 | /* Reset stats */ | ||
1599 | memset(spydata->spy_stat, 0, | ||
1600 | sizeof(struct iw_quality) * IW_MAX_SPY); | ||
1601 | } | ||
1602 | |||
1603 | /* Make sure above is updated before re-enabling */ | ||
1604 | smp_wmb(); | ||
1605 | |||
1606 | /* Enable addresses */ | ||
1607 | spydata->spy_number = wrqu->data.length; | ||
1608 | |||
1609 | return 0; | ||
1610 | } | ||
1611 | EXPORT_SYMBOL(iw_handler_set_spy); | ||
1612 | |||
1613 | /*------------------------------------------------------------------*/ | ||
1614 | /* | ||
1615 | * Standard Wireless Handler : get Spy List | ||
1616 | */ | ||
1617 | int iw_handler_get_spy(struct net_device * dev, | ||
1618 | struct iw_request_info * info, | ||
1619 | union iwreq_data * wrqu, | ||
1620 | char * extra) | ||
1621 | { | ||
1622 | struct iw_spy_data * spydata = get_spydata(dev); | ||
1623 | struct sockaddr * address = (struct sockaddr *) extra; | ||
1624 | int i; | ||
1625 | |||
1626 | /* Make sure driver is not buggy or using the old API */ | ||
1627 | if (!spydata) | ||
1628 | return -EOPNOTSUPP; | ||
1629 | |||
1630 | wrqu->data.length = spydata->spy_number; | ||
1631 | |||
1632 | /* Copy addresses. */ | ||
1633 | for (i = 0; i < spydata->spy_number; i++) { | ||
1634 | memcpy(address[i].sa_data, spydata->spy_address[i], ETH_ALEN); | ||
1635 | address[i].sa_family = AF_UNIX; | ||
1636 | } | ||
1637 | /* Copy stats to the user buffer (just after). */ | ||
1638 | if (spydata->spy_number > 0) | ||
1639 | memcpy(extra + (sizeof(struct sockaddr) *spydata->spy_number), | ||
1640 | spydata->spy_stat, | ||
1641 | sizeof(struct iw_quality) * spydata->spy_number); | ||
1642 | /* Reset updated flags. */ | ||
1643 | for (i = 0; i < spydata->spy_number; i++) | ||
1644 | spydata->spy_stat[i].updated &= ~IW_QUAL_ALL_UPDATED; | ||
1645 | return 0; | ||
1646 | } | ||
1647 | EXPORT_SYMBOL(iw_handler_get_spy); | ||
1648 | |||
1649 | /*------------------------------------------------------------------*/ | ||
1650 | /* | ||
1651 | * Standard Wireless Handler : set spy threshold | ||
1652 | */ | ||
1653 | int iw_handler_set_thrspy(struct net_device * dev, | ||
1654 | struct iw_request_info *info, | ||
1655 | union iwreq_data * wrqu, | ||
1656 | char * extra) | ||
1657 | { | ||
1658 | struct iw_spy_data * spydata = get_spydata(dev); | ||
1659 | struct iw_thrspy * threshold = (struct iw_thrspy *) extra; | ||
1660 | |||
1661 | /* Make sure driver is not buggy or using the old API */ | ||
1662 | if (!spydata) | ||
1663 | return -EOPNOTSUPP; | ||
1664 | |||
1665 | /* Just do it */ | ||
1666 | memcpy(&(spydata->spy_thr_low), &(threshold->low), | ||
1667 | 2 * sizeof(struct iw_quality)); | ||
1668 | |||
1669 | /* Clear flag */ | ||
1670 | memset(spydata->spy_thr_under, '\0', sizeof(spydata->spy_thr_under)); | ||
1671 | |||
1672 | return 0; | ||
1673 | } | ||
1674 | EXPORT_SYMBOL(iw_handler_set_thrspy); | ||
1675 | |||
1676 | /*------------------------------------------------------------------*/ | ||
1677 | /* | ||
1678 | * Standard Wireless Handler : get spy threshold | ||
1679 | */ | ||
1680 | int iw_handler_get_thrspy(struct net_device * dev, | ||
1681 | struct iw_request_info *info, | ||
1682 | union iwreq_data * wrqu, | ||
1683 | char * extra) | ||
1684 | { | ||
1685 | struct iw_spy_data * spydata = get_spydata(dev); | ||
1686 | struct iw_thrspy * threshold = (struct iw_thrspy *) extra; | ||
1687 | |||
1688 | /* Make sure driver is not buggy or using the old API */ | ||
1689 | if (!spydata) | ||
1690 | return -EOPNOTSUPP; | ||
1691 | |||
1692 | /* Just do it */ | ||
1693 | memcpy(&(threshold->low), &(spydata->spy_thr_low), | ||
1694 | 2 * sizeof(struct iw_quality)); | ||
1695 | |||
1696 | return 0; | ||
1697 | } | ||
1698 | EXPORT_SYMBOL(iw_handler_get_thrspy); | ||
1699 | |||
1700 | /*------------------------------------------------------------------*/ | ||
1701 | /* | ||
1702 | * Prepare and send a Spy Threshold event | ||
1703 | */ | ||
1704 | static void iw_send_thrspy_event(struct net_device * dev, | ||
1705 | struct iw_spy_data * spydata, | ||
1706 | unsigned char * address, | ||
1707 | struct iw_quality * wstats) | ||
1708 | { | ||
1709 | union iwreq_data wrqu; | ||
1710 | struct iw_thrspy threshold; | ||
1711 | |||
1712 | /* Init */ | ||
1713 | wrqu.data.length = 1; | ||
1714 | wrqu.data.flags = 0; | ||
1715 | /* Copy address */ | ||
1716 | memcpy(threshold.addr.sa_data, address, ETH_ALEN); | ||
1717 | threshold.addr.sa_family = ARPHRD_ETHER; | ||
1718 | /* Copy stats */ | ||
1719 | memcpy(&(threshold.qual), wstats, sizeof(struct iw_quality)); | ||
1720 | /* Copy also thresholds */ | ||
1721 | memcpy(&(threshold.low), &(spydata->spy_thr_low), | ||
1722 | 2 * sizeof(struct iw_quality)); | ||
1723 | |||
1724 | /* Send event to user space */ | ||
1725 | wireless_send_event(dev, SIOCGIWTHRSPY, &wrqu, (char *) &threshold); | ||
1726 | } | ||
1727 | |||
1728 | /* ---------------------------------------------------------------- */ | ||
1729 | /* | ||
1730 | * Call for the driver to update the spy data. | ||
1731 | * For now, the spy data is a simple array. As the size of the array is | ||
1732 | * small, this is good enough. If we wanted to support larger number of | ||
1733 | * spy addresses, we should use something more efficient... | ||
1734 | */ | ||
1735 | void wireless_spy_update(struct net_device * dev, | ||
1736 | unsigned char * address, | ||
1737 | struct iw_quality * wstats) | ||
1738 | { | ||
1739 | struct iw_spy_data * spydata = get_spydata(dev); | ||
1740 | int i; | ||
1741 | int match = -1; | ||
1742 | |||
1743 | /* Make sure driver is not buggy or using the old API */ | ||
1744 | if (!spydata) | ||
1745 | return; | ||
1746 | |||
1747 | /* Update all records that match */ | ||
1748 | for (i = 0; i < spydata->spy_number; i++) | ||
1749 | if (!compare_ether_addr(address, spydata->spy_address[i])) { | ||
1750 | memcpy(&(spydata->spy_stat[i]), wstats, | ||
1751 | sizeof(struct iw_quality)); | ||
1752 | match = i; | ||
1753 | } | ||
1754 | |||
1755 | /* Generate an event if we cross the spy threshold. | ||
1756 | * To avoid event storms, we have a simple hysteresis : we generate | ||
1757 | * event only when we go under the low threshold or above the | ||
1758 | * high threshold. */ | ||
1759 | if (match >= 0) { | ||
1760 | if (spydata->spy_thr_under[match]) { | ||
1761 | if (wstats->level > spydata->spy_thr_high.level) { | ||
1762 | spydata->spy_thr_under[match] = 0; | ||
1763 | iw_send_thrspy_event(dev, spydata, | ||
1764 | address, wstats); | ||
1765 | } | ||
1766 | } else { | ||
1767 | if (wstats->level < spydata->spy_thr_low.level) { | ||
1768 | spydata->spy_thr_under[match] = 1; | ||
1769 | iw_send_thrspy_event(dev, spydata, | ||
1770 | address, wstats); | ||
1771 | } | ||
1772 | } | ||
1773 | } | ||
1774 | } | ||
1775 | EXPORT_SYMBOL(wireless_spy_update); | ||
diff --git a/net/wireless/wext-priv.c b/net/wireless/wext-priv.c new file mode 100644 index 000000000000..a3c2277de9e5 --- /dev/null +++ b/net/wireless/wext-priv.c | |||
@@ -0,0 +1,248 @@ | |||
1 | /* | ||
2 | * This file implements the Wireless Extensions priv API. | ||
3 | * | ||
4 | * Authors : Jean Tourrilhes - HPL - <jt@hpl.hp.com> | ||
5 | * Copyright (c) 1997-2007 Jean Tourrilhes, All Rights Reserved. | ||
6 | * Copyright 2009 Johannes Berg <johannes@sipsolutions.net> | ||
7 | * | ||
8 | * (As with all parts of the Linux kernel, this file is GPL) | ||
9 | */ | ||
10 | #include <linux/wireless.h> | ||
11 | #include <linux/netdevice.h> | ||
12 | #include <net/iw_handler.h> | ||
13 | #include <net/wext.h> | ||
14 | |||
15 | int iw_handler_get_private(struct net_device * dev, | ||
16 | struct iw_request_info * info, | ||
17 | union iwreq_data * wrqu, | ||
18 | char * extra) | ||
19 | { | ||
20 | /* Check if the driver has something to export */ | ||
21 | if ((dev->wireless_handlers->num_private_args == 0) || | ||
22 | (dev->wireless_handlers->private_args == NULL)) | ||
23 | return -EOPNOTSUPP; | ||
24 | |||
25 | /* Check if there is enough buffer up there */ | ||
26 | if (wrqu->data.length < dev->wireless_handlers->num_private_args) { | ||
27 | /* User space can't know in advance how large the buffer | ||
28 | * needs to be. Give it a hint, so that we can support | ||
29 | * any size buffer we want somewhat efficiently... */ | ||
30 | wrqu->data.length = dev->wireless_handlers->num_private_args; | ||
31 | return -E2BIG; | ||
32 | } | ||
33 | |||
34 | /* Set the number of available ioctls. */ | ||
35 | wrqu->data.length = dev->wireless_handlers->num_private_args; | ||
36 | |||
37 | /* Copy structure to the user buffer. */ | ||
38 | memcpy(extra, dev->wireless_handlers->private_args, | ||
39 | sizeof(struct iw_priv_args) * wrqu->data.length); | ||
40 | |||
41 | return 0; | ||
42 | } | ||
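The -E2BIG path above is the kernel half of a size negotiation: user space probes with a guess, and on E2BIG either uses the length hint (when it makes it back) or simply grows the buffer and retries. A hedged user-space sketch of that loop follows; get_priv_args is an invented helper, not an existing tool API:

#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/wireless.h>

static struct iw_priv_args *get_priv_args(int sock, const char *ifname, int *n)
{
	struct iwreq wrq;
	struct iw_priv_args *args;
	int count = 16;				/* initial guess */

	for (;;) {
		args = calloc(count, sizeof(*args));
		if (!args)
			return NULL;

		memset(&wrq, 0, sizeof(wrq));
		strncpy(wrq.ifr_ifrn.ifrn_name, ifname, IFNAMSIZ);
		wrq.u.data.pointer = args;
		wrq.u.data.length = count;

		if (ioctl(sock, SIOCGIWPRIV, &wrq) >= 0)
			break;

		free(args);
		if (errno != E2BIG)
			return NULL;
		/* use the kernel's hint if present, otherwise just grow */
		count = wrq.u.data.length > count ? wrq.u.data.length : count * 2;
	}

	*n = wrq.u.data.length;
	return args;
}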
43 | |||
44 | /* Size (in bytes) of the various private data types */ | ||
45 | static const char iw_priv_type_size[] = { | ||
46 | 0, /* IW_PRIV_TYPE_NONE */ | ||
47 | 1, /* IW_PRIV_TYPE_BYTE */ | ||
48 | 1, /* IW_PRIV_TYPE_CHAR */ | ||
49 | 0, /* Not defined */ | ||
50 | sizeof(__u32), /* IW_PRIV_TYPE_INT */ | ||
51 | sizeof(struct iw_freq), /* IW_PRIV_TYPE_FLOAT */ | ||
52 | sizeof(struct sockaddr), /* IW_PRIV_TYPE_ADDR */ | ||
53 | 0, /* Not defined */ | ||
54 | }; | ||
55 | |||
56 | static int get_priv_size(__u16 args) | ||
57 | { | ||
58 | int num = args & IW_PRIV_SIZE_MASK; | ||
59 | int type = (args & IW_PRIV_TYPE_MASK) >> 12; | ||
60 | |||
61 | return num * iw_priv_type_size[type]; | ||
62 | } | ||
63 | |||
64 | static int adjust_priv_size(__u16 args, struct iw_point *iwp) | ||
65 | { | ||
66 | int num = iwp->length; | ||
67 | int max = args & IW_PRIV_SIZE_MASK; | ||
68 | int type = (args & IW_PRIV_TYPE_MASK) >> 12; | ||
69 | |||
70 | /* Make sure the driver doesn't goof up */ | ||
71 | if (max < num) | ||
72 | num = max; | ||
73 | |||
74 | return num * iw_priv_type_size[type]; | ||
75 | } | ||
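A quick worked example of the encoding these helpers decode, using values from the standard wireless.h definitions (the ioctl number and name below are made up):

#include <linux/wireless.h>

static const struct iw_priv_args example_args[] = {
	/* hypothetical entry: 16 fixed __u32 "set" arguments, no "get" */
	{ SIOCIWFIRSTPRIV + 2, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 16, 0, "setthr" },
};

/*
 * get_priv_size(IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 16):
 *   type = 0x4000 >> 12 = 4  ->  iw_priv_type_size[4] == sizeof(__u32)
 *   num  = 16                ->  16 * 4 = 64 bytes
 * 64 > IFNAMSIZ, so the fits-in-iwr check below keeps a non-zero
 * extra_size and the data goes through a separate user-space buffer.
 */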
76 | |||
77 | /* | ||
78 | * Wrapper to call a private Wireless Extension handler. | ||
79 | * We do various checks and also take care of moving data between | ||
80 | * user space and kernel space. | ||
81 | * It's not as nice and slimline as the standard wrapper. The cause | ||
82 | * is struct iw_priv_args, which was not really designed for the | ||
83 | * job we are doing here. | ||
84 | * | ||
85 | * IMPORTANT : This function prevents setting and getting data with the | ||
86 | * same IOCTL and enforces the SET/GET convention. Not doing it would be | ||
87 | * far too hairy... | ||
88 | * If you need to set and get data at the same time, please don't use | ||
89 | * an iw_handler but process it in your ioctl handler (i.e. use the | ||
90 | * old driver API). | ||
91 | */ | ||
92 | static int get_priv_descr_and_size(struct net_device *dev, unsigned int cmd, | ||
93 | const struct iw_priv_args **descrp) | ||
94 | { | ||
95 | const struct iw_priv_args *descr; | ||
96 | int i, extra_size; | ||
97 | |||
98 | descr = NULL; | ||
99 | for (i = 0; i < dev->wireless_handlers->num_private_args; i++) { | ||
100 | if (cmd == dev->wireless_handlers->private_args[i].cmd) { | ||
101 | descr = &dev->wireless_handlers->private_args[i]; | ||
102 | break; | ||
103 | } | ||
104 | } | ||
105 | |||
106 | extra_size = 0; | ||
107 | if (descr) { | ||
108 | if (IW_IS_SET(cmd)) { | ||
109 | int offset = 0; /* For sub-ioctls */ | ||
110 | /* Check for sub-ioctl handler */ | ||
111 | if (descr->name[0] == '\0') | ||
112 | /* Reserve one int for sub-ioctl index */ | ||
113 | offset = sizeof(__u32); | ||
114 | |||
115 | /* Size of set arguments */ | ||
116 | extra_size = get_priv_size(descr->set_args); | ||
117 | |||
118 | /* Does it fit in iwr ? */ | ||
119 | if ((descr->set_args & IW_PRIV_SIZE_FIXED) && | ||
120 | ((extra_size + offset) <= IFNAMSIZ)) | ||
121 | extra_size = 0; | ||
122 | } else { | ||
123 | /* Size of get arguments */ | ||
124 | extra_size = get_priv_size(descr->get_args); | ||
125 | |||
126 | /* Does it fit in iwr ? */ | ||
127 | if ((descr->get_args & IW_PRIV_SIZE_FIXED) && | ||
128 | (extra_size <= IFNAMSIZ)) | ||
129 | extra_size = 0; | ||
130 | } | ||
131 | } | ||
132 | *descrp = descr; | ||
133 | return extra_size; | ||
134 | } | ||
135 | |||
136 | static int ioctl_private_iw_point(struct iw_point *iwp, unsigned int cmd, | ||
137 | const struct iw_priv_args *descr, | ||
138 | iw_handler handler, struct net_device *dev, | ||
139 | struct iw_request_info *info, int extra_size) | ||
140 | { | ||
141 | char *extra; | ||
142 | int err; | ||
143 | |||
144 | /* Check what user space is giving us */ | ||
145 | if (IW_IS_SET(cmd)) { | ||
146 | if (!iwp->pointer && iwp->length != 0) | ||
147 | return -EFAULT; | ||
148 | |||
149 | if (iwp->length > (descr->set_args & IW_PRIV_SIZE_MASK)) | ||
150 | return -E2BIG; | ||
151 | } else if (!iwp->pointer) | ||
152 | return -EFAULT; | ||
153 | |||
154 | extra = kmalloc(extra_size, GFP_KERNEL); | ||
155 | if (!extra) | ||
156 | return -ENOMEM; | ||
157 | |||
158 | /* If it is a SET, get all the extra data in here */ | ||
159 | if (IW_IS_SET(cmd) && (iwp->length != 0)) { | ||
160 | if (copy_from_user(extra, iwp->pointer, extra_size)) { | ||
161 | err = -EFAULT; | ||
162 | goto out; | ||
163 | } | ||
164 | } | ||
165 | |||
166 | /* Call the handler */ | ||
167 | err = handler(dev, info, (union iwreq_data *) iwp, extra); | ||
168 | |||
169 | /* If we have something to return to the user */ | ||
170 | if (!err && IW_IS_GET(cmd)) { | ||
171 | /* Adjust for the actual length if it's variable, | ||
172 | * avoid leaking kernel bits outside. | ||
173 | */ | ||
174 | if (!(descr->get_args & IW_PRIV_SIZE_FIXED)) | ||
175 | extra_size = adjust_priv_size(descr->get_args, iwp); | ||
176 | |||
177 | if (copy_to_user(iwp->pointer, extra, extra_size)) | ||
178 | err = -EFAULT; | ||
179 | } | ||
180 | |||
181 | out: | ||
182 | kfree(extra); | ||
183 | return err; | ||
184 | } | ||
185 | |||
186 | int ioctl_private_call(struct net_device *dev, struct iwreq *iwr, | ||
187 | unsigned int cmd, struct iw_request_info *info, | ||
188 | iw_handler handler) | ||
189 | { | ||
190 | int extra_size = 0, ret = -EINVAL; | ||
191 | const struct iw_priv_args *descr; | ||
192 | |||
193 | extra_size = get_priv_descr_and_size(dev, cmd, &descr); | ||
194 | |||
195 | /* Check if we have a pointer to user space data or not. */ | ||
196 | if (extra_size == 0) { | ||
197 | /* No extra arguments. Trivial to handle */ | ||
198 | ret = handler(dev, info, &(iwr->u), (char *) &(iwr->u)); | ||
199 | } else { | ||
200 | ret = ioctl_private_iw_point(&iwr->u.data, cmd, descr, | ||
201 | handler, dev, info, extra_size); | ||
202 | } | ||
203 | |||
204 | /* Call commit handler if needed and defined */ | ||
205 | if (ret == -EIWCOMMIT) | ||
206 | ret = call_commit_handler(dev); | ||
207 | |||
208 | return ret; | ||
209 | } | ||
210 | |||
211 | #ifdef CONFIG_COMPAT | ||
212 | int compat_private_call(struct net_device *dev, struct iwreq *iwr, | ||
213 | unsigned int cmd, struct iw_request_info *info, | ||
214 | iw_handler handler) | ||
215 | { | ||
216 | const struct iw_priv_args *descr; | ||
217 | int ret, extra_size; | ||
218 | |||
219 | extra_size = get_priv_descr_and_size(dev, cmd, &descr); | ||
220 | |||
221 | /* Check if we have a pointer to user space data or not. */ | ||
222 | if (extra_size == 0) { | ||
223 | /* No extra arguments. Trivial to handle */ | ||
224 | ret = handler(dev, info, &(iwr->u), (char *) &(iwr->u)); | ||
225 | } else { | ||
226 | struct compat_iw_point *iwp_compat; | ||
227 | struct iw_point iwp; | ||
228 | |||
229 | iwp_compat = (struct compat_iw_point *) &iwr->u.data; | ||
230 | iwp.pointer = compat_ptr(iwp_compat->pointer); | ||
231 | iwp.length = iwp_compat->length; | ||
232 | iwp.flags = iwp_compat->flags; | ||
233 | |||
234 | ret = ioctl_private_iw_point(&iwp, cmd, descr, | ||
235 | handler, dev, info, extra_size); | ||
236 | |||
237 | iwp_compat->pointer = ptr_to_compat(iwp.pointer); | ||
238 | iwp_compat->length = iwp.length; | ||
239 | iwp_compat->flags = iwp.flags; | ||
240 | } | ||
241 | |||
242 | /* Call commit handler if needed and defined */ | ||
243 | if (ret == -EIWCOMMIT) | ||
244 | ret = call_commit_handler(dev); | ||
245 | |||
246 | return ret; | ||
247 | } | ||
248 | #endif | ||
diff --git a/net/wireless/wext-proc.c b/net/wireless/wext-proc.c new file mode 100644 index 000000000000..273a7f77c834 --- /dev/null +++ b/net/wireless/wext-proc.c | |||
@@ -0,0 +1,155 @@ | |||
1 | /* | ||
2 | * This file implements the Wireless Extensions proc API. | ||
3 | * | ||
4 | * Authors : Jean Tourrilhes - HPL - <jt@hpl.hp.com> | ||
5 | * Copyright (c) 1997-2007 Jean Tourrilhes, All Rights Reserved. | ||
6 | * | ||
7 | * (As with all parts of the Linux kernel, this file is GPL) | ||
8 | */ | ||
9 | |||
10 | /* | ||
11 | * The /proc/net/wireless file is a human readable user-space interface | ||
12 | * exporting various wireless specific statistics from the wireless devices. | ||
13 | * This is the most popular part of the Wireless Extensions ;-) | ||
14 | * | ||
15 | * This interface is a pure clone of /proc/net/dev (in net/core/dev.c). | ||
16 | * The content of the file is basically the content of "struct iw_statistics". | ||
17 | */ | ||
18 | |||
19 | #include <linux/module.h> | ||
20 | #include <linux/proc_fs.h> | ||
21 | #include <linux/seq_file.h> | ||
22 | #include <linux/wireless.h> | ||
23 | #include <linux/netdevice.h> | ||
24 | #include <linux/rtnetlink.h> | ||
25 | #include <net/iw_handler.h> | ||
26 | #include <net/wext.h> | ||
27 | |||
28 | |||
29 | static void wireless_seq_printf_stats(struct seq_file *seq, | ||
30 | struct net_device *dev) | ||
31 | { | ||
32 | /* Get stats from the driver */ | ||
33 | struct iw_statistics *stats = get_wireless_stats(dev); | ||
34 | static struct iw_statistics nullstats = {}; | ||
35 | |||
36 | /* show device if it's wireless regardless of current stats */ | ||
37 | if (!stats) { | ||
38 | #ifdef CONFIG_WIRELESS_EXT | ||
39 | if (dev->wireless_handlers) | ||
40 | stats = &nullstats; | ||
41 | #endif | ||
42 | #ifdef CONFIG_CFG80211 | ||
43 | if (dev->ieee80211_ptr) | ||
44 | stats = &nullstats; | ||
45 | #endif | ||
46 | } | ||
47 | |||
48 | if (stats) { | ||
49 | seq_printf(seq, "%6s: %04x %3d%c %3d%c %3d%c %6d %6d %6d " | ||
50 | "%6d %6d %6d\n", | ||
51 | dev->name, stats->status, stats->qual.qual, | ||
52 | stats->qual.updated & IW_QUAL_QUAL_UPDATED | ||
53 | ? '.' : ' ', | ||
54 | ((__s32) stats->qual.level) - | ||
55 | ((stats->qual.updated & IW_QUAL_DBM) ? 0x100 : 0), | ||
56 | stats->qual.updated & IW_QUAL_LEVEL_UPDATED | ||
57 | ? '.' : ' ', | ||
58 | ((__s32) stats->qual.noise) - | ||
59 | ((stats->qual.updated & IW_QUAL_DBM) ? 0x100 : 0), | ||
60 | stats->qual.updated & IW_QUAL_NOISE_UPDATED | ||
61 | ? '.' : ' ', | ||
62 | stats->discard.nwid, stats->discard.code, | ||
63 | stats->discard.fragment, stats->discard.retries, | ||
64 | stats->discard.misc, stats->miss.beacon); | ||
65 | |||
66 | if (stats != &nullstats) | ||
67 | stats->qual.updated &= ~IW_QUAL_ALL_UPDATED; | ||
68 | } | ||
69 | } | ||
70 | |||
71 | /* ---------------------------------------------------------------- */ | ||
72 | /* | ||
73 | * Print info for /proc/net/wireless (print all entries) | ||
74 | */ | ||
75 | static int wireless_dev_seq_show(struct seq_file *seq, void *v) | ||
76 | { | ||
77 | might_sleep(); | ||
78 | |||
79 | if (v == SEQ_START_TOKEN) | ||
80 | seq_printf(seq, "Inter-| sta-| Quality | Discarded " | ||
81 | "packets | Missed | WE\n" | ||
82 | " face | tus | link level noise | nwid " | ||
83 | "crypt frag retry misc | beacon | %d\n", | ||
84 | WIRELESS_EXT); | ||
85 | else | ||
86 | wireless_seq_printf_stats(seq, v); | ||
87 | return 0; | ||
88 | } | ||
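For orientation, the header plus one wireless_seq_printf_stats() line render roughly like this in /proc/net/wireless (interface name and all values are invented, column spacing approximate, and the trailing WE number is just an example version):

Inter-| sta-|   Quality        |   Discarded packets               | Missed | WE
 face | tus | link level noise |  nwid  crypt   frag  retry   misc | beacon | 22
 wlan0: 0000   52.  -61.  -92.       0      0      0      0      0        0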
89 | |||
90 | static void *wireless_dev_seq_start(struct seq_file *seq, loff_t *pos) | ||
91 | { | ||
92 | struct net *net = seq_file_net(seq); | ||
93 | loff_t off; | ||
94 | struct net_device *dev; | ||
95 | |||
96 | rtnl_lock(); | ||
97 | if (!*pos) | ||
98 | return SEQ_START_TOKEN; | ||
99 | |||
100 | off = 1; | ||
101 | for_each_netdev(net, dev) | ||
102 | if (off++ == *pos) | ||
103 | return dev; | ||
104 | return NULL; | ||
105 | } | ||
106 | |||
107 | static void *wireless_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos) | ||
108 | { | ||
109 | struct net *net = seq_file_net(seq); | ||
110 | |||
111 | ++*pos; | ||
112 | |||
113 | return v == SEQ_START_TOKEN ? | ||
114 | first_net_device(net) : next_net_device(v); | ||
115 | } | ||
116 | |||
117 | static void wireless_dev_seq_stop(struct seq_file *seq, void *v) | ||
118 | { | ||
119 | rtnl_unlock(); | ||
120 | } | ||
121 | |||
122 | static const struct seq_operations wireless_seq_ops = { | ||
123 | .start = wireless_dev_seq_start, | ||
124 | .next = wireless_dev_seq_next, | ||
125 | .stop = wireless_dev_seq_stop, | ||
126 | .show = wireless_dev_seq_show, | ||
127 | }; | ||
128 | |||
129 | static int seq_open_wireless(struct inode *inode, struct file *file) | ||
130 | { | ||
131 | return seq_open_net(inode, file, &wireless_seq_ops, | ||
132 | sizeof(struct seq_net_private)); | ||
133 | } | ||
134 | |||
135 | static const struct file_operations wireless_seq_fops = { | ||
136 | .owner = THIS_MODULE, | ||
137 | .open = seq_open_wireless, | ||
138 | .read = seq_read, | ||
139 | .llseek = seq_lseek, | ||
140 | .release = seq_release_net, | ||
141 | }; | ||
142 | |||
143 | int wext_proc_init(struct net *net) | ||
144 | { | ||
145 | /* Create /proc/net/wireless entry */ | ||
146 | if (!proc_net_fops_create(net, "wireless", S_IRUGO, &wireless_seq_fops)) | ||
147 | return -ENOMEM; | ||
148 | |||
149 | return 0; | ||
150 | } | ||
151 | |||
152 | void wext_proc_exit(struct net *net) | ||
153 | { | ||
154 | proc_net_remove(net, "wireless"); | ||
155 | } | ||
diff --git a/net/wireless/wext-spy.c b/net/wireless/wext-spy.c new file mode 100644 index 000000000000..6dcfe65a2d1a --- /dev/null +++ b/net/wireless/wext-spy.c | |||
@@ -0,0 +1,231 @@ | |||
1 | /* | ||
2 | * This file implements the Wireless Extensions spy API. | ||
3 | * | ||
4 | * Authors : Jean Tourrilhes - HPL - <jt@hpl.hp.com> | ||
5 | * Copyright (c) 1997-2007 Jean Tourrilhes, All Rights Reserved. | ||
6 | * | ||
7 | * (As with all parts of the Linux kernel, this file is GPL) | ||
8 | */ | ||
9 | |||
10 | #include <linux/wireless.h> | ||
11 | #include <linux/netdevice.h> | ||
12 | #include <linux/etherdevice.h> | ||
13 | #include <net/iw_handler.h> | ||
14 | #include <net/arp.h> | ||
15 | #include <net/wext.h> | ||
16 | |||
17 | static inline struct iw_spy_data *get_spydata(struct net_device *dev) | ||
18 | { | ||
19 | /* This is the new way */ | ||
20 | if (dev->wireless_data) | ||
21 | return dev->wireless_data->spy_data; | ||
22 | return NULL; | ||
23 | } | ||
24 | |||
25 | int iw_handler_set_spy(struct net_device * dev, | ||
26 | struct iw_request_info * info, | ||
27 | union iwreq_data * wrqu, | ||
28 | char * extra) | ||
29 | { | ||
30 | struct iw_spy_data * spydata = get_spydata(dev); | ||
31 | struct sockaddr * address = (struct sockaddr *) extra; | ||
32 | |||
33 | /* Make sure driver is not buggy or using the old API */ | ||
34 | if (!spydata) | ||
35 | return -EOPNOTSUPP; | ||
36 | |||
37 | /* Disable spy collection while we copy the addresses. | ||
38 | * While we copy addresses, any call to wireless_spy_update() | ||
39 | * will be a NOP. This is OK, as the addresses are changing anyway. */ | ||
40 | spydata->spy_number = 0; | ||
41 | |||
42 | /* We want to operate without locking, because wireless_spy_update() | ||
43 | * most likely will happen in the interrupt handler, and therefore | ||
44 | * have its own locking constraints and needs performance. | ||
45 | * The rtnl_lock() makes sure we don't race with the other iw_handlers. | ||
46 | * This makes sure wireless_spy_update() "sees" that the spy list | ||
47 | * is temporarily disabled. */ | ||
48 | smp_wmb(); | ||
49 | |||
50 | /* Are there addresses to copy? */ | ||
51 | if (wrqu->data.length > 0) { | ||
52 | int i; | ||
53 | |||
54 | /* Copy addresses */ | ||
55 | for (i = 0; i < wrqu->data.length; i++) | ||
56 | memcpy(spydata->spy_address[i], address[i].sa_data, | ||
57 | ETH_ALEN); | ||
58 | /* Reset stats */ | ||
59 | memset(spydata->spy_stat, 0, | ||
60 | sizeof(struct iw_quality) * IW_MAX_SPY); | ||
61 | } | ||
62 | |||
63 | /* Make sure above is updated before re-enabling */ | ||
64 | smp_wmb(); | ||
65 | |||
66 | /* Enable addresses */ | ||
67 | spydata->spy_number = wrqu->data.length; | ||
68 | |||
69 | return 0; | ||
70 | } | ||
71 | EXPORT_SYMBOL(iw_handler_set_spy); | ||
72 | |||
73 | int iw_handler_get_spy(struct net_device * dev, | ||
74 | struct iw_request_info * info, | ||
75 | union iwreq_data * wrqu, | ||
76 | char * extra) | ||
77 | { | ||
78 | struct iw_spy_data * spydata = get_spydata(dev); | ||
79 | struct sockaddr * address = (struct sockaddr *) extra; | ||
80 | int i; | ||
81 | |||
82 | /* Make sure driver is not buggy or using the old API */ | ||
83 | if (!spydata) | ||
84 | return -EOPNOTSUPP; | ||
85 | |||
86 | wrqu->data.length = spydata->spy_number; | ||
87 | |||
88 | /* Copy addresses. */ | ||
89 | for (i = 0; i < spydata->spy_number; i++) { | ||
90 | memcpy(address[i].sa_data, spydata->spy_address[i], ETH_ALEN); | ||
91 | address[i].sa_family = AF_UNIX; | ||
92 | } | ||
93 | /* Copy stats to the user buffer (just after). */ | ||
94 | if (spydata->spy_number > 0) | ||
95 | memcpy(extra + (sizeof(struct sockaddr) *spydata->spy_number), | ||
96 | spydata->spy_stat, | ||
97 | sizeof(struct iw_quality) * spydata->spy_number); | ||
98 | /* Reset updated flags. */ | ||
99 | for (i = 0; i < spydata->spy_number; i++) | ||
100 | spydata->spy_stat[i].updated &= ~IW_QUAL_ALL_UPDATED; | ||
101 | return 0; | ||
102 | } | ||
103 | EXPORT_SYMBOL(iw_handler_get_spy); | ||
104 | |||
105 | /*------------------------------------------------------------------*/ | ||
106 | /* | ||
107 | * Standard Wireless Handler : set spy threshold | ||
108 | */ | ||
109 | int iw_handler_set_thrspy(struct net_device * dev, | ||
110 | struct iw_request_info *info, | ||
111 | union iwreq_data * wrqu, | ||
112 | char * extra) | ||
113 | { | ||
114 | struct iw_spy_data * spydata = get_spydata(dev); | ||
115 | struct iw_thrspy * threshold = (struct iw_thrspy *) extra; | ||
116 | |||
117 | /* Make sure driver is not buggy or using the old API */ | ||
118 | if (!spydata) | ||
119 | return -EOPNOTSUPP; | ||
120 | |||
121 | /* Just do it */ | ||
122 | memcpy(&(spydata->spy_thr_low), &(threshold->low), | ||
123 | 2 * sizeof(struct iw_quality)); | ||
124 | |||
125 | /* Clear flag */ | ||
126 | memset(spydata->spy_thr_under, '\0', sizeof(spydata->spy_thr_under)); | ||
127 | |||
128 | return 0; | ||
129 | } | ||
130 | EXPORT_SYMBOL(iw_handler_set_thrspy); | ||
131 | |||
132 | /*------------------------------------------------------------------*/ | ||
133 | /* | ||
134 | * Standard Wireless Handler : get spy threshold | ||
135 | */ | ||
136 | int iw_handler_get_thrspy(struct net_device * dev, | ||
137 | struct iw_request_info *info, | ||
138 | union iwreq_data * wrqu, | ||
139 | char * extra) | ||
140 | { | ||
141 | struct iw_spy_data * spydata = get_spydata(dev); | ||
142 | struct iw_thrspy * threshold = (struct iw_thrspy *) extra; | ||
143 | |||
144 | /* Make sure driver is not buggy or using the old API */ | ||
145 | if (!spydata) | ||
146 | return -EOPNOTSUPP; | ||
147 | |||
148 | /* Just do it */ | ||
149 | memcpy(&(threshold->low), &(spydata->spy_thr_low), | ||
150 | 2 * sizeof(struct iw_quality)); | ||
151 | |||
152 | return 0; | ||
153 | } | ||
154 | EXPORT_SYMBOL(iw_handler_get_thrspy); | ||
155 | |||
156 | /*------------------------------------------------------------------*/ | ||
157 | /* | ||
158 | * Prepare and send a Spy Threshold event | ||
159 | */ | ||
160 | static void iw_send_thrspy_event(struct net_device * dev, | ||
161 | struct iw_spy_data * spydata, | ||
162 | unsigned char * address, | ||
163 | struct iw_quality * wstats) | ||
164 | { | ||
165 | union iwreq_data wrqu; | ||
166 | struct iw_thrspy threshold; | ||
167 | |||
168 | /* Init */ | ||
169 | wrqu.data.length = 1; | ||
170 | wrqu.data.flags = 0; | ||
171 | /* Copy address */ | ||
172 | memcpy(threshold.addr.sa_data, address, ETH_ALEN); | ||
173 | threshold.addr.sa_family = ARPHRD_ETHER; | ||
174 | /* Copy stats */ | ||
175 | memcpy(&(threshold.qual), wstats, sizeof(struct iw_quality)); | ||
176 | /* Also copy the thresholds */ | ||
177 | memcpy(&(threshold.low), &(spydata->spy_thr_low), | ||
178 | 2 * sizeof(struct iw_quality)); | ||
179 | |||
180 | /* Send event to user space */ | ||
181 | wireless_send_event(dev, SIOCGIWTHRSPY, &wrqu, (char *) &threshold); | ||
182 | } | ||
183 | |||
184 | /* ---------------------------------------------------------------- */ | ||
185 | /* | ||
186 | * Call for the driver to update the spy data. | ||
187 | * For now, the spy data is a simple array. As the size of the array is | ||
188 | * small, this is good enough. If we wanted to support a larger number of | ||
189 | * spy addresses, we should use something more efficient... | ||
190 | */ | ||
191 | void wireless_spy_update(struct net_device * dev, | ||
192 | unsigned char * address, | ||
193 | struct iw_quality * wstats) | ||
194 | { | ||
195 | struct iw_spy_data * spydata = get_spydata(dev); | ||
196 | int i; | ||
197 | int match = -1; | ||
198 | |||
199 | /* Make sure driver is not buggy or using the old API */ | ||
200 | if (!spydata) | ||
201 | return; | ||
202 | |||
203 | /* Update all records that match */ | ||
204 | for (i = 0; i < spydata->spy_number; i++) | ||
205 | if (!compare_ether_addr(address, spydata->spy_address[i])) { | ||
206 | memcpy(&(spydata->spy_stat[i]), wstats, | ||
207 | sizeof(struct iw_quality)); | ||
208 | match = i; | ||
209 | } | ||
210 | |||
211 | /* Generate an event if we cross the spy threshold. | ||
212 | * To avoid event storms, we have a simple hysteresis: we generate | ||
213 | * an event only when we go under the low threshold or above the | ||
214 | * high threshold. */ | ||
215 | if (match >= 0) { | ||
216 | if (spydata->spy_thr_under[match]) { | ||
217 | if (wstats->level > spydata->spy_thr_high.level) { | ||
218 | spydata->spy_thr_under[match] = 0; | ||
219 | iw_send_thrspy_event(dev, spydata, | ||
220 | address, wstats); | ||
221 | } | ||
222 | } else { | ||
223 | if (wstats->level < spydata->spy_thr_low.level) { | ||
224 | spydata->spy_thr_under[match] = 1; | ||
225 | iw_send_thrspy_event(dev, spydata, | ||
226 | address, wstats); | ||
227 | } | ||
228 | } | ||
229 | } | ||
230 | } | ||
231 | EXPORT_SYMBOL(wireless_spy_update); | ||
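The spy list above is consumed from the driver's receive path. As a rough, hedged illustration (the function name mydrv_rx_quality and its parameters are invented for this sketch; only wireless_spy_update(), struct iw_quality and the IW_QUAL_* flags come from the interface shown above), a driver might report per-frame quality like this:

#include <linux/netdevice.h>
#include <net/iw_handler.h>

static void mydrv_rx_quality(struct net_device *dev,
                             unsigned char *src_mac,
                             u8 level, u8 noise)
{
	struct iw_quality wstats;

	/* Fill in the fields we actually measured and flag them as
	 * updated so user space knows which values are valid. */
	wstats.qual = 0;
	wstats.level = level;
	wstats.noise = noise;
	wstats.updated = IW_QUAL_LEVEL_UPDATED | IW_QUAL_NOISE_UPDATED |
			 IW_QUAL_QUAL_INVALID;

	/* Updates any matching spy entry and, when the level crosses the
	 * configured low/high thresholds, sends a SIOCGIWTHRSPY event. */
	wireless_spy_update(dev, src_mac, &wstats);
}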
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c index 7fa9c7ad3d3b..39ce03e07d18 100644 --- a/net/x25/af_x25.c +++ b/net/x25/af_x25.c | |||
@@ -415,6 +415,7 @@ static int x25_setsockopt(struct socket *sock, int level, int optname, | |||
415 | struct sock *sk = sock->sk; | 415 | struct sock *sk = sock->sk; |
416 | int rc = -ENOPROTOOPT; | 416 | int rc = -ENOPROTOOPT; |
417 | 417 | ||
418 | lock_kernel(); | ||
418 | if (level != SOL_X25 || optname != X25_QBITINCL) | 419 | if (level != SOL_X25 || optname != X25_QBITINCL) |
419 | goto out; | 420 | goto out; |
420 | 421 | ||
@@ -429,6 +430,7 @@ static int x25_setsockopt(struct socket *sock, int level, int optname, | |||
429 | x25_sk(sk)->qbitincl = !!opt; | 430 | x25_sk(sk)->qbitincl = !!opt; |
430 | rc = 0; | 431 | rc = 0; |
431 | out: | 432 | out: |
433 | unlock_kernel(); | ||
432 | return rc; | 434 | return rc; |
433 | } | 435 | } |
434 | 436 | ||
@@ -438,6 +440,7 @@ static int x25_getsockopt(struct socket *sock, int level, int optname, | |||
438 | struct sock *sk = sock->sk; | 440 | struct sock *sk = sock->sk; |
439 | int val, len, rc = -ENOPROTOOPT; | 441 | int val, len, rc = -ENOPROTOOPT; |
440 | 442 | ||
443 | lock_kernel(); | ||
441 | if (level != SOL_X25 || optname != X25_QBITINCL) | 444 | if (level != SOL_X25 || optname != X25_QBITINCL) |
442 | goto out; | 445 | goto out; |
443 | 446 | ||
@@ -458,6 +461,7 @@ static int x25_getsockopt(struct socket *sock, int level, int optname, | |||
458 | val = x25_sk(sk)->qbitincl; | 461 | val = x25_sk(sk)->qbitincl; |
459 | rc = copy_to_user(optval, &val, len) ? -EFAULT : 0; | 462 | rc = copy_to_user(optval, &val, len) ? -EFAULT : 0; |
460 | out: | 463 | out: |
464 | unlock_kernel(); | ||
461 | return rc; | 465 | return rc; |
462 | } | 466 | } |
463 | 467 | ||
@@ -466,12 +470,14 @@ static int x25_listen(struct socket *sock, int backlog) | |||
466 | struct sock *sk = sock->sk; | 470 | struct sock *sk = sock->sk; |
467 | int rc = -EOPNOTSUPP; | 471 | int rc = -EOPNOTSUPP; |
468 | 472 | ||
473 | lock_kernel(); | ||
469 | if (sk->sk_state != TCP_LISTEN) { | 474 | if (sk->sk_state != TCP_LISTEN) { |
470 | memset(&x25_sk(sk)->dest_addr, 0, X25_ADDR_LEN); | 475 | memset(&x25_sk(sk)->dest_addr, 0, X25_ADDR_LEN); |
471 | sk->sk_max_ack_backlog = backlog; | 476 | sk->sk_max_ack_backlog = backlog; |
472 | sk->sk_state = TCP_LISTEN; | 477 | sk->sk_state = TCP_LISTEN; |
473 | rc = 0; | 478 | rc = 0; |
474 | } | 479 | } |
480 | unlock_kernel(); | ||
475 | 481 | ||
476 | return rc; | 482 | return rc; |
477 | } | 483 | } |
@@ -501,7 +507,8 @@ out: | |||
501 | return sk; | 507 | return sk; |
502 | } | 508 | } |
503 | 509 | ||
504 | static int x25_create(struct net *net, struct socket *sock, int protocol) | 510 | static int x25_create(struct net *net, struct socket *sock, int protocol, |
511 | int kern) | ||
505 | { | 512 | { |
506 | struct sock *sk; | 513 | struct sock *sk; |
507 | struct x25_sock *x25; | 514 | struct x25_sock *x25; |
@@ -597,6 +604,7 @@ static int x25_release(struct socket *sock) | |||
597 | struct sock *sk = sock->sk; | 604 | struct sock *sk = sock->sk; |
598 | struct x25_sock *x25; | 605 | struct x25_sock *x25; |
599 | 606 | ||
607 | lock_kernel(); | ||
600 | if (!sk) | 608 | if (!sk) |
601 | goto out; | 609 | goto out; |
602 | 610 | ||
@@ -627,6 +635,7 @@ static int x25_release(struct socket *sock) | |||
627 | 635 | ||
628 | sock_orphan(sk); | 636 | sock_orphan(sk); |
629 | out: | 637 | out: |
638 | unlock_kernel(); | ||
630 | return 0; | 639 | return 0; |
631 | } | 640 | } |
632 | 641 | ||
@@ -634,18 +643,23 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |||
634 | { | 643 | { |
635 | struct sock *sk = sock->sk; | 644 | struct sock *sk = sock->sk; |
636 | struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr; | 645 | struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr; |
646 | int rc = 0; | ||
637 | 647 | ||
648 | lock_kernel(); | ||
638 | if (!sock_flag(sk, SOCK_ZAPPED) || | 649 | if (!sock_flag(sk, SOCK_ZAPPED) || |
639 | addr_len != sizeof(struct sockaddr_x25) || | 650 | addr_len != sizeof(struct sockaddr_x25) || |
640 | addr->sx25_family != AF_X25) | 651 | addr->sx25_family != AF_X25) { |
641 | return -EINVAL; | 652 | rc = -EINVAL; |
653 | goto out; | ||
654 | } | ||
642 | 655 | ||
643 | x25_sk(sk)->source_addr = addr->sx25_addr; | 656 | x25_sk(sk)->source_addr = addr->sx25_addr; |
644 | x25_insert_socket(sk); | 657 | x25_insert_socket(sk); |
645 | sock_reset_flag(sk, SOCK_ZAPPED); | 658 | sock_reset_flag(sk, SOCK_ZAPPED); |
646 | SOCK_DEBUG(sk, "x25_bind: socket is bound\n"); | 659 | SOCK_DEBUG(sk, "x25_bind: socket is bound\n"); |
647 | 660 | out: | |
648 | return 0; | 661 | unlock_kernel(); |
662 | return rc; | ||
649 | } | 663 | } |
650 | 664 | ||
651 | static int x25_wait_for_connection_establishment(struct sock *sk) | 665 | static int x25_wait_for_connection_establishment(struct sock *sk) |
@@ -686,6 +700,7 @@ static int x25_connect(struct socket *sock, struct sockaddr *uaddr, | |||
686 | struct x25_route *rt; | 700 | struct x25_route *rt; |
687 | int rc = 0; | 701 | int rc = 0; |
688 | 702 | ||
703 | lock_kernel(); | ||
689 | lock_sock(sk); | 704 | lock_sock(sk); |
690 | if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) { | 705 | if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) { |
691 | sock->state = SS_CONNECTED; | 706 | sock->state = SS_CONNECTED; |
@@ -763,6 +778,7 @@ out_put_route: | |||
763 | x25_route_put(rt); | 778 | x25_route_put(rt); |
764 | out: | 779 | out: |
765 | release_sock(sk); | 780 | release_sock(sk); |
781 | unlock_kernel(); | ||
766 | return rc; | 782 | return rc; |
767 | } | 783 | } |
768 | 784 | ||
@@ -802,6 +818,7 @@ static int x25_accept(struct socket *sock, struct socket *newsock, int flags) | |||
802 | struct sk_buff *skb; | 818 | struct sk_buff *skb; |
803 | int rc = -EINVAL; | 819 | int rc = -EINVAL; |
804 | 820 | ||
821 | lock_kernel(); | ||
805 | if (!sk || sk->sk_state != TCP_LISTEN) | 822 | if (!sk || sk->sk_state != TCP_LISTEN) |
806 | goto out; | 823 | goto out; |
807 | 824 | ||
@@ -829,6 +846,7 @@ static int x25_accept(struct socket *sock, struct socket *newsock, int flags) | |||
829 | out2: | 846 | out2: |
830 | release_sock(sk); | 847 | release_sock(sk); |
831 | out: | 848 | out: |
849 | unlock_kernel(); | ||
832 | return rc; | 850 | return rc; |
833 | } | 851 | } |
834 | 852 | ||
@@ -838,10 +856,14 @@ static int x25_getname(struct socket *sock, struct sockaddr *uaddr, | |||
838 | struct sockaddr_x25 *sx25 = (struct sockaddr_x25 *)uaddr; | 856 | struct sockaddr_x25 *sx25 = (struct sockaddr_x25 *)uaddr; |
839 | struct sock *sk = sock->sk; | 857 | struct sock *sk = sock->sk; |
840 | struct x25_sock *x25 = x25_sk(sk); | 858 | struct x25_sock *x25 = x25_sk(sk); |
859 | int rc = 0; | ||
841 | 860 | ||
861 | lock_kernel(); | ||
842 | if (peer) { | 862 | if (peer) { |
843 | if (sk->sk_state != TCP_ESTABLISHED) | 863 | if (sk->sk_state != TCP_ESTABLISHED) { |
844 | return -ENOTCONN; | 864 | rc = -ENOTCONN; |
865 | goto out; | ||
866 | } | ||
845 | sx25->sx25_addr = x25->dest_addr; | 867 | sx25->sx25_addr = x25->dest_addr; |
846 | } else | 868 | } else |
847 | sx25->sx25_addr = x25->source_addr; | 869 | sx25->sx25_addr = x25->source_addr; |
@@ -849,7 +871,21 @@ static int x25_getname(struct socket *sock, struct sockaddr *uaddr, | |||
849 | sx25->sx25_family = AF_X25; | 871 | sx25->sx25_family = AF_X25; |
850 | *uaddr_len = sizeof(*sx25); | 872 | *uaddr_len = sizeof(*sx25); |
851 | 873 | ||
852 | return 0; | 874 | out: |
875 | unlock_kernel(); | ||
876 | return rc; | ||
877 | } | ||
878 | |||
879 | static unsigned int x25_datagram_poll(struct file *file, struct socket *sock, | ||
880 | poll_table *wait) | ||
881 | { | ||
882 | int rc; | ||
883 | |||
884 | lock_kernel(); | ||
885 | rc = datagram_poll(file, sock, wait); | ||
886 | unlock_kernel(); | ||
887 | |||
888 | return rc; | ||
853 | } | 889 | } |
854 | 890 | ||
855 | int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb, | 891 | int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb, |
@@ -1002,6 +1038,7 @@ static int x25_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
1002 | size_t size; | 1038 | size_t size; |
1003 | int qbit = 0, rc = -EINVAL; | 1039 | int qbit = 0, rc = -EINVAL; |
1004 | 1040 | ||
1041 | lock_kernel(); | ||
1005 | if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_OOB|MSG_EOR|MSG_CMSG_COMPAT)) | 1042 | if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_OOB|MSG_EOR|MSG_CMSG_COMPAT)) |
1006 | goto out; | 1043 | goto out; |
1007 | 1044 | ||
@@ -1166,6 +1203,7 @@ static int x25_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
1166 | release_sock(sk); | 1203 | release_sock(sk); |
1167 | rc = len; | 1204 | rc = len; |
1168 | out: | 1205 | out: |
1206 | unlock_kernel(); | ||
1169 | return rc; | 1207 | return rc; |
1170 | out_kfree_skb: | 1208 | out_kfree_skb: |
1171 | kfree_skb(skb); | 1209 | kfree_skb(skb); |
@@ -1186,6 +1224,7 @@ static int x25_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1186 | unsigned char *asmptr; | 1224 | unsigned char *asmptr; |
1187 | int rc = -ENOTCONN; | 1225 | int rc = -ENOTCONN; |
1188 | 1226 | ||
1227 | lock_kernel(); | ||
1189 | /* | 1228 | /* |
1190 | * This works for seqpacket too. The receiver has ordered the queue for | 1229 | * This works for seqpacket too. The receiver has ordered the queue for |
1191 | * us! We do one quick check first though | 1230 | * us! We do one quick check first though |
@@ -1259,6 +1298,7 @@ static int x25_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1259 | out_free_dgram: | 1298 | out_free_dgram: |
1260 | skb_free_datagram(sk, skb); | 1299 | skb_free_datagram(sk, skb); |
1261 | out: | 1300 | out: |
1301 | unlock_kernel(); | ||
1262 | return rc; | 1302 | return rc; |
1263 | } | 1303 | } |
1264 | 1304 | ||
@@ -1270,6 +1310,7 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) | |||
1270 | void __user *argp = (void __user *)arg; | 1310 | void __user *argp = (void __user *)arg; |
1271 | int rc; | 1311 | int rc; |
1272 | 1312 | ||
1313 | lock_kernel(); | ||
1273 | switch (cmd) { | 1314 | switch (cmd) { |
1274 | case TIOCOUTQ: { | 1315 | case TIOCOUTQ: { |
1275 | int amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk); | 1316 | int amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk); |
@@ -1363,7 +1404,7 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) | |||
1363 | facilities.throughput > 0xDD) | 1404 | facilities.throughput > 0xDD) |
1364 | break; | 1405 | break; |
1365 | if (facilities.reverse && | 1406 | if (facilities.reverse && |
1366 | (facilities.reverse | 0x81)!= 0x81) | 1407 | (facilities.reverse & 0x81) != 0x81) |
1367 | break; | 1408 | break; |
1368 | x25->facilities = facilities; | 1409 | x25->facilities = facilities; |
1369 | rc = 0; | 1410 | rc = 0; |
@@ -1472,11 +1513,12 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) | |||
1472 | rc = -ENOIOCTLCMD; | 1513 | rc = -ENOIOCTLCMD; |
1473 | break; | 1514 | break; |
1474 | } | 1515 | } |
1516 | unlock_kernel(); | ||
1475 | 1517 | ||
1476 | return rc; | 1518 | return rc; |
1477 | } | 1519 | } |
1478 | 1520 | ||
1479 | static struct net_proto_family x25_family_ops = { | 1521 | static const struct net_proto_family x25_family_ops = { |
1480 | .family = AF_X25, | 1522 | .family = AF_X25, |
1481 | .create = x25_create, | 1523 | .create = x25_create, |
1482 | .owner = THIS_MODULE, | 1524 | .owner = THIS_MODULE, |
@@ -1542,15 +1584,19 @@ static int compat_x25_ioctl(struct socket *sock, unsigned int cmd, | |||
1542 | break; | 1584 | break; |
1543 | case SIOCGSTAMP: | 1585 | case SIOCGSTAMP: |
1544 | rc = -EINVAL; | 1586 | rc = -EINVAL; |
1587 | lock_kernel(); | ||
1545 | if (sk) | 1588 | if (sk) |
1546 | rc = compat_sock_get_timestamp(sk, | 1589 | rc = compat_sock_get_timestamp(sk, |
1547 | (struct timeval __user*)argp); | 1590 | (struct timeval __user*)argp); |
1591 | unlock_kernel(); | ||
1548 | break; | 1592 | break; |
1549 | case SIOCGSTAMPNS: | 1593 | case SIOCGSTAMPNS: |
1550 | rc = -EINVAL; | 1594 | rc = -EINVAL; |
1595 | lock_kernel(); | ||
1551 | if (sk) | 1596 | if (sk) |
1552 | rc = compat_sock_get_timestampns(sk, | 1597 | rc = compat_sock_get_timestampns(sk, |
1553 | (struct timespec __user*)argp); | 1598 | (struct timespec __user*)argp); |
1599 | unlock_kernel(); | ||
1554 | break; | 1600 | break; |
1555 | case SIOCGIFADDR: | 1601 | case SIOCGIFADDR: |
1556 | case SIOCSIFADDR: | 1602 | case SIOCSIFADDR: |
@@ -1569,16 +1615,22 @@ static int compat_x25_ioctl(struct socket *sock, unsigned int cmd, | |||
1569 | rc = -EPERM; | 1615 | rc = -EPERM; |
1570 | if (!capable(CAP_NET_ADMIN)) | 1616 | if (!capable(CAP_NET_ADMIN)) |
1571 | break; | 1617 | break; |
1618 | lock_kernel(); | ||
1572 | rc = x25_route_ioctl(cmd, argp); | 1619 | rc = x25_route_ioctl(cmd, argp); |
1620 | unlock_kernel(); | ||
1573 | break; | 1621 | break; |
1574 | case SIOCX25GSUBSCRIP: | 1622 | case SIOCX25GSUBSCRIP: |
1623 | lock_kernel(); | ||
1575 | rc = compat_x25_subscr_ioctl(cmd, argp); | 1624 | rc = compat_x25_subscr_ioctl(cmd, argp); |
1625 | unlock_kernel(); | ||
1576 | break; | 1626 | break; |
1577 | case SIOCX25SSUBSCRIP: | 1627 | case SIOCX25SSUBSCRIP: |
1578 | rc = -EPERM; | 1628 | rc = -EPERM; |
1579 | if (!capable(CAP_NET_ADMIN)) | 1629 | if (!capable(CAP_NET_ADMIN)) |
1580 | break; | 1630 | break; |
1631 | lock_kernel(); | ||
1581 | rc = compat_x25_subscr_ioctl(cmd, argp); | 1632 | rc = compat_x25_subscr_ioctl(cmd, argp); |
1633 | unlock_kernel(); | ||
1582 | break; | 1634 | break; |
1583 | case SIOCX25GFACILITIES: | 1635 | case SIOCX25GFACILITIES: |
1584 | case SIOCX25SFACILITIES: | 1636 | case SIOCX25SFACILITIES: |
@@ -1600,7 +1652,7 @@ static int compat_x25_ioctl(struct socket *sock, unsigned int cmd, | |||
1600 | } | 1652 | } |
1601 | #endif | 1653 | #endif |
1602 | 1654 | ||
1603 | static const struct proto_ops SOCKOPS_WRAPPED(x25_proto_ops) = { | 1655 | static const struct proto_ops x25_proto_ops = { |
1604 | .family = AF_X25, | 1656 | .family = AF_X25, |
1605 | .owner = THIS_MODULE, | 1657 | .owner = THIS_MODULE, |
1606 | .release = x25_release, | 1658 | .release = x25_release, |
@@ -1609,7 +1661,7 @@ static const struct proto_ops SOCKOPS_WRAPPED(x25_proto_ops) = { | |||
1609 | .socketpair = sock_no_socketpair, | 1661 | .socketpair = sock_no_socketpair, |
1610 | .accept = x25_accept, | 1662 | .accept = x25_accept, |
1611 | .getname = x25_getname, | 1663 | .getname = x25_getname, |
1612 | .poll = datagram_poll, | 1664 | .poll = x25_datagram_poll, |
1613 | .ioctl = x25_ioctl, | 1665 | .ioctl = x25_ioctl, |
1614 | #ifdef CONFIG_COMPAT | 1666 | #ifdef CONFIG_COMPAT |
1615 | .compat_ioctl = compat_x25_ioctl, | 1667 | .compat_ioctl = compat_x25_ioctl, |
@@ -1624,8 +1676,6 @@ static const struct proto_ops SOCKOPS_WRAPPED(x25_proto_ops) = { | |||
1624 | .sendpage = sock_no_sendpage, | 1676 | .sendpage = sock_no_sendpage, |
1625 | }; | 1677 | }; |
1626 | 1678 | ||
1627 | SOCKOPS_WRAP(x25_proto, AF_X25); | ||
1628 | |||
1629 | static struct packet_type x25_packet_type __read_mostly = { | 1679 | static struct packet_type x25_packet_type __read_mostly = { |
1630 | .type = cpu_to_be16(ETH_P_X25), | 1680 | .type = cpu_to_be16(ETH_P_X25), |
1631 | .func = x25_lapb_receive_frame, | 1681 | .func = x25_lapb_receive_frame, |
diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c index 7d7c3abf38b5..96d922783547 100644 --- a/net/x25/x25_in.c +++ b/net/x25/x25_in.c | |||
@@ -114,7 +114,7 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp | |||
114 | /* | 114 | /* |
115 | * Copy any Call User Data. | 115 | * Copy any Call User Data. |
116 | */ | 116 | */ |
117 | if (skb->len >= 0) { | 117 | if (skb->len > 0) { |
118 | skb_copy_from_linear_data(skb, | 118 | skb_copy_from_linear_data(skb, |
119 | x25->calluserdata.cuddata, | 119 | x25->calluserdata.cuddata, |
120 | skb->len); | 120 | skb->len); |
diff --git a/net/x25/x25_route.c b/net/x25/x25_route.c index 2c999ccf504a..66961ea28c91 100644 --- a/net/x25/x25_route.c +++ b/net/x25/x25_route.c | |||
@@ -190,7 +190,7 @@ int x25_route_ioctl(unsigned int cmd, void __user *arg) | |||
190 | goto out; | 190 | goto out; |
191 | 191 | ||
192 | rc = -EINVAL; | 192 | rc = -EINVAL; |
193 | if (rt.sigdigits < 0 || rt.sigdigits > 15) | 193 | if (rt.sigdigits > 15) |
194 | goto out; | 194 | goto out; |
195 | 195 | ||
196 | dev = x25_dev_get(rt.device); | 196 | dev = x25_dev_get(rt.device); |
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c index faf54c6bf96b..b39341072aa6 100644 --- a/net/xfrm/xfrm_algo.c +++ b/net/xfrm/xfrm_algo.c | |||
@@ -689,84 +689,6 @@ int xfrm_count_enc_supported(void) | |||
689 | } | 689 | } |
690 | EXPORT_SYMBOL_GPL(xfrm_count_enc_supported); | 690 | EXPORT_SYMBOL_GPL(xfrm_count_enc_supported); |
691 | 691 | ||
692 | /* Move to common area: it is shared with AH. */ | ||
693 | |||
694 | int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc, | ||
695 | int offset, int len, icv_update_fn_t icv_update) | ||
696 | { | ||
697 | int start = skb_headlen(skb); | ||
698 | int i, copy = start - offset; | ||
699 | struct sk_buff *frag_iter; | ||
700 | struct scatterlist sg; | ||
701 | int err; | ||
702 | |||
703 | /* Checksum header. */ | ||
704 | if (copy > 0) { | ||
705 | if (copy > len) | ||
706 | copy = len; | ||
707 | |||
708 | sg_init_one(&sg, skb->data + offset, copy); | ||
709 | |||
710 | err = icv_update(desc, &sg, copy); | ||
711 | if (unlikely(err)) | ||
712 | return err; | ||
713 | |||
714 | if ((len -= copy) == 0) | ||
715 | return 0; | ||
716 | offset += copy; | ||
717 | } | ||
718 | |||
719 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | ||
720 | int end; | ||
721 | |||
722 | WARN_ON(start > offset + len); | ||
723 | |||
724 | end = start + skb_shinfo(skb)->frags[i].size; | ||
725 | if ((copy = end - offset) > 0) { | ||
726 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | ||
727 | |||
728 | if (copy > len) | ||
729 | copy = len; | ||
730 | |||
731 | sg_init_table(&sg, 1); | ||
732 | sg_set_page(&sg, frag->page, copy, | ||
733 | frag->page_offset + offset-start); | ||
734 | |||
735 | err = icv_update(desc, &sg, copy); | ||
736 | if (unlikely(err)) | ||
737 | return err; | ||
738 | |||
739 | if (!(len -= copy)) | ||
740 | return 0; | ||
741 | offset += copy; | ||
742 | } | ||
743 | start = end; | ||
744 | } | ||
745 | |||
746 | skb_walk_frags(skb, frag_iter) { | ||
747 | int end; | ||
748 | |||
749 | WARN_ON(start > offset + len); | ||
750 | |||
751 | end = start + frag_iter->len; | ||
752 | if ((copy = end - offset) > 0) { | ||
753 | if (copy > len) | ||
754 | copy = len; | ||
755 | err = skb_icv_walk(frag_iter, desc, offset-start, | ||
756 | copy, icv_update); | ||
757 | if (unlikely(err)) | ||
758 | return err; | ||
759 | if ((len -= copy) == 0) | ||
760 | return 0; | ||
761 | offset += copy; | ||
762 | } | ||
763 | start = end; | ||
764 | } | ||
765 | BUG_ON(len); | ||
766 | return 0; | ||
767 | } | ||
768 | EXPORT_SYMBOL_GPL(skb_icv_walk); | ||
769 | |||
770 | #if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE) | 692 | #if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE) |
771 | 693 | ||
772 | void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len) | 694 | void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len) |
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index f2f7c638083e..e9ac0cec0877 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c | |||
@@ -21,6 +21,9 @@ | |||
21 | #include <linux/cache.h> | 21 | #include <linux/cache.h> |
22 | #include <linux/audit.h> | 22 | #include <linux/audit.h> |
23 | #include <asm/uaccess.h> | 23 | #include <asm/uaccess.h> |
24 | #include <linux/ktime.h> | ||
25 | #include <linux/interrupt.h> | ||
26 | #include <linux/kernel.h> | ||
24 | 27 | ||
25 | #include "xfrm_hash.h" | 28 | #include "xfrm_hash.h" |
26 | 29 | ||
@@ -352,7 +355,7 @@ static void xfrm_put_mode(struct xfrm_mode *mode) | |||
352 | 355 | ||
353 | static void xfrm_state_gc_destroy(struct xfrm_state *x) | 356 | static void xfrm_state_gc_destroy(struct xfrm_state *x) |
354 | { | 357 | { |
355 | del_timer_sync(&x->timer); | 358 | tasklet_hrtimer_cancel(&x->mtimer); |
356 | del_timer_sync(&x->rtimer); | 359 | del_timer_sync(&x->rtimer); |
357 | kfree(x->aalg); | 360 | kfree(x->aalg); |
358 | kfree(x->ealg); | 361 | kfree(x->ealg); |
@@ -398,9 +401,10 @@ static inline unsigned long make_jiffies(long secs) | |||
398 | return secs*HZ; | 401 | return secs*HZ; |
399 | } | 402 | } |
400 | 403 | ||
401 | static void xfrm_timer_handler(unsigned long data) | 404 | static enum hrtimer_restart xfrm_timer_handler(struct hrtimer * me) |
402 | { | 405 | { |
403 | struct xfrm_state *x = (struct xfrm_state*)data; | 406 | struct tasklet_hrtimer *thr = container_of(me, struct tasklet_hrtimer, timer); |
407 | struct xfrm_state *x = container_of(thr, struct xfrm_state, mtimer); | ||
404 | struct net *net = xs_net(x); | 408 | struct net *net = xs_net(x); |
405 | unsigned long now = get_seconds(); | 409 | unsigned long now = get_seconds(); |
406 | long next = LONG_MAX; | 410 | long next = LONG_MAX; |
@@ -451,8 +455,9 @@ static void xfrm_timer_handler(unsigned long data) | |||
451 | if (warn) | 455 | if (warn) |
452 | km_state_expired(x, 0, 0); | 456 | km_state_expired(x, 0, 0); |
453 | resched: | 457 | resched: |
454 | if (next != LONG_MAX) | 458 | if (next != LONG_MAX){ |
455 | mod_timer(&x->timer, jiffies + make_jiffies(next)); | 459 | tasklet_hrtimer_start(&x->mtimer, ktime_set(next, 0), HRTIMER_MODE_REL); |
460 | } | ||
456 | 461 | ||
457 | goto out; | 462 | goto out; |
458 | 463 | ||
@@ -474,6 +479,7 @@ expired: | |||
474 | 479 | ||
475 | out: | 480 | out: |
476 | spin_unlock(&x->lock); | 481 | spin_unlock(&x->lock); |
482 | return HRTIMER_NORESTART; | ||
477 | } | 483 | } |
478 | 484 | ||
479 | static void xfrm_replay_timer_handler(unsigned long data); | 485 | static void xfrm_replay_timer_handler(unsigned long data); |
@@ -492,7 +498,7 @@ struct xfrm_state *xfrm_state_alloc(struct net *net) | |||
492 | INIT_HLIST_NODE(&x->bydst); | 498 | INIT_HLIST_NODE(&x->bydst); |
493 | INIT_HLIST_NODE(&x->bysrc); | 499 | INIT_HLIST_NODE(&x->bysrc); |
494 | INIT_HLIST_NODE(&x->byspi); | 500 | INIT_HLIST_NODE(&x->byspi); |
495 | setup_timer(&x->timer, xfrm_timer_handler, (unsigned long)x); | 501 | tasklet_hrtimer_init(&x->mtimer, xfrm_timer_handler, CLOCK_REALTIME, HRTIMER_MODE_ABS); |
496 | setup_timer(&x->rtimer, xfrm_replay_timer_handler, | 502 | setup_timer(&x->rtimer, xfrm_replay_timer_handler, |
497 | (unsigned long)x); | 503 | (unsigned long)x); |
498 | x->curlft.add_time = get_seconds(); | 504 | x->curlft.add_time = get_seconds(); |
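For reference, the tasklet_hrtimer API that xfrm_state.c is converted to here follows a simple init/start/cancel pattern. This is only a minimal, hedged sketch of that pattern, not part of the patch: struct my_state, my_timer_handler and the one-second re-arm period are illustrative. The callback is run from a tasklet (softirq) rather than hard interrupt context, which is why the existing timer-style locking in xfrm_timer_handler() can stay as it is.

#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>

struct my_state {
	struct tasklet_hrtimer mtimer;
	/* ... protocol state ... */
};

/* The hrtimer callback receives the embedded struct hrtimer; walk back
 * to the enclosing state object before doing the expiry work. */
static enum hrtimer_restart my_timer_handler(struct hrtimer *me)
{
	struct tasklet_hrtimer *thr =
		container_of(me, struct tasklet_hrtimer, timer);
	struct my_state *x = container_of(thr, struct my_state, mtimer);

	/* ... expiry work on x ... then optionally re-arm: */
	tasklet_hrtimer_start(&x->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL);
	return HRTIMER_NORESTART;
}

static void my_state_init(struct my_state *x)
{
	tasklet_hrtimer_init(&x->mtimer, my_timer_handler,
			     CLOCK_REALTIME, HRTIMER_MODE_ABS);
}

static void my_state_destroy(struct my_state *x)
{
	/* Plays the role del_timer_sync() used to play: waits for a
	 * running callback to finish before the state can be freed. */
	tasklet_hrtimer_cancel(&x->mtimer);
}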
@@ -843,8 +849,7 @@ found: | |||
843 | hlist_add_head(&x->byspi, net->xfrm.state_byspi+h); | 849 | hlist_add_head(&x->byspi, net->xfrm.state_byspi+h); |
844 | } | 850 | } |
845 | x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires; | 851 | x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires; |
846 | x->timer.expires = jiffies + net->xfrm.sysctl_acq_expires*HZ; | 852 | tasklet_hrtimer_start(&x->mtimer, ktime_set(net->xfrm.sysctl_acq_expires, 0), HRTIMER_MODE_REL); |
847 | add_timer(&x->timer); | ||
848 | net->xfrm.state_num++; | 853 | net->xfrm.state_num++; |
849 | xfrm_hash_grow_check(net, x->bydst.next != NULL); | 854 | xfrm_hash_grow_check(net, x->bydst.next != NULL); |
850 | } else { | 855 | } else { |
@@ -921,7 +926,7 @@ static void __xfrm_state_insert(struct xfrm_state *x) | |||
921 | hlist_add_head(&x->byspi, net->xfrm.state_byspi+h); | 926 | hlist_add_head(&x->byspi, net->xfrm.state_byspi+h); |
922 | } | 927 | } |
923 | 928 | ||
924 | mod_timer(&x->timer, jiffies + HZ); | 929 | tasklet_hrtimer_start(&x->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL); |
925 | if (x->replay_maxage) | 930 | if (x->replay_maxage) |
926 | mod_timer(&x->rtimer, jiffies + x->replay_maxage); | 931 | mod_timer(&x->rtimer, jiffies + x->replay_maxage); |
927 | 932 | ||
@@ -1019,8 +1024,7 @@ static struct xfrm_state *__find_acq_core(struct net *net, unsigned short family | |||
1019 | x->props.reqid = reqid; | 1024 | x->props.reqid = reqid; |
1020 | x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires; | 1025 | x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires; |
1021 | xfrm_state_hold(x); | 1026 | xfrm_state_hold(x); |
1022 | x->timer.expires = jiffies + net->xfrm.sysctl_acq_expires*HZ; | 1027 | tasklet_hrtimer_start(&x->mtimer, ktime_set(net->xfrm.sysctl_acq_expires, 0), HRTIMER_MODE_REL); |
1023 | add_timer(&x->timer); | ||
1024 | list_add(&x->km.all, &net->xfrm.state_all); | 1028 | list_add(&x->km.all, &net->xfrm.state_all); |
1025 | hlist_add_head(&x->bydst, net->xfrm.state_bydst+h); | 1029 | hlist_add_head(&x->bydst, net->xfrm.state_bydst+h); |
1026 | h = xfrm_src_hash(net, daddr, saddr, family); | 1030 | h = xfrm_src_hash(net, daddr, saddr, family); |
@@ -1300,7 +1304,7 @@ out: | |||
1300 | memcpy(&x1->lft, &x->lft, sizeof(x1->lft)); | 1304 | memcpy(&x1->lft, &x->lft, sizeof(x1->lft)); |
1301 | x1->km.dying = 0; | 1305 | x1->km.dying = 0; |
1302 | 1306 | ||
1303 | mod_timer(&x1->timer, jiffies + HZ); | 1307 | tasklet_hrtimer_start(&x1->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL); |
1304 | if (x1->curlft.use_time) | 1308 | if (x1->curlft.use_time) |
1305 | xfrm_state_check_expire(x1); | 1309 | xfrm_state_check_expire(x1); |
1306 | 1310 | ||
@@ -1325,7 +1329,7 @@ int xfrm_state_check_expire(struct xfrm_state *x) | |||
1325 | if (x->curlft.bytes >= x->lft.hard_byte_limit || | 1329 | if (x->curlft.bytes >= x->lft.hard_byte_limit || |
1326 | x->curlft.packets >= x->lft.hard_packet_limit) { | 1330 | x->curlft.packets >= x->lft.hard_packet_limit) { |
1327 | x->km.state = XFRM_STATE_EXPIRED; | 1331 | x->km.state = XFRM_STATE_EXPIRED; |
1328 | mod_timer(&x->timer, jiffies); | 1332 | tasklet_hrtimer_start(&x->mtimer, ktime_set(0,0), HRTIMER_MODE_REL); |
1329 | return -EINVAL; | 1333 | return -EINVAL; |
1330 | } | 1334 | } |
1331 | 1335 | ||