Diffstat (limited to 'net')
154 files changed, 4229 insertions, 2960 deletions
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 8836575f9d79..511afe72af31 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -140,7 +140,7 @@ static void vlan_rcu_free(struct rcu_head *rcu)
 	vlan_group_free(container_of(rcu, struct vlan_group, rcu));
 }
 
-void unregister_vlan_dev(struct net_device *dev)
+void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
 {
 	struct vlan_dev_info *vlan = vlan_dev_info(dev);
 	struct net_device *real_dev = vlan->real_dev;
@@ -159,12 +159,13 @@ void unregister_vlan_dev(struct net_device *dev)
 	if (real_dev->features & NETIF_F_HW_VLAN_FILTER)
 		ops->ndo_vlan_rx_kill_vid(real_dev, vlan_id);
 
-	vlan_group_set_device(grp, vlan_id, NULL);
 	grp->nr_vlans--;
 
-	synchronize_net();
-
-	unregister_netdevice(dev);
+	if (!grp->killall) {
+		vlan_group_set_device(grp, vlan_id, NULL);
+		synchronize_net();
+	}
+	unregister_netdevice_queue(dev, head);
 
 	/* If the group is now empty, kill off the group. */
 	if (grp->nr_vlans == 0) {
@@ -183,6 +184,34 @@ void unregister_vlan_dev(struct net_device *dev)
 	dev_put(real_dev);
 }
 
+void unregister_vlan_dev_alls(struct vlan_group *grp)
+{
+	LIST_HEAD(list);
+	int i;
+	struct net_device *vlandev;
+	struct vlan_group save;
+
+	memcpy(&save, grp, sizeof(save));
+	memset(&grp->vlan_devices_arrays, 0, sizeof(grp->vlan_devices_arrays));
+	grp->killall = 1;
+
+	synchronize_net();
+
+	/* Delete all VLANs for this dev. */
+	for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
+		vlandev = vlan_group_get_device(&save, i);
+		if (!vlandev)
+			continue;
+
+		unregister_vlan_dev(vlandev, &list);
+		if (grp->nr_vlans == 0)
+			break;
+	}
+	unregister_netdevice_many(&list);
+	for (i = 0; i < VLAN_GROUP_ARRAY_SPLIT_PARTS; i++)
+		kfree(save.vlan_devices_arrays[i]);
+}
+
 static void vlan_transfer_operstate(const struct net_device *dev,
 				    struct net_device *vlandev)
 {
@@ -524,19 +553,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 		break;
 
 	case NETDEV_UNREGISTER:
-		/* Delete all VLANs for this dev. */
-		for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
-			vlandev = vlan_group_get_device(grp, i);
-			if (!vlandev)
-				continue;
-
-			/* unregistration of last vlan destroys group, abort
-			 * afterwards */
-			if (grp->nr_vlans == 1)
-				i = VLAN_GROUP_ARRAY_LEN;
-
-			unregister_vlan_dev(vlandev);
-		}
+		unregister_vlan_dev_alls(grp);
 		break;
 	}
 
@@ -642,7 +659,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
 		err = -EPERM;
 		if (!capable(CAP_NET_ADMIN))
 			break;
-		unregister_vlan_dev(dev);
+		unregister_vlan_dev(dev, NULL);
 		err = 0;
 		break;
 
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index 82570bc2a180..68f9290e6837 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -82,14 +82,14 @@ void vlan_dev_get_realdev_name(const struct net_device *dev, char *result);
 int vlan_check_real_dev(struct net_device *real_dev, u16 vlan_id);
 void vlan_setup(struct net_device *dev);
 int register_vlan_dev(struct net_device *dev);
-void unregister_vlan_dev(struct net_device *dev);
+void unregister_vlan_dev(struct net_device *dev, struct list_head *head);
 
 static inline u32 vlan_get_ingress_priority(struct net_device *dev,
 					    u16 vlan_tci)
 {
 	struct vlan_dev_info *vip = vlan_dev_info(dev);
 
-	return vip->ingress_priority_map[(vlan_tci >> 13) & 0x7];
+	return vip->ingress_priority_map[(vlan_tci >> VLAN_PRIO_SHIFT) & 0x7];
 }
 
 #ifdef CONFIG_VLAN_8021Q_GVRP
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 4198ec5c8abc..790fd55ec318 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -393,7 +393,7 @@ int vlan_dev_set_egress_priority(const struct net_device *dev,
 	struct vlan_dev_info *vlan = vlan_dev_info(dev);
 	struct vlan_priority_tci_mapping *mp = NULL;
 	struct vlan_priority_tci_mapping *np;
-	u32 vlan_qos = (vlan_prio << 13) & 0xE000;
+	u32 vlan_qos = (vlan_prio << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK;
 
 	/* See if a priority mapping exists.. */
 	mp = vlan->egress_priority_map[skb_prio & 0xF];
@@ -626,6 +626,17 @@ static int vlan_dev_fcoe_disable(struct net_device *dev)
 	rc = ops->ndo_fcoe_disable(real_dev);
 	return rc;
 }
+
+static int vlan_dev_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
+{
+	struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+	const struct net_device_ops *ops = real_dev->netdev_ops;
+	int rc = -EINVAL;
+
+	if (ops->ndo_fcoe_get_wwn)
+		rc = ops->ndo_fcoe_get_wwn(real_dev, wwn, type);
+	return rc;
+}
 #endif
 
 static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
@@ -791,6 +802,7 @@ static const struct net_device_ops vlan_netdev_ops = {
 	.ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done,
 	.ndo_fcoe_enable = vlan_dev_fcoe_enable,
 	.ndo_fcoe_disable = vlan_dev_fcoe_disable,
+	.ndo_fcoe_get_wwn = vlan_dev_fcoe_get_wwn,
 #endif
 };
 
@@ -813,6 +825,7 @@ static const struct net_device_ops vlan_netdev_accel_ops = {
 	.ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done,
 	.ndo_fcoe_enable = vlan_dev_fcoe_enable,
 	.ndo_fcoe_disable = vlan_dev_fcoe_disable,
+	.ndo_fcoe_get_wwn = vlan_dev_fcoe_get_wwn,
 #endif
 };
 
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index b1a4290996b5..abe38014b7fd 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -1821,7 +1821,7 @@ static int atalk_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned lo
 #endif
 
 
-static struct net_proto_family atalk_family_ops = {
+static const struct net_proto_family atalk_family_ops = {
 	.family = PF_APPLETALK,
 	.create = atalk_create,
 	.owner = THIS_MODULE,
diff --git a/net/atm/common.c b/net/atm/common.c
index 950bd16d2383..d61e051e0a3f 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -496,7 +496,7 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
 	error = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
 	if (error)
 		return error;
-	sock_recv_timestamp(msg, sk, skb);
+	sock_recv_ts_and_drops(msg, sk, skb);
 	pr_debug("RcvM %d -= %d\n", atomic_read(&sk->sk_rmem_alloc), skb->truesize);
 	atm_return(vcc, skb->truesize);
 	skb_free_datagram(sk, skb);
diff --git a/net/atm/pvc.c b/net/atm/pvc.c
index d4c024504f99..a6e1fdbae87f 100644
--- a/net/atm/pvc.c
+++ b/net/atm/pvc.c
@@ -137,7 +137,7 @@ static int pvc_create(struct net *net, struct socket *sock,int protocol)
 }
 
 
-static struct net_proto_family pvc_family_ops = {
+static const struct net_proto_family pvc_family_ops = {
 	.family = PF_ATMPVC,
 	.create = pvc_create,
 	.owner = THIS_MODULE,
diff --git a/net/atm/svc.c b/net/atm/svc.c
index f90d143c4b25..819354233318 100644
--- a/net/atm/svc.c
+++ b/net/atm/svc.c
@@ -666,7 +666,7 @@ static int svc_create(struct net *net, struct socket *sock,int protocol)
 }
 
 
-static struct net_proto_family svc_family_ops = {
+static const struct net_proto_family svc_family_ops = {
 	.family = PF_ATMSVC,
 	.create = svc_create,
 	.owner = THIS_MODULE,
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index f45460730371..f05306f168fa 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1961,7 +1961,7 @@ static const struct file_operations ax25_info_fops = {
 
 #endif
 
-static struct net_proto_family ax25_family_ops = {
+static const struct net_proto_family ax25_family_ops = {
 	.family = PF_AX25,
 	.create = ax25_create,
 	.owner = THIS_MODULE,
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 8cfb5a849841..399e59c9c6cb 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -45,7 +45,7 @@
 
 /* Bluetooth sockets */
 #define BT_MAX_PROTO 8
-static struct net_proto_family *bt_proto[BT_MAX_PROTO];
+static const struct net_proto_family *bt_proto[BT_MAX_PROTO];
 static DEFINE_RWLOCK(bt_proto_lock);
 
 static struct lock_class_key bt_lock_key[BT_MAX_PROTO];
@@ -86,7 +86,7 @@ static inline void bt_sock_reclassify_lock(struct socket *sock, int proto)
 			bt_key_strings[proto], &bt_lock_key[proto]);
 }
 
-int bt_sock_register(int proto, struct net_proto_family *ops)
+int bt_sock_register(int proto, const struct net_proto_family *ops)
 {
 	int err = 0;
 
@@ -257,7 +257,7 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 	skb_reset_transport_header(skb);
 	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
 	if (err == 0)
-		sock_recv_timestamp(msg, sk, skb);
+		sock_recv_ts_and_drops(msg, sk, skb);
 
 	skb_free_datagram(sk, skb);
 
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c
index e857628b0b27..0a2c5460bb48 100644
--- a/net/bluetooth/bnep/sock.c
+++ b/net/bluetooth/bnep/sock.c
@@ -222,7 +222,7 @@ static int bnep_sock_create(struct net *net, struct socket *sock, int protocol)
 	return 0;
 }
 
-static struct net_proto_family bnep_sock_family_ops = {
+static const struct net_proto_family bnep_sock_family_ops = {
 	.family = PF_BLUETOOTH,
 	.owner = THIS_MODULE,
 	.create = bnep_sock_create
diff --git a/net/bluetooth/cmtp/sock.c b/net/bluetooth/cmtp/sock.c
index 16b0fad74f6e..de7c8040bc56 100644
--- a/net/bluetooth/cmtp/sock.c
+++ b/net/bluetooth/cmtp/sock.c
@@ -217,7 +217,7 @@ static int cmtp_sock_create(struct net *net, struct socket *sock, int protocol)
 	return 0;
 }
 
-static struct net_proto_family cmtp_sock_family_ops = {
+static const struct net_proto_family cmtp_sock_family_ops = {
 	.family = PF_BLUETOOTH,
 	.owner = THIS_MODULE,
 	.create = cmtp_sock_create
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 75302a986067..e7395f231989 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -687,7 +687,7 @@ static int hci_sock_dev_event(struct notifier_block *this, unsigned long event,
 	return NOTIFY_DONE;
 }
 
-static struct net_proto_family hci_sock_family_ops = {
+static const struct net_proto_family hci_sock_family_ops = {
 	.family = PF_BLUETOOTH,
 	.owner = THIS_MODULE,
 	.create = hci_sock_create,
diff --git a/net/bluetooth/hidp/sock.c b/net/bluetooth/hidp/sock.c
index 37c9d7d2e688..4beb6a7a2953 100644
--- a/net/bluetooth/hidp/sock.c
+++ b/net/bluetooth/hidp/sock.c
@@ -268,7 +268,7 @@ static int hidp_sock_create(struct net *net, struct socket *sock, int protocol)
 	return 0;
 }
 
-static struct net_proto_family hidp_sock_family_ops = {
+static const struct net_proto_family hidp_sock_family_ops = {
 	.family = PF_BLUETOOTH,
 	.owner = THIS_MODULE,
 	.create = hidp_sock_create
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index 77e9fb130adb..d65101d92ee5 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -3919,7 +3919,7 @@ static const struct proto_ops l2cap_sock_ops = {
 	.getsockopt = l2cap_sock_getsockopt
 };
 
-static struct net_proto_family l2cap_sock_family_ops = {
+static const struct net_proto_family l2cap_sock_family_ops = {
 	.family = PF_BLUETOOTH,
 	.owner = THIS_MODULE,
 	.create = l2cap_sock_create,
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 8a20aaf1f231..d3bfc1b0afb1 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -703,7 +703,7 @@ static int rfcomm_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 		copied += chunk;
 		size -= chunk;
 
-		sock_recv_timestamp(msg, sk, skb);
+		sock_recv_ts_and_drops(msg, sk, skb);
 
 		if (!(flags & MSG_PEEK)) {
 			atomic_sub(chunk, &sk->sk_rmem_alloc);
@@ -1101,7 +1101,7 @@ static const struct proto_ops rfcomm_sock_ops = {
 	.mmap = sock_no_mmap
 };
 
-static struct net_proto_family rfcomm_sock_family_ops = {
+static const struct net_proto_family rfcomm_sock_family_ops = {
 	.family = PF_BLUETOOTH,
 	.owner = THIS_MODULE,
 	.create = rfcomm_sock_create
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 77f4153bdb5e..694a65541b73 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -993,7 +993,7 @@ static const struct proto_ops sco_sock_ops = {
 	.getsockopt = sco_sock_getsockopt
 };
 
-static struct net_proto_family sco_sock_family_ops = {
+static const struct net_proto_family sco_sock_family_ops = {
 	.family = PF_BLUETOOTH,
 	.owner = THIS_MODULE,
 	.create = sco_sock_create,
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 07a07770c8b6..1a99c4e04e85 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -157,6 +157,7 @@ static const struct ethtool_ops br_ethtool_ops = {
 	.get_tso = ethtool_op_get_tso,
 	.set_tso = br_set_tso,
 	.get_ufo = ethtool_op_get_ufo,
+	.set_ufo = ethtool_op_set_ufo,
 	.get_flags = ethtool_op_get_flags,
 };
 
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index b1b3b0fbf41c..2117e5ba24c8 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -154,7 +154,7 @@ static void del_nbp(struct net_bridge_port *p)
 }
 
 /* called with RTNL */
-static void del_br(struct net_bridge *br)
+static void del_br(struct net_bridge *br, struct list_head *head)
 {
 	struct net_bridge_port *p, *n;
 
@@ -165,7 +165,7 @@ static void del_br(struct net_bridge *br)
 	del_timer_sync(&br->gc_timer);
 
 	br_sysfs_delbr(br->dev);
-	unregister_netdevice(br->dev);
+	unregister_netdevice_queue(br->dev, head);
 }
 
 static struct net_device *new_bridge_dev(struct net *net, const char *name)
@@ -323,7 +323,7 @@ int br_del_bridge(struct net *net, const char *name)
 	}
 
 	else
-		del_br(netdev_priv(dev));
+		del_br(netdev_priv(dev), NULL);
 
 	rtnl_unlock();
 	return ret;
@@ -462,15 +462,14 @@ int br_del_if(struct net_bridge *br, struct net_device *dev)
 void br_net_exit(struct net *net)
 {
 	struct net_device *dev;
+	LIST_HEAD(list);
 
 	rtnl_lock();
-restart:
-	for_each_netdev(net, dev) {
-		if (dev->priv_flags & IFF_EBRIDGE) {
-			del_br(netdev_priv(dev));
-			goto restart;
-		}
-	}
+	for_each_netdev(net, dev)
+		if (dev->priv_flags & IFF_EBRIDGE)
+			del_br(netdev_priv(dev), &list);
+
+	unregister_netdevice_many(&list);
 	rtnl_unlock();
 
 }
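The br_net_exit() change above is an instance of the batching pattern added by the net/core/dev.c hunks later in this page: queue each device on a local list with unregister_netdevice_queue() (or a helper that takes the list), then tear the whole batch down with one unregister_netdevice_many() call, so the costly synchronize_net() barriers are paid once per batch rather than once per device. The following is a sketch only, assuming the APIs introduced by this series; struct foo_priv and foo_cleanup() are hypothetical names, not part of the patch:

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/list.h>

/* Hypothetical per-device bookkeeping used only for this sketch. */
struct foo_priv {
	struct net_device *netdev;
	struct list_head node;
};

/* Unregister every device tracked on @foo_devs in a single batch:
 * each one is queued with unregister_netdevice_queue(), and the shared
 * grace periods inside rollback_registered_many() run once. */
static void foo_cleanup(struct list_head *foo_devs)
{
	struct foo_priv *p;
	LIST_HEAD(kill_list);

	rtnl_lock();
	list_for_each_entry(p, foo_devs, node)
		unregister_netdevice_queue(p->netdev, &kill_list);
	unregister_netdevice_many(&kill_list);
	rtnl_unlock();
}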
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 606832115674..3f2eb27e1ffb 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -842,7 +842,7 @@ static struct packet_type can_packet __read_mostly = {
 	.func = can_rcv,
 };
 
-static struct net_proto_family can_family_ops __read_mostly = {
+static const struct net_proto_family can_family_ops = {
 	.family = PF_CAN,
 	.create = can_create,
 	.owner = THIS_MODULE,
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 597da4f8f888..2f47039c79dd 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -1534,7 +1534,7 @@ static int bcm_recvmsg(struct kiocb *iocb, struct socket *sock,
 		return err;
 	}
 
-	sock_recv_timestamp(msg, sk, skb);
+	sock_recv_ts_and_drops(msg, sk, skb);
 
 	if (msg->msg_name) {
 		msg->msg_namelen = sizeof(struct sockaddr_can);
diff --git a/net/can/raw.c b/net/can/raw.c
index b5e897922d32..6e77db58b9e6 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -424,8 +424,6 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
 
 	if (level != SOL_CAN_RAW)
 		return -EINVAL;
-	if (optlen < 0)
-		return -EINVAL;
 
 	switch (optname) {
 
@@ -702,7 +700,7 @@ static int raw_recvmsg(struct kiocb *iocb, struct socket *sock,
 		return err;
 	}
 
-	sock_recv_timestamp(msg, sk, skb);
+	sock_recv_ts_and_drops(msg, sk, skb);
 
 	if (msg->msg_name) {
 		msg->msg_namelen = sizeof(struct sockaddr_can);
diff --git a/net/compat.c b/net/compat.c
index a407c3addbae..6a2f75fb3f45 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -390,9 +390,6 @@ asmlinkage long compat_sys_setsockopt(int fd, int level, int optname,
 	int err;
 	struct socket *sock;
 
-	if (optlen < 0)
-		return -EINVAL;
-
 	if ((sock = sockfd_lookup(fd, &err))!=NULL)
 	{
 		err = security_socket_setsockopt(sock,level,optname);
@@ -727,10 +724,10 @@ EXPORT_SYMBOL(compat_mc_getsockopt);
 
 /* Argument list sizes for compat_sys_socketcall */
 #define AL(x) ((x) * sizeof(u32))
-static unsigned char nas[19]={AL(0),AL(3),AL(3),AL(3),AL(2),AL(3),
+static unsigned char nas[20]={AL(0),AL(3),AL(3),AL(3),AL(2),AL(3),
 				AL(3),AL(3),AL(4),AL(4),AL(4),AL(6),
 				AL(6),AL(2),AL(5),AL(5),AL(3),AL(3),
-				AL(4)};
+				AL(4),AL(5)};
 #undef AL
 
 asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, unsigned flags)
@@ -755,13 +752,36 @@ asmlinkage long compat_sys_recvfrom(int fd, void __user *buf, size_t len,
 	return sys_recvfrom(fd, buf, len, flags | MSG_CMSG_COMPAT, addr, addrlen);
 }
 
+asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg,
+				    unsigned vlen, unsigned int flags,
+				    struct timespec __user *timeout)
+{
+	int datagrams;
+	struct timespec ktspec;
+	struct compat_timespec __user *utspec =
+			(struct compat_timespec __user *)timeout;
+
+	if (get_user(ktspec.tv_sec, &utspec->tv_sec) ||
+	    get_user(ktspec.tv_nsec, &utspec->tv_nsec))
+		return -EFAULT;
+
+	datagrams = __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
+				   flags | MSG_CMSG_COMPAT, &ktspec);
+	if (datagrams > 0 &&
+	    (put_user(ktspec.tv_sec, &utspec->tv_sec) ||
+	     put_user(ktspec.tv_nsec, &utspec->tv_nsec)))
+		datagrams = -EFAULT;
+
+	return datagrams;
+}
+
 asmlinkage long compat_sys_socketcall(int call, u32 __user *args)
 {
 	int ret;
 	u32 a[6];
 	u32 a0, a1;
 
-	if (call < SYS_SOCKET || call > SYS_ACCEPT4)
+	if (call < SYS_SOCKET || call > SYS_RECVMMSG)
 		return -EINVAL;
 	if (copy_from_user(a, args, nas[call]))
 		return -EFAULT;
@@ -823,6 +843,10 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args)
 	case SYS_RECVMSG:
 		ret = compat_sys_recvmsg(a0, compat_ptr(a1), a[2]);
 		break;
+	case SYS_RECVMMSG:
+		ret = compat_sys_recvmmsg(a0, compat_ptr(a1), a[2], a[3],
+					  compat_ptr(a[4]));
+		break;
 	case SYS_ACCEPT4:
 		ret = sys_accept4(a0, compat_ptr(a1), compat_ptr(a[2]), a[3]);
 		break;
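compat_sys_recvmmsg() above is only the 32-bit-on-64-bit entry point for the new recvmmsg() system call that this series wires into the socketcall table (SYS_RECVMMSG). Purely as a usage illustration, and assuming a libc that exposes a recvmmsg() wrapper (the port number and VLEN below are arbitrary), a UDP receiver can drain several datagrams per system call roughly like this:

#define _GNU_SOURCE
#include <sys/socket.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>

#define VLEN  8
#define BUFSZ 1500

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct sockaddr_in addr = { .sin_family = AF_INET,
				    .sin_port = htons(5000),
				    .sin_addr.s_addr = htonl(INADDR_ANY) };
	struct mmsghdr msgs[VLEN];
	struct iovec iovecs[VLEN];
	char bufs[VLEN][BUFSZ];
	struct timespec timeout = { .tv_sec = 1, .tv_nsec = 0 };
	int i, n;

	if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("socket/bind");
		return 1;
	}

	memset(msgs, 0, sizeof(msgs));
	for (i = 0; i < VLEN; i++) {
		iovecs[i].iov_base = bufs[i];
		iovecs[i].iov_len = BUFSZ;
		msgs[i].msg_hdr.msg_iov = &iovecs[i];
		msgs[i].msg_hdr.msg_iovlen = 1;
	}

	/* One call can return up to VLEN datagrams; msg_len carries the
	 * number of bytes received for each one. */
	n = recvmmsg(fd, msgs, VLEN, 0, &timeout);
	if (n < 0)
		perror("recvmmsg");
	else
		for (i = 0; i < n; i++)
			printf("datagram %d: %u bytes\n", i, msgs[i].msg_len);
	return 0;
}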
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 1c6cf3a1a4f6..4d57f5e12b05 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -262,6 +262,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
 	}
 
 	kfree_skb(skb);
+	atomic_inc(&sk->sk_drops);
 	sk_mem_reclaim_partial(sk);
 
 	return err;
diff --git a/net/core/dev.c b/net/core/dev.c
index b8f74cfb1bfd..68a1bb68b5a8 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -193,18 +193,15 @@ static struct list_head ptype_all __read_mostly; /* Taps */
 DEFINE_RWLOCK(dev_base_lock);
 EXPORT_SYMBOL(dev_base_lock);
 
-#define NETDEV_HASHBITS 8
-#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)
-
 static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
 {
 	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
-	return &net->dev_name_head[hash & ((1 << NETDEV_HASHBITS) - 1)];
+	return &net->dev_name_head[hash & (NETDEV_HASHENTRIES - 1)];
 }
 
 static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
 {
-	return &net->dev_index_head[ifindex & ((1 << NETDEV_HASHBITS) - 1)];
+	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
 }
 
 /* Device list insertion */
@@ -217,12 +214,15 @@ static int list_netdevice(struct net_device *dev)
 	write_lock_bh(&dev_base_lock);
 	list_add_tail(&dev->dev_list, &net->dev_base_head);
 	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
-	hlist_add_head(&dev->index_hlist, dev_index_hash(net, dev->ifindex));
+	hlist_add_head_rcu(&dev->index_hlist,
+			   dev_index_hash(net, dev->ifindex));
 	write_unlock_bh(&dev_base_lock);
 	return 0;
 }
 
-/* Device list removal */
+/* Device list removal
+ * caller must respect a RCU grace period before freeing/reusing dev
+ */
 static void unlist_netdevice(struct net_device *dev)
 {
 	ASSERT_RTNL();
@@ -231,7 +231,7 @@ static void unlist_netdevice(struct net_device *dev)
 	write_lock_bh(&dev_base_lock);
 	list_del(&dev->dev_list);
 	hlist_del(&dev->name_hlist);
-	hlist_del(&dev->index_hlist);
+	hlist_del_rcu(&dev->index_hlist);
 	write_unlock_bh(&dev_base_lock);
 }
 
@@ -649,6 +649,31 @@ struct net_device *__dev_get_by_index(struct net *net, int ifindex)
 }
 EXPORT_SYMBOL(__dev_get_by_index);
 
+/**
+ * dev_get_by_index_rcu - find a device by its ifindex
+ * @net: the applicable net namespace
+ * @ifindex: index of device
+ *
+ * Search for an interface by index. Returns %NULL if the device
+ * is not found or a pointer to the device. The device has not
+ * had its reference counter increased so the caller must be careful
+ * about locking. The caller must hold RCU lock.
+ */
+
+struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
+{
+	struct hlist_node *p;
+	struct net_device *dev;
+	struct hlist_head *head = dev_index_hash(net, ifindex);
+
+	hlist_for_each_entry_rcu(dev, p, head, index_hlist)
+		if (dev->ifindex == ifindex)
+			return dev;
+
+	return NULL;
+}
+EXPORT_SYMBOL(dev_get_by_index_rcu);
+
 
 /**
  * dev_get_by_index - find a device by its ifindex
@@ -665,11 +690,11 @@ struct net_device *dev_get_by_index(struct net *net, int ifindex)
 {
 	struct net_device *dev;
 
-	read_lock(&dev_base_lock);
-	dev = __dev_get_by_index(net, ifindex);
+	rcu_read_lock();
+	dev = dev_get_by_index_rcu(net, ifindex);
 	if (dev)
 		dev_hold(dev);
-	read_unlock(&dev_base_lock);
+	rcu_read_unlock();
 	return dev;
 }
 EXPORT_SYMBOL(dev_get_by_index);
@@ -1791,13 +1816,25 @@ EXPORT_SYMBOL(skb_tx_hash);
 static struct netdev_queue *dev_pick_tx(struct net_device *dev,
 					struct sk_buff *skb)
 {
-	const struct net_device_ops *ops = dev->netdev_ops;
-	u16 queue_index = 0;
+	u16 queue_index;
+	struct sock *sk = skb->sk;
+
+	if (sk_tx_queue_recorded(sk)) {
+		queue_index = sk_tx_queue_get(sk);
+	} else {
+		const struct net_device_ops *ops = dev->netdev_ops;
+
+		if (ops->ndo_select_queue) {
+			queue_index = ops->ndo_select_queue(dev, skb);
+		} else {
+			queue_index = 0;
+			if (dev->real_num_tx_queues > 1)
+				queue_index = skb_tx_hash(dev, skb);
 
-	if (ops->ndo_select_queue)
-		queue_index = ops->ndo_select_queue(dev, skb);
-	else if (dev->real_num_tx_queues > 1)
-		queue_index = skb_tx_hash(dev, skb);
+			if (sk && sk->sk_dst_cache)
+				sk_tx_queue_set(sk, queue_index);
+		}
+	}
 
 	skb_set_queue_mapping(skb, queue_index);
 	return netdev_get_tx_queue(dev, queue_index);
@@ -2291,7 +2328,7 @@ int netif_receive_skb(struct sk_buff *skb)
 	if (!skb->tstamp.tv64)
 		net_timestamp(skb);
 
-	if (skb->vlan_tci && vlan_hwaccel_do_receive(skb))
+	if (vlan_tx_tag_present(skb) && vlan_hwaccel_do_receive(skb))
 		return NET_RX_SUCCESS;
 
 	/* if we've gotten here through NAPI, check netpoll */
@@ -2604,20 +2641,13 @@ EXPORT_SYMBOL(napi_reuse_skb);
 
 struct sk_buff *napi_get_frags(struct napi_struct *napi)
 {
-	struct net_device *dev = napi->dev;
 	struct sk_buff *skb = napi->skb;
 
 	if (!skb) {
-		skb = netdev_alloc_skb(dev, GRO_MAX_HEAD + NET_IP_ALIGN);
-		if (!skb)
-			goto out;
-
-		skb_reserve(skb, NET_IP_ALIGN);
-
-		napi->skb = skb;
+		skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
+		if (skb)
+			napi->skb = skb;
 	}
-
-out:
 	return skb;
 }
 EXPORT_SYMBOL(napi_get_frags);
@@ -2937,15 +2967,15 @@ static int dev_ifname(struct net *net, struct ifreq __user *arg)
 	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
 		return -EFAULT;
 
-	read_lock(&dev_base_lock);
-	dev = __dev_get_by_index(net, ifr.ifr_ifindex);
+	rcu_read_lock();
+	dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
 	if (!dev) {
-		read_unlock(&dev_base_lock);
+		rcu_read_unlock();
 		return -ENODEV;
 	}
 
 	strcpy(ifr.ifr_name, dev->name);
-	read_unlock(&dev_base_lock);
+	rcu_read_unlock();
 
 	if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
 		return -EFAULT;
@@ -4635,59 +4665,76 @@ static void net_set_todo(struct net_device *dev)
 	list_add_tail(&dev->todo_list, &net_todo_list);
 }
 
-static void rollback_registered(struct net_device *dev)
+static void rollback_registered_many(struct list_head *head)
 {
+	struct net_device *dev;
+
 	BUG_ON(dev_boot_phase);
 	ASSERT_RTNL();
 
-	/* Some devices call without registering for initialization unwind. */
-	if (dev->reg_state == NETREG_UNINITIALIZED) {
-		printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
-		       "was registered\n", dev->name, dev);
+	list_for_each_entry(dev, head, unreg_list) {
+		/* Some devices call without registering
+		 * for initialization unwind.
+		 */
+		if (dev->reg_state == NETREG_UNINITIALIZED) {
+			pr_debug("unregister_netdevice: device %s/%p never "
+				 "was registered\n", dev->name, dev);
 
 		WARN_ON(1);
 		return;
 	}
 
 	BUG_ON(dev->reg_state != NETREG_REGISTERED);
 
 	/* If device is running, close it first. */
 	dev_close(dev);
 
 	/* And unlink it from device chain. */
 	unlist_netdevice(dev);
 
 	dev->reg_state = NETREG_UNREGISTERING;
+	}
 
 	synchronize_net();
 
-	/* Shutdown queueing discipline. */
-	dev_shutdown(dev);
+	list_for_each_entry(dev, head, unreg_list) {
+		/* Shutdown queueing discipline. */
+		dev_shutdown(dev);
 
 
 	/* Notify protocols, that we are about to destroy
 	   this device. They should clean all the things.
 	*/
 	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
 
 	/*
 	 * Flush the unicast and multicast chains
 	 */
 	dev_unicast_flush(dev);
 	dev_addr_discard(dev);
 
 	if (dev->netdev_ops->ndo_uninit)
 		dev->netdev_ops->ndo_uninit(dev);
 
 	/* Notifier chain MUST detach us from master device. */
 	WARN_ON(dev->master);
 
 	/* Remove entries from kobject tree */
 	netdev_unregister_kobject(dev);
+	}
 
 	synchronize_net();
 
-	dev_put(dev);
+	list_for_each_entry(dev, head, unreg_list)
+		dev_put(dev);
+}
+
+static void rollback_registered(struct net_device *dev)
+{
+	LIST_HEAD(single);
+
+	list_add(&dev->unreg_list, &single);
+	rollback_registered_many(&single);
 }
 
 static void __netdev_init_queue_locks_one(struct net_device *dev,
@@ -4836,6 +4883,12 @@ int register_netdevice(struct net_device *dev)
 		dev->features |= NETIF_F_GSO;
 
 	netdev_initialize_kobject(dev);
+
+	ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
+	ret = notifier_to_errno(ret);
+	if (ret)
+		goto err_uninit;
+
 	ret = netdev_register_kobject(dev);
 	if (ret)
 		goto err_uninit;
@@ -5237,25 +5290,48 @@ void synchronize_net(void)
 EXPORT_SYMBOL(synchronize_net);
 
 /**
- * unregister_netdevice - remove device from the kernel
+ * unregister_netdevice_queue - remove device from the kernel
  * @dev: device
- *
+ * @head: list
+
 * This function shuts down a device interface and removes it
 * from the kernel tables.
+ * If head not NULL, device is queued to be unregistered later.
 *
 * Callers must hold the rtnl semaphore. You may want
 * unregister_netdev() instead of this.
 */
 
-void unregister_netdevice(struct net_device *dev)
+void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
 {
 	ASSERT_RTNL();
 
-	rollback_registered(dev);
-	/* Finish processing unregister after unlock */
-	net_set_todo(dev);
+	if (head) {
+		list_add_tail(&dev->unreg_list, head);
+	} else {
+		rollback_registered(dev);
+		/* Finish processing unregister after unlock */
+		net_set_todo(dev);
+	}
+}
+EXPORT_SYMBOL(unregister_netdevice_queue);
+
+/**
+ * unregister_netdevice_many - unregister many devices
+ * @head: list of devices
+ *
+ */
+void unregister_netdevice_many(struct list_head *head)
+{
+	struct net_device *dev;
+
+	if (!list_empty(head)) {
+		rollback_registered_many(head);
+		list_for_each_entry(dev, head, unreg_list)
+			net_set_todo(dev);
+	}
 }
-EXPORT_SYMBOL(unregister_netdevice);
+EXPORT_SYMBOL(unregister_netdevice_many);
 
 /**
  * unregister_netdev - remove device from the kernel
@@ -5483,7 +5559,7 @@ unsigned long netdev_increment_features(unsigned long all, unsigned long one,
 		one |= NETIF_F_ALL_CSUM;
 
 	one |= all & NETIF_F_ONE_FOR_ALL;
-	all &= one | NETIF_F_LLTX | NETIF_F_GSO;
+	all &= one | NETIF_F_LLTX | NETIF_F_GSO | NETIF_F_UFO;
 	all |= one & mask & NETIF_F_ONE_FOR_ALL;
 
 	return all;
@@ -5582,7 +5658,7 @@ restart:
 
 		/* Delete virtual devices */
 		if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink) {
-			dev->rtnl_link_ops->dellink(dev);
+			dev->rtnl_link_ops->dellink(dev, NULL);
 			goto restart;
 		}
 
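The new dev_get_by_index_rcu() lookup added above trades the dev_base_lock read lock for RCU: no reference is taken, so the returned pointer is only valid inside the rcu_read_lock() section (dev_get_by_index() still wraps it with dev_hold() for callers that need the device afterwards). A minimal sketch of the intended calling convention; foo_get_mtu() is an illustrative helper, not part of this patch:

#include <linux/netdevice.h>
#include <linux/rcupdate.h>
#include <linux/errno.h>

/* Illustrative only: read a field of the device identified by @ifindex
 * without taking a reference. Anything that must outlive the RCU
 * section still needs dev_hold()/dev_put(). */
static int foo_get_mtu(struct net *net, int ifindex)
{
	struct net_device *dev;
	int mtu = -ENODEV;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		mtu = dev->mtu;
	rcu_read_unlock();

	return mtu;
}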
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 4c12ddb5f5ee..d8aee584e8d1 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -198,13 +198,6 @@ static int ethtool_get_drvinfo(struct net_device *dev, void __user *useraddr)
 		rc = ops->get_sset_count(dev, ETH_SS_PRIV_FLAGS);
 		if (rc >= 0)
 			info.n_priv_flags = rc;
-	} else {
-		/* code path for obsolete hooks */
-
-		if (ops->self_test_count)
-			info.testinfo_len = ops->self_test_count(dev);
-		if (ops->get_stats_count)
-			info.n_stats = ops->get_stats_count(dev);
 	}
 	if (ops->get_regs_len)
 		info.regdump_len = ops->get_regs_len(dev);
@@ -309,6 +302,26 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
 	return ret;
 }
 
+static int ethtool_reset(struct net_device *dev, char __user *useraddr)
+{
+	struct ethtool_value reset;
+	int ret;
+
+	if (!dev->ethtool_ops->reset)
+		return -EOPNOTSUPP;
+
+	if (copy_from_user(&reset, useraddr, sizeof(reset)))
+		return -EFAULT;
+
+	ret = dev->ethtool_ops->reset(dev, &reset.data);
+	if (ret)
+		return ret;
+
+	if (copy_to_user(useraddr, &reset, sizeof(reset)))
+		return -EFAULT;
+	return 0;
+}
+
 static int ethtool_get_wol(struct net_device *dev, char __user *useraddr)
 {
 	struct ethtool_wolinfo wol = { ETHTOOL_GWOL };
@@ -684,16 +697,10 @@ static int ethtool_self_test(struct net_device *dev, char __user *useraddr)
 	u64 *data;
 	int ret, test_len;
 
-	if (!ops->self_test)
-		return -EOPNOTSUPP;
-	if (!ops->get_sset_count && !ops->self_test_count)
+	if (!ops->self_test || !ops->get_sset_count)
 		return -EOPNOTSUPP;
 
-	if (ops->get_sset_count)
-		test_len = ops->get_sset_count(dev, ETH_SS_TEST);
-	else
-		/* code path for obsolete hook */
-		test_len = ops->self_test_count(dev);
+	test_len = ops->get_sset_count(dev, ETH_SS_TEST);
 	if (test_len < 0)
 		return test_len;
 	WARN_ON(test_len == 0);
@@ -728,36 +735,17 @@ static int ethtool_get_strings(struct net_device *dev, void __user *useraddr)
 	u8 *data;
 	int ret;
 
-	if (!ops->get_strings)
+	if (!ops->get_strings || !ops->get_sset_count)
 		return -EOPNOTSUPP;
 
 	if (copy_from_user(&gstrings, useraddr, sizeof(gstrings)))
 		return -EFAULT;
 
-	if (ops->get_sset_count) {
-		ret = ops->get_sset_count(dev, gstrings.string_set);
-		if (ret < 0)
-			return ret;
-
-		gstrings.len = ret;
-	} else {
-		/* code path for obsolete hooks */
-
-		switch (gstrings.string_set) {
-		case ETH_SS_TEST:
-			if (!ops->self_test_count)
-				return -EOPNOTSUPP;
-			gstrings.len = ops->self_test_count(dev);
-			break;
-		case ETH_SS_STATS:
-			if (!ops->get_stats_count)
-				return -EOPNOTSUPP;
-			gstrings.len = ops->get_stats_count(dev);
-			break;
-		default:
-			return -EINVAL;
-		}
-	}
+	ret = ops->get_sset_count(dev, gstrings.string_set);
+	if (ret < 0)
+		return ret;
+
+	gstrings.len = ret;
 
 	data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER);
 	if (!data)
@@ -798,16 +786,10 @@ static int ethtool_get_stats(struct net_device *dev, void __user *useraddr)
 	u64 *data;
 	int ret, n_stats;
 
-	if (!ops->get_ethtool_stats)
-		return -EOPNOTSUPP;
-	if (!ops->get_sset_count && !ops->get_stats_count)
+	if (!ops->get_ethtool_stats || !ops->get_sset_count)
 		return -EOPNOTSUPP;
 
-	if (ops->get_sset_count)
-		n_stats = ops->get_sset_count(dev, ETH_SS_STATS);
-	else
-		/* code path for obsolete hook */
-		n_stats = ops->get_stats_count(dev);
+	n_stats = ops->get_sset_count(dev, ETH_SS_STATS);
 	if (n_stats < 0)
 		return n_stats;
 	WARN_ON(n_stats == 0);
@@ -1127,6 +1109,9 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
 	case ETHTOOL_FLASHDEV:
 		rc = ethtool_flash_device(dev, useraddr);
 		break;
+	case ETHTOOL_RESET:
+		rc = ethtool_reset(dev, useraddr);
+		break;
 	default:
 		rc = -EOPNOTSUPP;
 	}
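ethtool_reset() above makes the new ETHTOOL_RESET command reachable through SIOCETHTOOL: userspace passes an ethtool_value whose data field selects what to reset, and on return data carries the flags the driver could not honour. A user-space sketch, assuming the ETH_RESET_ALL flag from the matching include/linux/ethtool.h change in this series and an interface named "eth0" (both assumptions):

#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	struct ethtool_value value = { .cmd = ETHTOOL_RESET,
				       .data = ETH_RESET_ALL };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&value;

	if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_RESET");
		return 1;
	}
	/* Bits still set in value.data were NOT reset by the driver. */
	printf("reset done, un-reset flags: 0x%x\n", value.data);
	return 0;
}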
diff --git a/net/core/filter.c b/net/core/filter.c
index d1d779ca096d..08db7b9143a3 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -303,6 +303,12 @@ load_b:
 		case SKF_AD_IFINDEX:
 			A = skb->dev->ifindex;
 			continue;
+		case SKF_AD_MARK:
+			A = skb->mark;
+			continue;
+		case SKF_AD_QUEUE:
+			A = skb->queue_mapping;
+			continue;
 		case SKF_AD_NLATTR: {
 			struct nlattr *nla;
 
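SKF_AD_MARK and SKF_AD_QUEUE above extend the classic BPF ancillary data area, so a socket filter can branch on skb->mark or on the recorded TX queue. A user-space sketch of loading the mark via the ancillary offset (the accepted mark value 0x1 is arbitrary, and the headers must be recent enough to define SKF_AD_MARK):

#include <linux/filter.h>
#include <linux/if_ether.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <stdio.h>

int main(void)
{
	/* Accept only packets whose skb->mark == 0x1, drop everything else. */
	struct sock_filter code[] = {
		BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + SKF_AD_MARK),
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x1, 0, 1),
		BPF_STMT(BPF_RET | BPF_K, 0xffff),	/* accept up to 64k */
		BPF_STMT(BPF_RET | BPF_K, 0),		/* drop */
	};
	struct sock_fprog prog = {
		.len = sizeof(code) / sizeof(code[0]),
		.filter = code,
	};
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));

	if (fd < 0 || setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
				 &prog, sizeof(prog)) < 0) {
		perror("SO_ATTACH_FILTER");
		return 1;
	}
	printf("filter attached\n");
	return 0;
}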
diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
index 8569310268ab..393b1d8618e2 100644
--- a/net/core/gen_stats.c
+++ b/net/core/gen_stats.c
@@ -127,6 +127,7 @@ gnet_stats_copy_basic(struct gnet_dump *d, struct gnet_stats_basic_packed *b)
 /**
  * gnet_stats_copy_rate_est - copy rate estimator statistics into statistics TLV
  * @d: dumping handle
+ * @b: basic statistics
  * @r: rate estimator statistics
  *
  * Appends the rate estimator statistics to the top level TLV created by
@@ -136,8 +137,13 @@ gnet_stats_copy_basic(struct gnet_dump *d, struct gnet_stats_basic_packed *b)
  * if the room in the socket buffer was not sufficient.
  */
 int
-gnet_stats_copy_rate_est(struct gnet_dump *d, struct gnet_stats_rate_est *r)
+gnet_stats_copy_rate_est(struct gnet_dump *d,
+			 const struct gnet_stats_basic_packed *b,
+			 struct gnet_stats_rate_est *r)
 {
+	if (b && !gen_estimator_active(b, r))
+		return 0;
+
 	if (d->compat_tc_stats) {
 		d->tc_stats.bps = r->bps;
 		d->tc_stats.pps = r->pps;
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 427ded841224..89de182353b0 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -130,6 +130,48 @@ static ssize_t show_carrier(struct device *dev,
 	return -EINVAL;
 }
 
+static ssize_t show_speed(struct device *dev,
+			  struct device_attribute *attr, char *buf)
+{
+	struct net_device *netdev = to_net_dev(dev);
+	int ret = -EINVAL;
+
+	if (!rtnl_trylock())
+		return restart_syscall();
+
+	if (netif_running(netdev) &&
+	    netdev->ethtool_ops &&
+	    netdev->ethtool_ops->get_settings) {
+		struct ethtool_cmd cmd = { ETHTOOL_GSET };
+
+		if (!netdev->ethtool_ops->get_settings(netdev, &cmd))
+			ret = sprintf(buf, fmt_dec, ethtool_cmd_speed(&cmd));
+	}
+	rtnl_unlock();
+	return ret;
+}
+
+static ssize_t show_duplex(struct device *dev,
+			   struct device_attribute *attr, char *buf)
+{
+	struct net_device *netdev = to_net_dev(dev);
+	int ret = -EINVAL;
+
+	if (!rtnl_trylock())
+		return restart_syscall();
+
+	if (netif_running(netdev) &&
+	    netdev->ethtool_ops &&
+	    netdev->ethtool_ops->get_settings) {
+		struct ethtool_cmd cmd = { ETHTOOL_GSET };
+
+		if (!netdev->ethtool_ops->get_settings(netdev, &cmd))
+			ret = sprintf(buf, "%s\n", cmd.duplex ? "full" : "half");
+	}
+	rtnl_unlock();
+	return ret;
+}
+
 static ssize_t show_dormant(struct device *dev,
 			    struct device_attribute *attr, char *buf)
 {
@@ -259,6 +301,8 @@ static struct device_attribute net_class_attributes[] = {
 	__ATTR(address, S_IRUGO, show_address, NULL),
 	__ATTR(broadcast, S_IRUGO, show_broadcast, NULL),
 	__ATTR(carrier, S_IRUGO, show_carrier, NULL),
+	__ATTR(speed, S_IRUGO, show_speed, NULL),
+	__ATTR(duplex, S_IRUGO, show_duplex, NULL),
 	__ATTR(dormant, S_IRUGO, show_dormant, NULL),
 	__ATTR(operstate, S_IRUGO, show_operstate, NULL),
 	__ATTR(mtu, S_IRUGO | S_IWUSR, show_mtu, store_mtu),
@@ -503,9 +547,13 @@ int netdev_register_kobject(struct net_device *net)
 		*groups++ = &netstat_group;
 
 #ifdef CONFIG_WIRELESS_EXT_SYSFS
-	if (net->wireless_handlers || net->ieee80211_ptr)
+	if (net->ieee80211_ptr)
+		*groups++ = &wireless_group;
+#ifdef CONFIG_WIRELESS_EXT
+	else if (net->wireless_handlers)
 		*groups++ = &wireless_group;
 #endif
+#endif
 #endif /* CONFIG_SYSFS */
 
 	if (dev_net(net) != &init_net)
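The two attributes added above surface the ethtool link settings as /sys/class/net/<iface>/speed and .../duplex. A tiny reader for illustration only ("eth0" is an assumption; when the link state cannot be queried the kernel returns -EINVAL and the read simply fails):

#include <stdio.h>

static void show(const char *path)
{
	char buf[64];
	FILE *f = fopen(path, "r");

	if (f && fgets(buf, sizeof(buf), f))
		printf("%s: %s", path, buf);
	else
		printf("%s: unavailable\n", path);
	if (f)
		fclose(f);
}

int main(void)
{
	show("/sys/class/net/eth0/speed");	/* decimal Mb/s, e.g. "1000" */
	show("/sys/class/net/eth0/duplex");	/* "full" or "half" */
	return 0;
}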
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 6eb8d47cbf3a..5ce017bf4afa 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3446,12 +3446,14 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 	txq = netdev_get_tx_queue(odev, queue_map);
 
 	__netif_tx_lock_bh(txq);
-	atomic_inc(&(pkt_dev->skb->users));
 
-	if (unlikely(netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq)))
+	if (unlikely(netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq))) {
 		ret = NETDEV_TX_BUSY;
-	else
-		ret = (*xmit)(pkt_dev->skb, odev);
+		pkt_dev->last_ok = 0;
+		goto unlock;
+	}
+	atomic_inc(&(pkt_dev->skb->users));
+	ret = (*xmit)(pkt_dev->skb, odev);
 
 	switch (ret) {
 	case NETDEV_TX_OK:
@@ -3473,6 +3475,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 		atomic_dec(&(pkt_dev->skb->users));
 		pkt_dev->last_ok = 0;
 	}
+unlock:
 	__netif_tx_unlock_bh(txq);
 
 	/* If pkt_dev->count is zero, then run forever */
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index eb42873f2a3a..391a62cd9df6 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
@@ -248,7 +248,7 @@ static LIST_HEAD(link_ops); | |||
248 | int __rtnl_link_register(struct rtnl_link_ops *ops) | 248 | int __rtnl_link_register(struct rtnl_link_ops *ops) |
249 | { | 249 | { |
250 | if (!ops->dellink) | 250 | if (!ops->dellink) |
251 | ops->dellink = unregister_netdevice; | 251 | ops->dellink = unregister_netdevice_queue; |
252 | 252 | ||
253 | list_add_tail(&ops->list, &link_ops); | 253 | list_add_tail(&ops->list, &link_ops); |
254 | return 0; | 254 | return 0; |
@@ -277,13 +277,13 @@ EXPORT_SYMBOL_GPL(rtnl_link_register); | |||
277 | static void __rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops) | 277 | static void __rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops) |
278 | { | 278 | { |
279 | struct net_device *dev; | 279 | struct net_device *dev; |
280 | restart: | 280 | LIST_HEAD(list_kill); |
281 | |||
281 | for_each_netdev(net, dev) { | 282 | for_each_netdev(net, dev) { |
282 | if (dev->rtnl_link_ops == ops) { | 283 | if (dev->rtnl_link_ops == ops) |
283 | ops->dellink(dev); | 284 | ops->dellink(dev, &list_kill); |
284 | goto restart; | ||
285 | } | ||
286 | } | 285 | } |
286 | unregister_netdevice_many(&list_kill); | ||
287 | } | 287 | } |
288 | 288 | ||
289 | void rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops) | 289 | void rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops) |
@@ -682,22 +682,33 @@ nla_put_failure: | |||
682 | static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) | 682 | static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) |
683 | { | 683 | { |
684 | struct net *net = sock_net(skb->sk); | 684 | struct net *net = sock_net(skb->sk); |
685 | int idx; | 685 | int h, s_h; |
686 | int s_idx = cb->args[0]; | 686 | int idx = 0, s_idx; |
687 | struct net_device *dev; | 687 | struct net_device *dev; |
688 | 688 | struct hlist_head *head; | |
689 | idx = 0; | 689 | struct hlist_node *node; |
690 | for_each_netdev(net, dev) { | 690 | |
691 | if (idx < s_idx) | 691 | s_h = cb->args[0]; |
692 | goto cont; | 692 | s_idx = cb->args[1]; |
693 | if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK, | 693 | |
694 | NETLINK_CB(cb->skb).pid, | 694 | for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { |
695 | cb->nlh->nlmsg_seq, 0, NLM_F_MULTI) <= 0) | 695 | idx = 0; |
696 | break; | 696 | head = &net->dev_index_head[h]; |
697 | hlist_for_each_entry(dev, node, head, index_hlist) { | ||
698 | if (idx < s_idx) | ||
699 | goto cont; | ||
700 | if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK, | ||
701 | NETLINK_CB(cb->skb).pid, | ||
702 | cb->nlh->nlmsg_seq, 0, | ||
703 | NLM_F_MULTI) <= 0) | ||
704 | goto out; | ||
697 | cont: | 705 | cont: |
698 | idx++; | 706 | idx++; |
707 | } | ||
699 | } | 708 | } |
700 | cb->args[0] = idx; | 709 | out: |
710 | cb->args[1] = idx; | ||
711 | cb->args[0] = h; | ||
701 | 712 | ||
702 | return skb->len; | 713 | return skb->len; |
703 | } | 714 | } |
@@ -910,9 +921,9 @@ static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) | |||
910 | err = -EINVAL; | 921 | err = -EINVAL; |
911 | ifm = nlmsg_data(nlh); | 922 | ifm = nlmsg_data(nlh); |
912 | if (ifm->ifi_index > 0) | 923 | if (ifm->ifi_index > 0) |
913 | dev = dev_get_by_index(net, ifm->ifi_index); | 924 | dev = __dev_get_by_index(net, ifm->ifi_index); |
914 | else if (tb[IFLA_IFNAME]) | 925 | else if (tb[IFLA_IFNAME]) |
915 | dev = dev_get_by_name(net, ifname); | 926 | dev = __dev_get_by_name(net, ifname); |
916 | else | 927 | else |
917 | goto errout; | 928 | goto errout; |
918 | 929 | ||
@@ -922,11 +933,9 @@ static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) | |||
922 | } | 933 | } |
923 | 934 | ||
924 | if ((err = validate_linkmsg(dev, tb)) < 0) | 935 | if ((err = validate_linkmsg(dev, tb)) < 0) |
925 | goto errout_dev; | 936 | goto errout; |
926 | 937 | ||
927 | err = do_setlink(dev, ifm, tb, ifname, 0); | 938 | err = do_setlink(dev, ifm, tb, ifname, 0); |
928 | errout_dev: | ||
929 | dev_put(dev); | ||
930 | errout: | 939 | errout: |
931 | return err; | 940 | return err; |
932 | } | 941 | } |
@@ -963,7 +972,7 @@ static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) | |||
963 | if (!ops) | 972 | if (!ops) |
964 | return -EOPNOTSUPP; | 973 | return -EOPNOTSUPP; |
965 | 974 | ||
966 | ops->dellink(dev); | 975 | ops->dellink(dev, NULL); |
967 | return 0; | 976 | return 0; |
968 | } | 977 | } |
969 | 978 | ||
@@ -1154,6 +1163,7 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) | |||
1154 | { | 1163 | { |
1155 | struct net *net = sock_net(skb->sk); | 1164 | struct net *net = sock_net(skb->sk); |
1156 | struct ifinfomsg *ifm; | 1165 | struct ifinfomsg *ifm; |
1166 | char ifname[IFNAMSIZ]; | ||
1157 | struct nlattr *tb[IFLA_MAX+1]; | 1167 | struct nlattr *tb[IFLA_MAX+1]; |
1158 | struct net_device *dev = NULL; | 1168 | struct net_device *dev = NULL; |
1159 | struct sk_buff *nskb; | 1169 | struct sk_buff *nskb; |
@@ -1163,19 +1173,23 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) | |||
1163 | if (err < 0) | 1173 | if (err < 0) |
1164 | return err; | 1174 | return err; |
1165 | 1175 | ||
1176 | if (tb[IFLA_IFNAME]) | ||
1177 | nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ); | ||
1178 | |||
1166 | ifm = nlmsg_data(nlh); | 1179 | ifm = nlmsg_data(nlh); |
1167 | if (ifm->ifi_index > 0) { | 1180 | if (ifm->ifi_index > 0) |
1168 | dev = dev_get_by_index(net, ifm->ifi_index); | 1181 | dev = __dev_get_by_index(net, ifm->ifi_index); |
1169 | if (dev == NULL) | 1182 | else if (tb[IFLA_IFNAME]) |
1170 | return -ENODEV; | 1183 | dev = __dev_get_by_name(net, ifname); |
1171 | } else | 1184 | else |
1172 | return -EINVAL; | 1185 | return -EINVAL; |
1173 | 1186 | ||
1187 | if (dev == NULL) | ||
1188 | return -ENODEV; | ||
1189 | |||
1174 | nskb = nlmsg_new(if_nlmsg_size(dev), GFP_KERNEL); | 1190 | nskb = nlmsg_new(if_nlmsg_size(dev), GFP_KERNEL); |
1175 | if (nskb == NULL) { | 1191 | if (nskb == NULL) |
1176 | err = -ENOBUFS; | 1192 | return -ENOBUFS; |
1177 | goto errout; | ||
1178 | } | ||
1179 | 1193 | ||
1180 | err = rtnl_fill_ifinfo(nskb, dev, RTM_NEWLINK, NETLINK_CB(skb).pid, | 1194 | err = rtnl_fill_ifinfo(nskb, dev, RTM_NEWLINK, NETLINK_CB(skb).pid, |
1181 | nlh->nlmsg_seq, 0, 0); | 1195 | nlh->nlmsg_seq, 0, 0); |
@@ -1183,11 +1197,8 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) | |||
1183 | /* -EMSGSIZE implies BUG in if_nlmsg_size */ | 1197 | /* -EMSGSIZE implies BUG in if_nlmsg_size */ |
1184 | WARN_ON(err == -EMSGSIZE); | 1198 | WARN_ON(err == -EMSGSIZE); |
1185 | kfree_skb(nskb); | 1199 | kfree_skb(nskb); |
1186 | goto errout; | 1200 | } else |
1187 | } | 1201 | err = rtnl_unicast(nskb, net, NETLINK_CB(skb).pid); |
1188 | err = rtnl_unicast(nskb, net, NETLINK_CB(skb).pid); | ||
1189 | errout: | ||
1190 | dev_put(dev); | ||
1191 | 1202 | ||
1192 | return err; | 1203 | return err; |
1193 | } | 1204 | } |
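The rtnetlink.c hunks above move the ifinfo dump onto the per-namespace index hash (resuming via cb->args[0]/args[1]) and let RTM_GETLINK and RTM_SETLINK resolve a device by IFLA_IFNAME as well as by ifi_index. A rough userspace sketch of the by-name lookup follows; the interface name "eth0", buffer sizes, and the single-recv reply handling are assumptions for illustration, not a full netlink client:

```c
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

int main(void)
{
	struct {
		struct nlmsghdr nlh;
		struct ifinfomsg ifi;
		char attrs[64];			/* room for IFLA_IFNAME */
	} req;
	struct sockaddr_nl kernel = { .nl_family = AF_NETLINK };
	const char *name = "eth0";
	struct rtattr *rta;
	char reply[8192];
	int fd, len;

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len   = NLMSG_LENGTH(sizeof(req.ifi));
	req.nlh.nlmsg_type  = RTM_GETLINK;
	req.nlh.nlmsg_flags = NLM_F_REQUEST;
	req.nlh.nlmsg_seq   = 1;

	/* identify the device by name instead of filling ifi_index */
	rta = (struct rtattr *)((char *)&req + NLMSG_ALIGN(req.nlh.nlmsg_len));
	rta->rta_type = IFLA_IFNAME;
	rta->rta_len  = RTA_LENGTH(strlen(name) + 1);
	strcpy(RTA_DATA(rta), name);
	req.nlh.nlmsg_len = NLMSG_ALIGN(req.nlh.nlmsg_len) + RTA_ALIGN(rta->rta_len);

	if (sendto(fd, &req, req.nlh.nlmsg_len, 0,
		   (struct sockaddr *)&kernel, sizeof(kernel)) < 0) {
		perror("sendto");
		return 1;
	}

	len = recv(fd, reply, sizeof(reply), 0);
	if (len > 0) {
		struct nlmsghdr *nlh = (struct nlmsghdr *)reply;

		if (nlh->nlmsg_type == RTM_NEWLINK)
			printf("%s has ifindex %d\n", name,
			       ((struct ifinfomsg *)NLMSG_DATA(nlh))->ifi_index);
		else
			printf("kernel replied with message type %d\n",
			       nlh->nlmsg_type);
	}
	close(fd);
	return 0;
}
```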
diff --git a/net/core/sock.c b/net/core/sock.c index 7626b6aacd68..5a51512f638a 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
@@ -274,25 +274,27 @@ static void sock_disable_timestamp(struct sock *sk, int flag) | |||
274 | 274 | ||
275 | int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) | 275 | int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) |
276 | { | 276 | { |
277 | int err = 0; | 277 | int err; |
278 | int skb_len; | 278 | int skb_len; |
279 | unsigned long flags; | ||
280 | struct sk_buff_head *list = &sk->sk_receive_queue; | ||
279 | 281 | ||
280 | /* Cast sk->rcvbuf to unsigned... It's pointless, but reduces | 282 | /* Cast sk->rcvbuf to unsigned... It's pointless, but reduces |
281 | number of warnings when compiling with -W --ANK | 283 | number of warnings when compiling with -W --ANK |
282 | */ | 284 | */ |
283 | if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= | 285 | if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= |
284 | (unsigned)sk->sk_rcvbuf) { | 286 | (unsigned)sk->sk_rcvbuf) { |
285 | err = -ENOMEM; | 287 | atomic_inc(&sk->sk_drops); |
286 | goto out; | 288 | return -ENOMEM; |
287 | } | 289 | } |
288 | 290 | ||
289 | err = sk_filter(sk, skb); | 291 | err = sk_filter(sk, skb); |
290 | if (err) | 292 | if (err) |
291 | goto out; | 293 | return err; |
292 | 294 | ||
293 | if (!sk_rmem_schedule(sk, skb->truesize)) { | 295 | if (!sk_rmem_schedule(sk, skb->truesize)) { |
294 | err = -ENOBUFS; | 296 | atomic_inc(&sk->sk_drops); |
295 | goto out; | 297 | return -ENOBUFS; |
296 | } | 298 | } |
297 | 299 | ||
298 | skb->dev = NULL; | 300 | skb->dev = NULL; |
@@ -305,12 +307,14 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) | |||
305 | */ | 307 | */ |
306 | skb_len = skb->len; | 308 | skb_len = skb->len; |
307 | 309 | ||
308 | skb_queue_tail(&sk->sk_receive_queue, skb); | 310 | spin_lock_irqsave(&list->lock, flags); |
311 | skb->dropcount = atomic_read(&sk->sk_drops); | ||
312 | __skb_queue_tail(list, skb); | ||
313 | spin_unlock_irqrestore(&list->lock, flags); | ||
309 | 314 | ||
310 | if (!sock_flag(sk, SOCK_DEAD)) | 315 | if (!sock_flag(sk, SOCK_DEAD)) |
311 | sk->sk_data_ready(sk, skb_len); | 316 | sk->sk_data_ready(sk, skb_len); |
312 | out: | 317 | return 0; |
313 | return err; | ||
314 | } | 318 | } |
315 | EXPORT_SYMBOL(sock_queue_rcv_skb); | 319 | EXPORT_SYMBOL(sock_queue_rcv_skb); |
316 | 320 | ||
@@ -348,11 +352,18 @@ discard_and_relse: | |||
348 | } | 352 | } |
349 | EXPORT_SYMBOL(sk_receive_skb); | 353 | EXPORT_SYMBOL(sk_receive_skb); |
350 | 354 | ||
355 | void sk_reset_txq(struct sock *sk) | ||
356 | { | ||
357 | sk_tx_queue_clear(sk); | ||
358 | } | ||
359 | EXPORT_SYMBOL(sk_reset_txq); | ||
360 | |||
351 | struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie) | 361 | struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie) |
352 | { | 362 | { |
353 | struct dst_entry *dst = sk->sk_dst_cache; | 363 | struct dst_entry *dst = sk->sk_dst_cache; |
354 | 364 | ||
355 | if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) { | 365 | if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) { |
366 | sk_tx_queue_clear(sk); | ||
356 | sk->sk_dst_cache = NULL; | 367 | sk->sk_dst_cache = NULL; |
357 | dst_release(dst); | 368 | dst_release(dst); |
358 | return NULL; | 369 | return NULL; |
@@ -702,6 +713,12 @@ set_rcvbuf: | |||
702 | 713 | ||
703 | /* We implement the SO_SNDLOWAT etc to | 714 | /* We implement the SO_SNDLOWAT etc to |
704 | not be settable (1003.1g 5.3) */ | 715 | not be settable (1003.1g 5.3) */ |
716 | case SO_RXQ_OVFL: | ||
717 | if (valbool) | ||
718 | sock_set_flag(sk, SOCK_RXQ_OVFL); | ||
719 | else | ||
720 | sock_reset_flag(sk, SOCK_RXQ_OVFL); | ||
721 | break; | ||
705 | default: | 722 | default: |
706 | ret = -ENOPROTOOPT; | 723 | ret = -ENOPROTOOPT; |
707 | break; | 724 | break; |
@@ -901,6 +918,10 @@ int sock_getsockopt(struct socket *sock, int level, int optname, | |||
901 | v.val = sk->sk_mark; | 918 | v.val = sk->sk_mark; |
902 | break; | 919 | break; |
903 | 920 | ||
921 | case SO_RXQ_OVFL: | ||
922 | v.val = !!sock_flag(sk, SOCK_RXQ_OVFL); | ||
923 | break; | ||
924 | |||
904 | default: | 925 | default: |
905 | return -ENOPROTOOPT; | 926 | return -ENOPROTOOPT; |
906 | } | 927 | } |
@@ -939,7 +960,8 @@ static void sock_copy(struct sock *nsk, const struct sock *osk) | |||
939 | void *sptr = nsk->sk_security; | 960 | void *sptr = nsk->sk_security; |
940 | #endif | 961 | #endif |
941 | BUILD_BUG_ON(offsetof(struct sock, sk_copy_start) != | 962 | BUILD_BUG_ON(offsetof(struct sock, sk_copy_start) != |
942 | sizeof(osk->sk_node) + sizeof(osk->sk_refcnt)); | 963 | sizeof(osk->sk_node) + sizeof(osk->sk_refcnt) + |
964 | sizeof(osk->sk_tx_queue_mapping)); | ||
943 | memcpy(&nsk->sk_copy_start, &osk->sk_copy_start, | 965 | memcpy(&nsk->sk_copy_start, &osk->sk_copy_start, |
944 | osk->sk_prot->obj_size - offsetof(struct sock, sk_copy_start)); | 966 | osk->sk_prot->obj_size - offsetof(struct sock, sk_copy_start)); |
945 | #ifdef CONFIG_SECURITY_NETWORK | 967 | #ifdef CONFIG_SECURITY_NETWORK |
@@ -983,6 +1005,7 @@ static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority, | |||
983 | 1005 | ||
984 | if (!try_module_get(prot->owner)) | 1006 | if (!try_module_get(prot->owner)) |
985 | goto out_free_sec; | 1007 | goto out_free_sec; |
1008 | sk_tx_queue_clear(sk); | ||
986 | } | 1009 | } |
987 | 1010 | ||
988 | return sk; | 1011 | return sk; |
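The sock.c hunks above add the SO_RXQ_OVFL socket option and record sk_drops into skb->dropcount under the receive-queue lock. A hedged userspace sketch of consuming it: the drop count is assumed to come back as a SOL_SOCKET/SO_RXQ_OVFL control message on recvmsg() (that delivery path is not shown in this diff), and the fallback value 40 mirrors asm-generic/socket.h on most architectures; the UDP port is arbitrary:

```c
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>

#ifndef SO_RXQ_OVFL
#define SO_RXQ_OVFL 40			/* asm-generic value, assumption */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0), one = 1;
	struct sockaddr_in addr = { .sin_family = AF_INET,
				    .sin_port = htons(9999) };
	char data[2048], cbuf[CMSG_SPACE(sizeof(uint32_t))];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
			      .msg_control = cbuf,
			      .msg_controllen = sizeof(cbuf) };
	struct cmsghdr *cmsg;

	/* ask the kernel to report receive-queue overflow counts */
	setsockopt(fd, SOL_SOCKET, SO_RXQ_OVFL, &one, sizeof(one));
	bind(fd, (struct sockaddr *)&addr, sizeof(addr));

	while (recvmsg(fd, &msg, 0) >= 0) {
		for (cmsg = CMSG_FIRSTHDR(&msg); cmsg;
		     cmsg = CMSG_NXTHDR(&msg, cmsg)) {
			if (cmsg->cmsg_level == SOL_SOCKET &&
			    cmsg->cmsg_type == SO_RXQ_OVFL) {
				uint32_t drops;

				memcpy(&drops, CMSG_DATA(cmsg), sizeof(drops));
				printf("drops so far: %u\n", drops);
			}
		}
		msg.msg_controllen = sizeof(cbuf);	/* reset for next call */
	}
	return 0;
}
```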
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c index e8cf99e880b0..a47a8c918ee8 100644 --- a/net/dccp/ccids/ccid2.c +++ b/net/dccp/ccids/ccid2.c | |||
@@ -33,20 +33,20 @@ | |||
33 | static int ccid2_debug; | 33 | static int ccid2_debug; |
34 | #define ccid2_pr_debug(format, a...) DCCP_PR_DEBUG(ccid2_debug, format, ##a) | 34 | #define ccid2_pr_debug(format, a...) DCCP_PR_DEBUG(ccid2_debug, format, ##a) |
35 | 35 | ||
36 | static void ccid2_hc_tx_check_sanity(const struct ccid2_hc_tx_sock *hctx) | 36 | static void ccid2_hc_tx_check_sanity(const struct ccid2_hc_tx_sock *hc) |
37 | { | 37 | { |
38 | int len = 0; | 38 | int len = 0; |
39 | int pipe = 0; | 39 | int pipe = 0; |
40 | struct ccid2_seq *seqp = hctx->ccid2hctx_seqh; | 40 | struct ccid2_seq *seqp = hc->tx_seqh; |
41 | 41 | ||
42 | /* there is data in the chain */ | 42 | /* there is data in the chain */ |
43 | if (seqp != hctx->ccid2hctx_seqt) { | 43 | if (seqp != hc->tx_seqt) { |
44 | seqp = seqp->ccid2s_prev; | 44 | seqp = seqp->ccid2s_prev; |
45 | len++; | 45 | len++; |
46 | if (!seqp->ccid2s_acked) | 46 | if (!seqp->ccid2s_acked) |
47 | pipe++; | 47 | pipe++; |
48 | 48 | ||
49 | while (seqp != hctx->ccid2hctx_seqt) { | 49 | while (seqp != hc->tx_seqt) { |
50 | struct ccid2_seq *prev = seqp->ccid2s_prev; | 50 | struct ccid2_seq *prev = seqp->ccid2s_prev; |
51 | 51 | ||
52 | len++; | 52 | len++; |
@@ -63,30 +63,30 @@ static void ccid2_hc_tx_check_sanity(const struct ccid2_hc_tx_sock *hctx) | |||
63 | } | 63 | } |
64 | } | 64 | } |
65 | 65 | ||
66 | BUG_ON(pipe != hctx->ccid2hctx_pipe); | 66 | BUG_ON(pipe != hc->tx_pipe); |
67 | ccid2_pr_debug("len of chain=%d\n", len); | 67 | ccid2_pr_debug("len of chain=%d\n", len); |
68 | 68 | ||
69 | do { | 69 | do { |
70 | seqp = seqp->ccid2s_prev; | 70 | seqp = seqp->ccid2s_prev; |
71 | len++; | 71 | len++; |
72 | } while (seqp != hctx->ccid2hctx_seqh); | 72 | } while (seqp != hc->tx_seqh); |
73 | 73 | ||
74 | ccid2_pr_debug("total len=%d\n", len); | 74 | ccid2_pr_debug("total len=%d\n", len); |
75 | BUG_ON(len != hctx->ccid2hctx_seqbufc * CCID2_SEQBUF_LEN); | 75 | BUG_ON(len != hc->tx_seqbufc * CCID2_SEQBUF_LEN); |
76 | } | 76 | } |
77 | #else | 77 | #else |
78 | #define ccid2_pr_debug(format, a...) | 78 | #define ccid2_pr_debug(format, a...) |
79 | #define ccid2_hc_tx_check_sanity(hctx) | 79 | #define ccid2_hc_tx_check_sanity(hc) |
80 | #endif | 80 | #endif |
81 | 81 | ||
82 | static int ccid2_hc_tx_alloc_seq(struct ccid2_hc_tx_sock *hctx) | 82 | static int ccid2_hc_tx_alloc_seq(struct ccid2_hc_tx_sock *hc) |
83 | { | 83 | { |
84 | struct ccid2_seq *seqp; | 84 | struct ccid2_seq *seqp; |
85 | int i; | 85 | int i; |
86 | 86 | ||
87 | /* check if we have space to preserve the pointer to the buffer */ | 87 | /* check if we have space to preserve the pointer to the buffer */ |
88 | if (hctx->ccid2hctx_seqbufc >= (sizeof(hctx->ccid2hctx_seqbuf) / | 88 | if (hc->tx_seqbufc >= (sizeof(hc->tx_seqbuf) / |
89 | sizeof(struct ccid2_seq*))) | 89 | sizeof(struct ccid2_seq *))) |
90 | return -ENOMEM; | 90 | return -ENOMEM; |
91 | 91 | ||
92 | /* allocate buffer and initialize linked list */ | 92 | /* allocate buffer and initialize linked list */ |
@@ -102,29 +102,29 @@ static int ccid2_hc_tx_alloc_seq(struct ccid2_hc_tx_sock *hctx) | |||
102 | seqp->ccid2s_prev = &seqp[CCID2_SEQBUF_LEN - 1]; | 102 | seqp->ccid2s_prev = &seqp[CCID2_SEQBUF_LEN - 1]; |
103 | 103 | ||
104 | /* This is the first allocation. Initiate the head and tail. */ | 104 | /* This is the first allocation. Initiate the head and tail. */ |
105 | if (hctx->ccid2hctx_seqbufc == 0) | 105 | if (hc->tx_seqbufc == 0) |
106 | hctx->ccid2hctx_seqh = hctx->ccid2hctx_seqt = seqp; | 106 | hc->tx_seqh = hc->tx_seqt = seqp; |
107 | else { | 107 | else { |
108 | /* link the existing list with the one we just created */ | 108 | /* link the existing list with the one we just created */ |
109 | hctx->ccid2hctx_seqh->ccid2s_next = seqp; | 109 | hc->tx_seqh->ccid2s_next = seqp; |
110 | seqp->ccid2s_prev = hctx->ccid2hctx_seqh; | 110 | seqp->ccid2s_prev = hc->tx_seqh; |
111 | 111 | ||
112 | hctx->ccid2hctx_seqt->ccid2s_prev = &seqp[CCID2_SEQBUF_LEN - 1]; | 112 | hc->tx_seqt->ccid2s_prev = &seqp[CCID2_SEQBUF_LEN - 1]; |
113 | seqp[CCID2_SEQBUF_LEN - 1].ccid2s_next = hctx->ccid2hctx_seqt; | 113 | seqp[CCID2_SEQBUF_LEN - 1].ccid2s_next = hc->tx_seqt; |
114 | } | 114 | } |
115 | 115 | ||
116 | /* store the original pointer to the buffer so we can free it */ | 116 | /* store the original pointer to the buffer so we can free it */ |
117 | hctx->ccid2hctx_seqbuf[hctx->ccid2hctx_seqbufc] = seqp; | 117 | hc->tx_seqbuf[hc->tx_seqbufc] = seqp; |
118 | hctx->ccid2hctx_seqbufc++; | 118 | hc->tx_seqbufc++; |
119 | 119 | ||
120 | return 0; | 120 | return 0; |
121 | } | 121 | } |
122 | 122 | ||
123 | static int ccid2_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb) | 123 | static int ccid2_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb) |
124 | { | 124 | { |
125 | struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk); | 125 | struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); |
126 | 126 | ||
127 | if (hctx->ccid2hctx_pipe < hctx->ccid2hctx_cwnd) | 127 | if (hc->tx_pipe < hc->tx_cwnd) |
128 | return 0; | 128 | return 0; |
129 | 129 | ||
130 | return 1; /* XXX CCID should dequeue when ready instead of polling */ | 130 | return 1; /* XXX CCID should dequeue when ready instead of polling */ |
@@ -133,7 +133,7 @@ static int ccid2_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb) | |||
133 | static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val) | 133 | static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val) |
134 | { | 134 | { |
135 | struct dccp_sock *dp = dccp_sk(sk); | 135 | struct dccp_sock *dp = dccp_sk(sk); |
136 | u32 max_ratio = DIV_ROUND_UP(ccid2_hc_tx_sk(sk)->ccid2hctx_cwnd, 2); | 136 | u32 max_ratio = DIV_ROUND_UP(ccid2_hc_tx_sk(sk)->tx_cwnd, 2); |
137 | 137 | ||
138 | /* | 138 | /* |
139 | * Ensure that Ack Ratio does not exceed ceil(cwnd/2), which is (2) from | 139 | * Ensure that Ack Ratio does not exceed ceil(cwnd/2), which is (2) from |
@@ -155,10 +155,10 @@ static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val) | |||
155 | dp->dccps_l_ack_ratio = val; | 155 | dp->dccps_l_ack_ratio = val; |
156 | } | 156 | } |
157 | 157 | ||
158 | static void ccid2_change_srtt(struct ccid2_hc_tx_sock *hctx, long val) | 158 | static void ccid2_change_srtt(struct ccid2_hc_tx_sock *hc, long val) |
159 | { | 159 | { |
160 | ccid2_pr_debug("change SRTT to %ld\n", val); | 160 | ccid2_pr_debug("change SRTT to %ld\n", val); |
161 | hctx->ccid2hctx_srtt = val; | 161 | hc->tx_srtt = val; |
162 | } | 162 | } |
163 | 163 | ||
164 | static void ccid2_start_rto_timer(struct sock *sk); | 164 | static void ccid2_start_rto_timer(struct sock *sk); |
@@ -166,45 +166,44 @@ static void ccid2_start_rto_timer(struct sock *sk); | |||
166 | static void ccid2_hc_tx_rto_expire(unsigned long data) | 166 | static void ccid2_hc_tx_rto_expire(unsigned long data) |
167 | { | 167 | { |
168 | struct sock *sk = (struct sock *)data; | 168 | struct sock *sk = (struct sock *)data; |
169 | struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk); | 169 | struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); |
170 | long s; | 170 | long s; |
171 | 171 | ||
172 | bh_lock_sock(sk); | 172 | bh_lock_sock(sk); |
173 | if (sock_owned_by_user(sk)) { | 173 | if (sock_owned_by_user(sk)) { |
174 | sk_reset_timer(sk, &hctx->ccid2hctx_rtotimer, | 174 | sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + HZ / 5); |
175 | jiffies + HZ / 5); | ||
176 | goto out; | 175 | goto out; |
177 | } | 176 | } |
178 | 177 | ||
179 | ccid2_pr_debug("RTO_EXPIRE\n"); | 178 | ccid2_pr_debug("RTO_EXPIRE\n"); |
180 | 179 | ||
181 | ccid2_hc_tx_check_sanity(hctx); | 180 | ccid2_hc_tx_check_sanity(hc); |
182 | 181 | ||
183 | /* back-off timer */ | 182 | /* back-off timer */ |
184 | hctx->ccid2hctx_rto <<= 1; | 183 | hc->tx_rto <<= 1; |
185 | 184 | ||
186 | s = hctx->ccid2hctx_rto / HZ; | 185 | s = hc->tx_rto / HZ; |
187 | if (s > 60) | 186 | if (s > 60) |
188 | hctx->ccid2hctx_rto = 60 * HZ; | 187 | hc->tx_rto = 60 * HZ; |
189 | 188 | ||
190 | ccid2_start_rto_timer(sk); | 189 | ccid2_start_rto_timer(sk); |
191 | 190 | ||
192 | /* adjust pipe, cwnd etc */ | 191 | /* adjust pipe, cwnd etc */ |
193 | hctx->ccid2hctx_ssthresh = hctx->ccid2hctx_cwnd / 2; | 192 | hc->tx_ssthresh = hc->tx_cwnd / 2; |
194 | if (hctx->ccid2hctx_ssthresh < 2) | 193 | if (hc->tx_ssthresh < 2) |
195 | hctx->ccid2hctx_ssthresh = 2; | 194 | hc->tx_ssthresh = 2; |
196 | hctx->ccid2hctx_cwnd = 1; | 195 | hc->tx_cwnd = 1; |
197 | hctx->ccid2hctx_pipe = 0; | 196 | hc->tx_pipe = 0; |
198 | 197 | ||
199 | /* clear state about stuff we sent */ | 198 | /* clear state about stuff we sent */ |
200 | hctx->ccid2hctx_seqt = hctx->ccid2hctx_seqh; | 199 | hc->tx_seqt = hc->tx_seqh; |
201 | hctx->ccid2hctx_packets_acked = 0; | 200 | hc->tx_packets_acked = 0; |
202 | 201 | ||
203 | /* clear ack ratio state. */ | 202 | /* clear ack ratio state. */ |
204 | hctx->ccid2hctx_rpseq = 0; | 203 | hc->tx_rpseq = 0; |
205 | hctx->ccid2hctx_rpdupack = -1; | 204 | hc->tx_rpdupack = -1; |
206 | ccid2_change_l_ack_ratio(sk, 1); | 205 | ccid2_change_l_ack_ratio(sk, 1); |
207 | ccid2_hc_tx_check_sanity(hctx); | 206 | ccid2_hc_tx_check_sanity(hc); |
208 | out: | 207 | out: |
209 | bh_unlock_sock(sk); | 208 | bh_unlock_sock(sk); |
210 | sock_put(sk); | 209 | sock_put(sk); |
@@ -212,42 +211,40 @@ out: | |||
212 | 211 | ||
213 | static void ccid2_start_rto_timer(struct sock *sk) | 212 | static void ccid2_start_rto_timer(struct sock *sk) |
214 | { | 213 | { |
215 | struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk); | 214 | struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); |
216 | 215 | ||
217 | ccid2_pr_debug("setting RTO timeout=%ld\n", hctx->ccid2hctx_rto); | 216 | ccid2_pr_debug("setting RTO timeout=%ld\n", hc->tx_rto); |
218 | 217 | ||
219 | BUG_ON(timer_pending(&hctx->ccid2hctx_rtotimer)); | 218 | BUG_ON(timer_pending(&hc->tx_rtotimer)); |
220 | sk_reset_timer(sk, &hctx->ccid2hctx_rtotimer, | 219 | sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto); |
221 | jiffies + hctx->ccid2hctx_rto); | ||
222 | } | 220 | } |
223 | 221 | ||
224 | static void ccid2_hc_tx_packet_sent(struct sock *sk, int more, unsigned int len) | 222 | static void ccid2_hc_tx_packet_sent(struct sock *sk, int more, unsigned int len) |
225 | { | 223 | { |
226 | struct dccp_sock *dp = dccp_sk(sk); | 224 | struct dccp_sock *dp = dccp_sk(sk); |
227 | struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk); | 225 | struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); |
228 | struct ccid2_seq *next; | 226 | struct ccid2_seq *next; |
229 | 227 | ||
230 | hctx->ccid2hctx_pipe++; | 228 | hc->tx_pipe++; |
231 | 229 | ||
232 | hctx->ccid2hctx_seqh->ccid2s_seq = dp->dccps_gss; | 230 | hc->tx_seqh->ccid2s_seq = dp->dccps_gss; |
233 | hctx->ccid2hctx_seqh->ccid2s_acked = 0; | 231 | hc->tx_seqh->ccid2s_acked = 0; |
234 | hctx->ccid2hctx_seqh->ccid2s_sent = jiffies; | 232 | hc->tx_seqh->ccid2s_sent = jiffies; |
235 | 233 | ||
236 | next = hctx->ccid2hctx_seqh->ccid2s_next; | 234 | next = hc->tx_seqh->ccid2s_next; |
237 | /* check if we need to alloc more space */ | 235 | /* check if we need to alloc more space */ |
238 | if (next == hctx->ccid2hctx_seqt) { | 236 | if (next == hc->tx_seqt) { |
239 | if (ccid2_hc_tx_alloc_seq(hctx)) { | 237 | if (ccid2_hc_tx_alloc_seq(hc)) { |
240 | DCCP_CRIT("packet history - out of memory!"); | 238 | DCCP_CRIT("packet history - out of memory!"); |
241 | /* FIXME: find a more graceful way to bail out */ | 239 | /* FIXME: find a more graceful way to bail out */ |
242 | return; | 240 | return; |
243 | } | 241 | } |
244 | next = hctx->ccid2hctx_seqh->ccid2s_next; | 242 | next = hc->tx_seqh->ccid2s_next; |
245 | BUG_ON(next == hctx->ccid2hctx_seqt); | 243 | BUG_ON(next == hc->tx_seqt); |
246 | } | 244 | } |
247 | hctx->ccid2hctx_seqh = next; | 245 | hc->tx_seqh = next; |
248 | 246 | ||
249 | ccid2_pr_debug("cwnd=%d pipe=%d\n", hctx->ccid2hctx_cwnd, | 247 | ccid2_pr_debug("cwnd=%d pipe=%d\n", hc->tx_cwnd, hc->tx_pipe); |
250 | hctx->ccid2hctx_pipe); | ||
251 | 248 | ||
252 | /* | 249 | /* |
253 | * FIXME: The code below is broken and the variables have been removed | 250 | * FIXME: The code below is broken and the variables have been removed |
@@ -270,12 +267,12 @@ static void ccid2_hc_tx_packet_sent(struct sock *sk, int more, unsigned int len) | |||
270 | */ | 267 | */ |
271 | #if 0 | 268 | #if 0 |
272 | /* Ack Ratio. Need to maintain a concept of how many windows we sent */ | 269 | /* Ack Ratio. Need to maintain a concept of how many windows we sent */ |
273 | hctx->ccid2hctx_arsent++; | 270 | hc->tx_arsent++; |
274 | /* We had an ack loss in this window... */ | 271 | /* We had an ack loss in this window... */ |
275 | if (hctx->ccid2hctx_ackloss) { | 272 | if (hc->tx_ackloss) { |
276 | if (hctx->ccid2hctx_arsent >= hctx->ccid2hctx_cwnd) { | 273 | if (hc->tx_arsent >= hc->tx_cwnd) { |
277 | hctx->ccid2hctx_arsent = 0; | 274 | hc->tx_arsent = 0; |
278 | hctx->ccid2hctx_ackloss = 0; | 275 | hc->tx_ackloss = 0; |
279 | } | 276 | } |
280 | } else { | 277 | } else { |
281 | /* No acks lost up to now... */ | 278 | /* No acks lost up to now... */ |
@@ -285,28 +282,28 @@ static void ccid2_hc_tx_packet_sent(struct sock *sk, int more, unsigned int len) | |||
285 | int denom = dp->dccps_l_ack_ratio * dp->dccps_l_ack_ratio - | 282 | int denom = dp->dccps_l_ack_ratio * dp->dccps_l_ack_ratio - |
286 | dp->dccps_l_ack_ratio; | 283 | dp->dccps_l_ack_ratio; |
287 | 284 | ||
288 | denom = hctx->ccid2hctx_cwnd * hctx->ccid2hctx_cwnd / denom; | 285 | denom = hc->tx_cwnd * hc->tx_cwnd / denom; |
289 | 286 | ||
290 | if (hctx->ccid2hctx_arsent >= denom) { | 287 | if (hc->tx_arsent >= denom) { |
291 | ccid2_change_l_ack_ratio(sk, dp->dccps_l_ack_ratio - 1); | 288 | ccid2_change_l_ack_ratio(sk, dp->dccps_l_ack_ratio - 1); |
292 | hctx->ccid2hctx_arsent = 0; | 289 | hc->tx_arsent = 0; |
293 | } | 290 | } |
294 | } else { | 291 | } else { |
295 | /* we can't increase ack ratio further [1] */ | 292 | /* we can't increase ack ratio further [1] */ |
296 | hctx->ccid2hctx_arsent = 0; /* or maybe set it to cwnd*/ | 293 | hc->tx_arsent = 0; /* or maybe set it to cwnd*/ |
297 | } | 294 | } |
298 | } | 295 | } |
299 | #endif | 296 | #endif |
300 | 297 | ||
301 | /* setup RTO timer */ | 298 | /* setup RTO timer */ |
302 | if (!timer_pending(&hctx->ccid2hctx_rtotimer)) | 299 | if (!timer_pending(&hc->tx_rtotimer)) |
303 | ccid2_start_rto_timer(sk); | 300 | ccid2_start_rto_timer(sk); |
304 | 301 | ||
305 | #ifdef CONFIG_IP_DCCP_CCID2_DEBUG | 302 | #ifdef CONFIG_IP_DCCP_CCID2_DEBUG |
306 | do { | 303 | do { |
307 | struct ccid2_seq *seqp = hctx->ccid2hctx_seqt; | 304 | struct ccid2_seq *seqp = hc->tx_seqt; |
308 | 305 | ||
309 | while (seqp != hctx->ccid2hctx_seqh) { | 306 | while (seqp != hc->tx_seqh) { |
310 | ccid2_pr_debug("out seq=%llu acked=%d time=%lu\n", | 307 | ccid2_pr_debug("out seq=%llu acked=%d time=%lu\n", |
311 | (unsigned long long)seqp->ccid2s_seq, | 308 | (unsigned long long)seqp->ccid2s_seq, |
312 | seqp->ccid2s_acked, seqp->ccid2s_sent); | 309 | seqp->ccid2s_acked, seqp->ccid2s_sent); |
@@ -314,7 +311,7 @@ static void ccid2_hc_tx_packet_sent(struct sock *sk, int more, unsigned int len) | |||
314 | } | 311 | } |
315 | } while (0); | 312 | } while (0); |
316 | ccid2_pr_debug("=========\n"); | 313 | ccid2_pr_debug("=========\n"); |
317 | ccid2_hc_tx_check_sanity(hctx); | 314 | ccid2_hc_tx_check_sanity(hc); |
318 | #endif | 315 | #endif |
319 | } | 316 | } |
320 | 317 | ||
@@ -382,9 +379,9 @@ out_invalid_option: | |||
382 | 379 | ||
383 | static void ccid2_hc_tx_kill_rto_timer(struct sock *sk) | 380 | static void ccid2_hc_tx_kill_rto_timer(struct sock *sk) |
384 | { | 381 | { |
385 | struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk); | 382 | struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); |
386 | 383 | ||
387 | sk_stop_timer(sk, &hctx->ccid2hctx_rtotimer); | 384 | sk_stop_timer(sk, &hc->tx_rtotimer); |
388 | ccid2_pr_debug("deleted RTO timer\n"); | 385 | ccid2_pr_debug("deleted RTO timer\n"); |
389 | } | 386 | } |
390 | 387 | ||
@@ -392,75 +389,75 @@ static inline void ccid2_new_ack(struct sock *sk, | |||
392 | struct ccid2_seq *seqp, | 389 | struct ccid2_seq *seqp, |
393 | unsigned int *maxincr) | 390 | unsigned int *maxincr) |
394 | { | 391 | { |
395 | struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk); | 392 | struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); |
396 | 393 | ||
397 | if (hctx->ccid2hctx_cwnd < hctx->ccid2hctx_ssthresh) { | 394 | if (hc->tx_cwnd < hc->tx_ssthresh) { |
398 | if (*maxincr > 0 && ++hctx->ccid2hctx_packets_acked == 2) { | 395 | if (*maxincr > 0 && ++hc->tx_packets_acked == 2) { |
399 | hctx->ccid2hctx_cwnd += 1; | 396 | hc->tx_cwnd += 1; |
400 | *maxincr -= 1; | 397 | *maxincr -= 1; |
401 | hctx->ccid2hctx_packets_acked = 0; | 398 | hc->tx_packets_acked = 0; |
402 | } | 399 | } |
403 | } else if (++hctx->ccid2hctx_packets_acked >= hctx->ccid2hctx_cwnd) { | 400 | } else if (++hc->tx_packets_acked >= hc->tx_cwnd) { |
404 | hctx->ccid2hctx_cwnd += 1; | 401 | hc->tx_cwnd += 1; |
405 | hctx->ccid2hctx_packets_acked = 0; | 402 | hc->tx_packets_acked = 0; |
406 | } | 403 | } |
407 | 404 | ||
408 | /* update RTO */ | 405 | /* update RTO */ |
409 | if (hctx->ccid2hctx_srtt == -1 || | 406 | if (hc->tx_srtt == -1 || |
410 | time_after(jiffies, hctx->ccid2hctx_lastrtt + hctx->ccid2hctx_srtt)) { | 407 | time_after(jiffies, hc->tx_lastrtt + hc->tx_srtt)) { |
411 | unsigned long r = (long)jiffies - (long)seqp->ccid2s_sent; | 408 | unsigned long r = (long)jiffies - (long)seqp->ccid2s_sent; |
412 | int s; | 409 | int s; |
413 | 410 | ||
414 | /* first measurement */ | 411 | /* first measurement */ |
415 | if (hctx->ccid2hctx_srtt == -1) { | 412 | if (hc->tx_srtt == -1) { |
416 | ccid2_pr_debug("R: %lu Time=%lu seq=%llu\n", | 413 | ccid2_pr_debug("R: %lu Time=%lu seq=%llu\n", |
417 | r, jiffies, | 414 | r, jiffies, |
418 | (unsigned long long)seqp->ccid2s_seq); | 415 | (unsigned long long)seqp->ccid2s_seq); |
419 | ccid2_change_srtt(hctx, r); | 416 | ccid2_change_srtt(hc, r); |
420 | hctx->ccid2hctx_rttvar = r >> 1; | 417 | hc->tx_rttvar = r >> 1; |
421 | } else { | 418 | } else { |
422 | /* RTTVAR */ | 419 | /* RTTVAR */ |
423 | long tmp = hctx->ccid2hctx_srtt - r; | 420 | long tmp = hc->tx_srtt - r; |
424 | long srtt; | 421 | long srtt; |
425 | 422 | ||
426 | if (tmp < 0) | 423 | if (tmp < 0) |
427 | tmp *= -1; | 424 | tmp *= -1; |
428 | 425 | ||
429 | tmp >>= 2; | 426 | tmp >>= 2; |
430 | hctx->ccid2hctx_rttvar *= 3; | 427 | hc->tx_rttvar *= 3; |
431 | hctx->ccid2hctx_rttvar >>= 2; | 428 | hc->tx_rttvar >>= 2; |
432 | hctx->ccid2hctx_rttvar += tmp; | 429 | hc->tx_rttvar += tmp; |
433 | 430 | ||
434 | /* SRTT */ | 431 | /* SRTT */ |
435 | srtt = hctx->ccid2hctx_srtt; | 432 | srtt = hc->tx_srtt; |
436 | srtt *= 7; | 433 | srtt *= 7; |
437 | srtt >>= 3; | 434 | srtt >>= 3; |
438 | tmp = r >> 3; | 435 | tmp = r >> 3; |
439 | srtt += tmp; | 436 | srtt += tmp; |
440 | ccid2_change_srtt(hctx, srtt); | 437 | ccid2_change_srtt(hc, srtt); |
441 | } | 438 | } |
442 | s = hctx->ccid2hctx_rttvar << 2; | 439 | s = hc->tx_rttvar << 2; |
443 | /* clock granularity is 1 when based on jiffies */ | 440 | /* clock granularity is 1 when based on jiffies */ |
444 | if (!s) | 441 | if (!s) |
445 | s = 1; | 442 | s = 1; |
446 | hctx->ccid2hctx_rto = hctx->ccid2hctx_srtt + s; | 443 | hc->tx_rto = hc->tx_srtt + s; |
447 | 444 | ||
448 | /* must be at least a second */ | 445 | /* must be at least a second */ |
449 | s = hctx->ccid2hctx_rto / HZ; | 446 | s = hc->tx_rto / HZ; |
450 | /* DCCP doesn't require this [but I like it cuz my code sux] */ | 447 | /* DCCP doesn't require this [but I like it cuz my code sux] */ |
451 | #if 1 | 448 | #if 1 |
452 | if (s < 1) | 449 | if (s < 1) |
453 | hctx->ccid2hctx_rto = HZ; | 450 | hc->tx_rto = HZ; |
454 | #endif | 451 | #endif |
455 | /* max 60 seconds */ | 452 | /* max 60 seconds */ |
456 | if (s > 60) | 453 | if (s > 60) |
457 | hctx->ccid2hctx_rto = HZ * 60; | 454 | hc->tx_rto = HZ * 60; |
458 | 455 | ||
459 | hctx->ccid2hctx_lastrtt = jiffies; | 456 | hc->tx_lastrtt = jiffies; |
460 | 457 | ||
461 | ccid2_pr_debug("srtt: %ld rttvar: %ld rto: %ld (HZ=%d) R=%lu\n", | 458 | ccid2_pr_debug("srtt: %ld rttvar: %ld rto: %ld (HZ=%d) R=%lu\n", |
462 | hctx->ccid2hctx_srtt, hctx->ccid2hctx_rttvar, | 459 | hc->tx_srtt, hc->tx_rttvar, |
463 | hctx->ccid2hctx_rto, HZ, r); | 460 | hc->tx_rto, HZ, r); |
464 | } | 461 | } |
465 | 462 | ||
466 | /* we got a new ack, so re-start RTO timer */ | 463 | /* we got a new ack, so re-start RTO timer */ |
@@ -470,40 +467,40 @@ static inline void ccid2_new_ack(struct sock *sk, | |||
470 | 467 | ||
471 | static void ccid2_hc_tx_dec_pipe(struct sock *sk) | 468 | static void ccid2_hc_tx_dec_pipe(struct sock *sk) |
472 | { | 469 | { |
473 | struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk); | 470 | struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); |
474 | 471 | ||
475 | if (hctx->ccid2hctx_pipe == 0) | 472 | if (hc->tx_pipe == 0) |
476 | DCCP_BUG("pipe == 0"); | 473 | DCCP_BUG("pipe == 0"); |
477 | else | 474 | else |
478 | hctx->ccid2hctx_pipe--; | 475 | hc->tx_pipe--; |
479 | 476 | ||
480 | if (hctx->ccid2hctx_pipe == 0) | 477 | if (hc->tx_pipe == 0) |
481 | ccid2_hc_tx_kill_rto_timer(sk); | 478 | ccid2_hc_tx_kill_rto_timer(sk); |
482 | } | 479 | } |
483 | 480 | ||
484 | static void ccid2_congestion_event(struct sock *sk, struct ccid2_seq *seqp) | 481 | static void ccid2_congestion_event(struct sock *sk, struct ccid2_seq *seqp) |
485 | { | 482 | { |
486 | struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk); | 483 | struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); |
487 | 484 | ||
488 | if (time_before(seqp->ccid2s_sent, hctx->ccid2hctx_last_cong)) { | 485 | if (time_before(seqp->ccid2s_sent, hc->tx_last_cong)) { |
489 | ccid2_pr_debug("Multiple losses in an RTT---treating as one\n"); | 486 | ccid2_pr_debug("Multiple losses in an RTT---treating as one\n"); |
490 | return; | 487 | return; |
491 | } | 488 | } |
492 | 489 | ||
493 | hctx->ccid2hctx_last_cong = jiffies; | 490 | hc->tx_last_cong = jiffies; |
494 | 491 | ||
495 | hctx->ccid2hctx_cwnd = hctx->ccid2hctx_cwnd / 2 ? : 1U; | 492 | hc->tx_cwnd = hc->tx_cwnd / 2 ? : 1U; |
496 | hctx->ccid2hctx_ssthresh = max(hctx->ccid2hctx_cwnd, 2U); | 493 | hc->tx_ssthresh = max(hc->tx_cwnd, 2U); |
497 | 494 | ||
498 | /* Avoid spurious timeouts resulting from Ack Ratio > cwnd */ | 495 | /* Avoid spurious timeouts resulting from Ack Ratio > cwnd */ |
499 | if (dccp_sk(sk)->dccps_l_ack_ratio > hctx->ccid2hctx_cwnd) | 496 | if (dccp_sk(sk)->dccps_l_ack_ratio > hc->tx_cwnd) |
500 | ccid2_change_l_ack_ratio(sk, hctx->ccid2hctx_cwnd); | 497 | ccid2_change_l_ack_ratio(sk, hc->tx_cwnd); |
501 | } | 498 | } |
502 | 499 | ||
503 | static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) | 500 | static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) |
504 | { | 501 | { |
505 | struct dccp_sock *dp = dccp_sk(sk); | 502 | struct dccp_sock *dp = dccp_sk(sk); |
506 | struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk); | 503 | struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); |
507 | u64 ackno, seqno; | 504 | u64 ackno, seqno; |
508 | struct ccid2_seq *seqp; | 505 | struct ccid2_seq *seqp; |
509 | unsigned char *vector; | 506 | unsigned char *vector; |
@@ -512,7 +509,7 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
512 | int done = 0; | 509 | int done = 0; |
513 | unsigned int maxincr = 0; | 510 | unsigned int maxincr = 0; |
514 | 511 | ||
515 | ccid2_hc_tx_check_sanity(hctx); | 512 | ccid2_hc_tx_check_sanity(hc); |
516 | /* check reverse path congestion */ | 513 | /* check reverse path congestion */ |
517 | seqno = DCCP_SKB_CB(skb)->dccpd_seq; | 514 | seqno = DCCP_SKB_CB(skb)->dccpd_seq; |
518 | 515 | ||
@@ -521,21 +518,21 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
521 | * -sorbo. | 518 | * -sorbo. |
522 | */ | 519 | */ |
523 | /* need to bootstrap */ | 520 | /* need to bootstrap */ |
524 | if (hctx->ccid2hctx_rpdupack == -1) { | 521 | if (hc->tx_rpdupack == -1) { |
525 | hctx->ccid2hctx_rpdupack = 0; | 522 | hc->tx_rpdupack = 0; |
526 | hctx->ccid2hctx_rpseq = seqno; | 523 | hc->tx_rpseq = seqno; |
527 | } else { | 524 | } else { |
528 | /* check if packet is consecutive */ | 525 | /* check if packet is consecutive */ |
529 | if (dccp_delta_seqno(hctx->ccid2hctx_rpseq, seqno) == 1) | 526 | if (dccp_delta_seqno(hc->tx_rpseq, seqno) == 1) |
530 | hctx->ccid2hctx_rpseq = seqno; | 527 | hc->tx_rpseq = seqno; |
531 | /* it's a later packet */ | 528 | /* it's a later packet */ |
532 | else if (after48(seqno, hctx->ccid2hctx_rpseq)) { | 529 | else if (after48(seqno, hc->tx_rpseq)) { |
533 | hctx->ccid2hctx_rpdupack++; | 530 | hc->tx_rpdupack++; |
534 | 531 | ||
535 | /* check if we got enough dupacks */ | 532 | /* check if we got enough dupacks */ |
536 | if (hctx->ccid2hctx_rpdupack >= NUMDUPACK) { | 533 | if (hc->tx_rpdupack >= NUMDUPACK) { |
537 | hctx->ccid2hctx_rpdupack = -1; /* XXX lame */ | 534 | hc->tx_rpdupack = -1; /* XXX lame */ |
538 | hctx->ccid2hctx_rpseq = 0; | 535 | hc->tx_rpseq = 0; |
539 | 536 | ||
540 | ccid2_change_l_ack_ratio(sk, 2 * dp->dccps_l_ack_ratio); | 537 | ccid2_change_l_ack_ratio(sk, 2 * dp->dccps_l_ack_ratio); |
541 | } | 538 | } |
@@ -544,7 +541,7 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
544 | 541 | ||
545 | /* check forward path congestion */ | 542 | /* check forward path congestion */ |
546 | /* still didn't send out new data packets */ | 543 | /* still didn't send out new data packets */ |
547 | if (hctx->ccid2hctx_seqh == hctx->ccid2hctx_seqt) | 544 | if (hc->tx_seqh == hc->tx_seqt) |
548 | return; | 545 | return; |
549 | 546 | ||
550 | switch (DCCP_SKB_CB(skb)->dccpd_type) { | 547 | switch (DCCP_SKB_CB(skb)->dccpd_type) { |
@@ -556,14 +553,14 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
556 | } | 553 | } |
557 | 554 | ||
558 | ackno = DCCP_SKB_CB(skb)->dccpd_ack_seq; | 555 | ackno = DCCP_SKB_CB(skb)->dccpd_ack_seq; |
559 | if (after48(ackno, hctx->ccid2hctx_high_ack)) | 556 | if (after48(ackno, hc->tx_high_ack)) |
560 | hctx->ccid2hctx_high_ack = ackno; | 557 | hc->tx_high_ack = ackno; |
561 | 558 | ||
562 | seqp = hctx->ccid2hctx_seqt; | 559 | seqp = hc->tx_seqt; |
563 | while (before48(seqp->ccid2s_seq, ackno)) { | 560 | while (before48(seqp->ccid2s_seq, ackno)) { |
564 | seqp = seqp->ccid2s_next; | 561 | seqp = seqp->ccid2s_next; |
565 | if (seqp == hctx->ccid2hctx_seqh) { | 562 | if (seqp == hc->tx_seqh) { |
566 | seqp = hctx->ccid2hctx_seqh->ccid2s_prev; | 563 | seqp = hc->tx_seqh->ccid2s_prev; |
567 | break; | 564 | break; |
568 | } | 565 | } |
569 | } | 566 | } |
@@ -573,7 +570,7 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
573 | * packets per acknowledgement. Rounding up avoids that cwnd is not | 570 | * packets per acknowledgement. Rounding up avoids that cwnd is not |
574 | * advanced when Ack Ratio is 1 and gives a slight edge otherwise. | 571 | * advanced when Ack Ratio is 1 and gives a slight edge otherwise. |
575 | */ | 572 | */ |
576 | if (hctx->ccid2hctx_cwnd < hctx->ccid2hctx_ssthresh) | 573 | if (hc->tx_cwnd < hc->tx_ssthresh) |
577 | maxincr = DIV_ROUND_UP(dp->dccps_l_ack_ratio, 2); | 574 | maxincr = DIV_ROUND_UP(dp->dccps_l_ack_ratio, 2); |
578 | 575 | ||
579 | /* go through all ack vectors */ | 576 | /* go through all ack vectors */ |
@@ -592,7 +589,7 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
592 | * seqnos. | 589 | * seqnos. |
593 | */ | 590 | */ |
594 | while (after48(seqp->ccid2s_seq, ackno)) { | 591 | while (after48(seqp->ccid2s_seq, ackno)) { |
595 | if (seqp == hctx->ccid2hctx_seqt) { | 592 | if (seqp == hc->tx_seqt) { |
596 | done = 1; | 593 | done = 1; |
597 | break; | 594 | break; |
598 | } | 595 | } |
@@ -624,7 +621,7 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
624 | (unsigned long long)seqp->ccid2s_seq); | 621 | (unsigned long long)seqp->ccid2s_seq); |
625 | ccid2_hc_tx_dec_pipe(sk); | 622 | ccid2_hc_tx_dec_pipe(sk); |
626 | } | 623 | } |
627 | if (seqp == hctx->ccid2hctx_seqt) { | 624 | if (seqp == hc->tx_seqt) { |
628 | done = 1; | 625 | done = 1; |
629 | break; | 626 | break; |
630 | } | 627 | } |
@@ -643,11 +640,11 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
643 | /* The state about what is acked should be correct now | 640 | /* The state about what is acked should be correct now |
644 | * Check for NUMDUPACK | 641 | * Check for NUMDUPACK |
645 | */ | 642 | */ |
646 | seqp = hctx->ccid2hctx_seqt; | 643 | seqp = hc->tx_seqt; |
647 | while (before48(seqp->ccid2s_seq, hctx->ccid2hctx_high_ack)) { | 644 | while (before48(seqp->ccid2s_seq, hc->tx_high_ack)) { |
648 | seqp = seqp->ccid2s_next; | 645 | seqp = seqp->ccid2s_next; |
649 | if (seqp == hctx->ccid2hctx_seqh) { | 646 | if (seqp == hc->tx_seqh) { |
650 | seqp = hctx->ccid2hctx_seqh->ccid2s_prev; | 647 | seqp = hc->tx_seqh->ccid2s_prev; |
651 | break; | 648 | break; |
652 | } | 649 | } |
653 | } | 650 | } |
@@ -658,7 +655,7 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
658 | if (done == NUMDUPACK) | 655 | if (done == NUMDUPACK) |
659 | break; | 656 | break; |
660 | } | 657 | } |
661 | if (seqp == hctx->ccid2hctx_seqt) | 658 | if (seqp == hc->tx_seqt) |
662 | break; | 659 | break; |
663 | seqp = seqp->ccid2s_prev; | 660 | seqp = seqp->ccid2s_prev; |
664 | } | 661 | } |
@@ -681,86 +678,86 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
681 | ccid2_congestion_event(sk, seqp); | 678 | ccid2_congestion_event(sk, seqp); |
682 | ccid2_hc_tx_dec_pipe(sk); | 679 | ccid2_hc_tx_dec_pipe(sk); |
683 | } | 680 | } |
684 | if (seqp == hctx->ccid2hctx_seqt) | 681 | if (seqp == hc->tx_seqt) |
685 | break; | 682 | break; |
686 | seqp = seqp->ccid2s_prev; | 683 | seqp = seqp->ccid2s_prev; |
687 | } | 684 | } |
688 | 685 | ||
689 | hctx->ccid2hctx_seqt = last_acked; | 686 | hc->tx_seqt = last_acked; |
690 | } | 687 | } |
691 | 688 | ||
692 | /* trim acked packets in tail */ | 689 | /* trim acked packets in tail */ |
693 | while (hctx->ccid2hctx_seqt != hctx->ccid2hctx_seqh) { | 690 | while (hc->tx_seqt != hc->tx_seqh) { |
694 | if (!hctx->ccid2hctx_seqt->ccid2s_acked) | 691 | if (!hc->tx_seqt->ccid2s_acked) |
695 | break; | 692 | break; |
696 | 693 | ||
697 | hctx->ccid2hctx_seqt = hctx->ccid2hctx_seqt->ccid2s_next; | 694 | hc->tx_seqt = hc->tx_seqt->ccid2s_next; |
698 | } | 695 | } |
699 | 696 | ||
700 | ccid2_hc_tx_check_sanity(hctx); | 697 | ccid2_hc_tx_check_sanity(hc); |
701 | } | 698 | } |
702 | 699 | ||
703 | static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk) | 700 | static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk) |
704 | { | 701 | { |
705 | struct ccid2_hc_tx_sock *hctx = ccid_priv(ccid); | 702 | struct ccid2_hc_tx_sock *hc = ccid_priv(ccid); |
706 | struct dccp_sock *dp = dccp_sk(sk); | 703 | struct dccp_sock *dp = dccp_sk(sk); |
707 | u32 max_ratio; | 704 | u32 max_ratio; |
708 | 705 | ||
709 | /* RFC 4341, 5: initialise ssthresh to arbitrarily high (max) value */ | 706 | /* RFC 4341, 5: initialise ssthresh to arbitrarily high (max) value */ |
710 | hctx->ccid2hctx_ssthresh = ~0U; | 707 | hc->tx_ssthresh = ~0U; |
711 | 708 | ||
712 | /* | 709 | /* |
713 | * RFC 4341, 5: "The cwnd parameter is initialized to at most four | 710 | * RFC 4341, 5: "The cwnd parameter is initialized to at most four |
714 | * packets for new connections, following the rules from [RFC3390]". | 711 | * packets for new connections, following the rules from [RFC3390]". |
715 | * We need to convert the bytes of RFC3390 into the packets of RFC 4341. | 712 | * We need to convert the bytes of RFC3390 into the packets of RFC 4341. |
716 | */ | 713 | */ |
717 | hctx->ccid2hctx_cwnd = clamp(4380U / dp->dccps_mss_cache, 2U, 4U); | 714 | hc->tx_cwnd = clamp(4380U / dp->dccps_mss_cache, 2U, 4U); |
718 | 715 | ||
719 | /* Make sure that Ack Ratio is enabled and within bounds. */ | 716 | /* Make sure that Ack Ratio is enabled and within bounds. */ |
720 | max_ratio = DIV_ROUND_UP(hctx->ccid2hctx_cwnd, 2); | 717 | max_ratio = DIV_ROUND_UP(hc->tx_cwnd, 2); |
721 | if (dp->dccps_l_ack_ratio == 0 || dp->dccps_l_ack_ratio > max_ratio) | 718 | if (dp->dccps_l_ack_ratio == 0 || dp->dccps_l_ack_ratio > max_ratio) |
722 | dp->dccps_l_ack_ratio = max_ratio; | 719 | dp->dccps_l_ack_ratio = max_ratio; |
723 | 720 | ||
724 | /* XXX init ~ to window size... */ | 721 | /* XXX init ~ to window size... */ |
725 | if (ccid2_hc_tx_alloc_seq(hctx)) | 722 | if (ccid2_hc_tx_alloc_seq(hc)) |
726 | return -ENOMEM; | 723 | return -ENOMEM; |
727 | 724 | ||
728 | hctx->ccid2hctx_rto = 3 * HZ; | 725 | hc->tx_rto = 3 * HZ; |
729 | ccid2_change_srtt(hctx, -1); | 726 | ccid2_change_srtt(hc, -1); |
730 | hctx->ccid2hctx_rttvar = -1; | 727 | hc->tx_rttvar = -1; |
731 | hctx->ccid2hctx_rpdupack = -1; | 728 | hc->tx_rpdupack = -1; |
732 | hctx->ccid2hctx_last_cong = jiffies; | 729 | hc->tx_last_cong = jiffies; |
733 | setup_timer(&hctx->ccid2hctx_rtotimer, ccid2_hc_tx_rto_expire, | 730 | setup_timer(&hc->tx_rtotimer, ccid2_hc_tx_rto_expire, |
734 | (unsigned long)sk); | 731 | (unsigned long)sk); |
735 | 732 | ||
736 | ccid2_hc_tx_check_sanity(hctx); | 733 | ccid2_hc_tx_check_sanity(hc); |
737 | return 0; | 734 | return 0; |
738 | } | 735 | } |
739 | 736 | ||
740 | static void ccid2_hc_tx_exit(struct sock *sk) | 737 | static void ccid2_hc_tx_exit(struct sock *sk) |
741 | { | 738 | { |
742 | struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk); | 739 | struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); |
743 | int i; | 740 | int i; |
744 | 741 | ||
745 | ccid2_hc_tx_kill_rto_timer(sk); | 742 | ccid2_hc_tx_kill_rto_timer(sk); |
746 | 743 | ||
747 | for (i = 0; i < hctx->ccid2hctx_seqbufc; i++) | 744 | for (i = 0; i < hc->tx_seqbufc; i++) |
748 | kfree(hctx->ccid2hctx_seqbuf[i]); | 745 | kfree(hc->tx_seqbuf[i]); |
749 | hctx->ccid2hctx_seqbufc = 0; | 746 | hc->tx_seqbufc = 0; |
750 | } | 747 | } |
751 | 748 | ||
752 | static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) | 749 | static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) |
753 | { | 750 | { |
754 | const struct dccp_sock *dp = dccp_sk(sk); | 751 | const struct dccp_sock *dp = dccp_sk(sk); |
755 | struct ccid2_hc_rx_sock *hcrx = ccid2_hc_rx_sk(sk); | 752 | struct ccid2_hc_rx_sock *hc = ccid2_hc_rx_sk(sk); |
756 | 753 | ||
757 | switch (DCCP_SKB_CB(skb)->dccpd_type) { | 754 | switch (DCCP_SKB_CB(skb)->dccpd_type) { |
758 | case DCCP_PKT_DATA: | 755 | case DCCP_PKT_DATA: |
759 | case DCCP_PKT_DATAACK: | 756 | case DCCP_PKT_DATAACK: |
760 | hcrx->ccid2hcrx_data++; | 757 | hc->rx_data++; |
761 | if (hcrx->ccid2hcrx_data >= dp->dccps_r_ack_ratio) { | 758 | if (hc->rx_data >= dp->dccps_r_ack_ratio) { |
762 | dccp_send_ack(sk); | 759 | dccp_send_ack(sk); |
763 | hcrx->ccid2hcrx_data = 0; | 760 | hc->rx_data = 0; |
764 | } | 761 | } |
765 | break; | 762 | break; |
766 | } | 763 | } |
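Beyond the hctx-to-hc/tx_* rename, the ccid2_new_ack() hunk above carries the RTT estimator that feeds the RTO timer, written with shift arithmetic that is easy to misread in diff form. A standalone restatement of that arithmetic in plain C, under the assumptions that HZ, the sample values, and the plain global variables stand in for the jiffies clock and the CCID socket state:

```c
#include <stdio.h>

#define HZ 1000				/* pretend 1000 jiffies per second */

static long srtt = -1, rttvar, rto;

static void rtt_sample(long r)		/* r: measured RTT in jiffies */
{
	long sec, var4;

	if (srtt == -1) {
		/* first measurement */
		srtt = r;
		rttvar = r >> 1;
	} else {
		long diff = srtt - r;

		if (diff < 0)
			diff = -diff;
		/* rttvar <- 3/4 rttvar + 1/4 |srtt - r| */
		rttvar = ((rttvar * 3) >> 2) + (diff >> 2);
		/* srtt <- 7/8 srtt + 1/8 r */
		srtt = ((srtt * 7) >> 3) + (r >> 3);
	}

	var4 = rttvar << 2;		/* rto = srtt + 4 * rttvar */
	if (!var4)
		var4 = 1;		/* clock granularity */
	rto = srtt + var4;

	sec = rto / HZ;			/* clamp to roughly [1s, 60s] */
	if (sec < 1)
		rto = HZ;
	if (sec > 60)
		rto = 60 * HZ;
}

int main(void)
{
	rtt_sample(120);		/* illustrative samples */
	rtt_sample(200);
	printf("srtt=%ld rttvar=%ld rto=%ld jiffies\n", srtt, rttvar, rto);
	return 0;
}
```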
diff --git a/net/dccp/ccids/ccid2.h b/net/dccp/ccids/ccid2.h index 326ac90fb909..1ec6a30103bb 100644 --- a/net/dccp/ccids/ccid2.h +++ b/net/dccp/ccids/ccid2.h | |||
@@ -40,34 +40,34 @@ struct ccid2_seq { | |||
40 | 40 | ||
41 | /** | 41 | /** |
42 | * struct ccid2_hc_tx_sock - CCID2 TX half connection | 42 | * struct ccid2_hc_tx_sock - CCID2 TX half connection |
43 | * @ccid2hctx_{cwnd,ssthresh,pipe}: as per RFC 4341, section 5 | 43 | * @tx_{cwnd,ssthresh,pipe}: as per RFC 4341, section 5 |
44 | * @ccid2hctx_packets_acked - Ack counter for deriving cwnd growth (RFC 3465) | 44 | * @tx_packets_acked: Ack counter for deriving cwnd growth (RFC 3465) |
45 | * @ccid2hctx_lastrtt -time RTT was last measured | 45 | * @tx_lastrtt: time RTT was last measured |
46 | * @ccid2hctx_rpseq - last consecutive seqno | 46 | * @tx_rpseq: last consecutive seqno |
47 | * @ccid2hctx_rpdupack - dupacks since rpseq | 47 | * @tx_rpdupack: dupacks since rpseq |
48 | */ | 48 | */ |
49 | struct ccid2_hc_tx_sock { | 49 | struct ccid2_hc_tx_sock { |
50 | u32 ccid2hctx_cwnd; | 50 | u32 tx_cwnd; |
51 | u32 ccid2hctx_ssthresh; | 51 | u32 tx_ssthresh; |
52 | u32 ccid2hctx_pipe; | 52 | u32 tx_pipe; |
53 | u32 ccid2hctx_packets_acked; | 53 | u32 tx_packets_acked; |
54 | struct ccid2_seq *ccid2hctx_seqbuf[CCID2_SEQBUF_MAX]; | 54 | struct ccid2_seq *tx_seqbuf[CCID2_SEQBUF_MAX]; |
55 | int ccid2hctx_seqbufc; | 55 | int tx_seqbufc; |
56 | struct ccid2_seq *ccid2hctx_seqh; | 56 | struct ccid2_seq *tx_seqh; |
57 | struct ccid2_seq *ccid2hctx_seqt; | 57 | struct ccid2_seq *tx_seqt; |
58 | long ccid2hctx_rto; | 58 | long tx_rto; |
59 | long ccid2hctx_srtt; | 59 | long tx_srtt; |
60 | long ccid2hctx_rttvar; | 60 | long tx_rttvar; |
61 | unsigned long ccid2hctx_lastrtt; | 61 | unsigned long tx_lastrtt; |
62 | struct timer_list ccid2hctx_rtotimer; | 62 | struct timer_list tx_rtotimer; |
63 | u64 ccid2hctx_rpseq; | 63 | u64 tx_rpseq; |
64 | int ccid2hctx_rpdupack; | 64 | int tx_rpdupack; |
65 | unsigned long ccid2hctx_last_cong; | 65 | unsigned long tx_last_cong; |
66 | u64 ccid2hctx_high_ack; | 66 | u64 tx_high_ack; |
67 | }; | 67 | }; |
68 | 68 | ||
69 | struct ccid2_hc_rx_sock { | 69 | struct ccid2_hc_rx_sock { |
70 | int ccid2hcrx_data; | 70 | int rx_data; |
71 | }; | 71 | }; |
72 | 72 | ||
73 | static inline struct ccid2_hc_tx_sock *ccid2_hc_tx_sk(const struct sock *sk) | 73 | static inline struct ccid2_hc_tx_sock *ccid2_hc_tx_sk(const struct sock *sk) |
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c index 34dcc798c457..bcd7632299f5 100644 --- a/net/dccp/ccids/ccid3.c +++ b/net/dccp/ccids/ccid3.c | |||
@@ -64,14 +64,14 @@ static const char *ccid3_tx_state_name(enum ccid3_hc_tx_states state) | |||
64 | static void ccid3_hc_tx_set_state(struct sock *sk, | 64 | static void ccid3_hc_tx_set_state(struct sock *sk, |
65 | enum ccid3_hc_tx_states state) | 65 | enum ccid3_hc_tx_states state) |
66 | { | 66 | { |
67 | struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); | 67 | struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk); |
68 | enum ccid3_hc_tx_states oldstate = hctx->ccid3hctx_state; | 68 | enum ccid3_hc_tx_states oldstate = hc->tx_state; |
69 | 69 | ||
70 | ccid3_pr_debug("%s(%p) %-8.8s -> %s\n", | 70 | ccid3_pr_debug("%s(%p) %-8.8s -> %s\n", |
71 | dccp_role(sk), sk, ccid3_tx_state_name(oldstate), | 71 | dccp_role(sk), sk, ccid3_tx_state_name(oldstate), |
72 | ccid3_tx_state_name(state)); | 72 | ccid3_tx_state_name(state)); |
73 | WARN_ON(state == oldstate); | 73 | WARN_ON(state == oldstate); |
74 | hctx->ccid3hctx_state = state; | 74 | hc->tx_state = state; |
75 | } | 75 | } |
76 | 76 | ||
77 | /* | 77 | /* |
@@ -85,37 +85,32 @@ static void ccid3_hc_tx_set_state(struct sock *sk, | |||
85 | */ | 85 | */ |
86 | static inline u64 rfc3390_initial_rate(struct sock *sk) | 86 | static inline u64 rfc3390_initial_rate(struct sock *sk) |
87 | { | 87 | { |
88 | const struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); | 88 | const struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk); |
89 | const __u32 w_init = clamp_t(__u32, 4380U, | 89 | const __u32 w_init = clamp_t(__u32, 4380U, 2 * hc->tx_s, 4 * hc->tx_s); |
90 | 2 * hctx->ccid3hctx_s, 4 * hctx->ccid3hctx_s); | ||
91 | 90 | ||
92 | return scaled_div(w_init << 6, hctx->ccid3hctx_rtt); | 91 | return scaled_div(w_init << 6, hc->tx_rtt); |
93 | } | 92 | } |
94 | 93 | ||
95 | /* | 94 | /* |
96 | * Recalculate t_ipi and delta (should be called whenever X changes) | 95 | * Recalculate t_ipi and delta (should be called whenever X changes) |
97 | */ | 96 | */ |
98 | static void ccid3_update_send_interval(struct ccid3_hc_tx_sock *hctx) | 97 | static void ccid3_update_send_interval(struct ccid3_hc_tx_sock *hc) |
99 | { | 98 | { |
100 | /* Calculate new t_ipi = s / X_inst (X_inst is in 64 * bytes/second) */ | 99 | /* Calculate new t_ipi = s / X_inst (X_inst is in 64 * bytes/second) */ |
101 | hctx->ccid3hctx_t_ipi = scaled_div32(((u64)hctx->ccid3hctx_s) << 6, | 100 | hc->tx_t_ipi = scaled_div32(((u64)hc->tx_s) << 6, hc->tx_x); |
102 | hctx->ccid3hctx_x); | ||
103 | 101 | ||
104 | /* Calculate new delta by delta = min(t_ipi / 2, t_gran / 2) */ | 102 | /* Calculate new delta by delta = min(t_ipi / 2, t_gran / 2) */ |
105 | hctx->ccid3hctx_delta = min_t(u32, hctx->ccid3hctx_t_ipi / 2, | 103 | hc->tx_delta = min_t(u32, hc->tx_t_ipi / 2, TFRC_OPSYS_HALF_TIME_GRAN); |
106 | TFRC_OPSYS_HALF_TIME_GRAN); | ||
107 | |||
108 | ccid3_pr_debug("t_ipi=%u, delta=%u, s=%u, X=%u\n", | ||
109 | hctx->ccid3hctx_t_ipi, hctx->ccid3hctx_delta, | ||
110 | hctx->ccid3hctx_s, (unsigned)(hctx->ccid3hctx_x >> 6)); | ||
111 | 104 | ||
105 | ccid3_pr_debug("t_ipi=%u, delta=%u, s=%u, X=%u\n", hc->tx_t_ipi, | ||
106 | hc->tx_delta, hc->tx_s, (unsigned)(hc->tx_x >> 6)); | ||
112 | } | 107 | } |
113 | 108 | ||
114 | static u32 ccid3_hc_tx_idle_rtt(struct ccid3_hc_tx_sock *hctx, ktime_t now) | 109 | static u32 ccid3_hc_tx_idle_rtt(struct ccid3_hc_tx_sock *hc, ktime_t now) |
115 | { | 110 | { |
116 | u32 delta = ktime_us_delta(now, hctx->ccid3hctx_t_last_win_count); | 111 | u32 delta = ktime_us_delta(now, hc->tx_t_last_win_count); |
117 | 112 | ||
118 | return delta / hctx->ccid3hctx_rtt; | 113 | return delta / hc->tx_rtt; |
119 | } | 114 | } |
120 | 115 | ||
121 | /** | 116 | /** |
@@ -130,9 +125,9 @@ static u32 ccid3_hc_tx_idle_rtt(struct ccid3_hc_tx_sock *hctx, ktime_t now) | |||
130 | */ | 125 | */ |
131 | static void ccid3_hc_tx_update_x(struct sock *sk, ktime_t *stamp) | 126 | static void ccid3_hc_tx_update_x(struct sock *sk, ktime_t *stamp) |
132 | { | 127 | { |
133 | struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); | 128 | struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk); |
134 | __u64 min_rate = 2 * hctx->ccid3hctx_x_recv; | 129 | __u64 min_rate = 2 * hc->tx_x_recv; |
135 | const __u64 old_x = hctx->ccid3hctx_x; | 130 | const __u64 old_x = hc->tx_x; |
136 | ktime_t now = stamp ? *stamp : ktime_get_real(); | 131 | ktime_t now = stamp ? *stamp : ktime_get_real(); |
137 | 132 | ||
138 | /* | 133 | /* |
@@ -141,37 +136,31 @@ static void ccid3_hc_tx_update_x(struct sock *sk, ktime_t *stamp) | |||
141 | * a sender is idle if it has not sent anything over a 2-RTT-period. | 136 | * a sender is idle if it has not sent anything over a 2-RTT-period. |
142 | * For consistency with X and X_recv, min_rate is also scaled by 2^6. | 137 | * For consistency with X and X_recv, min_rate is also scaled by 2^6. |
143 | */ | 138 | */ |
144 | if (ccid3_hc_tx_idle_rtt(hctx, now) >= 2) { | 139 | if (ccid3_hc_tx_idle_rtt(hc, now) >= 2) { |
145 | min_rate = rfc3390_initial_rate(sk); | 140 | min_rate = rfc3390_initial_rate(sk); |
146 | min_rate = max(min_rate, 2 * hctx->ccid3hctx_x_recv); | 141 | min_rate = max(min_rate, 2 * hc->tx_x_recv); |
147 | } | 142 | } |
148 | 143 | ||
149 | if (hctx->ccid3hctx_p > 0) { | 144 | if (hc->tx_p > 0) { |
150 | 145 | ||
151 | hctx->ccid3hctx_x = min(((__u64)hctx->ccid3hctx_x_calc) << 6, | 146 | hc->tx_x = min(((__u64)hc->tx_x_calc) << 6, min_rate); |
152 | min_rate); | 147 | hc->tx_x = max(hc->tx_x, (((__u64)hc->tx_s) << 6) / TFRC_T_MBI); |
153 | hctx->ccid3hctx_x = max(hctx->ccid3hctx_x, | ||
154 | (((__u64)hctx->ccid3hctx_s) << 6) / | ||
155 | TFRC_T_MBI); | ||
156 | 148 | ||
157 | } else if (ktime_us_delta(now, hctx->ccid3hctx_t_ld) | 149 | } else if (ktime_us_delta(now, hc->tx_t_ld) - (s64)hc->tx_rtt >= 0) { |
158 | - (s64)hctx->ccid3hctx_rtt >= 0) { | ||
159 | 150 | ||
160 | hctx->ccid3hctx_x = min(2 * hctx->ccid3hctx_x, min_rate); | 151 | hc->tx_x = min(2 * hc->tx_x, min_rate); |
161 | hctx->ccid3hctx_x = max(hctx->ccid3hctx_x, | 152 | hc->tx_x = max(hc->tx_x, |
162 | scaled_div(((__u64)hctx->ccid3hctx_s) << 6, | 153 | scaled_div(((__u64)hc->tx_s) << 6, hc->tx_rtt)); |
163 | hctx->ccid3hctx_rtt)); | 154 | hc->tx_t_ld = now; |
164 | hctx->ccid3hctx_t_ld = now; | ||
165 | } | 155 | } |
166 | 156 | ||
167 | if (hctx->ccid3hctx_x != old_x) { | 157 | if (hc->tx_x != old_x) { |
168 | ccid3_pr_debug("X_prev=%u, X_now=%u, X_calc=%u, " | 158 | ccid3_pr_debug("X_prev=%u, X_now=%u, X_calc=%u, " |
169 | "X_recv=%u\n", (unsigned)(old_x >> 6), | 159 | "X_recv=%u\n", (unsigned)(old_x >> 6), |
170 | (unsigned)(hctx->ccid3hctx_x >> 6), | 160 | (unsigned)(hc->tx_x >> 6), hc->tx_x_calc, |
171 | hctx->ccid3hctx_x_calc, | 161 | (unsigned)(hc->tx_x_recv >> 6)); |
172 | (unsigned)(hctx->ccid3hctx_x_recv >> 6)); | ||
173 | 162 | ||
174 | ccid3_update_send_interval(hctx); | 163 | ccid3_update_send_interval(hc); |
175 | } | 164 | } |
176 | } | 165 | } |
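ccid3_hc_tx_update_x() implements the rate update of draft rfc3448bis, 4.2/4.3: with a positive loss rate X is capped by X_calc and by min_rate (twice the reported X_recv, raised to the RFC 3390 rate after a 2-RTT idle period), otherwise X may at most double once per RTT; both branches floor X at one packet per t_mbi. A compressed sketch of the decision that leaves out the idle-sender boost; TFRC_T_MBI is assumed to be 64 seconds:

#include <stdint.h>

#define T_MBI 64	/* maximum back-off interval in seconds (assumed) */

static uint64_t u64min(uint64_t a, uint64_t b) { return a < b ? a : b; }
static uint64_t u64max(uint64_t a, uint64_t b) { return a > b ? a : b; }

/*
 * One step of the allowed-rate update.  x and x_recv are 2^6-scaled
 * rates, x_calc is unscaled, s is the packet size, rtt_us the RTT.
 */
static uint64_t update_x(uint64_t x, uint64_t x_recv, uint32_t x_calc,
			 uint32_t s, uint32_t rtt_us, uint32_t p,
			 int rtt_elapsed_since_last_double)
{
	uint64_t min_rate = 2 * x_recv;

	if (p > 0) {
		x = u64min((uint64_t)x_calc << 6, min_rate);
		x = u64max(x, ((uint64_t)s << 6) / T_MBI);
	} else if (rtt_elapsed_since_last_double) {
		x = u64min(2 * x, min_rate);
		x = u64max(x, (((uint64_t)s << 6) * 1000000ULL) / rtt_us);
	}
	return x;
}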
177 | 166 | ||
@@ -179,37 +168,37 @@ static void ccid3_hc_tx_update_x(struct sock *sk, ktime_t *stamp) | |||
179 | * Track the mean packet size `s' (cf. RFC 4342, 5.3 and RFC 3448, 4.1) | 168 | * Track the mean packet size `s' (cf. RFC 4342, 5.3 and RFC 3448, 4.1) |
180 | * @len: DCCP packet payload size in bytes | 169 | * @len: DCCP packet payload size in bytes |
181 | */ | 170 | */ |
182 | static inline void ccid3_hc_tx_update_s(struct ccid3_hc_tx_sock *hctx, int len) | 171 | static inline void ccid3_hc_tx_update_s(struct ccid3_hc_tx_sock *hc, int len) |
183 | { | 172 | { |
184 | const u16 old_s = hctx->ccid3hctx_s; | 173 | const u16 old_s = hc->tx_s; |
185 | 174 | ||
186 | hctx->ccid3hctx_s = tfrc_ewma(hctx->ccid3hctx_s, len, 9); | 175 | hc->tx_s = tfrc_ewma(hc->tx_s, len, 9); |
187 | 176 | ||
188 | if (hctx->ccid3hctx_s != old_s) | 177 | if (hc->tx_s != old_s) |
189 | ccid3_update_send_interval(hctx); | 178 | ccid3_update_send_interval(hc); |
190 | } | 179 | } |
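The mean packet size above is a moving average with weight 9. tfrc_ewma() is assumed here to weight in tenths and to seed the average from the first sample; a short sketch of that behaviour:

#include <stdint.h>

/* assumed to mirror tfrc_ewma(): weight in tenths, 0 seeds the average */
static uint32_t ewma(uint32_t avg, uint32_t newval, uint8_t weight)
{
	return avg ? (weight * avg + (10 - weight) * newval) / 10 : newval;
}

/* e.g. tracking the mean packet size:  s = ewma(s, payload_len, 9); */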
191 | 180 | ||
192 | /* | 181 | /* |
193 | * Update Window Counter using the algorithm from [RFC 4342, 8.1]. | 182 | * Update Window Counter using the algorithm from [RFC 4342, 8.1]. |
194 | * As elsewhere, RTT > 0 is assumed by using dccp_sample_rtt(). | 183 | * As elsewhere, RTT > 0 is assumed by using dccp_sample_rtt(). |
195 | */ | 184 | */ |
196 | static inline void ccid3_hc_tx_update_win_count(struct ccid3_hc_tx_sock *hctx, | 185 | static inline void ccid3_hc_tx_update_win_count(struct ccid3_hc_tx_sock *hc, |
197 | ktime_t now) | 186 | ktime_t now) |
198 | { | 187 | { |
199 | u32 delta = ktime_us_delta(now, hctx->ccid3hctx_t_last_win_count), | 188 | u32 delta = ktime_us_delta(now, hc->tx_t_last_win_count), |
200 | quarter_rtts = (4 * delta) / hctx->ccid3hctx_rtt; | 189 | quarter_rtts = (4 * delta) / hc->tx_rtt; |
201 | 190 | ||
202 | if (quarter_rtts > 0) { | 191 | if (quarter_rtts > 0) { |
203 | hctx->ccid3hctx_t_last_win_count = now; | 192 | hc->tx_t_last_win_count = now; |
204 | hctx->ccid3hctx_last_win_count += min(quarter_rtts, 5U); | 193 | hc->tx_last_win_count += min(quarter_rtts, 5U); |
205 | hctx->ccid3hctx_last_win_count &= 0xF; /* mod 16 */ | 194 | hc->tx_last_win_count &= 0xF; /* mod 16 */ |
206 | } | 195 | } |
207 | } | 196 | } |
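The window-counter update above advances the 4-bit CCVal counter by one per elapsed quarter-RTT, by at most 5 per call, modulo 16 (RFC 4342, 8.1). A standalone sketch:

#include <stdint.h>

/* advance a 4-bit window counter by elapsed quarter-RTTs, capped at 5 */
static void update_win_count(uint8_t *win_count, uint64_t *t_last_us,
			     uint64_t now_us, uint32_t rtt_us)
{
	uint32_t quarter_rtts = (uint32_t)(4 * (now_us - *t_last_us) / rtt_us);

	if (quarter_rtts > 0) {
		*t_last_us = now_us;
		*win_count += quarter_rtts < 5 ? quarter_rtts : 5;
		*win_count &= 0xF;	/* mod 16 */
	}
}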
208 | 197 | ||
209 | static void ccid3_hc_tx_no_feedback_timer(unsigned long data) | 198 | static void ccid3_hc_tx_no_feedback_timer(unsigned long data) |
210 | { | 199 | { |
211 | struct sock *sk = (struct sock *)data; | 200 | struct sock *sk = (struct sock *)data; |
212 | struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); | 201 | struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk); |
213 | unsigned long t_nfb = USEC_PER_SEC / 5; | 202 | unsigned long t_nfb = USEC_PER_SEC / 5; |
214 | 203 | ||
215 | bh_lock_sock(sk); | 204 | bh_lock_sock(sk); |
@@ -220,24 +209,23 @@ static void ccid3_hc_tx_no_feedback_timer(unsigned long data) | |||
220 | } | 209 | } |
221 | 210 | ||
222 | ccid3_pr_debug("%s(%p, state=%s) - entry \n", dccp_role(sk), sk, | 211 | ccid3_pr_debug("%s(%p, state=%s) - entry \n", dccp_role(sk), sk, |
223 | ccid3_tx_state_name(hctx->ccid3hctx_state)); | 212 | ccid3_tx_state_name(hc->tx_state)); |
224 | 213 | ||
225 | if (hctx->ccid3hctx_state == TFRC_SSTATE_FBACK) | 214 | if (hc->tx_state == TFRC_SSTATE_FBACK) |
226 | ccid3_hc_tx_set_state(sk, TFRC_SSTATE_NO_FBACK); | 215 | ccid3_hc_tx_set_state(sk, TFRC_SSTATE_NO_FBACK); |
227 | else if (hctx->ccid3hctx_state != TFRC_SSTATE_NO_FBACK) | 216 | else if (hc->tx_state != TFRC_SSTATE_NO_FBACK) |
228 | goto out; | 217 | goto out; |
229 | 218 | ||
230 | /* | 219 | /* |
231 | * Determine new allowed sending rate X as per draft rfc3448bis-00, 4.4 | 220 | * Determine new allowed sending rate X as per draft rfc3448bis-00, 4.4 |
232 | */ | 221 | */ |
233 | if (hctx->ccid3hctx_t_rto == 0 || /* no feedback received yet */ | 222 | if (hc->tx_t_rto == 0 || /* no feedback received yet */ |
234 | hctx->ccid3hctx_p == 0) { | 223 | hc->tx_p == 0) { |
235 | 224 | ||
236 | /* halve send rate directly */ | 225 | /* halve send rate directly */ |
237 | hctx->ccid3hctx_x = max(hctx->ccid3hctx_x / 2, | 226 | hc->tx_x = max(hc->tx_x / 2, |
238 | (((__u64)hctx->ccid3hctx_s) << 6) / | 227 | (((__u64)hc->tx_s) << 6) / TFRC_T_MBI); |
239 | TFRC_T_MBI); | 228 | ccid3_update_send_interval(hc); |
240 | ccid3_update_send_interval(hctx); | ||
241 | } else { | 229 | } else { |
242 | /* | 230 | /* |
243 | * Modify the cached value of X_recv | 231 | * Modify the cached value of X_recv |
@@ -249,33 +237,32 @@ static void ccid3_hc_tx_no_feedback_timer(unsigned long data) | |||
249 | * | 237 | * |
250 | * Note that X_recv is scaled by 2^6 while X_calc is not | 238 | * Note that X_recv is scaled by 2^6 while X_calc is not |
251 | */ | 239 | */ |
252 | BUG_ON(hctx->ccid3hctx_p && !hctx->ccid3hctx_x_calc); | 240 | BUG_ON(hc->tx_p && !hc->tx_x_calc); |
253 | 241 | ||
254 | if (hctx->ccid3hctx_x_calc > (hctx->ccid3hctx_x_recv >> 5)) | 242 | if (hc->tx_x_calc > (hc->tx_x_recv >> 5)) |
255 | hctx->ccid3hctx_x_recv = | 243 | hc->tx_x_recv = |
256 | max(hctx->ccid3hctx_x_recv / 2, | 244 | max(hc->tx_x_recv / 2, |
257 | (((__u64)hctx->ccid3hctx_s) << 6) / | 245 | (((__u64)hc->tx_s) << 6) / (2*TFRC_T_MBI)); |
258 | (2 * TFRC_T_MBI)); | ||
259 | else { | 246 | else { |
260 | hctx->ccid3hctx_x_recv = hctx->ccid3hctx_x_calc; | 247 | hc->tx_x_recv = hc->tx_x_calc; |
261 | hctx->ccid3hctx_x_recv <<= 4; | 248 | hc->tx_x_recv <<= 4; |
262 | } | 249 | } |
263 | ccid3_hc_tx_update_x(sk, NULL); | 250 | ccid3_hc_tx_update_x(sk, NULL); |
264 | } | 251 | } |
265 | ccid3_pr_debug("Reduced X to %llu/64 bytes/sec\n", | 252 | ccid3_pr_debug("Reduced X to %llu/64 bytes/sec\n", |
266 | (unsigned long long)hctx->ccid3hctx_x); | 253 | (unsigned long long)hc->tx_x); |
267 | 254 | ||
268 | /* | 255 | /* |
269 | * Set new timeout for the nofeedback timer. | 256 | * Set new timeout for the nofeedback timer. |
270 | * See comments in packet_recv() regarding the value of t_RTO. | 257 | * See comments in packet_recv() regarding the value of t_RTO. |
271 | */ | 258 | */ |
272 | if (unlikely(hctx->ccid3hctx_t_rto == 0)) /* no feedback yet */ | 259 | if (unlikely(hc->tx_t_rto == 0)) /* no feedback yet */ |
273 | t_nfb = TFRC_INITIAL_TIMEOUT; | 260 | t_nfb = TFRC_INITIAL_TIMEOUT; |
274 | else | 261 | else |
275 | t_nfb = max(hctx->ccid3hctx_t_rto, 2 * hctx->ccid3hctx_t_ipi); | 262 | t_nfb = max(hc->tx_t_rto, 2 * hc->tx_t_ipi); |
276 | 263 | ||
277 | restart_timer: | 264 | restart_timer: |
278 | sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer, | 265 | sk_reset_timer(sk, &hc->tx_no_feedback_timer, |
279 | jiffies + usecs_to_jiffies(t_nfb)); | 266 | jiffies + usecs_to_jiffies(t_nfb)); |
280 | out: | 267 | out: |
281 | bh_unlock_sock(sk); | 268 | bh_unlock_sock(sk); |
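On nofeedback-timer expiry with no usable feedback the sender halves X directly, never dropping below one packet per 64 seconds; with feedback it instead halves the cached X_recv and lets ccid3_hc_tx_update_x() apply the back-off, then re-arms the timer for max(t_RTO, 2 * t_ipi). A sketch of the direct-halving branch and the re-arm interval only (TFRC_T_MBI again assumed to be 64):

#include <stdint.h>

/* direct halving used while no feedback (and hence no p) is available */
static uint64_t halve_rate(uint64_t x, uint32_t s)
{
	uint64_t floor = ((uint64_t)s << 6) / 64;	/* 1 packet / t_mbi */

	x /= 2;
	return x > floor ? x : floor;
}

/* timer re-arm interval t_nfb = max(t_RTO, 2 * t_ipi), in microseconds */
static uint32_t next_nfb(uint32_t t_rto_us, uint32_t t_ipi_us)
{
	return t_rto_us > 2 * t_ipi_us ? t_rto_us : 2 * t_ipi_us;
}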
@@ -291,7 +278,7 @@ out: | |||
291 | static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb) | 278 | static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb) |
292 | { | 279 | { |
293 | struct dccp_sock *dp = dccp_sk(sk); | 280 | struct dccp_sock *dp = dccp_sk(sk); |
294 | struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); | 281 | struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk); |
295 | ktime_t now = ktime_get_real(); | 282 | ktime_t now = ktime_get_real(); |
296 | s64 delay; | 283 | s64 delay; |
297 | 284 | ||
@@ -303,18 +290,17 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb) | |||
303 | if (unlikely(skb->len == 0)) | 290 | if (unlikely(skb->len == 0)) |
304 | return -EBADMSG; | 291 | return -EBADMSG; |
305 | 292 | ||
306 | switch (hctx->ccid3hctx_state) { | 293 | switch (hc->tx_state) { |
307 | case TFRC_SSTATE_NO_SENT: | 294 | case TFRC_SSTATE_NO_SENT: |
308 | sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer, | 295 | sk_reset_timer(sk, &hc->tx_no_feedback_timer, (jiffies + |
309 | (jiffies + | 296 | usecs_to_jiffies(TFRC_INITIAL_TIMEOUT))); |
310 | usecs_to_jiffies(TFRC_INITIAL_TIMEOUT))); | 297 | hc->tx_last_win_count = 0; |
311 | hctx->ccid3hctx_last_win_count = 0; | 298 | hc->tx_t_last_win_count = now; |
312 | hctx->ccid3hctx_t_last_win_count = now; | ||
313 | 299 | ||
314 | /* Set t_0 for initial packet */ | 300 | /* Set t_0 for initial packet */ |
315 | hctx->ccid3hctx_t_nom = now; | 301 | hc->tx_t_nom = now; |
316 | 302 | ||
317 | hctx->ccid3hctx_s = skb->len; | 303 | hc->tx_s = skb->len; |
318 | 304 | ||
319 | /* | 305 | /* |
320 | * Use initial RTT sample when available: recommended by erratum | 306 | * Use initial RTT sample when available: recommended by erratum |
@@ -323,9 +309,9 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb) | |||
323 | */ | 309 | */ |
324 | if (dp->dccps_syn_rtt) { | 310 | if (dp->dccps_syn_rtt) { |
325 | ccid3_pr_debug("SYN RTT = %uus\n", dp->dccps_syn_rtt); | 311 | ccid3_pr_debug("SYN RTT = %uus\n", dp->dccps_syn_rtt); |
326 | hctx->ccid3hctx_rtt = dp->dccps_syn_rtt; | 312 | hc->tx_rtt = dp->dccps_syn_rtt; |
327 | hctx->ccid3hctx_x = rfc3390_initial_rate(sk); | 313 | hc->tx_x = rfc3390_initial_rate(sk); |
328 | hctx->ccid3hctx_t_ld = now; | 314 | hc->tx_t_ld = now; |
329 | } else { | 315 | } else { |
330 | /* | 316 | /* |
331 | * Sender does not have RTT sample: | 317 | * Sender does not have RTT sample: |
@@ -333,17 +319,17 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb) | |||
333 | * is needed in several parts (e.g. window counter); | 319 | * is needed in several parts (e.g. window counter); |
334 | * - set sending rate X_pps = 1pps as per RFC 3448, 4.2. | 320 | * - set sending rate X_pps = 1pps as per RFC 3448, 4.2. |
335 | */ | 321 | */ |
336 | hctx->ccid3hctx_rtt = DCCP_FALLBACK_RTT; | 322 | hc->tx_rtt = DCCP_FALLBACK_RTT; |
337 | hctx->ccid3hctx_x = hctx->ccid3hctx_s; | 323 | hc->tx_x = hc->tx_s; |
338 | hctx->ccid3hctx_x <<= 6; | 324 | hc->tx_x <<= 6; |
339 | } | 325 | } |
340 | ccid3_update_send_interval(hctx); | 326 | ccid3_update_send_interval(hc); |
341 | 327 | ||
342 | ccid3_hc_tx_set_state(sk, TFRC_SSTATE_NO_FBACK); | 328 | ccid3_hc_tx_set_state(sk, TFRC_SSTATE_NO_FBACK); |
343 | break; | 329 | break; |
344 | case TFRC_SSTATE_NO_FBACK: | 330 | case TFRC_SSTATE_NO_FBACK: |
345 | case TFRC_SSTATE_FBACK: | 331 | case TFRC_SSTATE_FBACK: |
346 | delay = ktime_us_delta(hctx->ccid3hctx_t_nom, now); | 332 | delay = ktime_us_delta(hc->tx_t_nom, now); |
347 | ccid3_pr_debug("delay=%ld\n", (long)delay); | 333 | ccid3_pr_debug("delay=%ld\n", (long)delay); |
348 | /* | 334 | /* |
349 | * Scheduling of packet transmissions [RFC 3448, 4.6] | 335 | * Scheduling of packet transmissions [RFC 3448, 4.6] |
@@ -353,10 +339,10 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb) | |||
353 | * else | 339 | * else |
354 | * // send the packet in (t_nom - t_now) milliseconds. | 340 | * // send the packet in (t_nom - t_now) milliseconds. |
355 | */ | 341 | */ |
356 | if (delay - (s64)hctx->ccid3hctx_delta >= 1000) | 342 | if (delay - (s64)hc->tx_delta >= 1000) |
357 | return (u32)delay / 1000L; | 343 | return (u32)delay / 1000L; |
358 | 344 | ||
359 | ccid3_hc_tx_update_win_count(hctx, now); | 345 | ccid3_hc_tx_update_win_count(hc, now); |
360 | break; | 346 | break; |
361 | case TFRC_SSTATE_TERM: | 347 | case TFRC_SSTATE_TERM: |
362 | DCCP_BUG("%s(%p) - Illegal state TERM", dccp_role(sk), sk); | 348 | DCCP_BUG("%s(%p) - Illegal state TERM", dccp_role(sk), sk); |
@@ -365,28 +351,27 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb) | |||
365 | 351 | ||
366 | /* prepare to send now (add options etc.) */ | 352 | /* prepare to send now (add options etc.) */ |
367 | dp->dccps_hc_tx_insert_options = 1; | 353 | dp->dccps_hc_tx_insert_options = 1; |
368 | DCCP_SKB_CB(skb)->dccpd_ccval = hctx->ccid3hctx_last_win_count; | 354 | DCCP_SKB_CB(skb)->dccpd_ccval = hc->tx_last_win_count; |
369 | 355 | ||
370 | /* set the nominal send time for the next following packet */ | 356 | /* set the nominal send time for the next following packet */ |
371 | hctx->ccid3hctx_t_nom = ktime_add_us(hctx->ccid3hctx_t_nom, | 357 | hc->tx_t_nom = ktime_add_us(hc->tx_t_nom, hc->tx_t_ipi); |
372 | hctx->ccid3hctx_t_ipi); | ||
373 | return 0; | 358 | return 0; |
374 | } | 359 | } |
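The delay test above is the RFC 3448, 4.6 send gate: a packet may leave up to delta early, so if t_nom is still more than delta plus one millisecond away the caller is told how many milliseconds to wait, otherwise the packet goes out now and t_nom advances by t_ipi. Sketch:

#include <stdint.h>

/*
 * Returns 0 if the packet may be sent now (and advances t_nom by t_ipi),
 * otherwise the number of milliseconds the caller should wait.
 */
static uint32_t tx_gate(int64_t *t_nom_us, int64_t now_us,
			uint32_t delta_us, uint32_t t_ipi_us)
{
	int64_t delay = *t_nom_us - now_us;

	if (delay - (int64_t)delta_us >= 1000)
		return (uint32_t)(delay / 1000);	/* wait, in ms */

	*t_nom_us += t_ipi_us;				/* send now */
	return 0;
}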
375 | 360 | ||
376 | static void ccid3_hc_tx_packet_sent(struct sock *sk, int more, | 361 | static void ccid3_hc_tx_packet_sent(struct sock *sk, int more, |
377 | unsigned int len) | 362 | unsigned int len) |
378 | { | 363 | { |
379 | struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); | 364 | struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk); |
380 | 365 | ||
381 | ccid3_hc_tx_update_s(hctx, len); | 366 | ccid3_hc_tx_update_s(hc, len); |
382 | 367 | ||
383 | if (tfrc_tx_hist_add(&hctx->ccid3hctx_hist, dccp_sk(sk)->dccps_gss)) | 368 | if (tfrc_tx_hist_add(&hc->tx_hist, dccp_sk(sk)->dccps_gss)) |
384 | DCCP_CRIT("packet history - out of memory!"); | 369 | DCCP_CRIT("packet history - out of memory!"); |
385 | } | 370 | } |
386 | 371 | ||
387 | static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) | 372 | static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) |
388 | { | 373 | { |
389 | struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); | 374 | struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk); |
390 | struct ccid3_options_received *opt_recv; | 375 | struct ccid3_options_received *opt_recv; |
391 | ktime_t now; | 376 | ktime_t now; |
392 | unsigned long t_nfb; | 377 | unsigned long t_nfb; |
@@ -397,15 +382,15 @@ static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
397 | DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_DATAACK)) | 382 | DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_DATAACK)) |
398 | return; | 383 | return; |
399 | /* ... and only in the established state */ | 384 | /* ... and only in the established state */ |
400 | if (hctx->ccid3hctx_state != TFRC_SSTATE_FBACK && | 385 | if (hc->tx_state != TFRC_SSTATE_FBACK && |
401 | hctx->ccid3hctx_state != TFRC_SSTATE_NO_FBACK) | 386 | hc->tx_state != TFRC_SSTATE_NO_FBACK) |
402 | return; | 387 | return; |
403 | 388 | ||
404 | opt_recv = &hctx->ccid3hctx_options_received; | 389 | opt_recv = &hc->tx_options_received; |
405 | now = ktime_get_real(); | 390 | now = ktime_get_real(); |
406 | 391 | ||
407 | /* Estimate RTT from history if ACK number is valid */ | 392 | /* Estimate RTT from history if ACK number is valid */ |
408 | r_sample = tfrc_tx_hist_rtt(hctx->ccid3hctx_hist, | 393 | r_sample = tfrc_tx_hist_rtt(hc->tx_hist, |
409 | DCCP_SKB_CB(skb)->dccpd_ack_seq, now); | 394 | DCCP_SKB_CB(skb)->dccpd_ack_seq, now); |
410 | if (r_sample == 0) { | 395 | if (r_sample == 0) { |
411 | DCCP_WARN("%s(%p): %s with bogus ACK-%llu\n", dccp_role(sk), sk, | 396 | DCCP_WARN("%s(%p): %s with bogus ACK-%llu\n", dccp_role(sk), sk, |
@@ -415,37 +400,37 @@ static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
415 | } | 400 | } |
416 | 401 | ||
417 | /* Update receive rate in units of 64 * bytes/second */ | 402 | /* Update receive rate in units of 64 * bytes/second */ |
418 | hctx->ccid3hctx_x_recv = opt_recv->ccid3or_receive_rate; | 403 | hc->tx_x_recv = opt_recv->ccid3or_receive_rate; |
419 | hctx->ccid3hctx_x_recv <<= 6; | 404 | hc->tx_x_recv <<= 6; |
420 | 405 | ||
421 | /* Update loss event rate (which is scaled by 1e6) */ | 406 | /* Update loss event rate (which is scaled by 1e6) */ |
422 | pinv = opt_recv->ccid3or_loss_event_rate; | 407 | pinv = opt_recv->ccid3or_loss_event_rate; |
423 | if (pinv == ~0U || pinv == 0) /* see RFC 4342, 8.5 */ | 408 | if (pinv == ~0U || pinv == 0) /* see RFC 4342, 8.5 */ |
424 | hctx->ccid3hctx_p = 0; | 409 | hc->tx_p = 0; |
425 | else /* can not exceed 100% */ | 410 | else /* can not exceed 100% */ |
426 | hctx->ccid3hctx_p = scaled_div(1, pinv); | 411 | hc->tx_p = scaled_div(1, pinv); |
427 | /* | 412 | /* |
428 | * Validate new RTT sample and update moving average | 413 | * Validate new RTT sample and update moving average |
429 | */ | 414 | */ |
430 | r_sample = dccp_sample_rtt(sk, r_sample); | 415 | r_sample = dccp_sample_rtt(sk, r_sample); |
431 | hctx->ccid3hctx_rtt = tfrc_ewma(hctx->ccid3hctx_rtt, r_sample, 9); | 416 | hc->tx_rtt = tfrc_ewma(hc->tx_rtt, r_sample, 9); |
432 | /* | 417 | /* |
433 | * Update allowed sending rate X as per draft rfc3448bis-00, 4.2/3 | 418 | * Update allowed sending rate X as per draft rfc3448bis-00, 4.2/3 |
434 | */ | 419 | */ |
435 | if (hctx->ccid3hctx_state == TFRC_SSTATE_NO_FBACK) { | 420 | if (hc->tx_state == TFRC_SSTATE_NO_FBACK) { |
436 | ccid3_hc_tx_set_state(sk, TFRC_SSTATE_FBACK); | 421 | ccid3_hc_tx_set_state(sk, TFRC_SSTATE_FBACK); |
437 | 422 | ||
438 | if (hctx->ccid3hctx_t_rto == 0) { | 423 | if (hc->tx_t_rto == 0) { |
439 | /* | 424 | /* |
440 | * Initial feedback packet: Larger Initial Windows (4.2) | 425 | * Initial feedback packet: Larger Initial Windows (4.2) |
441 | */ | 426 | */ |
442 | hctx->ccid3hctx_x = rfc3390_initial_rate(sk); | 427 | hc->tx_x = rfc3390_initial_rate(sk); |
443 | hctx->ccid3hctx_t_ld = now; | 428 | hc->tx_t_ld = now; |
444 | 429 | ||
445 | ccid3_update_send_interval(hctx); | 430 | ccid3_update_send_interval(hc); |
446 | 431 | ||
447 | goto done_computing_x; | 432 | goto done_computing_x; |
448 | } else if (hctx->ccid3hctx_p == 0) { | 433 | } else if (hc->tx_p == 0) { |
449 | /* | 434 | /* |
450 | * First feedback after nofeedback timer expiry (4.3) | 435 | * First feedback after nofeedback timer expiry (4.3) |
451 | */ | 436 | */ |
@@ -454,25 +439,20 @@ static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
454 | } | 439 | } |
455 | 440 | ||
456 | /* Update sending rate (step 4 of [RFC 3448, 4.3]) */ | 441 | /* Update sending rate (step 4 of [RFC 3448, 4.3]) */ |
457 | if (hctx->ccid3hctx_p > 0) | 442 | if (hc->tx_p > 0) |
458 | hctx->ccid3hctx_x_calc = | 443 | hc->tx_x_calc = tfrc_calc_x(hc->tx_s, hc->tx_rtt, hc->tx_p); |
459 | tfrc_calc_x(hctx->ccid3hctx_s, | ||
460 | hctx->ccid3hctx_rtt, | ||
461 | hctx->ccid3hctx_p); | ||
462 | ccid3_hc_tx_update_x(sk, &now); | 444 | ccid3_hc_tx_update_x(sk, &now); |
463 | 445 | ||
464 | done_computing_x: | 446 | done_computing_x: |
465 | ccid3_pr_debug("%s(%p), RTT=%uus (sample=%uus), s=%u, " | 447 | ccid3_pr_debug("%s(%p), RTT=%uus (sample=%uus), s=%u, " |
466 | "p=%u, X_calc=%u, X_recv=%u, X=%u\n", | 448 | "p=%u, X_calc=%u, X_recv=%u, X=%u\n", |
467 | dccp_role(sk), | 449 | dccp_role(sk), sk, hc->tx_rtt, r_sample, |
468 | sk, hctx->ccid3hctx_rtt, r_sample, | 450 | hc->tx_s, hc->tx_p, hc->tx_x_calc, |
469 | hctx->ccid3hctx_s, hctx->ccid3hctx_p, | 451 | (unsigned)(hc->tx_x_recv >> 6), |
470 | hctx->ccid3hctx_x_calc, | 452 | (unsigned)(hc->tx_x >> 6)); |
471 | (unsigned)(hctx->ccid3hctx_x_recv >> 6), | ||
472 | (unsigned)(hctx->ccid3hctx_x >> 6)); | ||
473 | 453 | ||
474 | /* unschedule no feedback timer */ | 454 | /* unschedule no feedback timer */ |
475 | sk_stop_timer(sk, &hctx->ccid3hctx_no_feedback_timer); | 455 | sk_stop_timer(sk, &hc->tx_no_feedback_timer); |
476 | 456 | ||
477 | /* | 457 | /* |
478 | * As we have calculated new ipi, delta, t_nom it is possible | 458 | * As we have calculated new ipi, delta, t_nom it is possible |
@@ -486,21 +466,19 @@ done_computing_x: | |||
486 | * This can help avoid triggering the nofeedback timer too | 466 | * This can help avoid triggering the nofeedback timer too |
487 | * often ('spinning') on LANs with small RTTs. | 467 | * often ('spinning') on LANs with small RTTs. |
488 | */ | 468 | */ |
489 | hctx->ccid3hctx_t_rto = max_t(u32, 4 * hctx->ccid3hctx_rtt, | 469 | hc->tx_t_rto = max_t(u32, 4 * hc->tx_rtt, (CONFIG_IP_DCCP_CCID3_RTO * |
490 | (CONFIG_IP_DCCP_CCID3_RTO * | 470 | (USEC_PER_SEC / 1000))); |
491 | (USEC_PER_SEC / 1000))); | ||
492 | /* | 471 | /* |
493 | * Schedule no feedback timer to expire in | 472 | * Schedule no feedback timer to expire in |
494 | * max(t_RTO, 2 * s/X) = max(t_RTO, 2 * t_ipi) | 473 | * max(t_RTO, 2 * s/X) = max(t_RTO, 2 * t_ipi) |
495 | */ | 474 | */ |
496 | t_nfb = max(hctx->ccid3hctx_t_rto, 2 * hctx->ccid3hctx_t_ipi); | 475 | t_nfb = max(hc->tx_t_rto, 2 * hc->tx_t_ipi); |
497 | 476 | ||
498 | ccid3_pr_debug("%s(%p), Scheduled no feedback timer to " | 477 | ccid3_pr_debug("%s(%p), Scheduled no feedback timer to " |
499 | "expire in %lu jiffies (%luus)\n", | 478 | "expire in %lu jiffies (%luus)\n", |
500 | dccp_role(sk), | 479 | dccp_role(sk), sk, usecs_to_jiffies(t_nfb), t_nfb); |
501 | sk, usecs_to_jiffies(t_nfb), t_nfb); | ||
502 | 480 | ||
503 | sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer, | 481 | sk_reset_timer(sk, &hc->tx_no_feedback_timer, |
504 | jiffies + usecs_to_jiffies(t_nfb)); | 482 | jiffies + usecs_to_jiffies(t_nfb)); |
505 | } | 483 | } |
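On each feedback packet the sender recovers p from the on-wire inverse loss rate (p = 10^6 / pinv, with pinv of 0 or ~0 meaning no loss yet), folds the RTT sample into a weight-9 moving average, and sets the nofeedback timeout to max(4 * RTT, CONFIG_IP_DCCP_CCID3_RTO milliseconds). A sketch of those three computations; the Kconfig RTO value is an assumption here, shown as 100 ms:

#include <stdint.h>

#define CCID3_RTO_MS 100U	/* assumed CONFIG_IP_DCCP_CCID3_RTO */

/* loss event rate scaled by 10^6; pinv of 0 or ~0 means "no loss yet" */
static uint32_t loss_rate_from_pinv(uint32_t pinv)
{
	if (pinv == 0 || pinv == ~0U)
		return 0;
	return 1000000U / pinv;
}

static uint32_t rtt_ewma(uint32_t rtt_us, uint32_t sample_us)
{
	return rtt_us ? (9 * rtt_us + sample_us) / 10 : sample_us;
}

static uint32_t nofeedback_rto(uint32_t rtt_us)
{
	uint32_t floor_us = CCID3_RTO_MS * 1000U;

	return 4 * rtt_us > floor_us ? 4 * rtt_us : floor_us;
}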
506 | 484 | ||
@@ -510,11 +488,11 @@ static int ccid3_hc_tx_parse_options(struct sock *sk, unsigned char option, | |||
510 | { | 488 | { |
511 | int rc = 0; | 489 | int rc = 0; |
512 | const struct dccp_sock *dp = dccp_sk(sk); | 490 | const struct dccp_sock *dp = dccp_sk(sk); |
513 | struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); | 491 | struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk); |
514 | struct ccid3_options_received *opt_recv; | 492 | struct ccid3_options_received *opt_recv; |
515 | __be32 opt_val; | 493 | __be32 opt_val; |
516 | 494 | ||
517 | opt_recv = &hctx->ccid3hctx_options_received; | 495 | opt_recv = &hc->tx_options_received; |
518 | 496 | ||
519 | if (opt_recv->ccid3or_seqno != dp->dccps_gsr) { | 497 | if (opt_recv->ccid3or_seqno != dp->dccps_gsr) { |
520 | opt_recv->ccid3or_seqno = dp->dccps_gsr; | 498 | opt_recv->ccid3or_seqno = dp->dccps_gsr; |
@@ -568,56 +546,55 @@ static int ccid3_hc_tx_parse_options(struct sock *sk, unsigned char option, | |||
568 | 546 | ||
569 | static int ccid3_hc_tx_init(struct ccid *ccid, struct sock *sk) | 547 | static int ccid3_hc_tx_init(struct ccid *ccid, struct sock *sk) |
570 | { | 548 | { |
571 | struct ccid3_hc_tx_sock *hctx = ccid_priv(ccid); | 549 | struct ccid3_hc_tx_sock *hc = ccid_priv(ccid); |
572 | 550 | ||
573 | hctx->ccid3hctx_state = TFRC_SSTATE_NO_SENT; | 551 | hc->tx_state = TFRC_SSTATE_NO_SENT; |
574 | hctx->ccid3hctx_hist = NULL; | 552 | hc->tx_hist = NULL; |
575 | setup_timer(&hctx->ccid3hctx_no_feedback_timer, | 553 | setup_timer(&hc->tx_no_feedback_timer, |
576 | ccid3_hc_tx_no_feedback_timer, (unsigned long)sk); | 554 | ccid3_hc_tx_no_feedback_timer, (unsigned long)sk); |
577 | |||
578 | return 0; | 555 | return 0; |
579 | } | 556 | } |
580 | 557 | ||
581 | static void ccid3_hc_tx_exit(struct sock *sk) | 558 | static void ccid3_hc_tx_exit(struct sock *sk) |
582 | { | 559 | { |
583 | struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); | 560 | struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk); |
584 | 561 | ||
585 | ccid3_hc_tx_set_state(sk, TFRC_SSTATE_TERM); | 562 | ccid3_hc_tx_set_state(sk, TFRC_SSTATE_TERM); |
586 | sk_stop_timer(sk, &hctx->ccid3hctx_no_feedback_timer); | 563 | sk_stop_timer(sk, &hc->tx_no_feedback_timer); |
587 | 564 | ||
588 | tfrc_tx_hist_purge(&hctx->ccid3hctx_hist); | 565 | tfrc_tx_hist_purge(&hc->tx_hist); |
589 | } | 566 | } |
590 | 567 | ||
591 | static void ccid3_hc_tx_get_info(struct sock *sk, struct tcp_info *info) | 568 | static void ccid3_hc_tx_get_info(struct sock *sk, struct tcp_info *info) |
592 | { | 569 | { |
593 | struct ccid3_hc_tx_sock *hctx; | 570 | struct ccid3_hc_tx_sock *hc; |
594 | 571 | ||
595 | /* Listen socks doesn't have a private CCID block */ | 572 | /* Listen socks doesn't have a private CCID block */ |
596 | if (sk->sk_state == DCCP_LISTEN) | 573 | if (sk->sk_state == DCCP_LISTEN) |
597 | return; | 574 | return; |
598 | 575 | ||
599 | hctx = ccid3_hc_tx_sk(sk); | 576 | hc = ccid3_hc_tx_sk(sk); |
600 | info->tcpi_rto = hctx->ccid3hctx_t_rto; | 577 | info->tcpi_rto = hc->tx_t_rto; |
601 | info->tcpi_rtt = hctx->ccid3hctx_rtt; | 578 | info->tcpi_rtt = hc->tx_rtt; |
602 | } | 579 | } |
603 | 580 | ||
604 | static int ccid3_hc_tx_getsockopt(struct sock *sk, const int optname, int len, | 581 | static int ccid3_hc_tx_getsockopt(struct sock *sk, const int optname, int len, |
605 | u32 __user *optval, int __user *optlen) | 582 | u32 __user *optval, int __user *optlen) |
606 | { | 583 | { |
607 | const struct ccid3_hc_tx_sock *hctx; | 584 | const struct ccid3_hc_tx_sock *hc; |
608 | const void *val; | 585 | const void *val; |
609 | 586 | ||
610 | /* Listen socks doesn't have a private CCID block */ | 587 | /* Listen socks doesn't have a private CCID block */ |
611 | if (sk->sk_state == DCCP_LISTEN) | 588 | if (sk->sk_state == DCCP_LISTEN) |
612 | return -EINVAL; | 589 | return -EINVAL; |
613 | 590 | ||
614 | hctx = ccid3_hc_tx_sk(sk); | 591 | hc = ccid3_hc_tx_sk(sk); |
615 | switch (optname) { | 592 | switch (optname) { |
616 | case DCCP_SOCKOPT_CCID_TX_INFO: | 593 | case DCCP_SOCKOPT_CCID_TX_INFO: |
617 | if (len < sizeof(hctx->ccid3hctx_tfrc)) | 594 | if (len < sizeof(hc->tx_tfrc)) |
618 | return -EINVAL; | 595 | return -EINVAL; |
619 | len = sizeof(hctx->ccid3hctx_tfrc); | 596 | len = sizeof(hc->tx_tfrc); |
620 | val = &hctx->ccid3hctx_tfrc; | 597 | val = &hc->tx_tfrc; |
621 | break; | 598 | break; |
622 | default: | 599 | default: |
623 | return -ENOPROTOOPT; | 600 | return -ENOPROTOOPT; |
@@ -657,34 +634,34 @@ static const char *ccid3_rx_state_name(enum ccid3_hc_rx_states state) | |||
657 | static void ccid3_hc_rx_set_state(struct sock *sk, | 634 | static void ccid3_hc_rx_set_state(struct sock *sk, |
658 | enum ccid3_hc_rx_states state) | 635 | enum ccid3_hc_rx_states state) |
659 | { | 636 | { |
660 | struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk); | 637 | struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk); |
661 | enum ccid3_hc_rx_states oldstate = hcrx->ccid3hcrx_state; | 638 | enum ccid3_hc_rx_states oldstate = hc->rx_state; |
662 | 639 | ||
663 | ccid3_pr_debug("%s(%p) %-8.8s -> %s\n", | 640 | ccid3_pr_debug("%s(%p) %-8.8s -> %s\n", |
664 | dccp_role(sk), sk, ccid3_rx_state_name(oldstate), | 641 | dccp_role(sk), sk, ccid3_rx_state_name(oldstate), |
665 | ccid3_rx_state_name(state)); | 642 | ccid3_rx_state_name(state)); |
666 | WARN_ON(state == oldstate); | 643 | WARN_ON(state == oldstate); |
667 | hcrx->ccid3hcrx_state = state; | 644 | hc->rx_state = state; |
668 | } | 645 | } |
669 | 646 | ||
670 | static void ccid3_hc_rx_send_feedback(struct sock *sk, | 647 | static void ccid3_hc_rx_send_feedback(struct sock *sk, |
671 | const struct sk_buff *skb, | 648 | const struct sk_buff *skb, |
672 | enum ccid3_fback_type fbtype) | 649 | enum ccid3_fback_type fbtype) |
673 | { | 650 | { |
674 | struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk); | 651 | struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk); |
675 | struct dccp_sock *dp = dccp_sk(sk); | 652 | struct dccp_sock *dp = dccp_sk(sk); |
676 | ktime_t now; | 653 | ktime_t now; |
677 | s64 delta = 0; | 654 | s64 delta = 0; |
678 | 655 | ||
679 | if (unlikely(hcrx->ccid3hcrx_state == TFRC_RSTATE_TERM)) | 656 | if (unlikely(hc->rx_state == TFRC_RSTATE_TERM)) |
680 | return; | 657 | return; |
681 | 658 | ||
682 | now = ktime_get_real(); | 659 | now = ktime_get_real(); |
683 | 660 | ||
684 | switch (fbtype) { | 661 | switch (fbtype) { |
685 | case CCID3_FBACK_INITIAL: | 662 | case CCID3_FBACK_INITIAL: |
686 | hcrx->ccid3hcrx_x_recv = 0; | 663 | hc->rx_x_recv = 0; |
687 | hcrx->ccid3hcrx_pinv = ~0U; /* see RFC 4342, 8.5 */ | 664 | hc->rx_pinv = ~0U; /* see RFC 4342, 8.5 */ |
688 | break; | 665 | break; |
689 | case CCID3_FBACK_PARAM_CHANGE: | 666 | case CCID3_FBACK_PARAM_CHANGE: |
690 | /* | 667 | /* |
@@ -697,27 +674,26 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk, | |||
697 | * the number of bytes since last feedback. | 674 | * the number of bytes since last feedback. |
698 | * This is a safe fallback, since X is bounded above by X_calc. | 675 | * This is a safe fallback, since X is bounded above by X_calc. |
699 | */ | 676 | */ |
700 | if (hcrx->ccid3hcrx_x_recv > 0) | 677 | if (hc->rx_x_recv > 0) |
701 | break; | 678 | break; |
702 | /* fall through */ | 679 | /* fall through */ |
703 | case CCID3_FBACK_PERIODIC: | 680 | case CCID3_FBACK_PERIODIC: |
704 | delta = ktime_us_delta(now, hcrx->ccid3hcrx_tstamp_last_feedback); | 681 | delta = ktime_us_delta(now, hc->rx_tstamp_last_feedback); |
705 | if (delta <= 0) | 682 | if (delta <= 0) |
706 | DCCP_BUG("delta (%ld) <= 0", (long)delta); | 683 | DCCP_BUG("delta (%ld) <= 0", (long)delta); |
707 | else | 684 | else |
708 | hcrx->ccid3hcrx_x_recv = | 685 | hc->rx_x_recv = scaled_div32(hc->rx_bytes_recv, delta); |
709 | scaled_div32(hcrx->ccid3hcrx_bytes_recv, delta); | ||
710 | break; | 686 | break; |
711 | default: | 687 | default: |
712 | return; | 688 | return; |
713 | } | 689 | } |
714 | 690 | ||
715 | ccid3_pr_debug("Interval %ldusec, X_recv=%u, 1/p=%u\n", (long)delta, | 691 | ccid3_pr_debug("Interval %ldusec, X_recv=%u, 1/p=%u\n", (long)delta, |
716 | hcrx->ccid3hcrx_x_recv, hcrx->ccid3hcrx_pinv); | 692 | hc->rx_x_recv, hc->rx_pinv); |
717 | 693 | ||
718 | hcrx->ccid3hcrx_tstamp_last_feedback = now; | 694 | hc->rx_tstamp_last_feedback = now; |
719 | hcrx->ccid3hcrx_last_counter = dccp_hdr(skb)->dccph_ccval; | 695 | hc->rx_last_counter = dccp_hdr(skb)->dccph_ccval; |
720 | hcrx->ccid3hcrx_bytes_recv = 0; | 696 | hc->rx_bytes_recv = 0; |
721 | 697 | ||
722 | dp->dccps_hc_rx_insert_options = 1; | 698 | dp->dccps_hc_rx_insert_options = 1; |
723 | dccp_send_ack(sk); | 699 | dccp_send_ack(sk); |
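For periodic feedback the receiver reports X_recv as the payload bytes counted since the last feedback divided by the elapsed time (RFC 3448, 6.2), then clears the byte counter and timestamp. Sketch, again assuming scaled_div32(a, b) is a * 10^6 / b:

#include <stdint.h>

/* X_recv in bytes/second from a byte count and an interval in usecs */
static uint32_t periodic_x_recv(uint32_t bytes_recv, int64_t delta_us)
{
	if (delta_us <= 0)	/* the caller treats this as a bug */
		return 0;
	return (uint32_t)(((uint64_t)bytes_recv * 1000000ULL) / delta_us);
}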
@@ -725,19 +701,19 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk, | |||
725 | 701 | ||
726 | static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb) | 702 | static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb) |
727 | { | 703 | { |
728 | const struct ccid3_hc_rx_sock *hcrx; | 704 | const struct ccid3_hc_rx_sock *hc; |
729 | __be32 x_recv, pinv; | 705 | __be32 x_recv, pinv; |
730 | 706 | ||
731 | if (!(sk->sk_state == DCCP_OPEN || sk->sk_state == DCCP_PARTOPEN)) | 707 | if (!(sk->sk_state == DCCP_OPEN || sk->sk_state == DCCP_PARTOPEN)) |
732 | return 0; | 708 | return 0; |
733 | 709 | ||
734 | hcrx = ccid3_hc_rx_sk(sk); | 710 | hc = ccid3_hc_rx_sk(sk); |
735 | 711 | ||
736 | if (dccp_packet_without_ack(skb)) | 712 | if (dccp_packet_without_ack(skb)) |
737 | return 0; | 713 | return 0; |
738 | 714 | ||
739 | x_recv = htonl(hcrx->ccid3hcrx_x_recv); | 715 | x_recv = htonl(hc->rx_x_recv); |
740 | pinv = htonl(hcrx->ccid3hcrx_pinv); | 716 | pinv = htonl(hc->rx_pinv); |
741 | 717 | ||
742 | if (dccp_insert_option(sk, skb, TFRC_OPT_LOSS_EVENT_RATE, | 718 | if (dccp_insert_option(sk, skb, TFRC_OPT_LOSS_EVENT_RATE, |
743 | &pinv, sizeof(pinv)) || | 719 | &pinv, sizeof(pinv)) || |
@@ -760,26 +736,26 @@ static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb) | |||
760 | */ | 736 | */ |
761 | static u32 ccid3_first_li(struct sock *sk) | 737 | static u32 ccid3_first_li(struct sock *sk) |
762 | { | 738 | { |
763 | struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk); | 739 | struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk); |
764 | u32 x_recv, p, delta; | 740 | u32 x_recv, p, delta; |
765 | u64 fval; | 741 | u64 fval; |
766 | 742 | ||
767 | if (hcrx->ccid3hcrx_rtt == 0) { | 743 | if (hc->rx_rtt == 0) { |
768 | DCCP_WARN("No RTT estimate available, using fallback RTT\n"); | 744 | DCCP_WARN("No RTT estimate available, using fallback RTT\n"); |
769 | hcrx->ccid3hcrx_rtt = DCCP_FALLBACK_RTT; | 745 | hc->rx_rtt = DCCP_FALLBACK_RTT; |
770 | } | 746 | } |
771 | 747 | ||
772 | delta = ktime_to_us(net_timedelta(hcrx->ccid3hcrx_tstamp_last_feedback)); | 748 | delta = ktime_to_us(net_timedelta(hc->rx_tstamp_last_feedback)); |
773 | x_recv = scaled_div32(hcrx->ccid3hcrx_bytes_recv, delta); | 749 | x_recv = scaled_div32(hc->rx_bytes_recv, delta); |
774 | if (x_recv == 0) { /* would also trigger divide-by-zero */ | 750 | if (x_recv == 0) { /* would also trigger divide-by-zero */ |
775 | DCCP_WARN("X_recv==0\n"); | 751 | DCCP_WARN("X_recv==0\n"); |
776 | if ((x_recv = hcrx->ccid3hcrx_x_recv) == 0) { | 752 | if ((x_recv = hc->rx_x_recv) == 0) { |
777 | DCCP_BUG("stored value of X_recv is zero"); | 753 | DCCP_BUG("stored value of X_recv is zero"); |
778 | return ~0U; | 754 | return ~0U; |
779 | } | 755 | } |
780 | } | 756 | } |
781 | 757 | ||
782 | fval = scaled_div(hcrx->ccid3hcrx_s, hcrx->ccid3hcrx_rtt); | 758 | fval = scaled_div(hc->rx_s, hc->rx_rtt); |
783 | fval = scaled_div32(fval, x_recv); | 759 | fval = scaled_div32(fval, x_recv); |
784 | p = tfrc_calc_x_reverse_lookup(fval); | 760 | p = tfrc_calc_x_reverse_lookup(fval); |
785 | 761 | ||
@@ -791,19 +767,19 @@ static u32 ccid3_first_li(struct sock *sk) | |||
791 | 767 | ||
792 | static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) | 768 | static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) |
793 | { | 769 | { |
794 | struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk); | 770 | struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk); |
795 | enum ccid3_fback_type do_feedback = CCID3_FBACK_NONE; | 771 | enum ccid3_fback_type do_feedback = CCID3_FBACK_NONE; |
796 | const u64 ndp = dccp_sk(sk)->dccps_options_received.dccpor_ndp; | 772 | const u64 ndp = dccp_sk(sk)->dccps_options_received.dccpor_ndp; |
797 | const bool is_data_packet = dccp_data_packet(skb); | 773 | const bool is_data_packet = dccp_data_packet(skb); |
798 | 774 | ||
799 | if (unlikely(hcrx->ccid3hcrx_state == TFRC_RSTATE_NO_DATA)) { | 775 | if (unlikely(hc->rx_state == TFRC_RSTATE_NO_DATA)) { |
800 | if (is_data_packet) { | 776 | if (is_data_packet) { |
801 | const u32 payload = skb->len - dccp_hdr(skb)->dccph_doff * 4; | 777 | const u32 payload = skb->len - dccp_hdr(skb)->dccph_doff * 4; |
802 | do_feedback = CCID3_FBACK_INITIAL; | 778 | do_feedback = CCID3_FBACK_INITIAL; |
803 | ccid3_hc_rx_set_state(sk, TFRC_RSTATE_DATA); | 779 | ccid3_hc_rx_set_state(sk, TFRC_RSTATE_DATA); |
804 | hcrx->ccid3hcrx_s = payload; | 780 | hc->rx_s = payload; |
805 | /* | 781 | /* |
806 | * Not necessary to update ccid3hcrx_bytes_recv here, | 782 | * Not necessary to update rx_bytes_recv here, |
807 | * since X_recv = 0 for the first feedback packet (cf. | 783 | * since X_recv = 0 for the first feedback packet (cf. |
808 | * RFC 3448, 6.3) -- gerrit | 784 | * RFC 3448, 6.3) -- gerrit |
809 | */ | 785 | */ |
@@ -811,7 +787,7 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
811 | goto update_records; | 787 | goto update_records; |
812 | } | 788 | } |
813 | 789 | ||
814 | if (tfrc_rx_hist_duplicate(&hcrx->ccid3hcrx_hist, skb)) | 790 | if (tfrc_rx_hist_duplicate(&hc->rx_hist, skb)) |
815 | return; /* done receiving */ | 791 | return; /* done receiving */ |
816 | 792 | ||
817 | if (is_data_packet) { | 793 | if (is_data_packet) { |
@@ -819,20 +795,20 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
819 | /* | 795 | /* |
820 | * Update moving-average of s and the sum of received payload bytes | 796 | * Update moving-average of s and the sum of received payload bytes |
821 | */ | 797 | */ |
822 | hcrx->ccid3hcrx_s = tfrc_ewma(hcrx->ccid3hcrx_s, payload, 9); | 798 | hc->rx_s = tfrc_ewma(hc->rx_s, payload, 9); |
823 | hcrx->ccid3hcrx_bytes_recv += payload; | 799 | hc->rx_bytes_recv += payload; |
824 | } | 800 | } |
825 | 801 | ||
826 | /* | 802 | /* |
827 | * Perform loss detection and handle pending losses | 803 | * Perform loss detection and handle pending losses |
828 | */ | 804 | */ |
829 | if (tfrc_rx_handle_loss(&hcrx->ccid3hcrx_hist, &hcrx->ccid3hcrx_li_hist, | 805 | if (tfrc_rx_handle_loss(&hc->rx_hist, &hc->rx_li_hist, |
830 | skb, ndp, ccid3_first_li, sk)) { | 806 | skb, ndp, ccid3_first_li, sk)) { |
831 | do_feedback = CCID3_FBACK_PARAM_CHANGE; | 807 | do_feedback = CCID3_FBACK_PARAM_CHANGE; |
832 | goto done_receiving; | 808 | goto done_receiving; |
833 | } | 809 | } |
834 | 810 | ||
835 | if (tfrc_rx_hist_loss_pending(&hcrx->ccid3hcrx_hist)) | 811 | if (tfrc_rx_hist_loss_pending(&hc->rx_hist)) |
836 | return; /* done receiving */ | 812 | return; /* done receiving */ |
837 | 813 | ||
838 | /* | 814 | /* |
@@ -841,17 +817,17 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
841 | if (unlikely(!is_data_packet)) | 817 | if (unlikely(!is_data_packet)) |
842 | goto update_records; | 818 | goto update_records; |
843 | 819 | ||
844 | if (!tfrc_lh_is_initialised(&hcrx->ccid3hcrx_li_hist)) { | 820 | if (!tfrc_lh_is_initialised(&hc->rx_li_hist)) { |
845 | const u32 sample = tfrc_rx_hist_sample_rtt(&hcrx->ccid3hcrx_hist, skb); | 821 | const u32 sample = tfrc_rx_hist_sample_rtt(&hc->rx_hist, skb); |
846 | /* | 822 | /* |
847 | * Empty loss history: no loss so far, hence p stays 0. | 823 | * Empty loss history: no loss so far, hence p stays 0. |
848 | * Sample RTT values, since an RTT estimate is required for the | 824 | * Sample RTT values, since an RTT estimate is required for the |
849 | * computation of p when the first loss occurs; RFC 3448, 6.3.1. | 825 | * computation of p when the first loss occurs; RFC 3448, 6.3.1. |
850 | */ | 826 | */ |
851 | if (sample != 0) | 827 | if (sample != 0) |
852 | hcrx->ccid3hcrx_rtt = tfrc_ewma(hcrx->ccid3hcrx_rtt, sample, 9); | 828 | hc->rx_rtt = tfrc_ewma(hc->rx_rtt, sample, 9); |
853 | 829 | ||
854 | } else if (tfrc_lh_update_i_mean(&hcrx->ccid3hcrx_li_hist, skb)) { | 830 | } else if (tfrc_lh_update_i_mean(&hc->rx_li_hist, skb)) { |
855 | /* | 831 | /* |
856 | * Step (3) of [RFC 3448, 6.1]: Recompute I_mean and, if I_mean | 832 | * Step (3) of [RFC 3448, 6.1]: Recompute I_mean and, if I_mean |
857 | * has decreased (resp. p has increased), send feedback now. | 833 | * has decreased (resp. p has increased), send feedback now. |
@@ -862,11 +838,11 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
862 | /* | 838 | /* |
863 | * Check if the periodic once-per-RTT feedback is due; RFC 4342, 10.3 | 839 | * Check if the periodic once-per-RTT feedback is due; RFC 4342, 10.3 |
864 | */ | 840 | */ |
865 | if (SUB16(dccp_hdr(skb)->dccph_ccval, hcrx->ccid3hcrx_last_counter) > 3) | 841 | if (SUB16(dccp_hdr(skb)->dccph_ccval, hc->rx_last_counter) > 3) |
866 | do_feedback = CCID3_FBACK_PERIODIC; | 842 | do_feedback = CCID3_FBACK_PERIODIC; |
867 | 843 | ||
868 | update_records: | 844 | update_records: |
869 | tfrc_rx_hist_add_packet(&hcrx->ccid3hcrx_hist, skb, ndp); | 845 | tfrc_rx_hist_add_packet(&hc->rx_hist, skb, ndp); |
870 | 846 | ||
871 | done_receiving: | 847 | done_receiving: |
872 | if (do_feedback) | 848 | if (do_feedback) |
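The once-per-RTT feedback check in this hunk compares the packet's CCVal window counter with the one recorded at the last feedback: the sender increments the counter four times per RTT modulo 16, so a circular distance greater than 3 means at least one RTT has elapsed (RFC 4342, 10.3). SUB16() is assumed here to be 4-bit circular subtraction:

#include <stdint.h>

/* circular distance between two 4-bit window counters (assumed SUB16) */
static uint8_t sub16(uint8_t a, uint8_t b)
{
	return (a + 16 - b) & 0xF;
}

static int periodic_feedback_due(uint8_t ccval, uint8_t last_counter)
{
	return sub16(ccval, last_counter) > 3;	/* > 3 quarter-RTTs */
}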
@@ -875,41 +851,41 @@ done_receiving: | |||
875 | 851 | ||
876 | static int ccid3_hc_rx_init(struct ccid *ccid, struct sock *sk) | 852 | static int ccid3_hc_rx_init(struct ccid *ccid, struct sock *sk) |
877 | { | 853 | { |
878 | struct ccid3_hc_rx_sock *hcrx = ccid_priv(ccid); | 854 | struct ccid3_hc_rx_sock *hc = ccid_priv(ccid); |
879 | 855 | ||
880 | hcrx->ccid3hcrx_state = TFRC_RSTATE_NO_DATA; | 856 | hc->rx_state = TFRC_RSTATE_NO_DATA; |
881 | tfrc_lh_init(&hcrx->ccid3hcrx_li_hist); | 857 | tfrc_lh_init(&hc->rx_li_hist); |
882 | return tfrc_rx_hist_alloc(&hcrx->ccid3hcrx_hist); | 858 | return tfrc_rx_hist_alloc(&hc->rx_hist); |
883 | } | 859 | } |
884 | 860 | ||
885 | static void ccid3_hc_rx_exit(struct sock *sk) | 861 | static void ccid3_hc_rx_exit(struct sock *sk) |
886 | { | 862 | { |
887 | struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk); | 863 | struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk); |
888 | 864 | ||
889 | ccid3_hc_rx_set_state(sk, TFRC_RSTATE_TERM); | 865 | ccid3_hc_rx_set_state(sk, TFRC_RSTATE_TERM); |
890 | 866 | ||
891 | tfrc_rx_hist_purge(&hcrx->ccid3hcrx_hist); | 867 | tfrc_rx_hist_purge(&hc->rx_hist); |
892 | tfrc_lh_cleanup(&hcrx->ccid3hcrx_li_hist); | 868 | tfrc_lh_cleanup(&hc->rx_li_hist); |
893 | } | 869 | } |
894 | 870 | ||
895 | static void ccid3_hc_rx_get_info(struct sock *sk, struct tcp_info *info) | 871 | static void ccid3_hc_rx_get_info(struct sock *sk, struct tcp_info *info) |
896 | { | 872 | { |
897 | const struct ccid3_hc_rx_sock *hcrx; | 873 | const struct ccid3_hc_rx_sock *hc; |
898 | 874 | ||
899 | /* Listen socks doesn't have a private CCID block */ | 875 | /* Listen socks doesn't have a private CCID block */ |
900 | if (sk->sk_state == DCCP_LISTEN) | 876 | if (sk->sk_state == DCCP_LISTEN) |
901 | return; | 877 | return; |
902 | 878 | ||
903 | hcrx = ccid3_hc_rx_sk(sk); | 879 | hc = ccid3_hc_rx_sk(sk); |
904 | info->tcpi_ca_state = hcrx->ccid3hcrx_state; | 880 | info->tcpi_ca_state = hc->rx_state; |
905 | info->tcpi_options |= TCPI_OPT_TIMESTAMPS; | 881 | info->tcpi_options |= TCPI_OPT_TIMESTAMPS; |
906 | info->tcpi_rcv_rtt = hcrx->ccid3hcrx_rtt; | 882 | info->tcpi_rcv_rtt = hc->rx_rtt; |
907 | } | 883 | } |
908 | 884 | ||
909 | static int ccid3_hc_rx_getsockopt(struct sock *sk, const int optname, int len, | 885 | static int ccid3_hc_rx_getsockopt(struct sock *sk, const int optname, int len, |
910 | u32 __user *optval, int __user *optlen) | 886 | u32 __user *optval, int __user *optlen) |
911 | { | 887 | { |
912 | const struct ccid3_hc_rx_sock *hcrx; | 888 | const struct ccid3_hc_rx_sock *hc; |
913 | struct tfrc_rx_info rx_info; | 889 | struct tfrc_rx_info rx_info; |
914 | const void *val; | 890 | const void *val; |
915 | 891 | ||
@@ -917,15 +893,15 @@ static int ccid3_hc_rx_getsockopt(struct sock *sk, const int optname, int len, | |||
917 | if (sk->sk_state == DCCP_LISTEN) | 893 | if (sk->sk_state == DCCP_LISTEN) |
918 | return -EINVAL; | 894 | return -EINVAL; |
919 | 895 | ||
920 | hcrx = ccid3_hc_rx_sk(sk); | 896 | hc = ccid3_hc_rx_sk(sk); |
921 | switch (optname) { | 897 | switch (optname) { |
922 | case DCCP_SOCKOPT_CCID_RX_INFO: | 898 | case DCCP_SOCKOPT_CCID_RX_INFO: |
923 | if (len < sizeof(rx_info)) | 899 | if (len < sizeof(rx_info)) |
924 | return -EINVAL; | 900 | return -EINVAL; |
925 | rx_info.tfrcrx_x_recv = hcrx->ccid3hcrx_x_recv; | 901 | rx_info.tfrcrx_x_recv = hc->rx_x_recv; |
926 | rx_info.tfrcrx_rtt = hcrx->ccid3hcrx_rtt; | 902 | rx_info.tfrcrx_rtt = hc->rx_rtt; |
927 | rx_info.tfrcrx_p = hcrx->ccid3hcrx_pinv == 0 ? ~0U : | 903 | rx_info.tfrcrx_p = hc->rx_pinv == 0 ? ~0U : |
928 | scaled_div(1, hcrx->ccid3hcrx_pinv); | 904 | scaled_div(1, hc->rx_pinv); |
929 | len = sizeof(rx_info); | 905 | len = sizeof(rx_info); |
930 | val = &rx_info; | 906 | val = &rx_info; |
931 | break; | 907 | break; |
diff --git a/net/dccp/ccids/ccid3.h b/net/dccp/ccids/ccid3.h index e5a244143846..032635776653 100644 --- a/net/dccp/ccids/ccid3.h +++ b/net/dccp/ccids/ccid3.h | |||
@@ -75,44 +75,44 @@ enum ccid3_hc_tx_states { | |||
75 | 75 | ||
76 | /** | 76 | /** |
77 | * struct ccid3_hc_tx_sock - CCID3 sender half-connection socket | 77 | * struct ccid3_hc_tx_sock - CCID3 sender half-connection socket |
78 | * @ccid3hctx_x - Current sending rate in 64 * bytes per second | 78 | * @tx_x: Current sending rate in 64 * bytes per second |
79 | * @ccid3hctx_x_recv - Receive rate in 64 * bytes per second | 79 | * @tx_x_recv: Receive rate in 64 * bytes per second |
80 | * @ccid3hctx_x_calc - Calculated rate in bytes per second | 80 | * @tx_x_calc: Calculated rate in bytes per second |
81 | * @ccid3hctx_rtt - Estimate of current round trip time in usecs | 81 | * @tx_rtt: Estimate of current round trip time in usecs |
82 | * @ccid3hctx_p - Current loss event rate (0-1) scaled by 1000000 | 82 | * @tx_p: Current loss event rate (0-1) scaled by 1000000 |
83 | * @ccid3hctx_s - Packet size in bytes | 83 | * @tx_s: Packet size in bytes |
84 | * @ccid3hctx_t_rto - Nofeedback Timer setting in usecs | 84 | * @tx_t_rto: Nofeedback Timer setting in usecs |
85 | * @ccid3hctx_t_ipi - Interpacket (send) interval (RFC 3448, 4.6) in usecs | 85 | * @tx_t_ipi: Interpacket (send) interval (RFC 3448, 4.6) in usecs |
86 | * @ccid3hctx_state - Sender state, one of %ccid3_hc_tx_states | 86 | * @tx_state: Sender state, one of %ccid3_hc_tx_states |
87 | * @ccid3hctx_last_win_count - Last window counter sent | 87 | * @tx_last_win_count: Last window counter sent |
88 | * @ccid3hctx_t_last_win_count - Timestamp of earliest packet | 88 | * @tx_t_last_win_count: Timestamp of earliest packet |
89 | * with last_win_count value sent | 89 | * with last_win_count value sent |
90 | * @ccid3hctx_no_feedback_timer - Handle to no feedback timer | 90 | * @tx_no_feedback_timer: Handle to no feedback timer |
91 | * @ccid3hctx_t_ld - Time last doubled during slow start | 91 | * @tx_t_ld: Time last doubled during slow start |
92 | * @ccid3hctx_t_nom - Nominal send time of next packet | 92 | * @tx_t_nom: Nominal send time of next packet |
93 | * @ccid3hctx_delta - Send timer delta (RFC 3448, 4.6) in usecs | 93 | * @tx_delta: Send timer delta (RFC 3448, 4.6) in usecs |
94 | * @ccid3hctx_hist - Packet history | 94 | * @tx_hist: Packet history |
95 | * @ccid3hctx_options_received - Parsed set of retrieved options | 95 | * @tx_options_received: Parsed set of retrieved options |
96 | */ | 96 | */ |
97 | struct ccid3_hc_tx_sock { | 97 | struct ccid3_hc_tx_sock { |
98 | struct tfrc_tx_info ccid3hctx_tfrc; | 98 | struct tfrc_tx_info tx_tfrc; |
99 | #define ccid3hctx_x ccid3hctx_tfrc.tfrctx_x | 99 | #define tx_x tx_tfrc.tfrctx_x |
100 | #define ccid3hctx_x_recv ccid3hctx_tfrc.tfrctx_x_recv | 100 | #define tx_x_recv tx_tfrc.tfrctx_x_recv |
101 | #define ccid3hctx_x_calc ccid3hctx_tfrc.tfrctx_x_calc | 101 | #define tx_x_calc tx_tfrc.tfrctx_x_calc |
102 | #define ccid3hctx_rtt ccid3hctx_tfrc.tfrctx_rtt | 102 | #define tx_rtt tx_tfrc.tfrctx_rtt |
103 | #define ccid3hctx_p ccid3hctx_tfrc.tfrctx_p | 103 | #define tx_p tx_tfrc.tfrctx_p |
104 | #define ccid3hctx_t_rto ccid3hctx_tfrc.tfrctx_rto | 104 | #define tx_t_rto tx_tfrc.tfrctx_rto |
105 | #define ccid3hctx_t_ipi ccid3hctx_tfrc.tfrctx_ipi | 105 | #define tx_t_ipi tx_tfrc.tfrctx_ipi |
106 | u16 ccid3hctx_s; | 106 | u16 tx_s; |
107 | enum ccid3_hc_tx_states ccid3hctx_state:8; | 107 | enum ccid3_hc_tx_states tx_state:8; |
108 | u8 ccid3hctx_last_win_count; | 108 | u8 tx_last_win_count; |
109 | ktime_t ccid3hctx_t_last_win_count; | 109 | ktime_t tx_t_last_win_count; |
110 | struct timer_list ccid3hctx_no_feedback_timer; | 110 | struct timer_list tx_no_feedback_timer; |
111 | ktime_t ccid3hctx_t_ld; | 111 | ktime_t tx_t_ld; |
112 | ktime_t ccid3hctx_t_nom; | 112 | ktime_t tx_t_nom; |
113 | u32 ccid3hctx_delta; | 113 | u32 tx_delta; |
114 | struct tfrc_tx_hist_entry *ccid3hctx_hist; | 114 | struct tfrc_tx_hist_entry *tx_hist; |
115 | struct ccid3_options_received ccid3hctx_options_received; | 115 | struct ccid3_options_received tx_options_received; |
116 | }; | 116 | }; |
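The renamed macros above keep re-exporting the embedded struct tfrc_tx_info members under the shorter tx_* names, so hc->tx_x still resolves to hc->tx_tfrc.tfrctx_x. tx_x and tx_x_recv stay in the 2^6-scaled units, which is why the debug statements earlier in this diff shift them right by 6 before printing; a two-helper illustration of that convention:

#include <stdint.h>

/* the 2^6 scaling used by tx_x / tx_x_recv throughout this CCID */
static uint64_t rate_to_scaled(uint64_t bytes_per_sec)
{
	return bytes_per_sec << 6;
}

static uint64_t rate_from_scaled(uint64_t x_scaled)
{
	return x_scaled >> 6;	/* what the debug printouts do */
}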
117 | 117 | ||
118 | static inline struct ccid3_hc_tx_sock *ccid3_hc_tx_sk(const struct sock *sk) | 118 | static inline struct ccid3_hc_tx_sock *ccid3_hc_tx_sk(const struct sock *sk) |
@@ -131,32 +131,32 @@ enum ccid3_hc_rx_states { | |||
131 | 131 | ||
132 | /** | 132 | /** |
133 | * struct ccid3_hc_rx_sock - CCID3 receiver half-connection socket | 133 | * struct ccid3_hc_rx_sock - CCID3 receiver half-connection socket |
134 | * @ccid3hcrx_x_recv - Receiver estimate of send rate (RFC 3448 4.3) | 134 | * @rx_x_recv: Receiver estimate of send rate (RFC 3448 4.3) |
135 | * @ccid3hcrx_rtt - Receiver estimate of rtt (non-standard) | 135 | * @rx_rtt: Receiver estimate of rtt (non-standard) |
136 | * @ccid3hcrx_p - Current loss event rate (RFC 3448 5.4) | 136 | * @rx_p: Current loss event rate (RFC 3448 5.4) |
137 | * @ccid3hcrx_last_counter - Tracks window counter (RFC 4342, 8.1) | 137 | * @rx_last_counter: Tracks window counter (RFC 4342, 8.1) |
138 | * @ccid3hcrx_state - Receiver state, one of %ccid3_hc_rx_states | 138 | * @rx_state: Receiver state, one of %ccid3_hc_rx_states |
139 | * @ccid3hcrx_bytes_recv - Total sum of DCCP payload bytes | 139 | * @rx_bytes_recv: Total sum of DCCP payload bytes |
140 | * @ccid3hcrx_x_recv - Receiver estimate of send rate (RFC 3448, sec. 4.3) | 140 | * @rx_x_recv: Receiver estimate of send rate (RFC 3448, sec. 4.3) |
141 | * @ccid3hcrx_rtt - Receiver estimate of RTT | 141 | * @rx_rtt: Receiver estimate of RTT |
142 | * @ccid3hcrx_tstamp_last_feedback - Time at which last feedback was sent | 142 | * @rx_tstamp_last_feedback: Time at which last feedback was sent |
143 | * @ccid3hcrx_tstamp_last_ack - Time at which last feedback was sent | 143 | * @rx_tstamp_last_ack: Time at which last feedback was sent |
144 | * @ccid3hcrx_hist - Packet history (loss detection + RTT sampling) | 144 | * @rx_hist: Packet history (loss detection + RTT sampling) |
145 | * @ccid3hcrx_li_hist - Loss Interval database | 145 | * @rx_li_hist: Loss Interval database |
146 | * @ccid3hcrx_s - Received packet size in bytes | 146 | * @rx_s: Received packet size in bytes |
147 | * @ccid3hcrx_pinv - Inverse of Loss Event Rate (RFC 4342, sec. 8.5) | 147 | * @rx_pinv: Inverse of Loss Event Rate (RFC 4342, sec. 8.5) |
148 | */ | 148 | */ |
149 | struct ccid3_hc_rx_sock { | 149 | struct ccid3_hc_rx_sock { |
150 | u8 ccid3hcrx_last_counter:4; | 150 | u8 rx_last_counter:4; |
151 | enum ccid3_hc_rx_states ccid3hcrx_state:8; | 151 | enum ccid3_hc_rx_states rx_state:8; |
152 | u32 ccid3hcrx_bytes_recv; | 152 | u32 rx_bytes_recv; |
153 | u32 ccid3hcrx_x_recv; | 153 | u32 rx_x_recv; |
154 | u32 ccid3hcrx_rtt; | 154 | u32 rx_rtt; |
155 | ktime_t ccid3hcrx_tstamp_last_feedback; | 155 | ktime_t rx_tstamp_last_feedback; |
156 | struct tfrc_rx_hist ccid3hcrx_hist; | 156 | struct tfrc_rx_hist rx_hist; |
157 | struct tfrc_loss_hist ccid3hcrx_li_hist; | 157 | struct tfrc_loss_hist rx_li_hist; |
158 | u16 ccid3hcrx_s; | 158 | u16 rx_s; |
159 | #define ccid3hcrx_pinv ccid3hcrx_li_hist.i_mean | 159 | #define rx_pinv rx_li_hist.i_mean |
160 | }; | 160 | }; |
161 | 161 | ||
162 | static inline struct ccid3_hc_rx_sock *ccid3_hc_rx_sk(const struct sock *sk) | 162 | static inline struct ccid3_hc_rx_sock *ccid3_hc_rx_sk(const struct sock *sk) |
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index 7302e1498d46..00028d4b09d9 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c | |||
@@ -62,10 +62,10 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
62 | nexthop = inet->opt->faddr; | 62 | nexthop = inet->opt->faddr; |
63 | } | 63 | } |
64 | 64 | ||
65 | tmp = ip_route_connect(&rt, nexthop, inet->saddr, | 65 | tmp = ip_route_connect(&rt, nexthop, inet->inet_saddr, |
66 | RT_CONN_FLAGS(sk), sk->sk_bound_dev_if, | 66 | RT_CONN_FLAGS(sk), sk->sk_bound_dev_if, |
67 | IPPROTO_DCCP, | 67 | IPPROTO_DCCP, |
68 | inet->sport, usin->sin_port, sk, 1); | 68 | inet->inet_sport, usin->sin_port, sk, 1); |
69 | if (tmp < 0) | 69 | if (tmp < 0) |
70 | return tmp; | 70 | return tmp; |
71 | 71 | ||
@@ -77,12 +77,12 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
77 | if (inet->opt == NULL || !inet->opt->srr) | 77 | if (inet->opt == NULL || !inet->opt->srr) |
78 | daddr = rt->rt_dst; | 78 | daddr = rt->rt_dst; |
79 | 79 | ||
80 | if (inet->saddr == 0) | 80 | if (inet->inet_saddr == 0) |
81 | inet->saddr = rt->rt_src; | 81 | inet->inet_saddr = rt->rt_src; |
82 | inet->rcv_saddr = inet->saddr; | 82 | inet->inet_rcv_saddr = inet->inet_saddr; |
83 | 83 | ||
84 | inet->dport = usin->sin_port; | 84 | inet->inet_dport = usin->sin_port; |
85 | inet->daddr = daddr; | 85 | inet->inet_daddr = daddr; |
86 | 86 | ||
87 | inet_csk(sk)->icsk_ext_hdr_len = 0; | 87 | inet_csk(sk)->icsk_ext_hdr_len = 0; |
88 | if (inet->opt != NULL) | 88 | if (inet->opt != NULL) |
@@ -98,17 +98,19 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
98 | if (err != 0) | 98 | if (err != 0) |
99 | goto failure; | 99 | goto failure; |
100 | 100 | ||
101 | err = ip_route_newports(&rt, IPPROTO_DCCP, inet->sport, inet->dport, | 101 | err = ip_route_newports(&rt, IPPROTO_DCCP, inet->inet_sport, |
102 | sk); | 102 | inet->inet_dport, sk); |
103 | if (err != 0) | 103 | if (err != 0) |
104 | goto failure; | 104 | goto failure; |
105 | 105 | ||
106 | /* OK, now commit destination to socket. */ | 106 | /* OK, now commit destination to socket. */ |
107 | sk_setup_caps(sk, &rt->u.dst); | 107 | sk_setup_caps(sk, &rt->u.dst); |
108 | 108 | ||
109 | dp->dccps_iss = secure_dccp_sequence_number(inet->saddr, inet->daddr, | 109 | dp->dccps_iss = secure_dccp_sequence_number(inet->inet_saddr, |
110 | inet->sport, inet->dport); | 110 | inet->inet_daddr, |
111 | inet->id = dp->dccps_iss ^ jiffies; | 111 | inet->inet_sport, |
112 | inet->inet_dport); | ||
113 | inet->inet_id = dp->dccps_iss ^ jiffies; | ||
112 | 114 | ||
113 | err = dccp_connect(sk); | 115 | err = dccp_connect(sk); |
114 | rt = NULL; | 116 | rt = NULL; |
@@ -123,7 +125,7 @@ failure: | |||
123 | dccp_set_state(sk, DCCP_CLOSED); | 125 | dccp_set_state(sk, DCCP_CLOSED); |
124 | ip_rt_put(rt); | 126 | ip_rt_put(rt); |
125 | sk->sk_route_caps = 0; | 127 | sk->sk_route_caps = 0; |
126 | inet->dport = 0; | 128 | inet->inet_dport = 0; |
127 | goto out; | 129 | goto out; |
128 | } | 130 | } |
129 | 131 | ||
@@ -352,7 +354,9 @@ void dccp_v4_send_check(struct sock *sk, int unused, struct sk_buff *skb) | |||
352 | struct dccp_hdr *dh = dccp_hdr(skb); | 354 | struct dccp_hdr *dh = dccp_hdr(skb); |
353 | 355 | ||
354 | dccp_csum_outgoing(skb); | 356 | dccp_csum_outgoing(skb); |
355 | dh->dccph_checksum = dccp_v4_csum_finish(skb, inet->saddr, inet->daddr); | 357 | dh->dccph_checksum = dccp_v4_csum_finish(skb, |
358 | inet->inet_saddr, | ||
359 | inet->inet_daddr); | ||
356 | } | 360 | } |
357 | 361 | ||
358 | EXPORT_SYMBOL_GPL(dccp_v4_send_check); | 362 | EXPORT_SYMBOL_GPL(dccp_v4_send_check); |
@@ -393,14 +397,14 @@ struct sock *dccp_v4_request_recv_sock(struct sock *sk, struct sk_buff *skb, | |||
393 | 397 | ||
394 | newinet = inet_sk(newsk); | 398 | newinet = inet_sk(newsk); |
395 | ireq = inet_rsk(req); | 399 | ireq = inet_rsk(req); |
396 | newinet->daddr = ireq->rmt_addr; | 400 | newinet->inet_daddr = ireq->rmt_addr; |
397 | newinet->rcv_saddr = ireq->loc_addr; | 401 | newinet->inet_rcv_saddr = ireq->loc_addr; |
398 | newinet->saddr = ireq->loc_addr; | 402 | newinet->inet_saddr = ireq->loc_addr; |
399 | newinet->opt = ireq->opt; | 403 | newinet->opt = ireq->opt; |
400 | ireq->opt = NULL; | 404 | ireq->opt = NULL; |
401 | newinet->mc_index = inet_iif(skb); | 405 | newinet->mc_index = inet_iif(skb); |
402 | newinet->mc_ttl = ip_hdr(skb)->ttl; | 406 | newinet->mc_ttl = ip_hdr(skb)->ttl; |
403 | newinet->id = jiffies; | 407 | newinet->inet_id = jiffies; |
404 | 408 | ||
405 | dccp_sync_mss(newsk, dst_mtu(dst)); | 409 | dccp_sync_mss(newsk, dst_mtu(dst)); |
406 | 410 | ||
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index e48ca5d45658..6d89f9f7d5d8 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c | |||
@@ -158,8 +158,8 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
158 | ipv6_addr_copy(&fl.fl6_dst, &np->daddr); | 158 | ipv6_addr_copy(&fl.fl6_dst, &np->daddr); |
159 | ipv6_addr_copy(&fl.fl6_src, &np->saddr); | 159 | ipv6_addr_copy(&fl.fl6_src, &np->saddr); |
160 | fl.oif = sk->sk_bound_dev_if; | 160 | fl.oif = sk->sk_bound_dev_if; |
161 | fl.fl_ip_dport = inet->dport; | 161 | fl.fl_ip_dport = inet->inet_dport; |
162 | fl.fl_ip_sport = inet->sport; | 162 | fl.fl_ip_sport = inet->inet_sport; |
163 | security_sk_classify_flow(sk, &fl); | 163 | security_sk_classify_flow(sk, &fl); |
164 | 164 | ||
165 | err = ip6_dst_lookup(sk, &dst, &fl); | 165 | err = ip6_dst_lookup(sk, &dst, &fl); |
@@ -510,11 +510,9 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk, | |||
510 | 510 | ||
511 | memcpy(newnp, np, sizeof(struct ipv6_pinfo)); | 511 | memcpy(newnp, np, sizeof(struct ipv6_pinfo)); |
512 | 512 | ||
513 | ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF), | 513 | ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr); |
514 | newinet->daddr); | ||
515 | 514 | ||
516 | ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF), | 515 | ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr); |
517 | newinet->saddr); | ||
518 | 516 | ||
519 | ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr); | 517 | ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr); |
520 | 518 | ||
@@ -642,7 +640,8 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk, | |||
642 | 640 | ||
643 | dccp_sync_mss(newsk, dst_mtu(dst)); | 641 | dccp_sync_mss(newsk, dst_mtu(dst)); |
644 | 642 | ||
645 | newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6; | 643 | newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6; |
644 | newinet->inet_rcv_saddr = LOOPBACK4_IPV6; | ||
646 | 645 | ||
647 | __inet6_hash(newsk); | 646 | __inet6_hash(newsk); |
648 | __inet_inherit_port(sk, newsk); | 647 | __inet_inherit_port(sk, newsk); |
@@ -970,12 +969,9 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr, | |||
970 | icsk->icsk_af_ops = &dccp_ipv6_af_ops; | 969 | icsk->icsk_af_ops = &dccp_ipv6_af_ops; |
971 | sk->sk_backlog_rcv = dccp_v6_do_rcv; | 970 | sk->sk_backlog_rcv = dccp_v6_do_rcv; |
972 | goto failure; | 971 | goto failure; |
973 | } else { | ||
974 | ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF), | ||
975 | inet->saddr); | ||
976 | ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF), | ||
977 | inet->rcv_saddr); | ||
978 | } | 972 | } |
973 | ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr); | ||
974 | ipv6_addr_set_v4mapped(inet->inet_rcv_saddr, &np->rcv_saddr); | ||
979 | 975 | ||
980 | return err; | 976 | return err; |
981 | } | 977 | } |
@@ -988,7 +984,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr, | |||
988 | ipv6_addr_copy(&fl.fl6_src, saddr ? saddr : &np->saddr); | 984 | ipv6_addr_copy(&fl.fl6_src, saddr ? saddr : &np->saddr); |
989 | fl.oif = sk->sk_bound_dev_if; | 985 | fl.oif = sk->sk_bound_dev_if; |
990 | fl.fl_ip_dport = usin->sin6_port; | 986 | fl.fl_ip_dport = usin->sin6_port; |
991 | fl.fl_ip_sport = inet->sport; | 987 | fl.fl_ip_sport = inet->inet_sport; |
992 | security_sk_classify_flow(sk, &fl); | 988 | security_sk_classify_flow(sk, &fl); |
993 | 989 | ||
994 | if (np->opt != NULL && np->opt->srcrt != NULL) { | 990 | if (np->opt != NULL && np->opt->srcrt != NULL) { |
@@ -1021,7 +1017,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr, | |||
1021 | 1017 | ||
1022 | /* set the source address */ | 1018 | /* set the source address */ |
1023 | ipv6_addr_copy(&np->saddr, saddr); | 1019 | ipv6_addr_copy(&np->saddr, saddr); |
1024 | inet->rcv_saddr = LOOPBACK4_IPV6; | 1020 | inet->inet_rcv_saddr = LOOPBACK4_IPV6; |
1025 | 1021 | ||
1026 | __ip6_dst_store(sk, dst, NULL, NULL); | 1022 | __ip6_dst_store(sk, dst, NULL, NULL); |
1027 | 1023 | ||
@@ -1030,7 +1026,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr, | |||
1030 | icsk->icsk_ext_hdr_len = (np->opt->opt_flen + | 1026 | icsk->icsk_ext_hdr_len = (np->opt->opt_flen + |
1031 | np->opt->opt_nflen); | 1027 | np->opt->opt_nflen); |
1032 | 1028 | ||
1033 | inet->dport = usin->sin6_port; | 1029 | inet->inet_dport = usin->sin6_port; |
1034 | 1030 | ||
1035 | dccp_set_state(sk, DCCP_REQUESTING); | 1031 | dccp_set_state(sk, DCCP_REQUESTING); |
1036 | err = inet6_hash_connect(&dccp_death_row, sk); | 1032 | err = inet6_hash_connect(&dccp_death_row, sk); |
@@ -1039,7 +1035,8 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr, | |||
1039 | 1035 | ||
1040 | dp->dccps_iss = secure_dccpv6_sequence_number(np->saddr.s6_addr32, | 1036 | dp->dccps_iss = secure_dccpv6_sequence_number(np->saddr.s6_addr32, |
1041 | np->daddr.s6_addr32, | 1037 | np->daddr.s6_addr32, |
1042 | inet->sport, inet->dport); | 1038 | inet->inet_sport, |
1039 | inet->inet_dport); | ||
1043 | err = dccp_connect(sk); | 1040 | err = dccp_connect(sk); |
1044 | if (err) | 1041 | if (err) |
1045 | goto late_failure; | 1042 | goto late_failure; |
@@ -1050,7 +1047,7 @@ late_failure: | |||
1050 | dccp_set_state(sk, DCCP_CLOSED); | 1047 | dccp_set_state(sk, DCCP_CLOSED); |
1051 | __sk_dst_reset(sk); | 1048 | __sk_dst_reset(sk); |
1052 | failure: | 1049 | failure: |
1053 | inet->dport = 0; | 1050 | inet->inet_dport = 0; |
1054 | sk->sk_route_caps = 0; | 1051 | sk->sk_route_caps = 0; |
1055 | return err; | 1052 | return err; |
1056 | } | 1053 | } |
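The dccp/ipv6.c hunks replace the open-coded construction of IPv4-mapped IPv6 addresses with the ipv6_addr_set_v4mapped() helper. Judging only from the code it replaces, the helper is expected to behave roughly like this sketch:

/* Sketch only: roughly equivalent to the open-coded form removed above. */
static inline void set_v4mapped_sketch(__be32 addr, struct in6_addr *v4mapped)
{
	ipv6_addr_set(v4mapped, 0, 0, htonl(0x0000FFFF), addr);
}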
diff --git a/net/dccp/output.c b/net/dccp/output.c index c96119fda688..d6bb753bf6ad 100644 --- a/net/dccp/output.c +++ b/net/dccp/output.c | |||
@@ -99,8 +99,8 @@ static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb) | |||
99 | /* Build DCCP header and checksum it. */ | 99 | /* Build DCCP header and checksum it. */ |
100 | dh = dccp_zeroed_hdr(skb, dccp_header_size); | 100 | dh = dccp_zeroed_hdr(skb, dccp_header_size); |
101 | dh->dccph_type = dcb->dccpd_type; | 101 | dh->dccph_type = dcb->dccpd_type; |
102 | dh->dccph_sport = inet->sport; | 102 | dh->dccph_sport = inet->inet_sport; |
103 | dh->dccph_dport = inet->dport; | 103 | dh->dccph_dport = inet->inet_dport; |
104 | dh->dccph_doff = (dccp_header_size + dcb->dccpd_opt_len) / 4; | 104 | dh->dccph_doff = (dccp_header_size + dcb->dccpd_opt_len) / 4; |
105 | dh->dccph_ccval = dcb->dccpd_ccval; | 105 | dh->dccph_ccval = dcb->dccpd_ccval; |
106 | dh->dccph_cscov = dp->dccps_pcslen; | 106 | dh->dccph_cscov = dp->dccps_pcslen; |
diff --git a/net/dccp/probe.c b/net/dccp/probe.c index 37731da41481..dc328425fa20 100644 --- a/net/dccp/probe.c +++ b/net/dccp/probe.c | |||
@@ -75,26 +75,25 @@ static int jdccp_sendmsg(struct kiocb *iocb, struct sock *sk, | |||
75 | struct msghdr *msg, size_t size) | 75 | struct msghdr *msg, size_t size) |
76 | { | 76 | { |
77 | const struct inet_sock *inet = inet_sk(sk); | 77 | const struct inet_sock *inet = inet_sk(sk); |
78 | struct ccid3_hc_tx_sock *hctx = NULL; | 78 | struct ccid3_hc_tx_sock *hc = NULL; |
79 | 79 | ||
80 | if (ccid_get_current_tx_ccid(dccp_sk(sk)) == DCCPC_CCID3) | 80 | if (ccid_get_current_tx_ccid(dccp_sk(sk)) == DCCPC_CCID3) |
81 | hctx = ccid3_hc_tx_sk(sk); | 81 | hc = ccid3_hc_tx_sk(sk); |
82 | 82 | ||
83 | if (port == 0 || ntohs(inet->dport) == port || | 83 | if (port == 0 || ntohs(inet->inet_dport) == port || |
84 | ntohs(inet->sport) == port) { | 84 | ntohs(inet->inet_sport) == port) { |
85 | if (hctx) | 85 | if (hc) |
86 | printl("%pI4:%u %pI4:%u %d %d %d %d %u " | 86 | printl("%pI4:%u %pI4:%u %d %d %d %d %u %llu %llu %d\n", |
87 | "%llu %llu %d\n", | 87 | &inet->inet_saddr, ntohs(inet->inet_sport), |
88 | &inet->saddr, ntohs(inet->sport), | 88 | &inet->inet_daddr, ntohs(inet->inet_dport), size, |
89 | &inet->daddr, ntohs(inet->dport), size, | 89 | hc->tx_s, hc->tx_rtt, hc->tx_p, |
90 | hctx->ccid3hctx_s, hctx->ccid3hctx_rtt, | 90 | hc->tx_x_calc, hc->tx_x_recv >> 6, |
91 | hctx->ccid3hctx_p, hctx->ccid3hctx_x_calc, | 91 | hc->tx_x >> 6, hc->tx_t_ipi); |
92 | hctx->ccid3hctx_x_recv >> 6, | ||
93 | hctx->ccid3hctx_x >> 6, hctx->ccid3hctx_t_ipi); | ||
94 | else | 92 | else |
95 | printl("%pI4:%u %pI4:%u %d\n", | 93 | printl("%pI4:%u %pI4:%u %d\n", |
96 | &inet->saddr, ntohs(inet->sport), | 94 | &inet->inet_saddr, ntohs(inet->inet_sport), |
97 | &inet->daddr, ntohs(inet->dport), size); | 95 | &inet->inet_daddr, ntohs(inet->inet_dport), |
96 | size); | ||
98 | } | 97 | } |
99 | 98 | ||
100 | jprobe_return(); | 99 | jprobe_return(); |
diff --git a/net/dccp/proto.c b/net/dccp/proto.c index a156319fd0ac..671cd1413d59 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c | |||
@@ -278,7 +278,7 @@ int dccp_disconnect(struct sock *sk, int flags) | |||
278 | sk->sk_send_head = NULL; | 278 | sk->sk_send_head = NULL; |
279 | } | 279 | } |
280 | 280 | ||
281 | inet->dport = 0; | 281 | inet->inet_dport = 0; |
282 | 282 | ||
283 | if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) | 283 | if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) |
284 | inet_reset_saddr(sk); | 284 | inet_reset_saddr(sk); |
@@ -290,7 +290,7 @@ int dccp_disconnect(struct sock *sk, int flags) | |||
290 | inet_csk_delack_init(sk); | 290 | inet_csk_delack_init(sk); |
291 | __sk_dst_reset(sk); | 291 | __sk_dst_reset(sk); |
292 | 292 | ||
293 | WARN_ON(inet->num && !icsk->icsk_bind_hash); | 293 | WARN_ON(inet->inet_num && !icsk->icsk_bind_hash); |
294 | 294 | ||
295 | sk->sk_error_report(sk); | 295 | sk->sk_error_report(sk); |
296 | return err; | 296 | return err; |
@@ -1060,11 +1060,12 @@ static int __init dccp_init(void) | |||
1060 | for (ehash_order = 0; (1UL << ehash_order) < goal; ehash_order++) | 1060 | for (ehash_order = 0; (1UL << ehash_order) < goal; ehash_order++) |
1061 | ; | 1061 | ; |
1062 | do { | 1062 | do { |
1063 | dccp_hashinfo.ehash_size = (1UL << ehash_order) * PAGE_SIZE / | 1063 | unsigned long hash_size = (1UL << ehash_order) * PAGE_SIZE / |
1064 | sizeof(struct inet_ehash_bucket); | 1064 | sizeof(struct inet_ehash_bucket); |
1065 | while (dccp_hashinfo.ehash_size & | 1065 | |
1066 | (dccp_hashinfo.ehash_size - 1)) | 1066 | while (hash_size & (hash_size - 1)) |
1067 | dccp_hashinfo.ehash_size--; | 1067 | hash_size--; |
1068 | dccp_hashinfo.ehash_mask = hash_size - 1; | ||
1068 | dccp_hashinfo.ehash = (struct inet_ehash_bucket *) | 1069 | dccp_hashinfo.ehash = (struct inet_ehash_bucket *) |
1069 | __get_free_pages(GFP_ATOMIC|__GFP_NOWARN, ehash_order); | 1070 | __get_free_pages(GFP_ATOMIC|__GFP_NOWARN, ehash_order); |
1070 | } while (!dccp_hashinfo.ehash && --ehash_order > 0); | 1071 | } while (!dccp_hashinfo.ehash && --ehash_order > 0); |
@@ -1074,7 +1075,7 @@ static int __init dccp_init(void) | |||
1074 | goto out_free_bind_bucket_cachep; | 1075 | goto out_free_bind_bucket_cachep; |
1075 | } | 1076 | } |
1076 | 1077 | ||
1077 | for (i = 0; i < dccp_hashinfo.ehash_size; i++) { | 1078 | for (i = 0; i <= dccp_hashinfo.ehash_mask; i++) { |
1078 | INIT_HLIST_NULLS_HEAD(&dccp_hashinfo.ehash[i].chain, i); | 1079 | INIT_HLIST_NULLS_HEAD(&dccp_hashinfo.ehash[i].chain, i); |
1079 | INIT_HLIST_NULLS_HEAD(&dccp_hashinfo.ehash[i].twchain, i); | 1080 | INIT_HLIST_NULLS_HEAD(&dccp_hashinfo.ehash[i].twchain, i); |
1080 | } | 1081 | } |
@@ -1153,7 +1154,7 @@ static void __exit dccp_fini(void) | |||
1153 | get_order(dccp_hashinfo.bhash_size * | 1154 | get_order(dccp_hashinfo.bhash_size * |
1154 | sizeof(struct inet_bind_hashbucket))); | 1155 | sizeof(struct inet_bind_hashbucket))); |
1155 | free_pages((unsigned long)dccp_hashinfo.ehash, | 1156 | free_pages((unsigned long)dccp_hashinfo.ehash, |
1156 | get_order(dccp_hashinfo.ehash_size * | 1157 | get_order((dccp_hashinfo.ehash_mask + 1) * |
1157 | sizeof(struct inet_ehash_bucket))); | 1158 | sizeof(struct inet_ehash_bucket))); |
1158 | inet_ehash_locks_free(&dccp_hashinfo); | 1159 | inet_ehash_locks_free(&dccp_hashinfo); |
1159 | kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep); | 1160 | kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep); |
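In dccp/proto.c the established-hash bookkeeping switches from storing the table size (ehash_size) to storing a power-of-two mask (ehash_mask): lookups can then use hash & ehash_mask, iteration runs while i <= ehash_mask, and the allocated length is recovered as ehash_mask + 1. A small sketch of the rounding logic used in dccp_init() above:

/* Sketch: round a bucket count down to a power of two and derive the mask. */
static unsigned long ehash_mask_sketch(unsigned long hash_size)
{
	while (hash_size & (hash_size - 1))	/* not yet a power of two */
		hash_size--;
	return hash_size - 1;			/* e.g. 5000 buckets -> 4096 -> mask 0xfff */
}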
diff --git a/net/dccp/timer.c b/net/dccp/timer.c index 162d1e683c39..bbfeb5eae46a 100644 --- a/net/dccp/timer.c +++ b/net/dccp/timer.c | |||
@@ -38,7 +38,7 @@ static int dccp_write_timeout(struct sock *sk) | |||
38 | 38 | ||
39 | if (sk->sk_state == DCCP_REQUESTING || sk->sk_state == DCCP_PARTOPEN) { | 39 | if (sk->sk_state == DCCP_REQUESTING || sk->sk_state == DCCP_PARTOPEN) { |
40 | if (icsk->icsk_retransmits != 0) | 40 | if (icsk->icsk_retransmits != 0) |
41 | dst_negative_advice(&sk->sk_dst_cache); | 41 | dst_negative_advice(&sk->sk_dst_cache, sk); |
42 | retry_until = icsk->icsk_syn_retries ? | 42 | retry_until = icsk->icsk_syn_retries ? |
43 | : sysctl_dccp_request_retries; | 43 | : sysctl_dccp_request_retries; |
44 | } else { | 44 | } else { |
@@ -63,7 +63,7 @@ static int dccp_write_timeout(struct sock *sk) | |||
63 | Golden words :-). | 63 | Golden words :-). |
64 | */ | 64 | */ |
65 | 65 | ||
66 | dst_negative_advice(&sk->sk_dst_cache); | 66 | dst_negative_advice(&sk->sk_dst_cache, sk); |
67 | } | 67 | } |
68 | 68 | ||
69 | retry_until = sysctl_dccp_retries2; | 69 | retry_until = sysctl_dccp_retries2; |
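dccp/timer.c (and dn_sendmsg() below) now pass the socket to dst_negative_advice() along with the cached dst pointer. The helper's definition is not part of this diff; as a hedged sketch, the call is assumed to keep the old behaviour of asking the dst's ops for a replacement entry, with the extra sk argument available for refreshing per-socket cached state.

/* Assumption: illustrative sketch of the new call shape, not the real header. */
static inline void dst_negative_advice_sketch(struct dst_entry **dst_p, struct sock *sk)
{
	struct dst_entry *dst = *dst_p;

	if (dst && dst->ops->negative_advice)
		*dst_p = dst->ops->negative_advice(dst);
	/* sk is presumably used to invalidate state cached on the socket */
}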
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c index 7a58c87baf17..664965c87e16 100644 --- a/net/decnet/af_decnet.c +++ b/net/decnet/af_decnet.c | |||
@@ -1955,7 +1955,7 @@ static int dn_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
1955 | } | 1955 | } |
1956 | 1956 | ||
1957 | if ((flags & MSG_TRYHARD) && sk->sk_dst_cache) | 1957 | if ((flags & MSG_TRYHARD) && sk->sk_dst_cache) |
1958 | dst_negative_advice(&sk->sk_dst_cache); | 1958 | dst_negative_advice(&sk->sk_dst_cache, sk); |
1959 | 1959 | ||
1960 | mss = scp->segsize_rem; | 1960 | mss = scp->segsize_rem; |
1961 | fctype = scp->services_rem & NSP_FC_MASK; | 1961 | fctype = scp->services_rem & NSP_FC_MASK; |
@@ -2325,7 +2325,7 @@ static const struct file_operations dn_socket_seq_fops = { | |||
2325 | }; | 2325 | }; |
2326 | #endif | 2326 | #endif |
2327 | 2327 | ||
2328 | static struct net_proto_family dn_family_ops = { | 2328 | static const struct net_proto_family dn_family_ops = { |
2329 | .family = AF_DECnet, | 2329 | .family = AF_DECnet, |
2330 | .create = dn_create, | 2330 | .create = dn_create, |
2331 | .owner = THIS_MODULE, | 2331 | .owner = THIS_MODULE, |
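The af_decnet.c hunk above turns dn_family_ops into a const structure; the same change is applied to econet, ieee802154 and PF_INET below. The structure only carries function pointers that the socket core reads, so the resulting pattern is simply:

/* Const-qualified protocol family ops; the core never writes to these. */
static const struct net_proto_family dn_family_ops = {
	.family = AF_DECnet,
	.create = dn_create,
	.owner  = THIS_MODULE,
};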
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c index 0e0254fd767d..5e9426a11c3e 100644 --- a/net/econet/af_econet.c +++ b/net/econet/af_econet.c | |||
@@ -457,15 +457,15 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
457 | iov[0].iov_len = size; | 457 | iov[0].iov_len = size; |
458 | for (i = 0; i < msg->msg_iovlen; i++) { | 458 | for (i = 0; i < msg->msg_iovlen; i++) { |
459 | void __user *base = msg->msg_iov[i].iov_base; | 459 | void __user *base = msg->msg_iov[i].iov_base; |
460 | size_t len = msg->msg_iov[i].iov_len; | 460 | size_t iov_len = msg->msg_iov[i].iov_len; |
461 | /* Check it now since we switch to KERNEL_DS later. */ | 461 | /* Check it now since we switch to KERNEL_DS later. */ |
462 | if (!access_ok(VERIFY_READ, base, len)) { | 462 | if (!access_ok(VERIFY_READ, base, iov_len)) { |
463 | mutex_unlock(&econet_mutex); | 463 | mutex_unlock(&econet_mutex); |
464 | return -EFAULT; | 464 | return -EFAULT; |
465 | } | 465 | } |
466 | iov[i+1].iov_base = base; | 466 | iov[i+1].iov_base = base; |
467 | iov[i+1].iov_len = len; | 467 | iov[i+1].iov_len = iov_len; |
468 | size += len; | 468 | size += iov_len; |
469 | } | 469 | } |
470 | 470 | ||
471 | /* Get a skbuff (no data, just holds our cb information) */ | 471 | /* Get a skbuff (no data, just holds our cb information) */ |
@@ -742,7 +742,7 @@ static int econet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg | |||
742 | return 0; | 742 | return 0; |
743 | } | 743 | } |
744 | 744 | ||
745 | static struct net_proto_family econet_family_ops = { | 745 | static const struct net_proto_family econet_family_ops = { |
746 | .family = PF_ECONET, | 746 | .family = PF_ECONET, |
747 | .create = econet_create, | 747 | .create = econet_create, |
748 | .owner = THIS_MODULE, | 748 | .owner = THIS_MODULE, |
diff --git a/net/ieee802154/af_ieee802154.c b/net/ieee802154/af_ieee802154.c index cd949d5e451b..309348fba72b 100644 --- a/net/ieee802154/af_ieee802154.c +++ b/net/ieee802154/af_ieee802154.c | |||
@@ -285,7 +285,7 @@ out: | |||
285 | return rc; | 285 | return rc; |
286 | } | 286 | } |
287 | 287 | ||
288 | static struct net_proto_family ieee802154_family_ops = { | 288 | static const struct net_proto_family ieee802154_family_ops = { |
289 | .family = PF_IEEE802154, | 289 | .family = PF_IEEE802154, |
290 | .create = ieee802154_create, | 290 | .create = ieee802154_create, |
291 | .owner = THIS_MODULE, | 291 | .owner = THIS_MODULE, |
diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c index a413b1bf4465..9aac5aee1575 100644 --- a/net/ieee802154/dgram.c +++ b/net/ieee802154/dgram.c | |||
@@ -303,7 +303,7 @@ static int dgram_recvmsg(struct kiocb *iocb, struct sock *sk, | |||
303 | if (err) | 303 | if (err) |
304 | goto done; | 304 | goto done; |
305 | 305 | ||
306 | sock_recv_timestamp(msg, sk, skb); | 306 | sock_recv_ts_and_drops(msg, sk, skb); |
307 | 307 | ||
308 | if (flags & MSG_TRUNC) | 308 | if (flags & MSG_TRUNC) |
309 | copied = skb->len; | 309 | copied = skb->len; |
@@ -318,7 +318,6 @@ out: | |||
318 | static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb) | 318 | static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb) |
319 | { | 319 | { |
320 | if (sock_queue_rcv_skb(sk, skb) < 0) { | 320 | if (sock_queue_rcv_skb(sk, skb) < 0) { |
321 | atomic_inc(&sk->sk_drops); | ||
322 | kfree_skb(skb); | 321 | kfree_skb(skb); |
323 | return NET_RX_DROP; | 322 | return NET_RX_DROP; |
324 | } | 323 | } |
diff --git a/net/ieee802154/raw.c b/net/ieee802154/raw.c index 30e74eee07d6..9c9b85c00033 100644 --- a/net/ieee802154/raw.c +++ b/net/ieee802154/raw.c | |||
@@ -191,7 +191,7 @@ static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
191 | if (err) | 191 | if (err) |
192 | goto done; | 192 | goto done; |
193 | 193 | ||
194 | sock_recv_timestamp(msg, sk, skb); | 194 | sock_recv_ts_and_drops(msg, sk, skb); |
195 | 195 | ||
196 | if (flags & MSG_TRUNC) | 196 | if (flags & MSG_TRUNC) |
197 | copied = skb->len; | 197 | copied = skb->len; |
@@ -206,7 +206,6 @@ out: | |||
206 | static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb) | 206 | static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb) |
207 | { | 207 | { |
208 | if (sock_queue_rcv_skb(sk, skb) < 0) { | 208 | if (sock_queue_rcv_skb(sk, skb) < 0) { |
209 | atomic_inc(&sk->sk_drops); | ||
210 | kfree_skb(skb); | 209 | kfree_skb(skb); |
211 | return NET_RX_DROP; | 210 | return NET_RX_DROP; |
212 | } | 211 | } |
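The ieee802154 datagram and raw receive paths above switch from sock_recv_timestamp() to sock_recv_ts_and_drops() and stop incrementing sk->sk_drops by hand, on the assumption that drop accounting now happens in the common receive-queue code. The resulting handler shape, as a short sketch:

/* Sketch of the simplified receive handler after the change. */
static int rcv_skb_sketch(struct sock *sk, struct sk_buff *skb)
{
	if (sock_queue_rcv_skb(sk, skb) < 0) {
		/* sk_drops is assumed to be bumped inside the queueing helper now */
		kfree_skb(skb);
		return NET_RX_DROP;
	}
	return NET_RX_SUCCESS;
}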
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 57737b8d1711..538e84d0bcba 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c | |||
@@ -174,12 +174,12 @@ static int inet_autobind(struct sock *sk) | |||
174 | /* We may need to bind the socket. */ | 174 | /* We may need to bind the socket. */ |
175 | lock_sock(sk); | 175 | lock_sock(sk); |
176 | inet = inet_sk(sk); | 176 | inet = inet_sk(sk); |
177 | if (!inet->num) { | 177 | if (!inet->inet_num) { |
178 | if (sk->sk_prot->get_port(sk, 0)) { | 178 | if (sk->sk_prot->get_port(sk, 0)) { |
179 | release_sock(sk); | 179 | release_sock(sk); |
180 | return -EAGAIN; | 180 | return -EAGAIN; |
181 | } | 181 | } |
182 | inet->sport = htons(inet->num); | 182 | inet->inet_sport = htons(inet->inet_num); |
183 | } | 183 | } |
184 | release_sock(sk); | 184 | release_sock(sk); |
185 | return 0; | 185 | return 0; |
@@ -354,7 +354,7 @@ lookup_protocol: | |||
354 | inet->is_icsk = (INET_PROTOSW_ICSK & answer_flags) != 0; | 354 | inet->is_icsk = (INET_PROTOSW_ICSK & answer_flags) != 0; |
355 | 355 | ||
356 | if (SOCK_RAW == sock->type) { | 356 | if (SOCK_RAW == sock->type) { |
357 | inet->num = protocol; | 357 | inet->inet_num = protocol; |
358 | if (IPPROTO_RAW == protocol) | 358 | if (IPPROTO_RAW == protocol) |
359 | inet->hdrincl = 1; | 359 | inet->hdrincl = 1; |
360 | } | 360 | } |
@@ -364,7 +364,7 @@ lookup_protocol: | |||
364 | else | 364 | else |
365 | inet->pmtudisc = IP_PMTUDISC_WANT; | 365 | inet->pmtudisc = IP_PMTUDISC_WANT; |
366 | 366 | ||
367 | inet->id = 0; | 367 | inet->inet_id = 0; |
368 | 368 | ||
369 | sock_init_data(sock, sk); | 369 | sock_init_data(sock, sk); |
370 | 370 | ||
@@ -381,13 +381,13 @@ lookup_protocol: | |||
381 | 381 | ||
382 | sk_refcnt_debug_inc(sk); | 382 | sk_refcnt_debug_inc(sk); |
383 | 383 | ||
384 | if (inet->num) { | 384 | if (inet->inet_num) { |
385 | /* It assumes that any protocol which allows | 385 | /* It assumes that any protocol which allows |
386 | * the user to assign a number at socket | 386 | * the user to assign a number at socket |
387 | * creation time automatically | 387 | * creation time automatically |
388 | * shares. | 388 | * shares. |
389 | */ | 389 | */ |
390 | inet->sport = htons(inet->num); | 390 | inet->inet_sport = htons(inet->inet_num); |
391 | /* Add to protocol hash chains. */ | 391 | /* Add to protocol hash chains. */ |
392 | sk->sk_prot->hash(sk); | 392 | sk->sk_prot->hash(sk); |
393 | } | 393 | } |
@@ -494,27 +494,27 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |||
494 | 494 | ||
495 | /* Check these errors (active socket, double bind). */ | 495 | /* Check these errors (active socket, double bind). */ |
496 | err = -EINVAL; | 496 | err = -EINVAL; |
497 | if (sk->sk_state != TCP_CLOSE || inet->num) | 497 | if (sk->sk_state != TCP_CLOSE || inet->inet_num) |
498 | goto out_release_sock; | 498 | goto out_release_sock; |
499 | 499 | ||
500 | inet->rcv_saddr = inet->saddr = addr->sin_addr.s_addr; | 500 | inet->inet_rcv_saddr = inet->inet_saddr = addr->sin_addr.s_addr; |
501 | if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST) | 501 | if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST) |
502 | inet->saddr = 0; /* Use device */ | 502 | inet->inet_saddr = 0; /* Use device */ |
503 | 503 | ||
504 | /* Make sure we are allowed to bind here. */ | 504 | /* Make sure we are allowed to bind here. */ |
505 | if (sk->sk_prot->get_port(sk, snum)) { | 505 | if (sk->sk_prot->get_port(sk, snum)) { |
506 | inet->saddr = inet->rcv_saddr = 0; | 506 | inet->inet_saddr = inet->inet_rcv_saddr = 0; |
507 | err = -EADDRINUSE; | 507 | err = -EADDRINUSE; |
508 | goto out_release_sock; | 508 | goto out_release_sock; |
509 | } | 509 | } |
510 | 510 | ||
511 | if (inet->rcv_saddr) | 511 | if (inet->inet_rcv_saddr) |
512 | sk->sk_userlocks |= SOCK_BINDADDR_LOCK; | 512 | sk->sk_userlocks |= SOCK_BINDADDR_LOCK; |
513 | if (snum) | 513 | if (snum) |
514 | sk->sk_userlocks |= SOCK_BINDPORT_LOCK; | 514 | sk->sk_userlocks |= SOCK_BINDPORT_LOCK; |
515 | inet->sport = htons(inet->num); | 515 | inet->inet_sport = htons(inet->inet_num); |
516 | inet->daddr = 0; | 516 | inet->inet_daddr = 0; |
517 | inet->dport = 0; | 517 | inet->inet_dport = 0; |
518 | sk_dst_reset(sk); | 518 | sk_dst_reset(sk); |
519 | err = 0; | 519 | err = 0; |
520 | out_release_sock: | 520 | out_release_sock: |
@@ -532,7 +532,7 @@ int inet_dgram_connect(struct socket *sock, struct sockaddr * uaddr, | |||
532 | if (uaddr->sa_family == AF_UNSPEC) | 532 | if (uaddr->sa_family == AF_UNSPEC) |
533 | return sk->sk_prot->disconnect(sk, flags); | 533 | return sk->sk_prot->disconnect(sk, flags); |
534 | 534 | ||
535 | if (!inet_sk(sk)->num && inet_autobind(sk)) | 535 | if (!inet_sk(sk)->inet_num && inet_autobind(sk)) |
536 | return -EAGAIN; | 536 | return -EAGAIN; |
537 | return sk->sk_prot->connect(sk, (struct sockaddr *)uaddr, addr_len); | 537 | return sk->sk_prot->connect(sk, (struct sockaddr *)uaddr, addr_len); |
538 | } | 538 | } |
@@ -685,21 +685,21 @@ int inet_getname(struct socket *sock, struct sockaddr *uaddr, | |||
685 | { | 685 | { |
686 | struct sock *sk = sock->sk; | 686 | struct sock *sk = sock->sk; |
687 | struct inet_sock *inet = inet_sk(sk); | 687 | struct inet_sock *inet = inet_sk(sk); |
688 | struct sockaddr_in *sin = (struct sockaddr_in *)uaddr; | 688 | DECLARE_SOCKADDR(struct sockaddr_in *, sin, uaddr); |
689 | 689 | ||
690 | sin->sin_family = AF_INET; | 690 | sin->sin_family = AF_INET; |
691 | if (peer) { | 691 | if (peer) { |
692 | if (!inet->dport || | 692 | if (!inet->inet_dport || |
693 | (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)) && | 693 | (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)) && |
694 | peer == 1)) | 694 | peer == 1)) |
695 | return -ENOTCONN; | 695 | return -ENOTCONN; |
696 | sin->sin_port = inet->dport; | 696 | sin->sin_port = inet->inet_dport; |
697 | sin->sin_addr.s_addr = inet->daddr; | 697 | sin->sin_addr.s_addr = inet->inet_daddr; |
698 | } else { | 698 | } else { |
699 | __be32 addr = inet->rcv_saddr; | 699 | __be32 addr = inet->inet_rcv_saddr; |
700 | if (!addr) | 700 | if (!addr) |
701 | addr = inet->saddr; | 701 | addr = inet->inet_saddr; |
702 | sin->sin_port = inet->sport; | 702 | sin->sin_port = inet->inet_sport; |
703 | sin->sin_addr.s_addr = addr; | 703 | sin->sin_addr.s_addr = addr; |
704 | } | 704 | } |
705 | memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); | 705 | memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); |
@@ -714,7 +714,7 @@ int inet_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, | |||
714 | struct sock *sk = sock->sk; | 714 | struct sock *sk = sock->sk; |
715 | 715 | ||
716 | /* We may need to bind the socket. */ | 716 | /* We may need to bind the socket. */ |
717 | if (!inet_sk(sk)->num && inet_autobind(sk)) | 717 | if (!inet_sk(sk)->inet_num && inet_autobind(sk)) |
718 | return -EAGAIN; | 718 | return -EAGAIN; |
719 | 719 | ||
720 | return sk->sk_prot->sendmsg(iocb, sk, msg, size); | 720 | return sk->sk_prot->sendmsg(iocb, sk, msg, size); |
@@ -728,7 +728,7 @@ static ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset, | |||
728 | struct sock *sk = sock->sk; | 728 | struct sock *sk = sock->sk; |
729 | 729 | ||
730 | /* We may need to bind the socket. */ | 730 | /* We may need to bind the socket. */ |
731 | if (!inet_sk(sk)->num && inet_autobind(sk)) | 731 | if (!inet_sk(sk)->inet_num && inet_autobind(sk)) |
732 | return -EAGAIN; | 732 | return -EAGAIN; |
733 | 733 | ||
734 | if (sk->sk_prot->sendpage) | 734 | if (sk->sk_prot->sendpage) |
@@ -931,7 +931,7 @@ static const struct proto_ops inet_sockraw_ops = { | |||
931 | #endif | 931 | #endif |
932 | }; | 932 | }; |
933 | 933 | ||
934 | static struct net_proto_family inet_family_ops = { | 934 | static const struct net_proto_family inet_family_ops = { |
935 | .family = PF_INET, | 935 | .family = PF_INET, |
936 | .create = inet_create, | 936 | .create = inet_create, |
937 | .owner = THIS_MODULE, | 937 | .owner = THIS_MODULE, |
@@ -1059,9 +1059,9 @@ static int inet_sk_reselect_saddr(struct sock *sk) | |||
1059 | struct inet_sock *inet = inet_sk(sk); | 1059 | struct inet_sock *inet = inet_sk(sk); |
1060 | int err; | 1060 | int err; |
1061 | struct rtable *rt; | 1061 | struct rtable *rt; |
1062 | __be32 old_saddr = inet->saddr; | 1062 | __be32 old_saddr = inet->inet_saddr; |
1063 | __be32 new_saddr; | 1063 | __be32 new_saddr; |
1064 | __be32 daddr = inet->daddr; | 1064 | __be32 daddr = inet->inet_daddr; |
1065 | 1065 | ||
1066 | if (inet->opt && inet->opt->srr) | 1066 | if (inet->opt && inet->opt->srr) |
1067 | daddr = inet->opt->faddr; | 1067 | daddr = inet->opt->faddr; |
@@ -1071,7 +1071,7 @@ static int inet_sk_reselect_saddr(struct sock *sk) | |||
1071 | RT_CONN_FLAGS(sk), | 1071 | RT_CONN_FLAGS(sk), |
1072 | sk->sk_bound_dev_if, | 1072 | sk->sk_bound_dev_if, |
1073 | sk->sk_protocol, | 1073 | sk->sk_protocol, |
1074 | inet->sport, inet->dport, sk, 0); | 1074 | inet->inet_sport, inet->inet_dport, sk, 0); |
1075 | if (err) | 1075 | if (err) |
1076 | return err; | 1076 | return err; |
1077 | 1077 | ||
@@ -1087,7 +1087,7 @@ static int inet_sk_reselect_saddr(struct sock *sk) | |||
1087 | __func__, &old_saddr, &new_saddr); | 1087 | __func__, &old_saddr, &new_saddr); |
1088 | } | 1088 | } |
1089 | 1089 | ||
1090 | inet->saddr = inet->rcv_saddr = new_saddr; | 1090 | inet->inet_saddr = inet->inet_rcv_saddr = new_saddr; |
1091 | 1091 | ||
1092 | /* | 1092 | /* |
1093 | * XXX The only one ugly spot where we need to | 1093 | * XXX The only one ugly spot where we need to |
@@ -1113,7 +1113,7 @@ int inet_sk_rebuild_header(struct sock *sk) | |||
1113 | return 0; | 1113 | return 0; |
1114 | 1114 | ||
1115 | /* Reroute. */ | 1115 | /* Reroute. */ |
1116 | daddr = inet->daddr; | 1116 | daddr = inet->inet_daddr; |
1117 | if (inet->opt && inet->opt->srr) | 1117 | if (inet->opt && inet->opt->srr) |
1118 | daddr = inet->opt->faddr; | 1118 | daddr = inet->opt->faddr; |
1119 | { | 1119 | { |
@@ -1123,7 +1123,7 @@ int inet_sk_rebuild_header(struct sock *sk) | |||
1123 | .nl_u = { | 1123 | .nl_u = { |
1124 | .ip4_u = { | 1124 | .ip4_u = { |
1125 | .daddr = daddr, | 1125 | .daddr = daddr, |
1126 | .saddr = inet->saddr, | 1126 | .saddr = inet->inet_saddr, |
1127 | .tos = RT_CONN_FLAGS(sk), | 1127 | .tos = RT_CONN_FLAGS(sk), |
1128 | }, | 1128 | }, |
1129 | }, | 1129 | }, |
@@ -1131,8 +1131,8 @@ int inet_sk_rebuild_header(struct sock *sk) | |||
1131 | .flags = inet_sk_flowi_flags(sk), | 1131 | .flags = inet_sk_flowi_flags(sk), |
1132 | .uli_u = { | 1132 | .uli_u = { |
1133 | .ports = { | 1133 | .ports = { |
1134 | .sport = inet->sport, | 1134 | .sport = inet->inet_sport, |
1135 | .dport = inet->dport, | 1135 | .dport = inet->inet_dport, |
1136 | }, | 1136 | }, |
1137 | }, | 1137 | }, |
1138 | }; | 1138 | }; |
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c index 5c662703eb1e..d07b0c1dd350 100644 --- a/net/ipv4/ah4.c +++ b/net/ipv4/ah4.c | |||
@@ -1,3 +1,4 @@ | |||
1 | #include <crypto/hash.h> | ||
1 | #include <linux/err.h> | 2 | #include <linux/err.h> |
2 | #include <linux/module.h> | 3 | #include <linux/module.h> |
3 | #include <net/ip.h> | 4 | #include <net/ip.h> |
@@ -5,10 +6,67 @@ | |||
5 | #include <net/ah.h> | 6 | #include <net/ah.h> |
6 | #include <linux/crypto.h> | 7 | #include <linux/crypto.h> |
7 | #include <linux/pfkeyv2.h> | 8 | #include <linux/pfkeyv2.h> |
8 | #include <linux/spinlock.h> | 9 | #include <linux/scatterlist.h> |
9 | #include <net/icmp.h> | 10 | #include <net/icmp.h> |
10 | #include <net/protocol.h> | 11 | #include <net/protocol.h> |
11 | 12 | ||
13 | struct ah_skb_cb { | ||
14 | struct xfrm_skb_cb xfrm; | ||
15 | void *tmp; | ||
16 | }; | ||
17 | |||
18 | #define AH_SKB_CB(__skb) ((struct ah_skb_cb *)&((__skb)->cb[0])) | ||
19 | |||
20 | static void *ah_alloc_tmp(struct crypto_ahash *ahash, int nfrags, | ||
21 | unsigned int size) | ||
22 | { | ||
23 | unsigned int len; | ||
24 | |||
25 | len = size + crypto_ahash_digestsize(ahash) + | ||
26 | (crypto_ahash_alignmask(ahash) & | ||
27 | ~(crypto_tfm_ctx_alignment() - 1)); | ||
28 | |||
29 | len = ALIGN(len, crypto_tfm_ctx_alignment()); | ||
30 | |||
31 | len += sizeof(struct ahash_request) + crypto_ahash_reqsize(ahash); | ||
32 | len = ALIGN(len, __alignof__(struct scatterlist)); | ||
33 | |||
34 | len += sizeof(struct scatterlist) * nfrags; | ||
35 | |||
36 | return kmalloc(len, GFP_ATOMIC); | ||
37 | } | ||
38 | |||
39 | static inline u8 *ah_tmp_auth(void *tmp, unsigned int offset) | ||
40 | { | ||
41 | return tmp + offset; | ||
42 | } | ||
43 | |||
44 | static inline u8 *ah_tmp_icv(struct crypto_ahash *ahash, void *tmp, | ||
45 | unsigned int offset) | ||
46 | { | ||
47 | return PTR_ALIGN((u8 *)tmp + offset, crypto_ahash_alignmask(ahash) + 1); | ||
48 | } | ||
49 | |||
50 | static inline struct ahash_request *ah_tmp_req(struct crypto_ahash *ahash, | ||
51 | u8 *icv) | ||
52 | { | ||
53 | struct ahash_request *req; | ||
54 | |||
55 | req = (void *)PTR_ALIGN(icv + crypto_ahash_digestsize(ahash), | ||
56 | crypto_tfm_ctx_alignment()); | ||
57 | |||
58 | ahash_request_set_tfm(req, ahash); | ||
59 | |||
60 | return req; | ||
61 | } | ||
62 | |||
63 | static inline struct scatterlist *ah_req_sg(struct crypto_ahash *ahash, | ||
64 | struct ahash_request *req) | ||
65 | { | ||
66 | return (void *)ALIGN((unsigned long)(req + 1) + | ||
67 | crypto_ahash_reqsize(ahash), | ||
68 | __alignof__(struct scatterlist)); | ||
69 | } | ||
12 | 70 | ||
13 | /* Clear mutable options and find final destination to substitute | 71 | /* Clear mutable options and find final destination to substitute |
14 | * into IP header for icv calculation. Options are already checked | 72 | * into IP header for icv calculation. Options are already checked |
@@ -54,20 +112,72 @@ static int ip_clear_mutable_options(struct iphdr *iph, __be32 *daddr) | |||
54 | return 0; | 112 | return 0; |
55 | } | 113 | } |
56 | 114 | ||
115 | static void ah_output_done(struct crypto_async_request *base, int err) | ||
116 | { | ||
117 | u8 *icv; | ||
118 | struct iphdr *iph; | ||
119 | struct sk_buff *skb = base->data; | ||
120 | struct xfrm_state *x = skb_dst(skb)->xfrm; | ||
121 | struct ah_data *ahp = x->data; | ||
122 | struct iphdr *top_iph = ip_hdr(skb); | ||
123 | struct ip_auth_hdr *ah = ip_auth_hdr(skb); | ||
124 | int ihl = ip_hdrlen(skb); | ||
125 | |||
126 | iph = AH_SKB_CB(skb)->tmp; | ||
127 | icv = ah_tmp_icv(ahp->ahash, iph, ihl); | ||
128 | memcpy(ah->auth_data, icv, ahp->icv_trunc_len); | ||
129 | |||
130 | top_iph->tos = iph->tos; | ||
131 | top_iph->ttl = iph->ttl; | ||
132 | top_iph->frag_off = iph->frag_off; | ||
133 | if (top_iph->ihl != 5) { | ||
134 | top_iph->daddr = iph->daddr; | ||
135 | memcpy(top_iph+1, iph+1, top_iph->ihl*4 - sizeof(struct iphdr)); | ||
136 | } | ||
137 | |||
138 | err = ah->nexthdr; | ||
139 | |||
140 | kfree(AH_SKB_CB(skb)->tmp); | ||
141 | xfrm_output_resume(skb, err); | ||
142 | } | ||
143 | |||
57 | static int ah_output(struct xfrm_state *x, struct sk_buff *skb) | 144 | static int ah_output(struct xfrm_state *x, struct sk_buff *skb) |
58 | { | 145 | { |
59 | int err; | 146 | int err; |
147 | int nfrags; | ||
148 | int ihl; | ||
149 | u8 *icv; | ||
150 | struct sk_buff *trailer; | ||
151 | struct crypto_ahash *ahash; | ||
152 | struct ahash_request *req; | ||
153 | struct scatterlist *sg; | ||
60 | struct iphdr *iph, *top_iph; | 154 | struct iphdr *iph, *top_iph; |
61 | struct ip_auth_hdr *ah; | 155 | struct ip_auth_hdr *ah; |
62 | struct ah_data *ahp; | 156 | struct ah_data *ahp; |
63 | union { | 157 | |
64 | struct iphdr iph; | 158 | ahp = x->data; |
65 | char buf[60]; | 159 | ahash = ahp->ahash; |
66 | } tmp_iph; | 160 | |
161 | if ((err = skb_cow_data(skb, 0, &trailer)) < 0) | ||
162 | goto out; | ||
163 | nfrags = err; | ||
67 | 164 | ||
68 | skb_push(skb, -skb_network_offset(skb)); | 165 | skb_push(skb, -skb_network_offset(skb)); |
166 | ah = ip_auth_hdr(skb); | ||
167 | ihl = ip_hdrlen(skb); | ||
168 | |||
169 | err = -ENOMEM; | ||
170 | iph = ah_alloc_tmp(ahash, nfrags, ihl); | ||
171 | if (!iph) | ||
172 | goto out; | ||
173 | |||
174 | icv = ah_tmp_icv(ahash, iph, ihl); | ||
175 | req = ah_tmp_req(ahash, icv); | ||
176 | sg = ah_req_sg(ahash, req); | ||
177 | |||
178 | memset(ah->auth_data, 0, ahp->icv_trunc_len); | ||
179 | |||
69 | top_iph = ip_hdr(skb); | 180 | top_iph = ip_hdr(skb); |
70 | iph = &tmp_iph.iph; | ||
71 | 181 | ||
72 | iph->tos = top_iph->tos; | 182 | iph->tos = top_iph->tos; |
73 | iph->ttl = top_iph->ttl; | 183 | iph->ttl = top_iph->ttl; |
@@ -78,10 +188,9 @@ static int ah_output(struct xfrm_state *x, struct sk_buff *skb) | |||
78 | memcpy(iph+1, top_iph+1, top_iph->ihl*4 - sizeof(struct iphdr)); | 188 | memcpy(iph+1, top_iph+1, top_iph->ihl*4 - sizeof(struct iphdr)); |
79 | err = ip_clear_mutable_options(top_iph, &top_iph->daddr); | 189 | err = ip_clear_mutable_options(top_iph, &top_iph->daddr); |
80 | if (err) | 190 | if (err) |
81 | goto error; | 191 | goto out_free; |
82 | } | 192 | } |
83 | 193 | ||
84 | ah = ip_auth_hdr(skb); | ||
85 | ah->nexthdr = *skb_mac_header(skb); | 194 | ah->nexthdr = *skb_mac_header(skb); |
86 | *skb_mac_header(skb) = IPPROTO_AH; | 195 | *skb_mac_header(skb) = IPPROTO_AH; |
87 | 196 | ||
@@ -91,20 +200,31 @@ static int ah_output(struct xfrm_state *x, struct sk_buff *skb) | |||
91 | top_iph->ttl = 0; | 200 | top_iph->ttl = 0; |
92 | top_iph->check = 0; | 201 | top_iph->check = 0; |
93 | 202 | ||
94 | ahp = x->data; | ||
95 | ah->hdrlen = (XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2; | 203 | ah->hdrlen = (XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2; |
96 | 204 | ||
97 | ah->reserved = 0; | 205 | ah->reserved = 0; |
98 | ah->spi = x->id.spi; | 206 | ah->spi = x->id.spi; |
99 | ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output); | 207 | ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output); |
100 | 208 | ||
101 | spin_lock_bh(&x->lock); | 209 | sg_init_table(sg, nfrags); |
102 | err = ah_mac_digest(ahp, skb, ah->auth_data); | 210 | skb_to_sgvec(skb, sg, 0, skb->len); |
103 | memcpy(ah->auth_data, ahp->work_icv, ahp->icv_trunc_len); | ||
104 | spin_unlock_bh(&x->lock); | ||
105 | 211 | ||
106 | if (err) | 212 | ahash_request_set_crypt(req, sg, icv, skb->len); |
107 | goto error; | 213 | ahash_request_set_callback(req, 0, ah_output_done, skb); |
214 | |||
215 | AH_SKB_CB(skb)->tmp = iph; | ||
216 | |||
217 | err = crypto_ahash_digest(req); | ||
218 | if (err) { | ||
219 | if (err == -EINPROGRESS) | ||
220 | goto out; | ||
221 | |||
222 | if (err == -EBUSY) | ||
223 | err = NET_XMIT_DROP; | ||
224 | goto out_free; | ||
225 | } | ||
226 | |||
227 | memcpy(ah->auth_data, icv, ahp->icv_trunc_len); | ||
108 | 228 | ||
109 | top_iph->tos = iph->tos; | 229 | top_iph->tos = iph->tos; |
110 | top_iph->ttl = iph->ttl; | 230 | top_iph->ttl = iph->ttl; |
@@ -114,28 +234,67 @@ static int ah_output(struct xfrm_state *x, struct sk_buff *skb) | |||
114 | memcpy(top_iph+1, iph+1, top_iph->ihl*4 - sizeof(struct iphdr)); | 234 | memcpy(top_iph+1, iph+1, top_iph->ihl*4 - sizeof(struct iphdr)); |
115 | } | 235 | } |
116 | 236 | ||
117 | err = 0; | 237 | out_free: |
118 | 238 | kfree(iph); | |
119 | error: | 239 | out: |
120 | return err; | 240 | return err; |
121 | } | 241 | } |
122 | 242 | ||
243 | static void ah_input_done(struct crypto_async_request *base, int err) | ||
244 | { | ||
245 | u8 *auth_data; | ||
246 | u8 *icv; | ||
247 | struct iphdr *work_iph; | ||
248 | struct sk_buff *skb = base->data; | ||
249 | struct xfrm_state *x = xfrm_input_state(skb); | ||
250 | struct ah_data *ahp = x->data; | ||
251 | struct ip_auth_hdr *ah = ip_auth_hdr(skb); | ||
252 | int ihl = ip_hdrlen(skb); | ||
253 | int ah_hlen = (ah->hdrlen + 2) << 2; | ||
254 | |||
255 | work_iph = AH_SKB_CB(skb)->tmp; | ||
256 | auth_data = ah_tmp_auth(work_iph, ihl); | ||
257 | icv = ah_tmp_icv(ahp->ahash, auth_data, ahp->icv_trunc_len); | ||
258 | |||
259 | err = memcmp(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG: 0; | ||
260 | if (err) | ||
261 | goto out; | ||
262 | |||
263 | skb->network_header += ah_hlen; | ||
264 | memcpy(skb_network_header(skb), work_iph, ihl); | ||
265 | __skb_pull(skb, ah_hlen + ihl); | ||
266 | skb_set_transport_header(skb, -ihl); | ||
267 | |||
268 | err = ah->nexthdr; | ||
269 | out: | ||
270 | kfree(AH_SKB_CB(skb)->tmp); | ||
271 | xfrm_input_resume(skb, err); | ||
272 | } | ||
273 | |||
123 | static int ah_input(struct xfrm_state *x, struct sk_buff *skb) | 274 | static int ah_input(struct xfrm_state *x, struct sk_buff *skb) |
124 | { | 275 | { |
125 | int ah_hlen; | 276 | int ah_hlen; |
126 | int ihl; | 277 | int ihl; |
127 | int nexthdr; | 278 | int nexthdr; |
128 | int err = -EINVAL; | 279 | int nfrags; |
129 | struct iphdr *iph; | 280 | u8 *auth_data; |
281 | u8 *icv; | ||
282 | struct sk_buff *trailer; | ||
283 | struct crypto_ahash *ahash; | ||
284 | struct ahash_request *req; | ||
285 | struct scatterlist *sg; | ||
286 | struct iphdr *iph, *work_iph; | ||
130 | struct ip_auth_hdr *ah; | 287 | struct ip_auth_hdr *ah; |
131 | struct ah_data *ahp; | 288 | struct ah_data *ahp; |
132 | char work_buf[60]; | 289 | int err = -ENOMEM; |
133 | 290 | ||
134 | if (!pskb_may_pull(skb, sizeof(*ah))) | 291 | if (!pskb_may_pull(skb, sizeof(*ah))) |
135 | goto out; | 292 | goto out; |
136 | 293 | ||
137 | ah = (struct ip_auth_hdr *)skb->data; | 294 | ah = (struct ip_auth_hdr *)skb->data; |
138 | ahp = x->data; | 295 | ahp = x->data; |
296 | ahash = ahp->ahash; | ||
297 | |||
139 | nexthdr = ah->nexthdr; | 298 | nexthdr = ah->nexthdr; |
140 | ah_hlen = (ah->hdrlen + 2) << 2; | 299 | ah_hlen = (ah->hdrlen + 2) << 2; |
141 | 300 | ||
@@ -156,9 +315,24 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb) | |||
156 | 315 | ||
157 | ah = (struct ip_auth_hdr *)skb->data; | 316 | ah = (struct ip_auth_hdr *)skb->data; |
158 | iph = ip_hdr(skb); | 317 | iph = ip_hdr(skb); |
318 | ihl = ip_hdrlen(skb); | ||
319 | |||
320 | if ((err = skb_cow_data(skb, 0, &trailer)) < 0) | ||
321 | goto out; | ||
322 | nfrags = err; | ||
323 | |||
324 | work_iph = ah_alloc_tmp(ahash, nfrags, ihl + ahp->icv_trunc_len); | ||
325 | if (!work_iph) | ||
326 | goto out; | ||
327 | |||
328 | auth_data = ah_tmp_auth(work_iph, ihl); | ||
329 | icv = ah_tmp_icv(ahash, auth_data, ahp->icv_trunc_len); | ||
330 | req = ah_tmp_req(ahash, icv); | ||
331 | sg = ah_req_sg(ahash, req); | ||
159 | 332 | ||
160 | ihl = skb->data - skb_network_header(skb); | 333 | memcpy(work_iph, iph, ihl); |
161 | memcpy(work_buf, iph, ihl); | 334 | memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len); |
335 | memset(ah->auth_data, 0, ahp->icv_trunc_len); | ||
162 | 336 | ||
163 | iph->ttl = 0; | 337 | iph->ttl = 0; |
164 | iph->tos = 0; | 338 | iph->tos = 0; |
@@ -166,35 +340,44 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb) | |||
166 | iph->check = 0; | 340 | iph->check = 0; |
167 | if (ihl > sizeof(*iph)) { | 341 | if (ihl > sizeof(*iph)) { |
168 | __be32 dummy; | 342 | __be32 dummy; |
169 | if (ip_clear_mutable_options(iph, &dummy)) | 343 | err = ip_clear_mutable_options(iph, &dummy); |
170 | goto out; | 344 | if (err) |
345 | goto out_free; | ||
171 | } | 346 | } |
172 | 347 | ||
173 | spin_lock(&x->lock); | 348 | skb_push(skb, ihl); |
174 | { | ||
175 | u8 auth_data[MAX_AH_AUTH_LEN]; | ||
176 | 349 | ||
177 | memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len); | 350 | sg_init_table(sg, nfrags); |
178 | skb_push(skb, ihl); | 351 | skb_to_sgvec(skb, sg, 0, skb->len); |
179 | err = ah_mac_digest(ahp, skb, ah->auth_data); | 352 | |
180 | if (err) | 353 | ahash_request_set_crypt(req, sg, icv, skb->len); |
181 | goto unlock; | 354 | ahash_request_set_callback(req, 0, ah_input_done, skb); |
182 | if (memcmp(ahp->work_icv, auth_data, ahp->icv_trunc_len)) | 355 | |
183 | err = -EBADMSG; | 356 | AH_SKB_CB(skb)->tmp = work_iph; |
357 | |||
358 | err = crypto_ahash_digest(req); | ||
359 | if (err) { | ||
360 | if (err == -EINPROGRESS) | ||
361 | goto out; | ||
362 | |||
363 | if (err == -EBUSY) | ||
364 | err = NET_XMIT_DROP; | ||
365 | goto out_free; | ||
184 | } | 366 | } |
185 | unlock: | ||
186 | spin_unlock(&x->lock); | ||
187 | 367 | ||
368 | err = memcmp(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG: 0; | ||
188 | if (err) | 369 | if (err) |
189 | goto out; | 370 | goto out_free; |
190 | 371 | ||
191 | skb->network_header += ah_hlen; | 372 | skb->network_header += ah_hlen; |
192 | memcpy(skb_network_header(skb), work_buf, ihl); | 373 | memcpy(skb_network_header(skb), work_iph, ihl); |
193 | skb->transport_header = skb->network_header; | ||
194 | __skb_pull(skb, ah_hlen + ihl); | 374 | __skb_pull(skb, ah_hlen + ihl); |
375 | skb_set_transport_header(skb, -ihl); | ||
195 | 376 | ||
196 | return nexthdr; | 377 | err = nexthdr; |
197 | 378 | ||
379 | out_free: | ||
380 | kfree (work_iph); | ||
198 | out: | 381 | out: |
199 | return err; | 382 | return err; |
200 | } | 383 | } |
@@ -222,7 +405,7 @@ static int ah_init_state(struct xfrm_state *x) | |||
222 | { | 405 | { |
223 | struct ah_data *ahp = NULL; | 406 | struct ah_data *ahp = NULL; |
224 | struct xfrm_algo_desc *aalg_desc; | 407 | struct xfrm_algo_desc *aalg_desc; |
225 | struct crypto_hash *tfm; | 408 | struct crypto_ahash *ahash; |
226 | 409 | ||
227 | if (!x->aalg) | 410 | if (!x->aalg) |
228 | goto error; | 411 | goto error; |
@@ -231,31 +414,31 @@ static int ah_init_state(struct xfrm_state *x) | |||
231 | goto error; | 414 | goto error; |
232 | 415 | ||
233 | ahp = kzalloc(sizeof(*ahp), GFP_KERNEL); | 416 | ahp = kzalloc(sizeof(*ahp), GFP_KERNEL); |
234 | if (ahp == NULL) | 417 | if (!ahp) |
235 | return -ENOMEM; | 418 | return -ENOMEM; |
236 | 419 | ||
237 | tfm = crypto_alloc_hash(x->aalg->alg_name, 0, CRYPTO_ALG_ASYNC); | 420 | ahash = crypto_alloc_ahash(x->aalg->alg_name, 0, 0); |
238 | if (IS_ERR(tfm)) | 421 | if (IS_ERR(ahash)) |
239 | goto error; | 422 | goto error; |
240 | 423 | ||
241 | ahp->tfm = tfm; | 424 | ahp->ahash = ahash; |
242 | if (crypto_hash_setkey(tfm, x->aalg->alg_key, | 425 | if (crypto_ahash_setkey(ahash, x->aalg->alg_key, |
243 | (x->aalg->alg_key_len + 7) / 8)) | 426 | (x->aalg->alg_key_len + 7) / 8)) |
244 | goto error; | 427 | goto error; |
245 | 428 | ||
246 | /* | 429 | /* |
247 | * Lookup the algorithm description maintained by xfrm_algo, | 430 | * Lookup the algorithm description maintained by xfrm_algo, |
248 | * verify crypto transform properties, and store information | 431 | * verify crypto transform properties, and store information |
249 | * we need for AH processing. This lookup cannot fail here | 432 | * we need for AH processing. This lookup cannot fail here |
250 | * after a successful crypto_alloc_hash(). | 433 | * after a successful crypto_alloc_ahash(). |
251 | */ | 434 | */ |
252 | aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0); | 435 | aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0); |
253 | BUG_ON(!aalg_desc); | 436 | BUG_ON(!aalg_desc); |
254 | 437 | ||
255 | if (aalg_desc->uinfo.auth.icv_fullbits/8 != | 438 | if (aalg_desc->uinfo.auth.icv_fullbits/8 != |
256 | crypto_hash_digestsize(tfm)) { | 439 | crypto_ahash_digestsize(ahash)) { |
257 | printk(KERN_INFO "AH: %s digestsize %u != %hu\n", | 440 | printk(KERN_INFO "AH: %s digestsize %u != %hu\n", |
258 | x->aalg->alg_name, crypto_hash_digestsize(tfm), | 441 | x->aalg->alg_name, crypto_ahash_digestsize(ahash), |
259 | aalg_desc->uinfo.auth.icv_fullbits/8); | 442 | aalg_desc->uinfo.auth.icv_fullbits/8); |
260 | goto error; | 443 | goto error; |
261 | } | 444 | } |
@@ -265,10 +448,6 @@ static int ah_init_state(struct xfrm_state *x) | |||
265 | 448 | ||
266 | BUG_ON(ahp->icv_trunc_len > MAX_AH_AUTH_LEN); | 449 | BUG_ON(ahp->icv_trunc_len > MAX_AH_AUTH_LEN); |
267 | 450 | ||
268 | ahp->work_icv = kmalloc(ahp->icv_full_len, GFP_KERNEL); | ||
269 | if (!ahp->work_icv) | ||
270 | goto error; | ||
271 | |||
272 | x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) + | 451 | x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) + |
273 | ahp->icv_trunc_len); | 452 | ahp->icv_trunc_len); |
274 | if (x->props.mode == XFRM_MODE_TUNNEL) | 453 | if (x->props.mode == XFRM_MODE_TUNNEL) |
@@ -279,8 +458,7 @@ static int ah_init_state(struct xfrm_state *x) | |||
279 | 458 | ||
280 | error: | 459 | error: |
281 | if (ahp) { | 460 | if (ahp) { |
282 | kfree(ahp->work_icv); | 461 | crypto_free_ahash(ahp->ahash); |
283 | crypto_free_hash(ahp->tfm); | ||
284 | kfree(ahp); | 462 | kfree(ahp); |
285 | } | 463 | } |
286 | return -EINVAL; | 464 | return -EINVAL; |
@@ -293,8 +471,7 @@ static void ah_destroy(struct xfrm_state *x) | |||
293 | if (!ahp) | 471 | if (!ahp) |
294 | return; | 472 | return; |
295 | 473 | ||
296 | kfree(ahp->work_icv); | 474 | crypto_free_ahash(ahp->ahash); |
297 | crypto_free_hash(ahp->tfm); | ||
298 | kfree(ahp); | 475 | kfree(ahp); |
299 | } | 476 | } |
300 | 477 | ||
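The ah4.c conversion is the largest change here: the synchronous crypto_hash digest, previously computed under x->lock into a preallocated work_icv buffer, becomes an asynchronous crypto_ahash request. ah_alloc_tmp() carves a per-packet scratch buffer that holds a copy of the IP header (plus the saved ICV on input), an aligned ICV area, the ahash_request and the scatterlist; ah_tmp_icv(), ah_tmp_req() and ah_req_sg() simply walk that layout with the proper alignment. When crypto_ahash_digest() returns -EINPROGRESS the packet is finished later in ah_output_done()/ah_input_done(), which also free the scratch buffer. A compressed sketch of the submission pattern used above, with labels and variables as in the surrounding diff:

	/* Map the whole packet and hand it to the async hash. */
	sg_init_table(sg, nfrags);
	skb_to_sgvec(skb, sg, 0, skb->len);

	ahash_request_set_crypt(req, sg, icv, skb->len);
	ahash_request_set_callback(req, 0, ah_output_done, skb);
	AH_SKB_CB(skb)->tmp = iph;	/* the completion callback frees this */

	err = crypto_ahash_digest(req);
	if (err == -EINPROGRESS)
		goto out;		/* finished later in ah_output_done() */
	if (err == -EBUSY)
		err = NET_XMIT_DROP;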
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c index 039cc1ffe977..1e029dc75455 100644 --- a/net/ipv4/cipso_ipv4.c +++ b/net/ipv4/cipso_ipv4.c | |||
@@ -2017,7 +2017,7 @@ req_setattr_failure: | |||
2017 | * values on failure. | 2017 | * values on failure. |
2018 | * | 2018 | * |
2019 | */ | 2019 | */ |
2020 | int cipso_v4_delopt(struct ip_options **opt_ptr) | 2020 | static int cipso_v4_delopt(struct ip_options **opt_ptr) |
2021 | { | 2021 | { |
2022 | int hdr_delta = 0; | 2022 | int hdr_delta = 0; |
2023 | struct ip_options *opt = *opt_ptr; | 2023 | struct ip_options *opt = *opt_ptr; |
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c index 5e6c5a0f3fde..fb2465811b48 100644 --- a/net/ipv4/datagram.c +++ b/net/ipv4/datagram.c | |||
@@ -39,7 +39,7 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
39 | sk_dst_reset(sk); | 39 | sk_dst_reset(sk); |
40 | 40 | ||
41 | oif = sk->sk_bound_dev_if; | 41 | oif = sk->sk_bound_dev_if; |
42 | saddr = inet->saddr; | 42 | saddr = inet->inet_saddr; |
43 | if (ipv4_is_multicast(usin->sin_addr.s_addr)) { | 43 | if (ipv4_is_multicast(usin->sin_addr.s_addr)) { |
44 | if (!oif) | 44 | if (!oif) |
45 | oif = inet->mc_index; | 45 | oif = inet->mc_index; |
@@ -49,7 +49,7 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
49 | err = ip_route_connect(&rt, usin->sin_addr.s_addr, saddr, | 49 | err = ip_route_connect(&rt, usin->sin_addr.s_addr, saddr, |
50 | RT_CONN_FLAGS(sk), oif, | 50 | RT_CONN_FLAGS(sk), oif, |
51 | sk->sk_protocol, | 51 | sk->sk_protocol, |
52 | inet->sport, usin->sin_port, sk, 1); | 52 | inet->inet_sport, usin->sin_port, sk, 1); |
53 | if (err) { | 53 | if (err) { |
54 | if (err == -ENETUNREACH) | 54 | if (err == -ENETUNREACH) |
55 | IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES); | 55 | IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES); |
@@ -60,14 +60,14 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
60 | ip_rt_put(rt); | 60 | ip_rt_put(rt); |
61 | return -EACCES; | 61 | return -EACCES; |
62 | } | 62 | } |
63 | if (!inet->saddr) | 63 | if (!inet->inet_saddr) |
64 | inet->saddr = rt->rt_src; /* Update source address */ | 64 | inet->inet_saddr = rt->rt_src; /* Update source address */ |
65 | if (!inet->rcv_saddr) | 65 | if (!inet->inet_rcv_saddr) |
66 | inet->rcv_saddr = rt->rt_src; | 66 | inet->inet_rcv_saddr = rt->rt_src; |
67 | inet->daddr = rt->rt_dst; | 67 | inet->inet_daddr = rt->rt_dst; |
68 | inet->dport = usin->sin_port; | 68 | inet->inet_dport = usin->sin_port; |
69 | sk->sk_state = TCP_ESTABLISHED; | 69 | sk->sk_state = TCP_ESTABLISHED; |
70 | inet->id = jiffies; | 70 | inet->inet_id = jiffies; |
71 | 71 | ||
72 | sk_dst_set(sk, &rt->u.dst); | 72 | sk_dst_set(sk, &rt->u.dst); |
73 | return(0); | 73 | return(0); |
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index e2f950592566..f73dbed0f0d7 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c | |||
@@ -125,7 +125,7 @@ void fib_select_default(struct net *net, | |||
125 | #endif | 125 | #endif |
126 | tb = fib_get_table(net, table); | 126 | tb = fib_get_table(net, table); |
127 | if (FIB_RES_GW(*res) && FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK) | 127 | if (FIB_RES_GW(*res) && FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK) |
128 | tb->tb_select_default(tb, flp, res); | 128 | fib_table_select_default(tb, flp, res); |
129 | } | 129 | } |
130 | 130 | ||
131 | static void fib_flush(struct net *net) | 131 | static void fib_flush(struct net *net) |
@@ -139,7 +139,7 @@ static void fib_flush(struct net *net) | |||
139 | for (h = 0; h < FIB_TABLE_HASHSZ; h++) { | 139 | for (h = 0; h < FIB_TABLE_HASHSZ; h++) { |
140 | head = &net->ipv4.fib_table_hash[h]; | 140 | head = &net->ipv4.fib_table_hash[h]; |
141 | hlist_for_each_entry(tb, node, head, tb_hlist) | 141 | hlist_for_each_entry(tb, node, head, tb_hlist) |
142 | flushed += tb->tb_flush(tb); | 142 | flushed += fib_table_flush(tb); |
143 | } | 143 | } |
144 | 144 | ||
145 | if (flushed) | 145 | if (flushed) |
@@ -162,7 +162,7 @@ struct net_device * ip_dev_find(struct net *net, __be32 addr) | |||
162 | #endif | 162 | #endif |
163 | 163 | ||
164 | local_table = fib_get_table(net, RT_TABLE_LOCAL); | 164 | local_table = fib_get_table(net, RT_TABLE_LOCAL); |
165 | if (!local_table || local_table->tb_lookup(local_table, &fl, &res)) | 165 | if (!local_table || fib_table_lookup(local_table, &fl, &res)) |
166 | return NULL; | 166 | return NULL; |
167 | if (res.type != RTN_LOCAL) | 167 | if (res.type != RTN_LOCAL) |
168 | goto out; | 168 | goto out; |
@@ -200,7 +200,7 @@ static inline unsigned __inet_dev_addr_type(struct net *net, | |||
200 | local_table = fib_get_table(net, RT_TABLE_LOCAL); | 200 | local_table = fib_get_table(net, RT_TABLE_LOCAL); |
201 | if (local_table) { | 201 | if (local_table) { |
202 | ret = RTN_UNICAST; | 202 | ret = RTN_UNICAST; |
203 | if (!local_table->tb_lookup(local_table, &fl, &res)) { | 203 | if (!fib_table_lookup(local_table, &fl, &res)) { |
204 | if (!dev || dev == res.fi->fib_dev) | 204 | if (!dev || dev == res.fi->fib_dev) |
205 | ret = res.type; | 205 | ret = res.type; |
206 | fib_res_put(&res); | 206 | fib_res_put(&res); |
@@ -473,13 +473,13 @@ int ip_rt_ioctl(struct net *net, unsigned int cmd, void __user *arg) | |||
473 | if (cmd == SIOCDELRT) { | 473 | if (cmd == SIOCDELRT) { |
474 | tb = fib_get_table(net, cfg.fc_table); | 474 | tb = fib_get_table(net, cfg.fc_table); |
475 | if (tb) | 475 | if (tb) |
476 | err = tb->tb_delete(tb, &cfg); | 476 | err = fib_table_delete(tb, &cfg); |
477 | else | 477 | else |
478 | err = -ESRCH; | 478 | err = -ESRCH; |
479 | } else { | 479 | } else { |
480 | tb = fib_new_table(net, cfg.fc_table); | 480 | tb = fib_new_table(net, cfg.fc_table); |
481 | if (tb) | 481 | if (tb) |
482 | err = tb->tb_insert(tb, &cfg); | 482 | err = fib_table_insert(tb, &cfg); |
483 | else | 483 | else |
484 | err = -ENOBUFS; | 484 | err = -ENOBUFS; |
485 | } | 485 | } |
@@ -594,7 +594,7 @@ static int inet_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *ar | |||
594 | goto errout; | 594 | goto errout; |
595 | } | 595 | } |
596 | 596 | ||
597 | err = tb->tb_delete(tb, &cfg); | 597 | err = fib_table_delete(tb, &cfg); |
598 | errout: | 598 | errout: |
599 | return err; | 599 | return err; |
600 | } | 600 | } |
@@ -616,7 +616,7 @@ static int inet_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *ar | |||
616 | goto errout; | 616 | goto errout; |
617 | } | 617 | } |
618 | 618 | ||
619 | err = tb->tb_insert(tb, &cfg); | 619 | err = fib_table_insert(tb, &cfg); |
620 | errout: | 620 | errout: |
621 | return err; | 621 | return err; |
622 | } | 622 | } |
@@ -647,7 +647,7 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb) | |||
647 | if (dumped) | 647 | if (dumped) |
648 | memset(&cb->args[2], 0, sizeof(cb->args) - | 648 | memset(&cb->args[2], 0, sizeof(cb->args) - |
649 | 2 * sizeof(cb->args[0])); | 649 | 2 * sizeof(cb->args[0])); |
650 | if (tb->tb_dump(tb, skb, cb) < 0) | 650 | if (fib_table_dump(tb, skb, cb) < 0) |
651 | goto out; | 651 | goto out; |
652 | dumped = 1; | 652 | dumped = 1; |
653 | next: | 653 | next: |
@@ -701,9 +701,9 @@ static void fib_magic(int cmd, int type, __be32 dst, int dst_len, struct in_ifad | |||
701 | cfg.fc_scope = RT_SCOPE_HOST; | 701 | cfg.fc_scope = RT_SCOPE_HOST; |
702 | 702 | ||
703 | if (cmd == RTM_NEWROUTE) | 703 | if (cmd == RTM_NEWROUTE) |
704 | tb->tb_insert(tb, &cfg); | 704 | fib_table_insert(tb, &cfg); |
705 | else | 705 | else |
706 | tb->tb_delete(tb, &cfg); | 706 | fib_table_delete(tb, &cfg); |
707 | } | 707 | } |
708 | 708 | ||
709 | void fib_add_ifaddr(struct in_ifaddr *ifa) | 709 | void fib_add_ifaddr(struct in_ifaddr *ifa) |
@@ -832,7 +832,7 @@ static void nl_fib_lookup(struct fib_result_nl *frn, struct fib_table *tb ) | |||
832 | local_bh_disable(); | 832 | local_bh_disable(); |
833 | 833 | ||
834 | frn->tb_id = tb->tb_id; | 834 | frn->tb_id = tb->tb_id; |
835 | frn->err = tb->tb_lookup(tb, &fl, &res); | 835 | frn->err = fib_table_lookup(tb, &fl, &res); |
836 | 836 | ||
837 | if (!frn->err) { | 837 | if (!frn->err) { |
838 | frn->prefixlen = res.prefixlen; | 838 | frn->prefixlen = res.prefixlen; |
@@ -1009,7 +1009,7 @@ static void __net_exit ip_fib_net_exit(struct net *net) | |||
1009 | head = &net->ipv4.fib_table_hash[i]; | 1009 | head = &net->ipv4.fib_table_hash[i]; |
1010 | hlist_for_each_entry_safe(tb, node, tmp, head, tb_hlist) { | 1010 | hlist_for_each_entry_safe(tb, node, tmp, head, tb_hlist) { |
1011 | hlist_del(node); | 1011 | hlist_del(node); |
1012 | tb->tb_flush(tb); | 1012 | fib_table_flush(tb); |
1013 | kfree(tb); | 1013 | kfree(tb); |
1014 | } | 1014 | } |
1015 | } | 1015 | } |
diff --git a/net/ipv4/fib_hash.c b/net/ipv4/fib_hash.c index ecd39454235c..14972017b9c2 100644 --- a/net/ipv4/fib_hash.c +++ b/net/ipv4/fib_hash.c | |||
@@ -242,8 +242,8 @@ fn_new_zone(struct fn_hash *table, int z) | |||
242 | return fz; | 242 | return fz; |
243 | } | 243 | } |
244 | 244 | ||
245 | static int | 245 | int fib_table_lookup(struct fib_table *tb, |
246 | fn_hash_lookup(struct fib_table *tb, const struct flowi *flp, struct fib_result *res) | 246 | const struct flowi *flp, struct fib_result *res) |
247 | { | 247 | { |
248 | int err; | 248 | int err; |
249 | struct fn_zone *fz; | 249 | struct fn_zone *fz; |
@@ -274,8 +274,8 @@ out: | |||
274 | return err; | 274 | return err; |
275 | } | 275 | } |
276 | 276 | ||
277 | static void | 277 | void fib_table_select_default(struct fib_table *tb, |
278 | fn_hash_select_default(struct fib_table *tb, const struct flowi *flp, struct fib_result *res) | 278 | const struct flowi *flp, struct fib_result *res) |
279 | { | 279 | { |
280 | int order, last_idx; | 280 | int order, last_idx; |
281 | struct hlist_node *node; | 281 | struct hlist_node *node; |
@@ -366,7 +366,7 @@ static struct fib_node *fib_find_node(struct fn_zone *fz, __be32 key) | |||
366 | return NULL; | 366 | return NULL; |
367 | } | 367 | } |
368 | 368 | ||
369 | static int fn_hash_insert(struct fib_table *tb, struct fib_config *cfg) | 369 | int fib_table_insert(struct fib_table *tb, struct fib_config *cfg) |
370 | { | 370 | { |
371 | struct fn_hash *table = (struct fn_hash *) tb->tb_data; | 371 | struct fn_hash *table = (struct fn_hash *) tb->tb_data; |
372 | struct fib_node *new_f = NULL; | 372 | struct fib_node *new_f = NULL; |
@@ -544,8 +544,7 @@ out: | |||
544 | return err; | 544 | return err; |
545 | } | 545 | } |
546 | 546 | ||
547 | 547 | int fib_table_delete(struct fib_table *tb, struct fib_config *cfg) | |
548 | static int fn_hash_delete(struct fib_table *tb, struct fib_config *cfg) | ||
549 | { | 548 | { |
550 | struct fn_hash *table = (struct fn_hash *)tb->tb_data; | 549 | struct fn_hash *table = (struct fn_hash *)tb->tb_data; |
551 | struct fib_node *f; | 550 | struct fib_node *f; |
@@ -662,7 +661,7 @@ static int fn_flush_list(struct fn_zone *fz, int idx) | |||
662 | return found; | 661 | return found; |
663 | } | 662 | } |
664 | 663 | ||
665 | static int fn_hash_flush(struct fib_table *tb) | 664 | int fib_table_flush(struct fib_table *tb) |
666 | { | 665 | { |
667 | struct fn_hash *table = (struct fn_hash *) tb->tb_data; | 666 | struct fn_hash *table = (struct fn_hash *) tb->tb_data; |
668 | struct fn_zone *fz; | 667 | struct fn_zone *fz; |
@@ -743,7 +742,8 @@ fn_hash_dump_zone(struct sk_buff *skb, struct netlink_callback *cb, | |||
743 | return skb->len; | 742 | return skb->len; |
744 | } | 743 | } |
745 | 744 | ||
746 | static int fn_hash_dump(struct fib_table *tb, struct sk_buff *skb, struct netlink_callback *cb) | 745 | int fib_table_dump(struct fib_table *tb, struct sk_buff *skb, |
746 | struct netlink_callback *cb) | ||
747 | { | 747 | { |
748 | int m, s_m; | 748 | int m, s_m; |
749 | struct fn_zone *fz; | 749 | struct fn_zone *fz; |
@@ -787,12 +787,7 @@ struct fib_table *fib_hash_table(u32 id) | |||
787 | 787 | ||
788 | tb->tb_id = id; | 788 | tb->tb_id = id; |
789 | tb->tb_default = -1; | 789 | tb->tb_default = -1; |
790 | tb->tb_lookup = fn_hash_lookup; | 790 | |
791 | tb->tb_insert = fn_hash_insert; | ||
792 | tb->tb_delete = fn_hash_delete; | ||
793 | tb->tb_flush = fn_hash_flush; | ||
794 | tb->tb_select_default = fn_hash_select_default; | ||
795 | tb->tb_dump = fn_hash_dump; | ||
796 | memset(tb->tb_data, 0, sizeof(struct fn_hash)); | 791 | memset(tb->tb_data, 0, sizeof(struct fn_hash)); |
797 | return tb; | 792 | return tb; |
798 | } | 793 | } |
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c index 92d9d97ec5e3..835262c2b867 100644 --- a/net/ipv4/fib_rules.c +++ b/net/ipv4/fib_rules.c | |||
@@ -94,7 +94,7 @@ static int fib4_rule_action(struct fib_rule *rule, struct flowi *flp, | |||
94 | if ((tbl = fib_get_table(rule->fr_net, rule->table)) == NULL) | 94 | if ((tbl = fib_get_table(rule->fr_net, rule->table)) == NULL) |
95 | goto errout; | 95 | goto errout; |
96 | 96 | ||
97 | err = tbl->tb_lookup(tbl, flp, (struct fib_result *) arg->result); | 97 | err = fib_table_lookup(tbl, flp, (struct fib_result *) arg->result); |
98 | if (err > 0) | 98 | if (err > 0) |
99 | err = -EAGAIN; | 99 | err = -EAGAIN; |
100 | errout: | 100 | errout: |
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 291bdf50a21f..af5d89792860 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c | |||
@@ -1174,7 +1174,7 @@ done: | |||
1174 | /* | 1174 | /* |
1175 | * Caller must hold RTNL. | 1175 | * Caller must hold RTNL. |
1176 | */ | 1176 | */ |
1177 | static int fn_trie_insert(struct fib_table *tb, struct fib_config *cfg) | 1177 | int fib_table_insert(struct fib_table *tb, struct fib_config *cfg) |
1178 | { | 1178 | { |
1179 | struct trie *t = (struct trie *) tb->tb_data; | 1179 | struct trie *t = (struct trie *) tb->tb_data; |
1180 | struct fib_alias *fa, *new_fa; | 1180 | struct fib_alias *fa, *new_fa; |
@@ -1373,8 +1373,8 @@ static int check_leaf(struct trie *t, struct leaf *l, | |||
1373 | return 1; | 1373 | return 1; |
1374 | } | 1374 | } |
1375 | 1375 | ||
1376 | static int fn_trie_lookup(struct fib_table *tb, const struct flowi *flp, | 1376 | int fib_table_lookup(struct fib_table *tb, const struct flowi *flp, |
1377 | struct fib_result *res) | 1377 | struct fib_result *res) |
1378 | { | 1378 | { |
1379 | struct trie *t = (struct trie *) tb->tb_data; | 1379 | struct trie *t = (struct trie *) tb->tb_data; |
1380 | int ret; | 1380 | int ret; |
@@ -1595,7 +1595,7 @@ static void trie_leaf_remove(struct trie *t, struct leaf *l) | |||
1595 | /* | 1595 | /* |
1596 | * Caller must hold RTNL. | 1596 | * Caller must hold RTNL. |
1597 | */ | 1597 | */ |
1598 | static int fn_trie_delete(struct fib_table *tb, struct fib_config *cfg) | 1598 | int fib_table_delete(struct fib_table *tb, struct fib_config *cfg) |
1599 | { | 1599 | { |
1600 | struct trie *t = (struct trie *) tb->tb_data; | 1600 | struct trie *t = (struct trie *) tb->tb_data; |
1601 | u32 key, mask; | 1601 | u32 key, mask; |
@@ -1786,7 +1786,7 @@ static struct leaf *trie_leafindex(struct trie *t, int index) | |||
1786 | /* | 1786 | /* |
1787 | * Caller must hold RTNL. | 1787 | * Caller must hold RTNL. |
1788 | */ | 1788 | */ |
1789 | static int fn_trie_flush(struct fib_table *tb) | 1789 | int fib_table_flush(struct fib_table *tb) |
1790 | { | 1790 | { |
1791 | struct trie *t = (struct trie *) tb->tb_data; | 1791 | struct trie *t = (struct trie *) tb->tb_data; |
1792 | struct leaf *l, *ll = NULL; | 1792 | struct leaf *l, *ll = NULL; |
@@ -1807,9 +1807,9 @@ static int fn_trie_flush(struct fib_table *tb) | |||
1807 | return found; | 1807 | return found; |
1808 | } | 1808 | } |
1809 | 1809 | ||
1810 | static void fn_trie_select_default(struct fib_table *tb, | 1810 | void fib_table_select_default(struct fib_table *tb, |
1811 | const struct flowi *flp, | 1811 | const struct flowi *flp, |
1812 | struct fib_result *res) | 1812 | struct fib_result *res) |
1813 | { | 1813 | { |
1814 | struct trie *t = (struct trie *) tb->tb_data; | 1814 | struct trie *t = (struct trie *) tb->tb_data; |
1815 | int order, last_idx; | 1815 | int order, last_idx; |
@@ -1952,8 +1952,8 @@ static int fn_trie_dump_leaf(struct leaf *l, struct fib_table *tb, | |||
1952 | return skb->len; | 1952 | return skb->len; |
1953 | } | 1953 | } |
1954 | 1954 | ||
1955 | static int fn_trie_dump(struct fib_table *tb, struct sk_buff *skb, | 1955 | int fib_table_dump(struct fib_table *tb, struct sk_buff *skb, |
1956 | struct netlink_callback *cb) | 1956 | struct netlink_callback *cb) |
1957 | { | 1957 | { |
1958 | struct leaf *l; | 1958 | struct leaf *l; |
1959 | struct trie *t = (struct trie *) tb->tb_data; | 1959 | struct trie *t = (struct trie *) tb->tb_data; |
@@ -2020,12 +2020,6 @@ struct fib_table *fib_hash_table(u32 id) | |||
2020 | 2020 | ||
2021 | tb->tb_id = id; | 2021 | tb->tb_id = id; |
2022 | tb->tb_default = -1; | 2022 | tb->tb_default = -1; |
2023 | tb->tb_lookup = fn_trie_lookup; | ||
2024 | tb->tb_insert = fn_trie_insert; | ||
2025 | tb->tb_delete = fn_trie_delete; | ||
2026 | tb->tb_flush = fn_trie_flush; | ||
2027 | tb->tb_select_default = fn_trie_select_default; | ||
2028 | tb->tb_dump = fn_trie_dump; | ||
2029 | 2023 | ||
2030 | t = (struct trie *) tb->tb_data; | 2024 | t = (struct trie *) tb->tb_data; |
2031 | memset(t, 0, sizeof(*t)); | 2025 | memset(t, 0, sizeof(*t)); |
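With the per-table ops pointers dropped from both backends, struct fib_table itself can shrink to little more than its id and the backend-private data area. A sketch of the reduced structure, assuming no other fields are touched by this series:

/* assumed shape of struct fib_table after dropping the tb_* ops */
struct fib_table {
        struct hlist_node tb_hlist;
        u32               tb_id;
        int               tb_default;
        unsigned char     tb_data[0];   /* struct fn_hash or struct trie */
};

Both fib_hash.c and fib_trie.c keep exporting fib_hash_table() and the fib_table_*() entry points under the same names; CONFIG_IP_FIB_HASH and CONFIG_IP_FIB_TRIE are a mutually exclusive Kconfig choice, so exactly one set of definitions is ever linked in.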
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 5bc13fe816d1..84adb5754c96 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c | |||
@@ -1165,6 +1165,10 @@ static int __net_init icmp_sk_init(struct net *net) | |||
1165 | sk->sk_sndbuf = | 1165 | sk->sk_sndbuf = |
1166 | (2 * ((64 * 1024) + sizeof(struct sk_buff))); | 1166 | (2 * ((64 * 1024) + sizeof(struct sk_buff))); |
1167 | 1167 | ||
1168 | /* | ||
1169 | * Speedup sock_wfree() | ||
1170 | */ | ||
1171 | sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); | ||
1168 | inet_sk(sk)->pmtudisc = IP_PMTUDISC_DONT; | 1172 | inet_sk(sk)->pmtudisc = IP_PMTUDISC_DONT; |
1169 | } | 1173 | } |
1170 | 1174 | ||
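Setting SOCK_USE_WRITE_QUEUE on the per-cpu ICMP sockets lets skb destruction skip the write-space wakeup machinery that these sockets never use. For reference, a paraphrase of the sock_wfree() fast path this enables (net/core/sock.c, not part of this diff):

void sock_wfree(struct sk_buff *skb)
{
        struct sock *sk = skb->sk;
        unsigned int len = skb->truesize;

        if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
                /* keep a reference on sk_wmem_alloc across sk_write_space() */
                atomic_sub(len - 1, &sk->sk_wmem_alloc);
                sk->sk_write_space(sk);
                len = 1;
        }
        /* release the remaining wmem charge; free the sock if it hits zero */
        if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
                __sk_free(sk);
}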
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 537731b3bcb3..26fb50e91311 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c | |||
@@ -358,6 +358,7 @@ struct dst_entry *inet_csk_route_req(struct sock *sk, | |||
358 | const struct inet_request_sock *ireq = inet_rsk(req); | 358 | const struct inet_request_sock *ireq = inet_rsk(req); |
359 | struct ip_options *opt = inet_rsk(req)->opt; | 359 | struct ip_options *opt = inet_rsk(req)->opt; |
360 | struct flowi fl = { .oif = sk->sk_bound_dev_if, | 360 | struct flowi fl = { .oif = sk->sk_bound_dev_if, |
361 | .mark = sk->sk_mark, | ||
361 | .nl_u = { .ip4_u = | 362 | .nl_u = { .ip4_u = |
362 | { .daddr = ((opt && opt->srr) ? | 363 | { .daddr = ((opt && opt->srr) ? |
363 | opt->faddr : | 364 | opt->faddr : |
@@ -367,7 +368,7 @@ struct dst_entry *inet_csk_route_req(struct sock *sk, | |||
367 | .proto = sk->sk_protocol, | 368 | .proto = sk->sk_protocol, |
368 | .flags = inet_sk_flowi_flags(sk), | 369 | .flags = inet_sk_flowi_flags(sk), |
369 | .uli_u = { .ports = | 370 | .uli_u = { .ports = |
370 | { .sport = inet_sk(sk)->sport, | 371 | { .sport = inet_sk(sk)->inet_sport, |
371 | .dport = ireq->rmt_port } } }; | 372 | .dport = ireq->rmt_port } } }; |
372 | struct net *net = sock_net(sk); | 373 | struct net *net = sock_net(sk); |
373 | 374 | ||
@@ -574,9 +575,9 @@ struct sock *inet_csk_clone(struct sock *sk, const struct request_sock *req, | |||
574 | newsk->sk_state = TCP_SYN_RECV; | 575 | newsk->sk_state = TCP_SYN_RECV; |
575 | newicsk->icsk_bind_hash = NULL; | 576 | newicsk->icsk_bind_hash = NULL; |
576 | 577 | ||
577 | inet_sk(newsk)->dport = inet_rsk(req)->rmt_port; | 578 | inet_sk(newsk)->inet_dport = inet_rsk(req)->rmt_port; |
578 | inet_sk(newsk)->num = ntohs(inet_rsk(req)->loc_port); | 579 | inet_sk(newsk)->inet_num = ntohs(inet_rsk(req)->loc_port); |
579 | inet_sk(newsk)->sport = inet_rsk(req)->loc_port; | 580 | inet_sk(newsk)->inet_sport = inet_rsk(req)->loc_port; |
580 | newsk->sk_write_space = sk_stream_write_space; | 581 | newsk->sk_write_space = sk_stream_write_space; |
581 | 582 | ||
582 | newicsk->icsk_retransmits = 0; | 583 | newicsk->icsk_retransmits = 0; |
@@ -607,8 +608,8 @@ void inet_csk_destroy_sock(struct sock *sk) | |||
607 | /* It cannot be in hash table! */ | 608 | /* It cannot be in hash table! */ |
608 | WARN_ON(!sk_unhashed(sk)); | 609 | WARN_ON(!sk_unhashed(sk)); |
609 | 610 | ||
610 | /* If it has not 0 inet_sk(sk)->num, it must be bound */ | 611 | /* If it has not 0 inet_sk(sk)->inet_num, it must be bound */ |
611 | WARN_ON(inet_sk(sk)->num && !inet_csk(sk)->icsk_bind_hash); | 612 | WARN_ON(inet_sk(sk)->inet_num && !inet_csk(sk)->icsk_bind_hash); |
612 | 613 | ||
613 | sk->sk_prot->destroy(sk); | 614 | sk->sk_prot->destroy(sk); |
614 | 615 | ||
@@ -643,8 +644,8 @@ int inet_csk_listen_start(struct sock *sk, const int nr_table_entries) | |||
643 | * after validation is complete. | 644 | * after validation is complete. |
644 | */ | 645 | */ |
645 | sk->sk_state = TCP_LISTEN; | 646 | sk->sk_state = TCP_LISTEN; |
646 | if (!sk->sk_prot->get_port(sk, inet->num)) { | 647 | if (!sk->sk_prot->get_port(sk, inet->inet_num)) { |
647 | inet->sport = htons(inet->num); | 648 | inet->inet_sport = htons(inet->inet_num); |
648 | 649 | ||
649 | sk_dst_reset(sk); | 650 | sk_dst_reset(sk); |
650 | sk->sk_prot->hash(sk); | 651 | sk->sk_prot->hash(sk); |
@@ -720,8 +721,8 @@ void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr) | |||
720 | const struct inet_sock *inet = inet_sk(sk); | 721 | const struct inet_sock *inet = inet_sk(sk); |
721 | 722 | ||
722 | sin->sin_family = AF_INET; | 723 | sin->sin_family = AF_INET; |
723 | sin->sin_addr.s_addr = inet->daddr; | 724 | sin->sin_addr.s_addr = inet->inet_daddr; |
724 | sin->sin_port = inet->dport; | 725 | sin->sin_port = inet->inet_dport; |
725 | } | 726 | } |
726 | 727 | ||
727 | EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr); | 728 | EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr); |
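The switch from inet->num, sport, dport, saddr, daddr and rcv_saddr to the inet_-prefixed names in this file (and the ones that follow) is a mechanical rename of struct inet_sock fields, apparently in preparation for moving the lookup keys toward struct sock_common. A sketch of the renamed fields this diff assumes in include/net/inet_sock.h:

struct inet_sock {
        struct sock   sk;
        /* ... */
        __be32        inet_daddr;      /* was daddr     */
        __be32        inet_rcv_saddr;  /* was rcv_saddr */
        __be16        inet_dport;      /* was dport     */
        __u16         inet_num;        /* was num       */
        __be32        inet_saddr;      /* was saddr     */
        __be16        inet_sport;      /* was sport     */
        /* ... */
};

The one behavioural change in this file is the new .mark = sk->sk_mark in inet_csk_route_req(), which makes the SYN-ACK route lookup honour SO_MARK like the rest of the socket's traffic.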
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index a706a47f4dbb..bdb78dd180ce 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c | |||
@@ -116,10 +116,10 @@ static int inet_csk_diag_fill(struct sock *sk, | |||
116 | r->id.idiag_cookie[0] = (u32)(unsigned long)sk; | 116 | r->id.idiag_cookie[0] = (u32)(unsigned long)sk; |
117 | r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1); | 117 | r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1); |
118 | 118 | ||
119 | r->id.idiag_sport = inet->sport; | 119 | r->id.idiag_sport = inet->inet_sport; |
120 | r->id.idiag_dport = inet->dport; | 120 | r->id.idiag_dport = inet->inet_dport; |
121 | r->id.idiag_src[0] = inet->rcv_saddr; | 121 | r->id.idiag_src[0] = inet->inet_rcv_saddr; |
122 | r->id.idiag_dst[0] = inet->daddr; | 122 | r->id.idiag_dst[0] = inet->inet_daddr; |
123 | 123 | ||
124 | #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) | 124 | #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) |
125 | if (r->idiag_family == AF_INET6) { | 125 | if (r->idiag_family == AF_INET6) { |
@@ -504,11 +504,11 @@ static int inet_csk_diag_dump(struct sock *sk, | |||
504 | } else | 504 | } else |
505 | #endif | 505 | #endif |
506 | { | 506 | { |
507 | entry.saddr = &inet->rcv_saddr; | 507 | entry.saddr = &inet->inet_rcv_saddr; |
508 | entry.daddr = &inet->daddr; | 508 | entry.daddr = &inet->inet_daddr; |
509 | } | 509 | } |
510 | entry.sport = inet->num; | 510 | entry.sport = inet->inet_num; |
511 | entry.dport = ntohs(inet->dport); | 511 | entry.dport = ntohs(inet->inet_dport); |
512 | entry.userlocks = sk->sk_userlocks; | 512 | entry.userlocks = sk->sk_userlocks; |
513 | 513 | ||
514 | if (!inet_diag_bc_run(RTA_DATA(bc), RTA_PAYLOAD(bc), &entry)) | 514 | if (!inet_diag_bc_run(RTA_DATA(bc), RTA_PAYLOAD(bc), &entry)) |
@@ -584,7 +584,7 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk, | |||
584 | if (tmo < 0) | 584 | if (tmo < 0) |
585 | tmo = 0; | 585 | tmo = 0; |
586 | 586 | ||
587 | r->id.idiag_sport = inet->sport; | 587 | r->id.idiag_sport = inet->inet_sport; |
588 | r->id.idiag_dport = ireq->rmt_port; | 588 | r->id.idiag_dport = ireq->rmt_port; |
589 | r->id.idiag_src[0] = ireq->loc_addr; | 589 | r->id.idiag_src[0] = ireq->loc_addr; |
590 | r->id.idiag_dst[0] = ireq->rmt_addr; | 590 | r->id.idiag_dst[0] = ireq->rmt_addr; |
@@ -639,7 +639,7 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk, | |||
639 | 639 | ||
640 | if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) { | 640 | if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) { |
641 | bc = (struct rtattr *)(r + 1); | 641 | bc = (struct rtattr *)(r + 1); |
642 | entry.sport = inet->num; | 642 | entry.sport = inet->inet_num; |
643 | entry.userlocks = sk->sk_userlocks; | 643 | entry.userlocks = sk->sk_userlocks; |
644 | } | 644 | } |
645 | 645 | ||
@@ -732,7 +732,7 @@ static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
732 | continue; | 732 | continue; |
733 | } | 733 | } |
734 | 734 | ||
735 | if (r->id.idiag_sport != inet->sport && | 735 | if (r->id.idiag_sport != inet->inet_sport && |
736 | r->id.idiag_sport) | 736 | r->id.idiag_sport) |
737 | goto next_listen; | 737 | goto next_listen; |
738 | 738 | ||
@@ -774,7 +774,7 @@ skip_listen_ht: | |||
774 | if (!(r->idiag_states & ~(TCPF_LISTEN | TCPF_SYN_RECV))) | 774 | if (!(r->idiag_states & ~(TCPF_LISTEN | TCPF_SYN_RECV))) |
775 | goto unlock; | 775 | goto unlock; |
776 | 776 | ||
777 | for (i = s_i; i < hashinfo->ehash_size; i++) { | 777 | for (i = s_i; i <= hashinfo->ehash_mask; i++) { |
778 | struct inet_ehash_bucket *head = &hashinfo->ehash[i]; | 778 | struct inet_ehash_bucket *head = &hashinfo->ehash[i]; |
779 | spinlock_t *lock = inet_ehash_lockp(hashinfo, i); | 779 | spinlock_t *lock = inet_ehash_lockp(hashinfo, i); |
780 | struct sock *sk; | 780 | struct sock *sk; |
@@ -797,10 +797,10 @@ skip_listen_ht: | |||
797 | goto next_normal; | 797 | goto next_normal; |
798 | if (!(r->idiag_states & (1 << sk->sk_state))) | 798 | if (!(r->idiag_states & (1 << sk->sk_state))) |
799 | goto next_normal; | 799 | goto next_normal; |
800 | if (r->id.idiag_sport != inet->sport && | 800 | if (r->id.idiag_sport != inet->inet_sport && |
801 | r->id.idiag_sport) | 801 | r->id.idiag_sport) |
802 | goto next_normal; | 802 | goto next_normal; |
803 | if (r->id.idiag_dport != inet->dport && | 803 | if (r->id.idiag_dport != inet->inet_dport && |
804 | r->id.idiag_dport) | 804 | r->id.idiag_dport) |
805 | goto next_normal; | 805 | goto next_normal; |
806 | if (inet_csk_diag_dump(sk, skb, cb) < 0) { | 806 | if (inet_csk_diag_dump(sk, skb, cb) < 0) { |
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index 625cc5f64c94..47ad7aab51e3 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c | |||
@@ -64,7 +64,7 @@ void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb, | |||
64 | 64 | ||
65 | atomic_inc(&hashinfo->bsockets); | 65 | atomic_inc(&hashinfo->bsockets); |
66 | 66 | ||
67 | inet_sk(sk)->num = snum; | 67 | inet_sk(sk)->inet_num = snum; |
68 | sk_add_bind_node(sk, &tb->owners); | 68 | sk_add_bind_node(sk, &tb->owners); |
69 | tb->num_owners++; | 69 | tb->num_owners++; |
70 | inet_csk(sk)->icsk_bind_hash = tb; | 70 | inet_csk(sk)->icsk_bind_hash = tb; |
@@ -76,7 +76,7 @@ void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb, | |||
76 | static void __inet_put_port(struct sock *sk) | 76 | static void __inet_put_port(struct sock *sk) |
77 | { | 77 | { |
78 | struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo; | 78 | struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo; |
79 | const int bhash = inet_bhashfn(sock_net(sk), inet_sk(sk)->num, | 79 | const int bhash = inet_bhashfn(sock_net(sk), inet_sk(sk)->inet_num, |
80 | hashinfo->bhash_size); | 80 | hashinfo->bhash_size); |
81 | struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash]; | 81 | struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash]; |
82 | struct inet_bind_bucket *tb; | 82 | struct inet_bind_bucket *tb; |
@@ -88,7 +88,7 @@ static void __inet_put_port(struct sock *sk) | |||
88 | __sk_del_bind_node(sk); | 88 | __sk_del_bind_node(sk); |
89 | tb->num_owners--; | 89 | tb->num_owners--; |
90 | inet_csk(sk)->icsk_bind_hash = NULL; | 90 | inet_csk(sk)->icsk_bind_hash = NULL; |
91 | inet_sk(sk)->num = 0; | 91 | inet_sk(sk)->inet_num = 0; |
92 | inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb); | 92 | inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb); |
93 | spin_unlock(&head->lock); | 93 | spin_unlock(&head->lock); |
94 | } | 94 | } |
@@ -105,7 +105,7 @@ EXPORT_SYMBOL(inet_put_port); | |||
105 | void __inet_inherit_port(struct sock *sk, struct sock *child) | 105 | void __inet_inherit_port(struct sock *sk, struct sock *child) |
106 | { | 106 | { |
107 | struct inet_hashinfo *table = sk->sk_prot->h.hashinfo; | 107 | struct inet_hashinfo *table = sk->sk_prot->h.hashinfo; |
108 | const int bhash = inet_bhashfn(sock_net(sk), inet_sk(child)->num, | 108 | const int bhash = inet_bhashfn(sock_net(sk), inet_sk(child)->inet_num, |
109 | table->bhash_size); | 109 | table->bhash_size); |
110 | struct inet_bind_hashbucket *head = &table->bhash[bhash]; | 110 | struct inet_bind_hashbucket *head = &table->bhash[bhash]; |
111 | struct inet_bind_bucket *tb; | 111 | struct inet_bind_bucket *tb; |
@@ -126,9 +126,9 @@ static inline int compute_score(struct sock *sk, struct net *net, | |||
126 | int score = -1; | 126 | int score = -1; |
127 | struct inet_sock *inet = inet_sk(sk); | 127 | struct inet_sock *inet = inet_sk(sk); |
128 | 128 | ||
129 | if (net_eq(sock_net(sk), net) && inet->num == hnum && | 129 | if (net_eq(sock_net(sk), net) && inet->inet_num == hnum && |
130 | !ipv6_only_sock(sk)) { | 130 | !ipv6_only_sock(sk)) { |
131 | __be32 rcv_saddr = inet->rcv_saddr; | 131 | __be32 rcv_saddr = inet->inet_rcv_saddr; |
132 | score = sk->sk_family == PF_INET ? 1 : 0; | 132 | score = sk->sk_family == PF_INET ? 1 : 0; |
133 | if (rcv_saddr) { | 133 | if (rcv_saddr) { |
134 | if (rcv_saddr != daddr) | 134 | if (rcv_saddr != daddr) |
@@ -209,7 +209,7 @@ struct sock * __inet_lookup_established(struct net *net, | |||
209 | * have wildcards anyways. | 209 | * have wildcards anyways. |
210 | */ | 210 | */ |
211 | unsigned int hash = inet_ehashfn(net, daddr, hnum, saddr, sport); | 211 | unsigned int hash = inet_ehashfn(net, daddr, hnum, saddr, sport); |
212 | unsigned int slot = hash & (hashinfo->ehash_size - 1); | 212 | unsigned int slot = hash & hashinfo->ehash_mask; |
213 | struct inet_ehash_bucket *head = &hashinfo->ehash[slot]; | 213 | struct inet_ehash_bucket *head = &hashinfo->ehash[slot]; |
214 | 214 | ||
215 | rcu_read_lock(); | 215 | rcu_read_lock(); |
@@ -273,13 +273,14 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row, | |||
273 | { | 273 | { |
274 | struct inet_hashinfo *hinfo = death_row->hashinfo; | 274 | struct inet_hashinfo *hinfo = death_row->hashinfo; |
275 | struct inet_sock *inet = inet_sk(sk); | 275 | struct inet_sock *inet = inet_sk(sk); |
276 | __be32 daddr = inet->rcv_saddr; | 276 | __be32 daddr = inet->inet_rcv_saddr; |
277 | __be32 saddr = inet->daddr; | 277 | __be32 saddr = inet->inet_daddr; |
278 | int dif = sk->sk_bound_dev_if; | 278 | int dif = sk->sk_bound_dev_if; |
279 | INET_ADDR_COOKIE(acookie, saddr, daddr) | 279 | INET_ADDR_COOKIE(acookie, saddr, daddr) |
280 | const __portpair ports = INET_COMBINED_PORTS(inet->dport, lport); | 280 | const __portpair ports = INET_COMBINED_PORTS(inet->inet_dport, lport); |
281 | struct net *net = sock_net(sk); | 281 | struct net *net = sock_net(sk); |
282 | unsigned int hash = inet_ehashfn(net, daddr, lport, saddr, inet->dport); | 282 | unsigned int hash = inet_ehashfn(net, daddr, lport, |
283 | saddr, inet->inet_dport); | ||
283 | struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash); | 284 | struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash); |
284 | spinlock_t *lock = inet_ehash_lockp(hinfo, hash); | 285 | spinlock_t *lock = inet_ehash_lockp(hinfo, hash); |
285 | struct sock *sk2; | 286 | struct sock *sk2; |
@@ -312,8 +313,8 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row, | |||
312 | unique: | 313 | unique: |
313 | /* Must record num and sport now. Otherwise we will see | 314 | /* Must record num and sport now. Otherwise we will see |
314 | * in hash table socket with a funny identity. */ | 315 | * in hash table socket with a funny identity. */ |
315 | inet->num = lport; | 316 | inet->inet_num = lport; |
316 | inet->sport = htons(lport); | 317 | inet->inet_sport = htons(lport); |
317 | sk->sk_hash = hash; | 318 | sk->sk_hash = hash; |
318 | WARN_ON(!sk_unhashed(sk)); | 319 | WARN_ON(!sk_unhashed(sk)); |
319 | __sk_nulls_add_node_rcu(sk, &head->chain); | 320 | __sk_nulls_add_node_rcu(sk, &head->chain); |
@@ -341,8 +342,9 @@ not_unique: | |||
341 | static inline u32 inet_sk_port_offset(const struct sock *sk) | 342 | static inline u32 inet_sk_port_offset(const struct sock *sk) |
342 | { | 343 | { |
343 | const struct inet_sock *inet = inet_sk(sk); | 344 | const struct inet_sock *inet = inet_sk(sk); |
344 | return secure_ipv4_port_ephemeral(inet->rcv_saddr, inet->daddr, | 345 | return secure_ipv4_port_ephemeral(inet->inet_rcv_saddr, |
345 | inet->dport); | 346 | inet->inet_daddr, |
347 | inet->inet_dport); | ||
346 | } | 348 | } |
347 | 349 | ||
348 | void __inet_hash_nolisten(struct sock *sk) | 350 | void __inet_hash_nolisten(struct sock *sk) |
@@ -424,7 +426,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row, | |||
424 | void (*hash)(struct sock *sk)) | 426 | void (*hash)(struct sock *sk)) |
425 | { | 427 | { |
426 | struct inet_hashinfo *hinfo = death_row->hashinfo; | 428 | struct inet_hashinfo *hinfo = death_row->hashinfo; |
427 | const unsigned short snum = inet_sk(sk)->num; | 429 | const unsigned short snum = inet_sk(sk)->inet_num; |
428 | struct inet_bind_hashbucket *head; | 430 | struct inet_bind_hashbucket *head; |
429 | struct inet_bind_bucket *tb; | 431 | struct inet_bind_bucket *tb; |
430 | int ret; | 432 | int ret; |
@@ -485,7 +487,7 @@ ok: | |||
485 | /* Head lock still held and bh's disabled */ | 487 | /* Head lock still held and bh's disabled */ |
486 | inet_bind_hash(sk, tb, port); | 488 | inet_bind_hash(sk, tb, port); |
487 | if (sk_unhashed(sk)) { | 489 | if (sk_unhashed(sk)) { |
488 | inet_sk(sk)->sport = htons(port); | 490 | inet_sk(sk)->inet_sport = htons(port); |
489 | hash(sk); | 491 | hash(sk); |
490 | } | 492 | } |
491 | spin_unlock(&head->lock); | 493 | spin_unlock(&head->lock); |
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c index 13f0781f35cd..1f5d508bb18b 100644 --- a/net/ipv4/inet_timewait_sock.c +++ b/net/ipv4/inet_timewait_sock.c | |||
@@ -86,7 +86,7 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk, | |||
86 | Note, that any socket with inet->num != 0 MUST be bound in | 86 | Note, that any socket with inet->num != 0 MUST be bound in |
87 | binding cache, even if it is closed. | 87 | binding cache, even if it is closed. |
88 | */ | 88 | */ |
89 | bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), inet->num, | 89 | bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), inet->inet_num, |
90 | hashinfo->bhash_size)]; | 90 | hashinfo->bhash_size)]; |
91 | spin_lock(&bhead->lock); | 91 | spin_lock(&bhead->lock); |
92 | tw->tw_tb = icsk->icsk_bind_hash; | 92 | tw->tw_tb = icsk->icsk_bind_hash; |
@@ -124,14 +124,14 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int stat | |||
124 | kmemcheck_annotate_bitfield(tw, flags); | 124 | kmemcheck_annotate_bitfield(tw, flags); |
125 | 125 | ||
126 | /* Give us an identity. */ | 126 | /* Give us an identity. */ |
127 | tw->tw_daddr = inet->daddr; | 127 | tw->tw_daddr = inet->inet_daddr; |
128 | tw->tw_rcv_saddr = inet->rcv_saddr; | 128 | tw->tw_rcv_saddr = inet->inet_rcv_saddr; |
129 | tw->tw_bound_dev_if = sk->sk_bound_dev_if; | 129 | tw->tw_bound_dev_if = sk->sk_bound_dev_if; |
130 | tw->tw_num = inet->num; | 130 | tw->tw_num = inet->inet_num; |
131 | tw->tw_state = TCP_TIME_WAIT; | 131 | tw->tw_state = TCP_TIME_WAIT; |
132 | tw->tw_substate = state; | 132 | tw->tw_substate = state; |
133 | tw->tw_sport = inet->sport; | 133 | tw->tw_sport = inet->inet_sport; |
134 | tw->tw_dport = inet->dport; | 134 | tw->tw_dport = inet->inet_dport; |
135 | tw->tw_family = sk->sk_family; | 135 | tw->tw_family = sk->sk_family; |
136 | tw->tw_reuse = sk->sk_reuse; | 136 | tw->tw_reuse = sk->sk_reuse; |
137 | tw->tw_hash = sk->sk_hash; | 137 | tw->tw_hash = sk->sk_hash; |
@@ -430,7 +430,7 @@ void inet_twsk_purge(struct net *net, struct inet_hashinfo *hashinfo, | |||
430 | int h; | 430 | int h; |
431 | 431 | ||
432 | local_bh_disable(); | 432 | local_bh_disable(); |
433 | for (h = 0; h < (hashinfo->ehash_size); h++) { | 433 | for (h = 0; h <= hashinfo->ehash_mask; h++) { |
434 | struct inet_ehash_bucket *head = | 434 | struct inet_ehash_bucket *head = |
435 | inet_ehash_bucket(hashinfo, h); | 435 | inet_ehash_bucket(hashinfo, h); |
436 | spinlock_t *lock = inet_ehash_lockp(hashinfo, h); | 436 | spinlock_t *lock = inet_ehash_lockp(hashinfo, h); |
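The ehash_size to ehash_mask change in inet_diag.c, inet_hashtables.c and here replaces a bucket count with a bucket mask: the established hash table has a power-of-two number of slots and stores (slots - 1), so slot selection is a single AND and full walks run from 0 to ehash_mask inclusive. A small sketch of the convention, with illustrative helper names:

/* slot selection: hash & mask replaces hash & (size - 1) */
static inline struct inet_ehash_bucket *
ehash_slot(struct inet_hashinfo *hashinfo, unsigned int hash)
{
        return &hashinfo->ehash[hash & hashinfo->ehash_mask];
}

/* full-table walk, as in inet_twsk_purge() above */
static void walk_ehash(struct inet_hashinfo *hashinfo)
{
        unsigned int h;

        for (h = 0; h <= hashinfo->ehash_mask; h++) {
                struct inet_ehash_bucket *head = &hashinfo->ehash[h];
                /* ... scan head->chain under inet_ehash_lockp(hashinfo, h) ... */
        }
}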
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 41ada9904d31..a77807d449e3 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c | |||
@@ -156,8 +156,13 @@ struct ipgre_net { | |||
156 | #define tunnels_r tunnels[2] | 156 | #define tunnels_r tunnels[2] |
157 | #define tunnels_l tunnels[1] | 157 | #define tunnels_l tunnels[1] |
158 | #define tunnels_wc tunnels[0] | 158 | #define tunnels_wc tunnels[0] |
159 | /* | ||
160 | * Locking : hash tables are protected by RCU and a spinlock | ||
161 | */ | ||
162 | static DEFINE_SPINLOCK(ipgre_lock); | ||
159 | 163 | ||
160 | static DEFINE_RWLOCK(ipgre_lock); | 164 | #define for_each_ip_tunnel_rcu(start) \ |
165 | for (t = rcu_dereference(start); t; t = rcu_dereference(t->next)) | ||
161 | 166 | ||
162 | /* Given src, dst and key, find appropriate for input tunnel. */ | 167 | /* Given src, dst and key, find appropriate for input tunnel. */ |
163 | 168 | ||
@@ -175,7 +180,7 @@ static struct ip_tunnel * ipgre_tunnel_lookup(struct net_device *dev, | |||
175 | ARPHRD_ETHER : ARPHRD_IPGRE; | 180 | ARPHRD_ETHER : ARPHRD_IPGRE; |
176 | int score, cand_score = 4; | 181 | int score, cand_score = 4; |
177 | 182 | ||
178 | for (t = ign->tunnels_r_l[h0^h1]; t; t = t->next) { | 183 | for_each_ip_tunnel_rcu(ign->tunnels_r_l[h0 ^ h1]) { |
179 | if (local != t->parms.iph.saddr || | 184 | if (local != t->parms.iph.saddr || |
180 | remote != t->parms.iph.daddr || | 185 | remote != t->parms.iph.daddr || |
181 | key != t->parms.i_key || | 186 | key != t->parms.i_key || |
@@ -200,7 +205,7 @@ static struct ip_tunnel * ipgre_tunnel_lookup(struct net_device *dev, | |||
200 | } | 205 | } |
201 | } | 206 | } |
202 | 207 | ||
203 | for (t = ign->tunnels_r[h0^h1]; t; t = t->next) { | 208 | for_each_ip_tunnel_rcu(ign->tunnels_r[h0 ^ h1]) { |
204 | if (remote != t->parms.iph.daddr || | 209 | if (remote != t->parms.iph.daddr || |
205 | key != t->parms.i_key || | 210 | key != t->parms.i_key || |
206 | !(t->dev->flags & IFF_UP)) | 211 | !(t->dev->flags & IFF_UP)) |
@@ -224,7 +229,7 @@ static struct ip_tunnel * ipgre_tunnel_lookup(struct net_device *dev, | |||
224 | } | 229 | } |
225 | } | 230 | } |
226 | 231 | ||
227 | for (t = ign->tunnels_l[h1]; t; t = t->next) { | 232 | for_each_ip_tunnel_rcu(ign->tunnels_l[h1]) { |
228 | if ((local != t->parms.iph.saddr && | 233 | if ((local != t->parms.iph.saddr && |
229 | (local != t->parms.iph.daddr || | 234 | (local != t->parms.iph.daddr || |
230 | !ipv4_is_multicast(local))) || | 235 | !ipv4_is_multicast(local))) || |
@@ -250,7 +255,7 @@ static struct ip_tunnel * ipgre_tunnel_lookup(struct net_device *dev, | |||
250 | } | 255 | } |
251 | } | 256 | } |
252 | 257 | ||
253 | for (t = ign->tunnels_wc[h1]; t; t = t->next) { | 258 | for_each_ip_tunnel_rcu(ign->tunnels_wc[h1]) { |
254 | if (t->parms.i_key != key || | 259 | if (t->parms.i_key != key || |
255 | !(t->dev->flags & IFF_UP)) | 260 | !(t->dev->flags & IFF_UP)) |
256 | continue; | 261 | continue; |
@@ -276,8 +281,9 @@ static struct ip_tunnel * ipgre_tunnel_lookup(struct net_device *dev, | |||
276 | if (cand != NULL) | 281 | if (cand != NULL) |
277 | return cand; | 282 | return cand; |
278 | 283 | ||
279 | if (ign->fb_tunnel_dev->flags & IFF_UP) | 284 | dev = ign->fb_tunnel_dev; |
280 | return netdev_priv(ign->fb_tunnel_dev); | 285 | if (dev->flags & IFF_UP) |
286 | return netdev_priv(dev); | ||
281 | 287 | ||
282 | return NULL; | 288 | return NULL; |
283 | } | 289 | } |
@@ -311,10 +317,10 @@ static void ipgre_tunnel_link(struct ipgre_net *ign, struct ip_tunnel *t) | |||
311 | { | 317 | { |
312 | struct ip_tunnel **tp = ipgre_bucket(ign, t); | 318 | struct ip_tunnel **tp = ipgre_bucket(ign, t); |
313 | 319 | ||
320 | spin_lock_bh(&ipgre_lock); | ||
314 | t->next = *tp; | 321 | t->next = *tp; |
315 | write_lock_bh(&ipgre_lock); | 322 | rcu_assign_pointer(*tp, t); |
316 | *tp = t; | 323 | spin_unlock_bh(&ipgre_lock); |
317 | write_unlock_bh(&ipgre_lock); | ||
318 | } | 324 | } |
319 | 325 | ||
320 | static void ipgre_tunnel_unlink(struct ipgre_net *ign, struct ip_tunnel *t) | 326 | static void ipgre_tunnel_unlink(struct ipgre_net *ign, struct ip_tunnel *t) |
@@ -323,9 +329,9 @@ static void ipgre_tunnel_unlink(struct ipgre_net *ign, struct ip_tunnel *t) | |||
323 | 329 | ||
324 | for (tp = ipgre_bucket(ign, t); *tp; tp = &(*tp)->next) { | 330 | for (tp = ipgre_bucket(ign, t); *tp; tp = &(*tp)->next) { |
325 | if (t == *tp) { | 331 | if (t == *tp) { |
326 | write_lock_bh(&ipgre_lock); | 332 | spin_lock_bh(&ipgre_lock); |
327 | *tp = t->next; | 333 | *tp = t->next; |
328 | write_unlock_bh(&ipgre_lock); | 334 | spin_unlock_bh(&ipgre_lock); |
329 | break; | 335 | break; |
330 | } | 336 | } |
331 | } | 337 | } |
@@ -476,7 +482,7 @@ static void ipgre_err(struct sk_buff *skb, u32 info) | |||
476 | break; | 482 | break; |
477 | } | 483 | } |
478 | 484 | ||
479 | read_lock(&ipgre_lock); | 485 | rcu_read_lock(); |
480 | t = ipgre_tunnel_lookup(skb->dev, iph->daddr, iph->saddr, | 486 | t = ipgre_tunnel_lookup(skb->dev, iph->daddr, iph->saddr, |
481 | flags & GRE_KEY ? | 487 | flags & GRE_KEY ? |
482 | *(((__be32 *)p) + (grehlen / 4) - 1) : 0, | 488 | *(((__be32 *)p) + (grehlen / 4) - 1) : 0, |
@@ -494,7 +500,7 @@ static void ipgre_err(struct sk_buff *skb, u32 info) | |||
494 | t->err_count = 1; | 500 | t->err_count = 1; |
495 | t->err_time = jiffies; | 501 | t->err_time = jiffies; |
496 | out: | 502 | out: |
497 | read_unlock(&ipgre_lock); | 503 | rcu_read_unlock(); |
498 | return; | 504 | return; |
499 | } | 505 | } |
500 | 506 | ||
@@ -573,7 +579,7 @@ static int ipgre_rcv(struct sk_buff *skb) | |||
573 | 579 | ||
574 | gre_proto = *(__be16 *)(h + 2); | 580 | gre_proto = *(__be16 *)(h + 2); |
575 | 581 | ||
576 | read_lock(&ipgre_lock); | 582 | rcu_read_lock(); |
577 | if ((tunnel = ipgre_tunnel_lookup(skb->dev, | 583 | if ((tunnel = ipgre_tunnel_lookup(skb->dev, |
578 | iph->saddr, iph->daddr, key, | 584 | iph->saddr, iph->daddr, key, |
579 | gre_proto))) { | 585 | gre_proto))) { |
@@ -647,13 +653,13 @@ static int ipgre_rcv(struct sk_buff *skb) | |||
647 | ipgre_ecn_decapsulate(iph, skb); | 653 | ipgre_ecn_decapsulate(iph, skb); |
648 | 654 | ||
649 | netif_rx(skb); | 655 | netif_rx(skb); |
650 | read_unlock(&ipgre_lock); | 656 | rcu_read_unlock(); |
651 | return(0); | 657 | return(0); |
652 | } | 658 | } |
653 | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); | 659 | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); |
654 | 660 | ||
655 | drop: | 661 | drop: |
656 | read_unlock(&ipgre_lock); | 662 | rcu_read_unlock(); |
657 | drop_nolock: | 663 | drop_nolock: |
658 | kfree_skb(skb); | 664 | kfree_skb(skb); |
659 | return(0); | 665 | return(0); |
@@ -662,7 +668,8 @@ drop_nolock: | |||
662 | static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | 668 | static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) |
663 | { | 669 | { |
664 | struct ip_tunnel *tunnel = netdev_priv(dev); | 670 | struct ip_tunnel *tunnel = netdev_priv(dev); |
665 | struct net_device_stats *stats = &tunnel->dev->stats; | 671 | struct net_device_stats *stats = &dev->stats; |
672 | struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); | ||
666 | struct iphdr *old_iph = ip_hdr(skb); | 673 | struct iphdr *old_iph = ip_hdr(skb); |
667 | struct iphdr *tiph; | 674 | struct iphdr *tiph; |
668 | u8 tos; | 675 | u8 tos; |
@@ -810,7 +817,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev | |||
810 | struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom); | 817 | struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom); |
811 | if (!new_skb) { | 818 | if (!new_skb) { |
812 | ip_rt_put(rt); | 819 | ip_rt_put(rt); |
813 | stats->tx_dropped++; | 820 | txq->tx_dropped++; |
814 | dev_kfree_skb(skb); | 821 | dev_kfree_skb(skb); |
815 | return NETDEV_TX_OK; | 822 | return NETDEV_TX_OK; |
816 | } | 823 | } |
@@ -1283,16 +1290,19 @@ static const struct net_protocol ipgre_protocol = { | |||
1283 | .netns_ok = 1, | 1290 | .netns_ok = 1, |
1284 | }; | 1291 | }; |
1285 | 1292 | ||
1286 | static void ipgre_destroy_tunnels(struct ipgre_net *ign) | 1293 | static void ipgre_destroy_tunnels(struct ipgre_net *ign, struct list_head *head) |
1287 | { | 1294 | { |
1288 | int prio; | 1295 | int prio; |
1289 | 1296 | ||
1290 | for (prio = 0; prio < 4; prio++) { | 1297 | for (prio = 0; prio < 4; prio++) { |
1291 | int h; | 1298 | int h; |
1292 | for (h = 0; h < HASH_SIZE; h++) { | 1299 | for (h = 0; h < HASH_SIZE; h++) { |
1293 | struct ip_tunnel *t; | 1300 | struct ip_tunnel *t = ign->tunnels[prio][h]; |
1294 | while ((t = ign->tunnels[prio][h]) != NULL) | 1301 | |
1295 | unregister_netdevice(t->dev); | 1302 | while (t != NULL) { |
1303 | unregister_netdevice_queue(t->dev, head); | ||
1304 | t = t->next; | ||
1305 | } | ||
1296 | } | 1306 | } |
1297 | } | 1307 | } |
1298 | } | 1308 | } |
@@ -1340,10 +1350,12 @@ err_alloc: | |||
1340 | static void ipgre_exit_net(struct net *net) | 1350 | static void ipgre_exit_net(struct net *net) |
1341 | { | 1351 | { |
1342 | struct ipgre_net *ign; | 1352 | struct ipgre_net *ign; |
1353 | LIST_HEAD(list); | ||
1343 | 1354 | ||
1344 | ign = net_generic(net, ipgre_net_id); | 1355 | ign = net_generic(net, ipgre_net_id); |
1345 | rtnl_lock(); | 1356 | rtnl_lock(); |
1346 | ipgre_destroy_tunnels(ign); | 1357 | ipgre_destroy_tunnels(ign, &list); |
1358 | unregister_netdevice_many(&list); | ||
1347 | rtnl_unlock(); | 1359 | rtnl_unlock(); |
1348 | kfree(ign); | 1360 | kfree(ign); |
1349 | } | 1361 | } |
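Two patterns arrive in ip_gre.c. First, the tunnel hash tables move from a rwlock to RCU: readers (ipgre_rcv, ipgre_err, the lookup) only take rcu_read_lock(), while writers serialize on ipgre_lock and publish with rcu_assign_pointer(). Second, netns teardown queues every tunnel device on a list and unregisters the whole batch at once. A condensed sketch of the reader/writer pair, with names taken from the hunks above and error handling trimmed:

/* writer: called under RTNL; ipgre_lock only orders concurrent writers */
static void tunnel_link(struct ip_tunnel **tp, struct ip_tunnel *t)
{
        spin_lock_bh(&ipgre_lock);
        t->next = *tp;                  /* fully set up before publication */
        rcu_assign_pointer(*tp, t);     /* pairs with rcu_dereference() below */
        spin_unlock_bh(&ipgre_lock);
}

/* reader: caller must hold rcu_read_lock(), as ipgre_rcv()/ipgre_err() do */
static struct ip_tunnel *tunnel_find(struct ip_tunnel *head, __be32 daddr)
{
        struct ip_tunnel *t;

        for (t = rcu_dereference(head); t; t = rcu_dereference(t->next))
                if (t->parms.iph.daddr == daddr && (t->dev->flags & IFF_UP))
                        return t;
        return NULL;
}

Unlink keeps the spinlock; the tunnel structure lives inside its net_device, which is only freed once unregistration has completed, so readers that still see it via RCU remain safe.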
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index 6c98b43badf4..fdf51badc8e5 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c | |||
@@ -161,7 +161,7 @@ int ip_call_ra_chain(struct sk_buff *skb) | |||
161 | /* If socket is bound to an interface, only report | 161 | /* If socket is bound to an interface, only report |
162 | * the packet if it came from that interface. | 162 | * the packet if it came from that interface. |
163 | */ | 163 | */ |
164 | if (sk && inet_sk(sk)->num == protocol && | 164 | if (sk && inet_sk(sk)->inet_num == protocol && |
165 | (!sk->sk_bound_dev_if || | 165 | (!sk->sk_bound_dev_if || |
166 | sk->sk_bound_dev_if == dev->ifindex) && | 166 | sk->sk_bound_dev_if == dev->ifindex) && |
167 | sock_net(sk) == dev_net(dev)) { | 167 | sock_net(sk) == dev_net(dev)) { |
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index f9895180f481..322b40864ac0 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c | |||
@@ -329,7 +329,7 @@ int ip_queue_xmit(struct sk_buff *skb, int ipfragok) | |||
329 | __be32 daddr; | 329 | __be32 daddr; |
330 | 330 | ||
331 | /* Use correct destination address if we have options. */ | 331 | /* Use correct destination address if we have options. */ |
332 | daddr = inet->daddr; | 332 | daddr = inet->inet_daddr; |
333 | if(opt && opt->srr) | 333 | if(opt && opt->srr) |
334 | daddr = opt->faddr; | 334 | daddr = opt->faddr; |
335 | 335 | ||
@@ -338,13 +338,13 @@ int ip_queue_xmit(struct sk_buff *skb, int ipfragok) | |||
338 | .mark = sk->sk_mark, | 338 | .mark = sk->sk_mark, |
339 | .nl_u = { .ip4_u = | 339 | .nl_u = { .ip4_u = |
340 | { .daddr = daddr, | 340 | { .daddr = daddr, |
341 | .saddr = inet->saddr, | 341 | .saddr = inet->inet_saddr, |
342 | .tos = RT_CONN_FLAGS(sk) } }, | 342 | .tos = RT_CONN_FLAGS(sk) } }, |
343 | .proto = sk->sk_protocol, | 343 | .proto = sk->sk_protocol, |
344 | .flags = inet_sk_flowi_flags(sk), | 344 | .flags = inet_sk_flowi_flags(sk), |
345 | .uli_u = { .ports = | 345 | .uli_u = { .ports = |
346 | { .sport = inet->sport, | 346 | { .sport = inet->inet_sport, |
347 | .dport = inet->dport } } }; | 347 | .dport = inet->inet_dport } } }; |
348 | 348 | ||
349 | /* If this fails, retransmit mechanism of transport layer will | 349 | /* If this fails, retransmit mechanism of transport layer will |
350 | * keep trying until route appears or the connection times | 350 | * keep trying until route appears or the connection times |
@@ -379,7 +379,7 @@ packet_routed: | |||
379 | 379 | ||
380 | if (opt && opt->optlen) { | 380 | if (opt && opt->optlen) { |
381 | iph->ihl += opt->optlen >> 2; | 381 | iph->ihl += opt->optlen >> 2; |
382 | ip_options_build(skb, opt, inet->daddr, rt, 0); | 382 | ip_options_build(skb, opt, inet->inet_daddr, rt, 0); |
383 | } | 383 | } |
384 | 384 | ||
385 | ip_select_ident_more(iph, &rt->u.dst, sk, | 385 | ip_select_ident_more(iph, &rt->u.dst, sk, |
@@ -846,7 +846,8 @@ int ip_append_data(struct sock *sk, | |||
846 | maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen; | 846 | maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen; |
847 | 847 | ||
848 | if (inet->cork.length + length > 0xFFFF - fragheaderlen) { | 848 | if (inet->cork.length + length > 0xFFFF - fragheaderlen) { |
849 | ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu-exthdrlen); | 849 | ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->inet_dport, |
850 | mtu-exthdrlen); | ||
850 | return -EMSGSIZE; | 851 | return -EMSGSIZE; |
851 | } | 852 | } |
852 | 853 | ||
@@ -1100,7 +1101,7 @@ ssize_t ip_append_page(struct sock *sk, struct page *page, | |||
1100 | maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen; | 1101 | maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen; |
1101 | 1102 | ||
1102 | if (inet->cork.length + size > 0xFFFF - fragheaderlen) { | 1103 | if (inet->cork.length + size > 0xFFFF - fragheaderlen) { |
1103 | ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu); | 1104 | ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->inet_dport, mtu); |
1104 | return -EMSGSIZE; | 1105 | return -EMSGSIZE; |
1105 | } | 1106 | } |
1106 | 1107 | ||
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index e982b5c1ee17..cafad9baff03 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c | |||
@@ -245,7 +245,7 @@ int ip_ra_control(struct sock *sk, unsigned char on, | |||
245 | { | 245 | { |
246 | struct ip_ra_chain *ra, *new_ra, **rap; | 246 | struct ip_ra_chain *ra, *new_ra, **rap; |
247 | 247 | ||
248 | if (sk->sk_type != SOCK_RAW || inet_sk(sk)->num == IPPROTO_RAW) | 248 | if (sk->sk_type != SOCK_RAW || inet_sk(sk)->inet_num == IPPROTO_RAW) |
249 | return -EINVAL; | 249 | return -EINVAL; |
250 | 250 | ||
251 | new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL; | 251 | new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL; |
@@ -480,7 +480,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, | |||
480 | case IP_OPTIONS: | 480 | case IP_OPTIONS: |
481 | { | 481 | { |
482 | struct ip_options *opt = NULL; | 482 | struct ip_options *opt = NULL; |
483 | if (optlen > 40 || optlen < 0) | 483 | if (optlen > 40) |
484 | goto e_inval; | 484 | goto e_inval; |
485 | err = ip_options_get_from_user(sock_net(sk), &opt, | 485 | err = ip_options_get_from_user(sock_net(sk), &opt, |
486 | optval, optlen); | 486 | optval, optlen); |
@@ -492,7 +492,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, | |||
492 | if (sk->sk_family == PF_INET || | 492 | if (sk->sk_family == PF_INET || |
493 | (!((1 << sk->sk_state) & | 493 | (!((1 << sk->sk_state) & |
494 | (TCPF_LISTEN | TCPF_CLOSE)) && | 494 | (TCPF_LISTEN | TCPF_CLOSE)) && |
495 | inet->daddr != LOOPBACK4_IPV6)) { | 495 | inet->inet_daddr != LOOPBACK4_IPV6)) { |
496 | #endif | 496 | #endif |
497 | if (inet->opt) | 497 | if (inet->opt) |
498 | icsk->icsk_ext_hdr_len -= inet->opt->optlen; | 498 | icsk->icsk_ext_hdr_len -= inet->opt->optlen; |
@@ -575,7 +575,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, | |||
575 | inet->hdrincl = val ? 1 : 0; | 575 | inet->hdrincl = val ? 1 : 0; |
576 | break; | 576 | break; |
577 | case IP_MTU_DISCOVER: | 577 | case IP_MTU_DISCOVER: |
578 | if (val < 0 || val > 3) | 578 | if (val < IP_PMTUDISC_DONT || val > IP_PMTUDISC_PROBE) |
579 | goto e_inval; | 579 | goto e_inval; |
580 | inet->pmtudisc = val; | 580 | inet->pmtudisc = val; |
581 | break; | 581 | break; |
@@ -1180,8 +1180,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname, | |||
1180 | if (inet->cmsg_flags & IP_CMSG_PKTINFO) { | 1180 | if (inet->cmsg_flags & IP_CMSG_PKTINFO) { |
1181 | struct in_pktinfo info; | 1181 | struct in_pktinfo info; |
1182 | 1182 | ||
1183 | info.ipi_addr.s_addr = inet->rcv_saddr; | 1183 | info.ipi_addr.s_addr = inet->inet_rcv_saddr; |
1184 | info.ipi_spec_dst.s_addr = inet->rcv_saddr; | 1184 | info.ipi_spec_dst.s_addr = inet->inet_rcv_saddr; |
1185 | info.ipi_ifindex = inet->mc_index; | 1185 | info.ipi_ifindex = inet->mc_index; |
1186 | put_cmsg(&msg, SOL_IP, IP_PKTINFO, sizeof(info), &info); | 1186 | put_cmsg(&msg, SOL_IP, IP_PKTINFO, sizeof(info), &info); |
1187 | } | 1187 | } |
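Two small notes on ip_sockglue.c: the IP_OPTIONS length check loses its "< 0" branch, presumably because optlen is unsigned by this point in the series, and the IP_MTU_DISCOVER bound is now spelled with the named constants, so the accepted range reads as IP_PMTUDISC_DONT through IP_PMTUDISC_PROBE. An illustrative userspace use of the top of that range:

#include <netinet/in.h>
#include <sys/socket.h>

/* request probing PMTU discovery: DF set, cached path MTU ignored */
static int set_pmtu_probe(int fd)
{
        int val = IP_PMTUDISC_PROBE;

        return setsockopt(fd, IPPROTO_IP, IP_MTU_DISCOVER,
                          &val, sizeof(val));
}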
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index 08ccd344de7a..a2ca53da4372 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c | |||
@@ -134,7 +134,13 @@ static void ipip_fb_tunnel_init(struct net_device *dev); | |||
134 | static void ipip_tunnel_init(struct net_device *dev); | 134 | static void ipip_tunnel_init(struct net_device *dev); |
135 | static void ipip_tunnel_setup(struct net_device *dev); | 135 | static void ipip_tunnel_setup(struct net_device *dev); |
136 | 136 | ||
137 | static DEFINE_RWLOCK(ipip_lock); | 137 | /* |
138 | * Locking : hash tables are protected by RCU and a spinlock | ||
139 | */ | ||
140 | static DEFINE_SPINLOCK(ipip_lock); | ||
141 | |||
142 | #define for_each_ip_tunnel_rcu(start) \ | ||
143 | for (t = rcu_dereference(start); t; t = rcu_dereference(t->next)) | ||
138 | 144 | ||
139 | static struct ip_tunnel * ipip_tunnel_lookup(struct net *net, | 145 | static struct ip_tunnel * ipip_tunnel_lookup(struct net *net, |
140 | __be32 remote, __be32 local) | 146 | __be32 remote, __be32 local) |
@@ -144,20 +150,21 @@ static struct ip_tunnel * ipip_tunnel_lookup(struct net *net, | |||
144 | struct ip_tunnel *t; | 150 | struct ip_tunnel *t; |
145 | struct ipip_net *ipn = net_generic(net, ipip_net_id); | 151 | struct ipip_net *ipn = net_generic(net, ipip_net_id); |
146 | 152 | ||
147 | for (t = ipn->tunnels_r_l[h0^h1]; t; t = t->next) { | 153 | for_each_ip_tunnel_rcu(ipn->tunnels_r_l[h0 ^ h1]) |
148 | if (local == t->parms.iph.saddr && | 154 | if (local == t->parms.iph.saddr && |
149 | remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP)) | 155 | remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP)) |
150 | return t; | 156 | return t; |
151 | } | 157 | |
152 | for (t = ipn->tunnels_r[h0]; t; t = t->next) { | 158 | for_each_ip_tunnel_rcu(ipn->tunnels_r[h0]) |
153 | if (remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP)) | 159 | if (remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP)) |
154 | return t; | 160 | return t; |
155 | } | 161 | |
156 | for (t = ipn->tunnels_l[h1]; t; t = t->next) { | 162 | for_each_ip_tunnel_rcu(ipn->tunnels_l[h1]) |
157 | if (local == t->parms.iph.saddr && (t->dev->flags&IFF_UP)) | 163 | if (local == t->parms.iph.saddr && (t->dev->flags&IFF_UP)) |
158 | return t; | 164 | return t; |
159 | } | 165 | |
160 | if ((t = ipn->tunnels_wc[0]) != NULL && (t->dev->flags&IFF_UP)) | 166 | t = rcu_dereference(ipn->tunnels_wc[0]); |
167 | if (t && (t->dev->flags&IFF_UP)) | ||
161 | return t; | 168 | return t; |
162 | return NULL; | 169 | return NULL; |
163 | } | 170 | } |
@@ -193,9 +200,9 @@ static void ipip_tunnel_unlink(struct ipip_net *ipn, struct ip_tunnel *t) | |||
193 | 200 | ||
194 | for (tp = ipip_bucket(ipn, t); *tp; tp = &(*tp)->next) { | 201 | for (tp = ipip_bucket(ipn, t); *tp; tp = &(*tp)->next) { |
195 | if (t == *tp) { | 202 | if (t == *tp) { |
196 | write_lock_bh(&ipip_lock); | 203 | spin_lock_bh(&ipip_lock); |
197 | *tp = t->next; | 204 | *tp = t->next; |
198 | write_unlock_bh(&ipip_lock); | 205 | spin_unlock_bh(&ipip_lock); |
199 | break; | 206 | break; |
200 | } | 207 | } |
201 | } | 208 | } |
@@ -205,10 +212,10 @@ static void ipip_tunnel_link(struct ipip_net *ipn, struct ip_tunnel *t) | |||
205 | { | 212 | { |
206 | struct ip_tunnel **tp = ipip_bucket(ipn, t); | 213 | struct ip_tunnel **tp = ipip_bucket(ipn, t); |
207 | 214 | ||
215 | spin_lock_bh(&ipip_lock); | ||
208 | t->next = *tp; | 216 | t->next = *tp; |
209 | write_lock_bh(&ipip_lock); | 217 | rcu_assign_pointer(*tp, t); |
210 | *tp = t; | 218 | spin_unlock_bh(&ipip_lock); |
211 | write_unlock_bh(&ipip_lock); | ||
212 | } | 219 | } |
213 | 220 | ||
214 | static struct ip_tunnel * ipip_tunnel_locate(struct net *net, | 221 | static struct ip_tunnel * ipip_tunnel_locate(struct net *net, |
@@ -267,9 +274,9 @@ static void ipip_tunnel_uninit(struct net_device *dev) | |||
267 | struct ipip_net *ipn = net_generic(net, ipip_net_id); | 274 | struct ipip_net *ipn = net_generic(net, ipip_net_id); |
268 | 275 | ||
269 | if (dev == ipn->fb_tunnel_dev) { | 276 | if (dev == ipn->fb_tunnel_dev) { |
270 | write_lock_bh(&ipip_lock); | 277 | spin_lock_bh(&ipip_lock); |
271 | ipn->tunnels_wc[0] = NULL; | 278 | ipn->tunnels_wc[0] = NULL; |
272 | write_unlock_bh(&ipip_lock); | 279 | spin_unlock_bh(&ipip_lock); |
273 | } else | 280 | } else |
274 | ipip_tunnel_unlink(ipn, netdev_priv(dev)); | 281 | ipip_tunnel_unlink(ipn, netdev_priv(dev)); |
275 | dev_put(dev); | 282 | dev_put(dev); |
@@ -318,7 +325,7 @@ static int ipip_err(struct sk_buff *skb, u32 info) | |||
318 | 325 | ||
319 | err = -ENOENT; | 326 | err = -ENOENT; |
320 | 327 | ||
321 | read_lock(&ipip_lock); | 328 | rcu_read_lock(); |
322 | t = ipip_tunnel_lookup(dev_net(skb->dev), iph->daddr, iph->saddr); | 329 | t = ipip_tunnel_lookup(dev_net(skb->dev), iph->daddr, iph->saddr); |
323 | if (t == NULL || t->parms.iph.daddr == 0) | 330 | if (t == NULL || t->parms.iph.daddr == 0) |
324 | goto out; | 331 | goto out; |
@@ -333,7 +340,7 @@ static int ipip_err(struct sk_buff *skb, u32 info) | |||
333 | t->err_count = 1; | 340 | t->err_count = 1; |
334 | t->err_time = jiffies; | 341 | t->err_time = jiffies; |
335 | out: | 342 | out: |
336 | read_unlock(&ipip_lock); | 343 | rcu_read_unlock(); |
337 | return err; | 344 | return err; |
338 | } | 345 | } |
339 | 346 | ||
@@ -351,11 +358,11 @@ static int ipip_rcv(struct sk_buff *skb) | |||
351 | struct ip_tunnel *tunnel; | 358 | struct ip_tunnel *tunnel; |
352 | const struct iphdr *iph = ip_hdr(skb); | 359 | const struct iphdr *iph = ip_hdr(skb); |
353 | 360 | ||
354 | read_lock(&ipip_lock); | 361 | rcu_read_lock(); |
355 | if ((tunnel = ipip_tunnel_lookup(dev_net(skb->dev), | 362 | if ((tunnel = ipip_tunnel_lookup(dev_net(skb->dev), |
356 | iph->saddr, iph->daddr)) != NULL) { | 363 | iph->saddr, iph->daddr)) != NULL) { |
357 | if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { | 364 | if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { |
358 | read_unlock(&ipip_lock); | 365 | rcu_read_unlock(); |
359 | kfree_skb(skb); | 366 | kfree_skb(skb); |
360 | return 0; | 367 | return 0; |
361 | } | 368 | } |
@@ -374,10 +381,10 @@ static int ipip_rcv(struct sk_buff *skb) | |||
374 | nf_reset(skb); | 381 | nf_reset(skb); |
375 | ipip_ecn_decapsulate(iph, skb); | 382 | ipip_ecn_decapsulate(iph, skb); |
376 | netif_rx(skb); | 383 | netif_rx(skb); |
377 | read_unlock(&ipip_lock); | 384 | rcu_read_unlock(); |
378 | return 0; | 385 | return 0; |
379 | } | 386 | } |
380 | read_unlock(&ipip_lock); | 387 | rcu_read_unlock(); |
381 | 388 | ||
382 | return -1; | 389 | return -1; |
383 | } | 390 | } |
@@ -390,7 +397,8 @@ static int ipip_rcv(struct sk_buff *skb) | |||
390 | static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | 397 | static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) |
391 | { | 398 | { |
392 | struct ip_tunnel *tunnel = netdev_priv(dev); | 399 | struct ip_tunnel *tunnel = netdev_priv(dev); |
393 | struct net_device_stats *stats = &tunnel->dev->stats; | 400 | struct net_device_stats *stats = &dev->stats; |
401 | struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); | ||
394 | struct iphdr *tiph = &tunnel->parms.iph; | 402 | struct iphdr *tiph = &tunnel->parms.iph; |
395 | u8 tos = tunnel->parms.iph.tos; | 403 | u8 tos = tunnel->parms.iph.tos; |
396 | __be16 df = tiph->frag_off; | 404 | __be16 df = tiph->frag_off; |
@@ -478,7 +486,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
478 | struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom); | 486 | struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom); |
479 | if (!new_skb) { | 487 | if (!new_skb) { |
480 | ip_rt_put(rt); | 488 | ip_rt_put(rt); |
481 | stats->tx_dropped++; | 489 | txq->tx_dropped++; |
482 | dev_kfree_skb(skb); | 490 | dev_kfree_skb(skb); |
483 | return NETDEV_TX_OK; | 491 | return NETDEV_TX_OK; |
484 | } | 492 | } |
@@ -746,16 +754,19 @@ static struct xfrm_tunnel ipip_handler = { | |||
746 | static const char banner[] __initconst = | 754 | static const char banner[] __initconst = |
747 | KERN_INFO "IPv4 over IPv4 tunneling driver\n"; | 755 | KERN_INFO "IPv4 over IPv4 tunneling driver\n"; |
748 | 756 | ||
749 | static void ipip_destroy_tunnels(struct ipip_net *ipn) | 757 | static void ipip_destroy_tunnels(struct ipip_net *ipn, struct list_head *head) |
750 | { | 758 | { |
751 | int prio; | 759 | int prio; |
752 | 760 | ||
753 | for (prio = 1; prio < 4; prio++) { | 761 | for (prio = 1; prio < 4; prio++) { |
754 | int h; | 762 | int h; |
755 | for (h = 0; h < HASH_SIZE; h++) { | 763 | for (h = 0; h < HASH_SIZE; h++) { |
756 | struct ip_tunnel *t; | 764 | struct ip_tunnel *t = ipn->tunnels[prio][h]; |
757 | while ((t = ipn->tunnels[prio][h]) != NULL) | 765 | |
758 | unregister_netdevice(t->dev); | 766 | while (t != NULL) { |
767 | unregister_netdevice_queue(t->dev, head); | ||
768 | t = t->next; | ||
769 | } | ||
759 | } | 770 | } |
760 | } | 771 | } |
761 | } | 772 | } |
@@ -808,11 +819,13 @@ err_alloc: | |||
808 | static void ipip_exit_net(struct net *net) | 819 | static void ipip_exit_net(struct net *net) |
809 | { | 820 | { |
810 | struct ipip_net *ipn; | 821 | struct ipip_net *ipn; |
822 | LIST_HEAD(list); | ||
811 | 823 | ||
812 | ipn = net_generic(net, ipip_net_id); | 824 | ipn = net_generic(net, ipip_net_id); |
813 | rtnl_lock(); | 825 | rtnl_lock(); |
814 | ipip_destroy_tunnels(ipn); | 826 | ipip_destroy_tunnels(ipn, &list); |
815 | unregister_netdevice(ipn->fb_tunnel_dev); | 827 | unregister_netdevice_queue(ipn->fb_tunnel_dev, &list); |
828 | unregister_netdevice_many(&list); | ||
816 | rtnl_unlock(); | 829 | rtnl_unlock(); |
817 | kfree(ipn); | 830 | kfree(ipn); |
818 | } | 831 | } |
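ipip gets the same rwlock-to-RCU conversion as GRE above; the new piece worth calling out is the batched teardown in ipip_exit_net(): tunnels are queued with unregister_netdevice_queue() and destroyed by a single unregister_netdevice_many(), so the expensive per-device synchronization during rollback is paid once per namespace rather than once per tunnel. A minimal sketch of that pattern, with an illustrative helper name:

/* caller holds RTNL, as ipip_exit_net() does */
static void teardown_batch(struct net_device *devs[], int n)
{
        LIST_HEAD(list);
        int i;

        for (i = 0; i < n; i++)
                unregister_netdevice_queue(devs[i], &list);

        /* one pass of rollback/synchronization for the whole batch */
        unregister_netdevice_many(&list);
}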
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 630a56df7b47..ef4ee45b928f 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c | |||
@@ -275,7 +275,8 @@ failure: | |||
275 | * @notify: Set to 1, if the caller is a notifier_call | 275 | * @notify: Set to 1, if the caller is a notifier_call |
276 | */ | 276 | */ |
277 | 277 | ||
278 | static int vif_delete(struct net *net, int vifi, int notify) | 278 | static int vif_delete(struct net *net, int vifi, int notify, |
279 | struct list_head *head) | ||
279 | { | 280 | { |
280 | struct vif_device *v; | 281 | struct vif_device *v; |
281 | struct net_device *dev; | 282 | struct net_device *dev; |
@@ -319,7 +320,7 @@ static int vif_delete(struct net *net, int vifi, int notify) | |||
319 | } | 320 | } |
320 | 321 | ||
321 | if (v->flags&(VIFF_TUNNEL|VIFF_REGISTER) && !notify) | 322 | if (v->flags&(VIFF_TUNNEL|VIFF_REGISTER) && !notify) |
322 | unregister_netdevice(dev); | 323 | unregister_netdevice_queue(dev, head); |
323 | 324 | ||
324 | dev_put(dev); | 325 | dev_put(dev); |
325 | return 0; | 326 | return 0; |
@@ -469,8 +470,18 @@ static int vif_add(struct net *net, struct vifctl *vifc, int mrtsock) | |||
469 | return err; | 470 | return err; |
470 | } | 471 | } |
471 | break; | 472 | break; |
473 | |||
474 | case VIFF_USE_IFINDEX: | ||
472 | case 0: | 475 | case 0: |
473 | dev = ip_dev_find(net, vifc->vifc_lcl_addr.s_addr); | 476 | if (vifc->vifc_flags == VIFF_USE_IFINDEX) { |
477 | dev = dev_get_by_index(net, vifc->vifc_lcl_ifindex); | ||
478 | if (dev && dev->ip_ptr == NULL) { | ||
479 | dev_put(dev); | ||
480 | return -EADDRNOTAVAIL; | ||
481 | } | ||
482 | } else | ||
483 | dev = ip_dev_find(net, vifc->vifc_lcl_addr.s_addr); | ||
484 | |||
474 | if (!dev) | 485 | if (!dev) |
475 | return -EADDRNOTAVAIL; | 486 | return -EADDRNOTAVAIL; |
476 | err = dev_set_allmulti(dev, 1); | 487 | err = dev_set_allmulti(dev, 1); |
@@ -860,14 +871,16 @@ static int ipmr_mfc_add(struct net *net, struct mfcctl *mfc, int mrtsock) | |||
860 | static void mroute_clean_tables(struct net *net) | 871 | static void mroute_clean_tables(struct net *net) |
861 | { | 872 | { |
862 | int i; | 873 | int i; |
874 | LIST_HEAD(list); | ||
863 | 875 | ||
864 | /* | 876 | /* |
865 | * Shut down all active vif entries | 877 | * Shut down all active vif entries |
866 | */ | 878 | */ |
867 | for (i = 0; i < net->ipv4.maxvif; i++) { | 879 | for (i = 0; i < net->ipv4.maxvif; i++) { |
868 | if (!(net->ipv4.vif_table[i].flags&VIFF_STATIC)) | 880 | if (!(net->ipv4.vif_table[i].flags&VIFF_STATIC)) |
869 | vif_delete(net, i, 0); | 881 | vif_delete(net, i, 0, &list); |
870 | } | 882 | } |
883 | unregister_netdevice_many(&list); | ||
871 | 884 | ||
872 | /* | 885 | /* |
873 | * Wipe the cache | 886 | * Wipe the cache |
@@ -946,7 +959,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi | |||
946 | switch (optname) { | 959 | switch (optname) { |
947 | case MRT_INIT: | 960 | case MRT_INIT: |
948 | if (sk->sk_type != SOCK_RAW || | 961 | if (sk->sk_type != SOCK_RAW || |
949 | inet_sk(sk)->num != IPPROTO_IGMP) | 962 | inet_sk(sk)->inet_num != IPPROTO_IGMP) |
950 | return -EOPNOTSUPP; | 963 | return -EOPNOTSUPP; |
951 | if (optlen != sizeof(int)) | 964 | if (optlen != sizeof(int)) |
952 | return -ENOPROTOOPT; | 965 | return -ENOPROTOOPT; |
@@ -983,7 +996,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi | |||
983 | if (optname == MRT_ADD_VIF) { | 996 | if (optname == MRT_ADD_VIF) { |
984 | ret = vif_add(net, &vif, sk == net->ipv4.mroute_sk); | 997 | ret = vif_add(net, &vif, sk == net->ipv4.mroute_sk); |
985 | } else { | 998 | } else { |
986 | ret = vif_delete(net, vif.vifc_vifi, 0); | 999 | ret = vif_delete(net, vif.vifc_vifi, 0, NULL); |
987 | } | 1000 | } |
988 | rtnl_unlock(); | 1001 | rtnl_unlock(); |
989 | return ret; | 1002 | return ret; |
@@ -1146,6 +1159,7 @@ static int ipmr_device_event(struct notifier_block *this, unsigned long event, v | |||
1146 | struct net *net = dev_net(dev); | 1159 | struct net *net = dev_net(dev); |
1147 | struct vif_device *v; | 1160 | struct vif_device *v; |
1148 | int ct; | 1161 | int ct; |
1162 | LIST_HEAD(list); | ||
1149 | 1163 | ||
1150 | if (!net_eq(dev_net(dev), net)) | 1164 | if (!net_eq(dev_net(dev), net)) |
1151 | return NOTIFY_DONE; | 1165 | return NOTIFY_DONE; |
@@ -1155,8 +1169,9 @@ static int ipmr_device_event(struct notifier_block *this, unsigned long event, v | |||
1155 | v = &net->ipv4.vif_table[0]; | 1169 | v = &net->ipv4.vif_table[0]; |
1156 | for (ct = 0; ct < net->ipv4.maxvif; ct++, v++) { | 1170 | for (ct = 0; ct < net->ipv4.maxvif; ct++, v++) { |
1157 | if (v->dev == dev) | 1171 | if (v->dev == dev) |
1158 | vif_delete(net, ct, 1); | 1172 | vif_delete(net, ct, 1, &list); |
1159 | } | 1173 | } |
1174 | unregister_netdevice_many(&list); | ||
1160 | return NOTIFY_DONE; | 1175 | return NOTIFY_DONE; |
1161 | } | 1176 | } |
1162 | 1177 | ||
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c index aa95bb82ee6c..9cd423ffafa8 100644 --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c | |||
@@ -255,10 +255,10 @@ getorigdst(struct sock *sk, int optval, void __user *user, int *len) | |||
255 | struct nf_conntrack_tuple tuple; | 255 | struct nf_conntrack_tuple tuple; |
256 | 256 | ||
257 | memset(&tuple, 0, sizeof(tuple)); | 257 | memset(&tuple, 0, sizeof(tuple)); |
258 | tuple.src.u3.ip = inet->rcv_saddr; | 258 | tuple.src.u3.ip = inet->inet_rcv_saddr; |
259 | tuple.src.u.tcp.port = inet->sport; | 259 | tuple.src.u.tcp.port = inet->inet_sport; |
260 | tuple.dst.u3.ip = inet->daddr; | 260 | tuple.dst.u3.ip = inet->inet_daddr; |
261 | tuple.dst.u.tcp.port = inet->dport; | 261 | tuple.dst.u.tcp.port = inet->inet_dport; |
262 | tuple.src.l3num = PF_INET; | 262 | tuple.src.l3num = PF_INET; |
263 | tuple.dst.protonum = sk->sk_protocol; | 263 | tuple.dst.protonum = sk->sk_protocol; |
264 | 264 | ||
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index ab996f9c0fe0..ce154b47f1da 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c | |||
@@ -87,7 +87,7 @@ void raw_hash_sk(struct sock *sk) | |||
87 | struct raw_hashinfo *h = sk->sk_prot->h.raw_hash; | 87 | struct raw_hashinfo *h = sk->sk_prot->h.raw_hash; |
88 | struct hlist_head *head; | 88 | struct hlist_head *head; |
89 | 89 | ||
90 | head = &h->ht[inet_sk(sk)->num & (RAW_HTABLE_SIZE - 1)]; | 90 | head = &h->ht[inet_sk(sk)->inet_num & (RAW_HTABLE_SIZE - 1)]; |
91 | 91 | ||
92 | write_lock_bh(&h->lock); | 92 | write_lock_bh(&h->lock); |
93 | sk_add_node(sk, head); | 93 | sk_add_node(sk, head); |
@@ -115,9 +115,9 @@ static struct sock *__raw_v4_lookup(struct net *net, struct sock *sk, | |||
115 | sk_for_each_from(sk, node) { | 115 | sk_for_each_from(sk, node) { |
116 | struct inet_sock *inet = inet_sk(sk); | 116 | struct inet_sock *inet = inet_sk(sk); |
117 | 117 | ||
118 | if (net_eq(sock_net(sk), net) && inet->num == num && | 118 | if (net_eq(sock_net(sk), net) && inet->inet_num == num && |
119 | !(inet->daddr && inet->daddr != raddr) && | 119 | !(inet->inet_daddr && inet->inet_daddr != raddr) && |
120 | !(inet->rcv_saddr && inet->rcv_saddr != laddr) && | 120 | !(inet->inet_rcv_saddr && inet->inet_rcv_saddr != laddr) && |
121 | !(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)) | 121 | !(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)) |
122 | goto found; /* gotcha */ | 122 | goto found; /* gotcha */ |
123 | } | 123 | } |
@@ -292,7 +292,6 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb) | |||
292 | /* Charge it to the socket. */ | 292 | /* Charge it to the socket. */ |
293 | 293 | ||
294 | if (sock_queue_rcv_skb(sk, skb) < 0) { | 294 | if (sock_queue_rcv_skb(sk, skb) < 0) { |
295 | atomic_inc(&sk->sk_drops); | ||
296 | kfree_skb(skb); | 295 | kfree_skb(skb); |
297 | return NET_RX_DROP; | 296 | return NET_RX_DROP; |
298 | } | 297 | } |
@@ -327,7 +326,7 @@ static int raw_send_hdrinc(struct sock *sk, void *from, size_t length, | |||
327 | int err; | 326 | int err; |
328 | 327 | ||
329 | if (length > rt->u.dst.dev->mtu) { | 328 | if (length > rt->u.dst.dev->mtu) { |
330 | ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, | 329 | ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->inet_dport, |
331 | rt->u.dst.dev->mtu); | 330 | rt->u.dst.dev->mtu); |
332 | return -EMSGSIZE; | 331 | return -EMSGSIZE; |
333 | } | 332 | } |
@@ -500,10 +499,10 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
500 | err = -EDESTADDRREQ; | 499 | err = -EDESTADDRREQ; |
501 | if (sk->sk_state != TCP_ESTABLISHED) | 500 | if (sk->sk_state != TCP_ESTABLISHED) |
502 | goto out; | 501 | goto out; |
503 | daddr = inet->daddr; | 502 | daddr = inet->inet_daddr; |
504 | } | 503 | } |
505 | 504 | ||
506 | ipc.addr = inet->saddr; | 505 | ipc.addr = inet->inet_saddr; |
507 | ipc.opt = NULL; | 506 | ipc.opt = NULL; |
508 | ipc.shtx.flags = 0; | 507 | ipc.shtx.flags = 0; |
509 | ipc.oif = sk->sk_bound_dev_if; | 508 | ipc.oif = sk->sk_bound_dev_if; |
@@ -645,9 +644,9 @@ static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
645 | if (addr->sin_addr.s_addr && chk_addr_ret != RTN_LOCAL && | 644 | if (addr->sin_addr.s_addr && chk_addr_ret != RTN_LOCAL && |
646 | chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST) | 645 | chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST) |
647 | goto out; | 646 | goto out; |
648 | inet->rcv_saddr = inet->saddr = addr->sin_addr.s_addr; | 647 | inet->inet_rcv_saddr = inet->inet_saddr = addr->sin_addr.s_addr; |
649 | if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST) | 648 | if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST) |
650 | inet->saddr = 0; /* Use device */ | 649 | inet->inet_saddr = 0; /* Use device */ |
651 | sk_dst_reset(sk); | 650 | sk_dst_reset(sk); |
652 | ret = 0; | 651 | ret = 0; |
653 | out: return ret; | 652 | out: return ret; |
@@ -692,7 +691,7 @@ static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
692 | if (err) | 691 | if (err) |
693 | goto done; | 692 | goto done; |
694 | 693 | ||
695 | sock_recv_timestamp(msg, sk, skb); | 694 | sock_recv_ts_and_drops(msg, sk, skb); |
696 | 695 | ||
697 | /* Copy the address. */ | 696 | /* Copy the address. */ |
698 | if (sin) { | 697 | if (sin) { |
@@ -717,7 +716,7 @@ static int raw_init(struct sock *sk) | |||
717 | { | 716 | { |
718 | struct raw_sock *rp = raw_sk(sk); | 717 | struct raw_sock *rp = raw_sk(sk); |
719 | 718 | ||
720 | if (inet_sk(sk)->num == IPPROTO_ICMP) | 719 | if (inet_sk(sk)->inet_num == IPPROTO_ICMP) |
721 | memset(&rp->filter, 0, sizeof(rp->filter)); | 720 | memset(&rp->filter, 0, sizeof(rp->filter)); |
722 | return 0; | 721 | return 0; |
723 | } | 722 | } |
@@ -754,7 +753,7 @@ static int do_raw_setsockopt(struct sock *sk, int level, int optname, | |||
754 | char __user *optval, unsigned int optlen) | 753 | char __user *optval, unsigned int optlen) |
755 | { | 754 | { |
756 | if (optname == ICMP_FILTER) { | 755 | if (optname == ICMP_FILTER) { |
757 | if (inet_sk(sk)->num != IPPROTO_ICMP) | 756 | if (inet_sk(sk)->inet_num != IPPROTO_ICMP) |
758 | return -EOPNOTSUPP; | 757 | return -EOPNOTSUPP; |
759 | else | 758 | else |
760 | return raw_seticmpfilter(sk, optval, optlen); | 759 | return raw_seticmpfilter(sk, optval, optlen); |
@@ -784,7 +783,7 @@ static int do_raw_getsockopt(struct sock *sk, int level, int optname, | |||
784 | char __user *optval, int __user *optlen) | 783 | char __user *optval, int __user *optlen) |
785 | { | 784 | { |
786 | if (optname == ICMP_FILTER) { | 785 | if (optname == ICMP_FILTER) { |
787 | if (inet_sk(sk)->num != IPPROTO_ICMP) | 786 | if (inet_sk(sk)->inet_num != IPPROTO_ICMP) |
788 | return -EOPNOTSUPP; | 787 | return -EOPNOTSUPP; |
789 | else | 788 | else |
790 | return raw_geticmpfilter(sk, optval, optlen); | 789 | return raw_geticmpfilter(sk, optval, optlen); |
@@ -943,10 +942,10 @@ EXPORT_SYMBOL_GPL(raw_seq_stop); | |||
943 | static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i) | 942 | static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i) |
944 | { | 943 | { |
945 | struct inet_sock *inet = inet_sk(sp); | 944 | struct inet_sock *inet = inet_sk(sp); |
946 | __be32 dest = inet->daddr, | 945 | __be32 dest = inet->inet_daddr, |
947 | src = inet->rcv_saddr; | 946 | src = inet->inet_rcv_saddr; |
948 | __u16 destp = 0, | 947 | __u16 destp = 0, |
949 | srcp = inet->num; | 948 | srcp = inet->inet_num; |
950 | 949 | ||
951 | seq_printf(seq, "%4d: %08X:%04X %08X:%04X" | 950 | seq_printf(seq, "%4d: %08X:%04X %08X:%04X" |
952 | " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d\n", | 951 | " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d\n", |
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index bb4199252026..68fb22702051 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -1628,9 +1628,6 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph, | |||
1628 | __be32 daddr = iph->daddr; | 1628 | __be32 daddr = iph->daddr; |
1629 | unsigned short est_mtu = 0; | 1629 | unsigned short est_mtu = 0; |
1630 | 1630 | ||
1631 | if (ipv4_config.no_pmtu_disc) | ||
1632 | return 0; | ||
1633 | |||
1634 | for (k = 0; k < 2; k++) { | 1631 | for (k = 0; k < 2; k++) { |
1635 | for (i = 0; i < 2; i++) { | 1632 | for (i = 0; i < 2; i++) { |
1636 | unsigned hash = rt_hash(daddr, skeys[i], ikeys[k], | 1633 | unsigned hash = rt_hash(daddr, skeys[i], ikeys[k], |
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index a6e0e077ac33..3146cc401748 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c | |||
@@ -276,13 +276,6 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, | |||
276 | 276 | ||
277 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV); | 277 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV); |
278 | 278 | ||
279 | /* check for timestamp cookie support */ | ||
280 | memset(&tcp_opt, 0, sizeof(tcp_opt)); | ||
281 | tcp_parse_options(skb, &tcp_opt, 0); | ||
282 | |||
283 | if (tcp_opt.saw_tstamp) | ||
284 | cookie_check_timestamp(&tcp_opt); | ||
285 | |||
286 | ret = NULL; | 279 | ret = NULL; |
287 | req = inet_reqsk_alloc(&tcp_request_sock_ops); /* for safety */ | 280 | req = inet_reqsk_alloc(&tcp_request_sock_ops); /* for safety */ |
288 | if (!req) | 281 | if (!req) |
@@ -298,12 +291,6 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, | |||
298 | ireq->loc_addr = ip_hdr(skb)->daddr; | 291 | ireq->loc_addr = ip_hdr(skb)->daddr; |
299 | ireq->rmt_addr = ip_hdr(skb)->saddr; | 292 | ireq->rmt_addr = ip_hdr(skb)->saddr; |
300 | ireq->ecn_ok = 0; | 293 | ireq->ecn_ok = 0; |
301 | ireq->snd_wscale = tcp_opt.snd_wscale; | ||
302 | ireq->rcv_wscale = tcp_opt.rcv_wscale; | ||
303 | ireq->sack_ok = tcp_opt.sack_ok; | ||
304 | ireq->wscale_ok = tcp_opt.wscale_ok; | ||
305 | ireq->tstamp_ok = tcp_opt.saw_tstamp; | ||
306 | req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0; | ||
307 | 294 | ||
308 | /* We throwed the options of the initial SYN away, so we hope | 295 | /* We throwed the options of the initial SYN away, so we hope |
309 | * the ACK carries the same options again (see RFC1122 4.2.3.8) | 296 | * the ACK carries the same options again (see RFC1122 4.2.3.8) |
@@ -333,7 +320,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, | |||
333 | * no easy way to do this. | 320 | * no easy way to do this. |
334 | */ | 321 | */ |
335 | { | 322 | { |
336 | struct flowi fl = { .nl_u = { .ip4_u = | 323 | struct flowi fl = { .mark = sk->sk_mark, |
324 | .nl_u = { .ip4_u = | ||
337 | { .daddr = ((opt && opt->srr) ? | 325 | { .daddr = ((opt && opt->srr) ? |
338 | opt->faddr : | 326 | opt->faddr : |
339 | ireq->rmt_addr), | 327 | ireq->rmt_addr), |
@@ -351,6 +339,20 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, | |||
351 | } | 339 | } |
352 | } | 340 | } |
353 | 341 | ||
342 | /* check for timestamp cookie support */ | ||
343 | memset(&tcp_opt, 0, sizeof(tcp_opt)); | ||
344 | tcp_parse_options(skb, &tcp_opt, 0, &rt->u.dst); | ||
345 | |||
346 | if (tcp_opt.saw_tstamp) | ||
347 | cookie_check_timestamp(&tcp_opt); | ||
348 | |||
349 | ireq->snd_wscale = tcp_opt.snd_wscale; | ||
350 | ireq->rcv_wscale = tcp_opt.rcv_wscale; | ||
351 | ireq->sack_ok = tcp_opt.sack_ok; | ||
352 | ireq->wscale_ok = tcp_opt.wscale_ok; | ||
353 | ireq->tstamp_ok = tcp_opt.saw_tstamp; | ||
354 | req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0; | ||
355 | |||
354 | /* Try to redo what tcp_v4_send_synack did. */ | 356 | /* Try to redo what tcp_v4_send_synack did. */ |
355 | req->window_clamp = tp->window_clamp ? :dst_metric(&rt->u.dst, RTAX_WINDOW); | 357 | req->window_clamp = tp->window_clamp ? :dst_metric(&rt->u.dst, RTAX_WINDOW); |
356 | 358 | ||
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 98440ad82558..e0cfa633680a 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -2037,7 +2037,7 @@ int tcp_disconnect(struct sock *sk, int flags) | |||
2037 | __skb_queue_purge(&sk->sk_async_wait_queue); | 2037 | __skb_queue_purge(&sk->sk_async_wait_queue); |
2038 | #endif | 2038 | #endif |
2039 | 2039 | ||
2040 | inet->dport = 0; | 2040 | inet->inet_dport = 0; |
2041 | 2041 | ||
2042 | if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) | 2042 | if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) |
2043 | inet_reset_saddr(sk); | 2043 | inet_reset_saddr(sk); |
@@ -2061,7 +2061,7 @@ int tcp_disconnect(struct sock *sk, int flags) | |||
2061 | memset(&tp->rx_opt, 0, sizeof(tp->rx_opt)); | 2061 | memset(&tp->rx_opt, 0, sizeof(tp->rx_opt)); |
2062 | __sk_dst_reset(sk); | 2062 | __sk_dst_reset(sk); |
2063 | 2063 | ||
2064 | WARN_ON(inet->num && !icsk->icsk_bind_hash); | 2064 | WARN_ON(inet->inet_num && !icsk->icsk_bind_hash); |
2065 | 2065 | ||
2066 | sk->sk_error_report(sk); | 2066 | sk->sk_error_report(sk); |
2067 | return err; | 2067 | return err; |
@@ -2898,11 +2898,10 @@ void __init tcp_init(void) | |||
2898 | (totalram_pages >= 128 * 1024) ? | 2898 | (totalram_pages >= 128 * 1024) ? |
2899 | 13 : 15, | 2899 | 13 : 15, |
2900 | 0, | 2900 | 0, |
2901 | &tcp_hashinfo.ehash_size, | ||
2902 | NULL, | 2901 | NULL, |
2902 | &tcp_hashinfo.ehash_mask, | ||
2903 | thash_entries ? 0 : 512 * 1024); | 2903 | thash_entries ? 0 : 512 * 1024); |
2904 | tcp_hashinfo.ehash_size = 1 << tcp_hashinfo.ehash_size; | 2904 | for (i = 0; i <= tcp_hashinfo.ehash_mask; i++) { |
2905 | for (i = 0; i < tcp_hashinfo.ehash_size; i++) { | ||
2906 | INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i); | 2905 | INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i); |
2907 | INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].twchain, i); | 2906 | INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].twchain, i); |
2908 | } | 2907 | } |
@@ -2911,7 +2910,7 @@ void __init tcp_init(void) | |||
2911 | tcp_hashinfo.bhash = | 2910 | tcp_hashinfo.bhash = |
2912 | alloc_large_system_hash("TCP bind", | 2911 | alloc_large_system_hash("TCP bind", |
2913 | sizeof(struct inet_bind_hashbucket), | 2912 | sizeof(struct inet_bind_hashbucket), |
2914 | tcp_hashinfo.ehash_size, | 2913 | tcp_hashinfo.ehash_mask + 1, |
2915 | (totalram_pages >= 128 * 1024) ? | 2914 | (totalram_pages >= 128 * 1024) ? |
2916 | 13 : 15, | 2915 | 13 : 15, |
2917 | 0, | 2916 | 0, |
@@ -2966,8 +2965,8 @@ void __init tcp_init(void) | |||
2966 | sysctl_tcp_rmem[2] = max(87380, max_share); | 2965 | sysctl_tcp_rmem[2] = max(87380, max_share); |
2967 | 2966 | ||
2968 | printk(KERN_INFO "TCP: Hash tables configured " | 2967 | printk(KERN_INFO "TCP: Hash tables configured " |
2969 | "(established %d bind %d)\n", | 2968 | "(established %u bind %u)\n", |
2970 | tcp_hashinfo.ehash_size, tcp_hashinfo.bhash_size); | 2969 | tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size); |
2971 | 2970 | ||
2972 | tcp_register_congestion_control(&tcp_reno); | 2971 | tcp_register_congestion_control(&tcp_reno); |
2973 | } | 2972 | } |
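The tcp.c hunks above replace tcp_hashinfo.ehash_size with tcp_hashinfo.ehash_mask, so bucket selection becomes a mask operation and full table walks run while the index stays <= mask. A minimal standalone C sketch of that indexing, assuming a power-of-two table; the names and sizes here are illustrative, not kernel symbols:

#include <stdio.h>

/* With a power-of-two table of (mask + 1) buckets, "hash & mask" is
 * equivalent to "hash % size", and iterating while i <= mask visits
 * every bucket exactly once. */
int main(void)
{
	unsigned int mask = (1U << 4) - 1;	/* 16 buckets -> mask = 15 */
	unsigned int hash = 0xdeadbeefU;
	unsigned int bucket = hash & mask;	/* same result as hash % (mask + 1) */
	unsigned int i, walked = 0;

	for (i = 0; i <= mask; i++)		/* full walk, as in the tcp_init() hunk above */
		walked++;

	printf("bucket=%u walked=%u\n", bucket, walked);
	return 0;
}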
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index d86784be7ab3..ba0eab65fe80 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -2300,7 +2300,7 @@ static inline int tcp_fackets_out(struct tcp_sock *tp) | |||
2300 | * they differ. Since neither occurs due to loss, TCP should really | 2300 | * they differ. Since neither occurs due to loss, TCP should really |
2301 | * ignore them. | 2301 | * ignore them. |
2302 | */ | 2302 | */ |
2303 | static inline int tcp_dupack_heurestics(struct tcp_sock *tp) | 2303 | static inline int tcp_dupack_heuristics(struct tcp_sock *tp) |
2304 | { | 2304 | { |
2305 | return tcp_is_fack(tp) ? tp->fackets_out : tp->sacked_out + 1; | 2305 | return tcp_is_fack(tp) ? tp->fackets_out : tp->sacked_out + 1; |
2306 | } | 2306 | } |
@@ -2425,7 +2425,7 @@ static int tcp_time_to_recover(struct sock *sk) | |||
2425 | return 1; | 2425 | return 1; |
2426 | 2426 | ||
2427 | /* Not-A-Trick#2 : Classic rule... */ | 2427 | /* Not-A-Trick#2 : Classic rule... */ |
2428 | if (tcp_dupack_heurestics(tp) > tp->reordering) | 2428 | if (tcp_dupack_heuristics(tp) > tp->reordering) |
2429 | return 1; | 2429 | return 1; |
2430 | 2430 | ||
2431 | /* Trick#3 : when we use RFC2988 timer restart, fast | 2431 | /* Trick#3 : when we use RFC2988 timer restart, fast |
@@ -3698,12 +3698,14 @@ old_ack: | |||
3698 | * the fast version below fails. | 3698 | * the fast version below fails. |
3699 | */ | 3699 | */ |
3700 | void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx, | 3700 | void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx, |
3701 | int estab) | 3701 | int estab, struct dst_entry *dst) |
3702 | { | 3702 | { |
3703 | unsigned char *ptr; | 3703 | unsigned char *ptr; |
3704 | struct tcphdr *th = tcp_hdr(skb); | 3704 | struct tcphdr *th = tcp_hdr(skb); |
3705 | int length = (th->doff * 4) - sizeof(struct tcphdr); | 3705 | int length = (th->doff * 4) - sizeof(struct tcphdr); |
3706 | 3706 | ||
3707 | BUG_ON(!estab && !dst); | ||
3708 | |||
3707 | ptr = (unsigned char *)(th + 1); | 3709 | ptr = (unsigned char *)(th + 1); |
3708 | opt_rx->saw_tstamp = 0; | 3710 | opt_rx->saw_tstamp = 0; |
3709 | 3711 | ||
@@ -3737,7 +3739,8 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx, | |||
3737 | break; | 3739 | break; |
3738 | case TCPOPT_WINDOW: | 3740 | case TCPOPT_WINDOW: |
3739 | if (opsize == TCPOLEN_WINDOW && th->syn && | 3741 | if (opsize == TCPOLEN_WINDOW && th->syn && |
3740 | !estab && sysctl_tcp_window_scaling) { | 3742 | !estab && sysctl_tcp_window_scaling && |
3743 | !dst_feature(dst, RTAX_FEATURE_NO_WSCALE)) { | ||
3741 | __u8 snd_wscale = *(__u8 *)ptr; | 3744 | __u8 snd_wscale = *(__u8 *)ptr; |
3742 | opt_rx->wscale_ok = 1; | 3745 | opt_rx->wscale_ok = 1; |
3743 | if (snd_wscale > 14) { | 3746 | if (snd_wscale > 14) { |
@@ -3753,7 +3756,8 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx, | |||
3753 | case TCPOPT_TIMESTAMP: | 3756 | case TCPOPT_TIMESTAMP: |
3754 | if ((opsize == TCPOLEN_TIMESTAMP) && | 3757 | if ((opsize == TCPOLEN_TIMESTAMP) && |
3755 | ((estab && opt_rx->tstamp_ok) || | 3758 | ((estab && opt_rx->tstamp_ok) || |
3756 | (!estab && sysctl_tcp_timestamps))) { | 3759 | (!estab && sysctl_tcp_timestamps && |
3760 | !dst_feature(dst, RTAX_FEATURE_NO_TSTAMP)))) { | ||
3757 | opt_rx->saw_tstamp = 1; | 3761 | opt_rx->saw_tstamp = 1; |
3758 | opt_rx->rcv_tsval = get_unaligned_be32(ptr); | 3762 | opt_rx->rcv_tsval = get_unaligned_be32(ptr); |
3759 | opt_rx->rcv_tsecr = get_unaligned_be32(ptr + 4); | 3763 | opt_rx->rcv_tsecr = get_unaligned_be32(ptr + 4); |
@@ -3761,7 +3765,8 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx, | |||
3761 | break; | 3765 | break; |
3762 | case TCPOPT_SACK_PERM: | 3766 | case TCPOPT_SACK_PERM: |
3763 | if (opsize == TCPOLEN_SACK_PERM && th->syn && | 3767 | if (opsize == TCPOLEN_SACK_PERM && th->syn && |
3764 | !estab && sysctl_tcp_sack) { | 3768 | !estab && sysctl_tcp_sack && |
3769 | !dst_feature(dst, RTAX_FEATURE_NO_SACK)) { | ||
3765 | opt_rx->sack_ok = 1; | 3770 | opt_rx->sack_ok = 1; |
3766 | tcp_sack_reset(opt_rx); | 3771 | tcp_sack_reset(opt_rx); |
3767 | } | 3772 | } |
@@ -3820,7 +3825,7 @@ static int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th, | |||
3820 | if (tcp_parse_aligned_timestamp(tp, th)) | 3825 | if (tcp_parse_aligned_timestamp(tp, th)) |
3821 | return 1; | 3826 | return 1; |
3822 | } | 3827 | } |
3823 | tcp_parse_options(skb, &tp->rx_opt, 1); | 3828 | tcp_parse_options(skb, &tp->rx_opt, 1, NULL); |
3824 | return 1; | 3829 | return 1; |
3825 | } | 3830 | } |
3826 | 3831 | ||
@@ -4075,8 +4080,10 @@ static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq, | |||
4075 | static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq) | 4080 | static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq) |
4076 | { | 4081 | { |
4077 | struct tcp_sock *tp = tcp_sk(sk); | 4082 | struct tcp_sock *tp = tcp_sk(sk); |
4083 | struct dst_entry *dst = __sk_dst_get(sk); | ||
4078 | 4084 | ||
4079 | if (tcp_is_sack(tp) && sysctl_tcp_dsack) { | 4085 | if (tcp_is_sack(tp) && sysctl_tcp_dsack && |
4086 | !dst_feature(dst, RTAX_FEATURE_NO_DSACK)) { | ||
4080 | int mib_idx; | 4087 | int mib_idx; |
4081 | 4088 | ||
4082 | if (before(seq, tp->rcv_nxt)) | 4089 | if (before(seq, tp->rcv_nxt)) |
@@ -4105,13 +4112,15 @@ static void tcp_dsack_extend(struct sock *sk, u32 seq, u32 end_seq) | |||
4105 | static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb) | 4112 | static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb) |
4106 | { | 4113 | { |
4107 | struct tcp_sock *tp = tcp_sk(sk); | 4114 | struct tcp_sock *tp = tcp_sk(sk); |
4115 | struct dst_entry *dst = __sk_dst_get(sk); | ||
4108 | 4116 | ||
4109 | if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && | 4117 | if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && |
4110 | before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { | 4118 | before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { |
4111 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); | 4119 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); |
4112 | tcp_enter_quickack_mode(sk); | 4120 | tcp_enter_quickack_mode(sk); |
4113 | 4121 | ||
4114 | if (tcp_is_sack(tp) && sysctl_tcp_dsack) { | 4122 | if (tcp_is_sack(tp) && sysctl_tcp_dsack && |
4123 | !dst_feature(dst, RTAX_FEATURE_NO_DSACK)) { | ||
4115 | u32 end_seq = TCP_SKB_CB(skb)->end_seq; | 4124 | u32 end_seq = TCP_SKB_CB(skb)->end_seq; |
4116 | 4125 | ||
4117 | if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) | 4126 | if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) |
@@ -5364,8 +5373,9 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, | |||
5364 | struct tcp_sock *tp = tcp_sk(sk); | 5373 | struct tcp_sock *tp = tcp_sk(sk); |
5365 | struct inet_connection_sock *icsk = inet_csk(sk); | 5374 | struct inet_connection_sock *icsk = inet_csk(sk); |
5366 | int saved_clamp = tp->rx_opt.mss_clamp; | 5375 | int saved_clamp = tp->rx_opt.mss_clamp; |
5376 | struct dst_entry *dst = __sk_dst_get(sk); | ||
5367 | 5377 | ||
5368 | tcp_parse_options(skb, &tp->rx_opt, 0); | 5378 | tcp_parse_options(skb, &tp->rx_opt, 0, dst); |
5369 | 5379 | ||
5370 | if (th->ack) { | 5380 | if (th->ack) { |
5371 | /* rfc793: | 5381 | /* rfc793: |
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 7cda24b53f61..657ae334f125 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -165,10 +165,10 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
165 | nexthop = inet->opt->faddr; | 165 | nexthop = inet->opt->faddr; |
166 | } | 166 | } |
167 | 167 | ||
168 | tmp = ip_route_connect(&rt, nexthop, inet->saddr, | 168 | tmp = ip_route_connect(&rt, nexthop, inet->inet_saddr, |
169 | RT_CONN_FLAGS(sk), sk->sk_bound_dev_if, | 169 | RT_CONN_FLAGS(sk), sk->sk_bound_dev_if, |
170 | IPPROTO_TCP, | 170 | IPPROTO_TCP, |
171 | inet->sport, usin->sin_port, sk, 1); | 171 | inet->inet_sport, usin->sin_port, sk, 1); |
172 | if (tmp < 0) { | 172 | if (tmp < 0) { |
173 | if (tmp == -ENETUNREACH) | 173 | if (tmp == -ENETUNREACH) |
174 | IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES); | 174 | IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES); |
@@ -183,11 +183,11 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
183 | if (!inet->opt || !inet->opt->srr) | 183 | if (!inet->opt || !inet->opt->srr) |
184 | daddr = rt->rt_dst; | 184 | daddr = rt->rt_dst; |
185 | 185 | ||
186 | if (!inet->saddr) | 186 | if (!inet->inet_saddr) |
187 | inet->saddr = rt->rt_src; | 187 | inet->inet_saddr = rt->rt_src; |
188 | inet->rcv_saddr = inet->saddr; | 188 | inet->inet_rcv_saddr = inet->inet_saddr; |
189 | 189 | ||
190 | if (tp->rx_opt.ts_recent_stamp && inet->daddr != daddr) { | 190 | if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) { |
191 | /* Reset inherited state */ | 191 | /* Reset inherited state */ |
192 | tp->rx_opt.ts_recent = 0; | 192 | tp->rx_opt.ts_recent = 0; |
193 | tp->rx_opt.ts_recent_stamp = 0; | 193 | tp->rx_opt.ts_recent_stamp = 0; |
@@ -210,8 +210,8 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
210 | } | 210 | } |
211 | } | 211 | } |
212 | 212 | ||
213 | inet->dport = usin->sin_port; | 213 | inet->inet_dport = usin->sin_port; |
214 | inet->daddr = daddr; | 214 | inet->inet_daddr = daddr; |
215 | 215 | ||
216 | inet_csk(sk)->icsk_ext_hdr_len = 0; | 216 | inet_csk(sk)->icsk_ext_hdr_len = 0; |
217 | if (inet->opt) | 217 | if (inet->opt) |
@@ -230,7 +230,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
230 | goto failure; | 230 | goto failure; |
231 | 231 | ||
232 | err = ip_route_newports(&rt, IPPROTO_TCP, | 232 | err = ip_route_newports(&rt, IPPROTO_TCP, |
233 | inet->sport, inet->dport, sk); | 233 | inet->inet_sport, inet->inet_dport, sk); |
234 | if (err) | 234 | if (err) |
235 | goto failure; | 235 | goto failure; |
236 | 236 | ||
@@ -239,12 +239,12 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
239 | sk_setup_caps(sk, &rt->u.dst); | 239 | sk_setup_caps(sk, &rt->u.dst); |
240 | 240 | ||
241 | if (!tp->write_seq) | 241 | if (!tp->write_seq) |
242 | tp->write_seq = secure_tcp_sequence_number(inet->saddr, | 242 | tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr, |
243 | inet->daddr, | 243 | inet->inet_daddr, |
244 | inet->sport, | 244 | inet->inet_sport, |
245 | usin->sin_port); | 245 | usin->sin_port); |
246 | 246 | ||
247 | inet->id = tp->write_seq ^ jiffies; | 247 | inet->inet_id = tp->write_seq ^ jiffies; |
248 | 248 | ||
249 | err = tcp_connect(sk); | 249 | err = tcp_connect(sk); |
250 | rt = NULL; | 250 | rt = NULL; |
@@ -261,7 +261,7 @@ failure: | |||
261 | tcp_set_state(sk, TCP_CLOSE); | 261 | tcp_set_state(sk, TCP_CLOSE); |
262 | ip_rt_put(rt); | 262 | ip_rt_put(rt); |
263 | sk->sk_route_caps = 0; | 263 | sk->sk_route_caps = 0; |
264 | inet->dport = 0; | 264 | inet->inet_dport = 0; |
265 | return err; | 265 | return err; |
266 | } | 266 | } |
267 | 267 | ||
@@ -520,12 +520,13 @@ void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb) | |||
520 | struct tcphdr *th = tcp_hdr(skb); | 520 | struct tcphdr *th = tcp_hdr(skb); |
521 | 521 | ||
522 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 522 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
523 | th->check = ~tcp_v4_check(len, inet->saddr, | 523 | th->check = ~tcp_v4_check(len, inet->inet_saddr, |
524 | inet->daddr, 0); | 524 | inet->inet_daddr, 0); |
525 | skb->csum_start = skb_transport_header(skb) - skb->head; | 525 | skb->csum_start = skb_transport_header(skb) - skb->head; |
526 | skb->csum_offset = offsetof(struct tcphdr, check); | 526 | skb->csum_offset = offsetof(struct tcphdr, check); |
527 | } else { | 527 | } else { |
528 | th->check = tcp_v4_check(len, inet->saddr, inet->daddr, | 528 | th->check = tcp_v4_check(len, inet->inet_saddr, |
529 | inet->inet_daddr, | ||
529 | csum_partial(th, | 530 | csum_partial(th, |
530 | th->doff << 2, | 531 | th->doff << 2, |
531 | skb->csum)); | 532 | skb->csum)); |
@@ -848,7 +849,7 @@ static struct tcp_md5sig_key * | |||
848 | struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk, | 849 | struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk, |
849 | struct sock *addr_sk) | 850 | struct sock *addr_sk) |
850 | { | 851 | { |
851 | return tcp_v4_md5_do_lookup(sk, inet_sk(addr_sk)->daddr); | 852 | return tcp_v4_md5_do_lookup(sk, inet_sk(addr_sk)->inet_daddr); |
852 | } | 853 | } |
853 | 854 | ||
854 | EXPORT_SYMBOL(tcp_v4_md5_lookup); | 855 | EXPORT_SYMBOL(tcp_v4_md5_lookup); |
@@ -923,7 +924,7 @@ EXPORT_SYMBOL(tcp_v4_md5_do_add); | |||
923 | static int tcp_v4_md5_add_func(struct sock *sk, struct sock *addr_sk, | 924 | static int tcp_v4_md5_add_func(struct sock *sk, struct sock *addr_sk, |
924 | u8 *newkey, u8 newkeylen) | 925 | u8 *newkey, u8 newkeylen) |
925 | { | 926 | { |
926 | return tcp_v4_md5_do_add(sk, inet_sk(addr_sk)->daddr, | 927 | return tcp_v4_md5_do_add(sk, inet_sk(addr_sk)->inet_daddr, |
927 | newkey, newkeylen); | 928 | newkey, newkeylen); |
928 | } | 929 | } |
929 | 930 | ||
@@ -1089,8 +1090,8 @@ int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key, | |||
1089 | __be32 saddr, daddr; | 1090 | __be32 saddr, daddr; |
1090 | 1091 | ||
1091 | if (sk) { | 1092 | if (sk) { |
1092 | saddr = inet_sk(sk)->saddr; | 1093 | saddr = inet_sk(sk)->inet_saddr; |
1093 | daddr = inet_sk(sk)->daddr; | 1094 | daddr = inet_sk(sk)->inet_daddr; |
1094 | } else if (req) { | 1095 | } else if (req) { |
1095 | saddr = inet_rsk(req)->loc_addr; | 1096 | saddr = inet_rsk(req)->loc_addr; |
1096 | daddr = inet_rsk(req)->rmt_addr; | 1097 | daddr = inet_rsk(req)->rmt_addr; |
@@ -1256,11 +1257,21 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) | |||
1256 | tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops; | 1257 | tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops; |
1257 | #endif | 1258 | #endif |
1258 | 1259 | ||
1260 | ireq = inet_rsk(req); | ||
1261 | ireq->loc_addr = daddr; | ||
1262 | ireq->rmt_addr = saddr; | ||
1263 | ireq->no_srccheck = inet_sk(sk)->transparent; | ||
1264 | ireq->opt = tcp_v4_save_options(sk, skb); | ||
1265 | |||
1266 | dst = inet_csk_route_req(sk, req); | ||
1267 | if(!dst) | ||
1268 | goto drop_and_free; | ||
1269 | |||
1259 | tcp_clear_options(&tmp_opt); | 1270 | tcp_clear_options(&tmp_opt); |
1260 | tmp_opt.mss_clamp = 536; | 1271 | tmp_opt.mss_clamp = 536; |
1261 | tmp_opt.user_mss = tcp_sk(sk)->rx_opt.user_mss; | 1272 | tmp_opt.user_mss = tcp_sk(sk)->rx_opt.user_mss; |
1262 | 1273 | ||
1263 | tcp_parse_options(skb, &tmp_opt, 0); | 1274 | tcp_parse_options(skb, &tmp_opt, 0, dst); |
1264 | 1275 | ||
1265 | if (want_cookie && !tmp_opt.saw_tstamp) | 1276 | if (want_cookie && !tmp_opt.saw_tstamp) |
1266 | tcp_clear_options(&tmp_opt); | 1277 | tcp_clear_options(&tmp_opt); |
@@ -1269,14 +1280,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) | |||
1269 | 1280 | ||
1270 | tcp_openreq_init(req, &tmp_opt, skb); | 1281 | tcp_openreq_init(req, &tmp_opt, skb); |
1271 | 1282 | ||
1272 | ireq = inet_rsk(req); | ||
1273 | ireq->loc_addr = daddr; | ||
1274 | ireq->rmt_addr = saddr; | ||
1275 | ireq->no_srccheck = inet_sk(sk)->transparent; | ||
1276 | ireq->opt = tcp_v4_save_options(sk, skb); | ||
1277 | |||
1278 | if (security_inet_conn_request(sk, skb, req)) | 1283 | if (security_inet_conn_request(sk, skb, req)) |
1279 | goto drop_and_free; | 1284 | goto drop_and_release; |
1280 | 1285 | ||
1281 | if (!want_cookie) | 1286 | if (!want_cookie) |
1282 | TCP_ECN_create_request(req, tcp_hdr(skb)); | 1287 | TCP_ECN_create_request(req, tcp_hdr(skb)); |
@@ -1301,7 +1306,6 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) | |||
1301 | */ | 1306 | */ |
1302 | if (tmp_opt.saw_tstamp && | 1307 | if (tmp_opt.saw_tstamp && |
1303 | tcp_death_row.sysctl_tw_recycle && | 1308 | tcp_death_row.sysctl_tw_recycle && |
1304 | (dst = inet_csk_route_req(sk, req)) != NULL && | ||
1305 | (peer = rt_get_peer((struct rtable *)dst)) != NULL && | 1309 | (peer = rt_get_peer((struct rtable *)dst)) != NULL && |
1306 | peer->v4daddr == saddr) { | 1310 | peer->v4daddr == saddr) { |
1307 | if (get_seconds() < peer->tcp_ts_stamp + TCP_PAWS_MSL && | 1311 | if (get_seconds() < peer->tcp_ts_stamp + TCP_PAWS_MSL && |
@@ -1380,9 +1384,9 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb, | |||
1380 | newtp = tcp_sk(newsk); | 1384 | newtp = tcp_sk(newsk); |
1381 | newinet = inet_sk(newsk); | 1385 | newinet = inet_sk(newsk); |
1382 | ireq = inet_rsk(req); | 1386 | ireq = inet_rsk(req); |
1383 | newinet->daddr = ireq->rmt_addr; | 1387 | newinet->inet_daddr = ireq->rmt_addr; |
1384 | newinet->rcv_saddr = ireq->loc_addr; | 1388 | newinet->inet_rcv_saddr = ireq->loc_addr; |
1385 | newinet->saddr = ireq->loc_addr; | 1389 | newinet->inet_saddr = ireq->loc_addr; |
1386 | newinet->opt = ireq->opt; | 1390 | newinet->opt = ireq->opt; |
1387 | ireq->opt = NULL; | 1391 | ireq->opt = NULL; |
1388 | newinet->mc_index = inet_iif(skb); | 1392 | newinet->mc_index = inet_iif(skb); |
@@ -1390,7 +1394,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb, | |||
1390 | inet_csk(newsk)->icsk_ext_hdr_len = 0; | 1394 | inet_csk(newsk)->icsk_ext_hdr_len = 0; |
1391 | if (newinet->opt) | 1395 | if (newinet->opt) |
1392 | inet_csk(newsk)->icsk_ext_hdr_len = newinet->opt->optlen; | 1396 | inet_csk(newsk)->icsk_ext_hdr_len = newinet->opt->optlen; |
1393 | newinet->id = newtp->write_seq ^ jiffies; | 1397 | newinet->inet_id = newtp->write_seq ^ jiffies; |
1394 | 1398 | ||
1395 | tcp_mtup_init(newsk); | 1399 | tcp_mtup_init(newsk); |
1396 | tcp_sync_mss(newsk, dst_mtu(dst)); | 1400 | tcp_sync_mss(newsk, dst_mtu(dst)); |
@@ -1403,7 +1407,8 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb, | |||
1403 | 1407 | ||
1404 | #ifdef CONFIG_TCP_MD5SIG | 1408 | #ifdef CONFIG_TCP_MD5SIG |
1405 | /* Copy over the MD5 key from the original socket */ | 1409 | /* Copy over the MD5 key from the original socket */ |
1406 | if ((key = tcp_v4_md5_do_lookup(sk, newinet->daddr)) != NULL) { | 1410 | key = tcp_v4_md5_do_lookup(sk, newinet->inet_daddr); |
1411 | if (key != NULL) { | ||
1407 | /* | 1412 | /* |
1408 | * We're using one, so create a matching key | 1413 | * We're using one, so create a matching key |
1409 | * on the newsk structure. If we fail to get | 1414 | * on the newsk structure. If we fail to get |
@@ -1412,7 +1417,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb, | |||
1412 | */ | 1417 | */ |
1413 | char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC); | 1418 | char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC); |
1414 | if (newkey != NULL) | 1419 | if (newkey != NULL) |
1415 | tcp_v4_md5_do_add(newsk, newinet->daddr, | 1420 | tcp_v4_md5_do_add(newsk, newinet->inet_daddr, |
1416 | newkey, key->keylen); | 1421 | newkey, key->keylen); |
1417 | newsk->sk_route_caps &= ~NETIF_F_GSO_MASK; | 1422 | newsk->sk_route_caps &= ~NETIF_F_GSO_MASK; |
1418 | } | 1423 | } |
@@ -1711,8 +1716,8 @@ int tcp_v4_remember_stamp(struct sock *sk) | |||
1711 | struct inet_peer *peer = NULL; | 1716 | struct inet_peer *peer = NULL; |
1712 | int release_it = 0; | 1717 | int release_it = 0; |
1713 | 1718 | ||
1714 | if (!rt || rt->rt_dst != inet->daddr) { | 1719 | if (!rt || rt->rt_dst != inet->inet_daddr) { |
1715 | peer = inet_getpeer(inet->daddr, 1); | 1720 | peer = inet_getpeer(inet->inet_daddr, 1); |
1716 | release_it = 1; | 1721 | release_it = 1; |
1717 | } else { | 1722 | } else { |
1718 | if (!rt->peer) | 1723 | if (!rt->peer) |
@@ -2000,7 +2005,7 @@ static void *established_get_first(struct seq_file *seq) | |||
2000 | struct net *net = seq_file_net(seq); | 2005 | struct net *net = seq_file_net(seq); |
2001 | void *rc = NULL; | 2006 | void *rc = NULL; |
2002 | 2007 | ||
2003 | for (st->bucket = 0; st->bucket < tcp_hashinfo.ehash_size; ++st->bucket) { | 2008 | for (st->bucket = 0; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) { |
2004 | struct sock *sk; | 2009 | struct sock *sk; |
2005 | struct hlist_nulls_node *node; | 2010 | struct hlist_nulls_node *node; |
2006 | struct inet_timewait_sock *tw; | 2011 | struct inet_timewait_sock *tw; |
@@ -2061,10 +2066,10 @@ get_tw: | |||
2061 | st->state = TCP_SEQ_STATE_ESTABLISHED; | 2066 | st->state = TCP_SEQ_STATE_ESTABLISHED; |
2062 | 2067 | ||
2063 | /* Look for next non empty bucket */ | 2068 | /* Look for next non empty bucket */ |
2064 | while (++st->bucket < tcp_hashinfo.ehash_size && | 2069 | while (++st->bucket <= tcp_hashinfo.ehash_mask && |
2065 | empty_bucket(st)) | 2070 | empty_bucket(st)) |
2066 | ; | 2071 | ; |
2067 | if (st->bucket >= tcp_hashinfo.ehash_size) | 2072 | if (st->bucket > tcp_hashinfo.ehash_mask) |
2068 | return NULL; | 2073 | return NULL; |
2069 | 2074 | ||
2070 | spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket)); | 2075 | spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket)); |
@@ -2225,7 +2230,7 @@ static void get_openreq4(struct sock *sk, struct request_sock *req, | |||
2225 | " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p%n", | 2230 | " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p%n", |
2226 | i, | 2231 | i, |
2227 | ireq->loc_addr, | 2232 | ireq->loc_addr, |
2228 | ntohs(inet_sk(sk)->sport), | 2233 | ntohs(inet_sk(sk)->inet_sport), |
2229 | ireq->rmt_addr, | 2234 | ireq->rmt_addr, |
2230 | ntohs(ireq->rmt_port), | 2235 | ntohs(ireq->rmt_port), |
2231 | TCP_SYN_RECV, | 2236 | TCP_SYN_RECV, |
@@ -2248,10 +2253,10 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len) | |||
2248 | struct tcp_sock *tp = tcp_sk(sk); | 2253 | struct tcp_sock *tp = tcp_sk(sk); |
2249 | const struct inet_connection_sock *icsk = inet_csk(sk); | 2254 | const struct inet_connection_sock *icsk = inet_csk(sk); |
2250 | struct inet_sock *inet = inet_sk(sk); | 2255 | struct inet_sock *inet = inet_sk(sk); |
2251 | __be32 dest = inet->daddr; | 2256 | __be32 dest = inet->inet_daddr; |
2252 | __be32 src = inet->rcv_saddr; | 2257 | __be32 src = inet->inet_rcv_saddr; |
2253 | __u16 destp = ntohs(inet->dport); | 2258 | __u16 destp = ntohs(inet->inet_dport); |
2254 | __u16 srcp = ntohs(inet->sport); | 2259 | __u16 srcp = ntohs(inet->inet_sport); |
2255 | 2260 | ||
2256 | if (icsk->icsk_pending == ICSK_TIME_RETRANS) { | 2261 | if (icsk->icsk_pending == ICSK_TIME_RETRANS) { |
2257 | timer_active = 1; | 2262 | timer_active = 1; |
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 4c03598ed924..463d51b53d37 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c | |||
@@ -100,9 +100,9 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb, | |||
100 | struct tcp_options_received tmp_opt; | 100 | struct tcp_options_received tmp_opt; |
101 | int paws_reject = 0; | 101 | int paws_reject = 0; |
102 | 102 | ||
103 | tmp_opt.saw_tstamp = 0; | ||
104 | if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) { | 103 | if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) { |
105 | tcp_parse_options(skb, &tmp_opt, 0); | 104 | tmp_opt.tstamp_ok = 1; |
105 | tcp_parse_options(skb, &tmp_opt, 1, NULL); | ||
106 | 106 | ||
107 | if (tmp_opt.saw_tstamp) { | 107 | if (tmp_opt.saw_tstamp) { |
108 | tmp_opt.ts_recent = tcptw->tw_ts_recent; | 108 | tmp_opt.ts_recent = tcptw->tw_ts_recent; |
@@ -500,10 +500,11 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, | |||
500 | int paws_reject = 0; | 500 | int paws_reject = 0; |
501 | struct tcp_options_received tmp_opt; | 501 | struct tcp_options_received tmp_opt; |
502 | struct sock *child; | 502 | struct sock *child; |
503 | struct dst_entry *dst = inet_csk_route_req(sk, req); | ||
503 | 504 | ||
504 | tmp_opt.saw_tstamp = 0; | 505 | tmp_opt.saw_tstamp = 0; |
505 | if (th->doff > (sizeof(struct tcphdr)>>2)) { | 506 | if (th->doff > (sizeof(struct tcphdr)>>2)) { |
506 | tcp_parse_options(skb, &tmp_opt, 0); | 507 | tcp_parse_options(skb, &tmp_opt, 0, dst); |
507 | 508 | ||
508 | if (tmp_opt.saw_tstamp) { | 509 | if (tmp_opt.saw_tstamp) { |
509 | tmp_opt.ts_recent = req->ts_recent; | 510 | tmp_opt.ts_recent = req->ts_recent; |
@@ -516,6 +517,8 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, | |||
516 | } | 517 | } |
517 | } | 518 | } |
518 | 519 | ||
520 | dst_release(dst); | ||
521 | |||
519 | /* Check for pure retransmitted SYN. */ | 522 | /* Check for pure retransmitted SYN. */ |
520 | if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn && | 523 | if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn && |
521 | flg == TCP_FLAG_SYN && | 524 | flg == TCP_FLAG_SYN && |
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index fcd278a7080e..616c686ca253 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -464,6 +464,7 @@ static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb, | |||
464 | struct tcp_md5sig_key **md5) { | 464 | struct tcp_md5sig_key **md5) { |
465 | struct tcp_sock *tp = tcp_sk(sk); | 465 | struct tcp_sock *tp = tcp_sk(sk); |
466 | unsigned size = 0; | 466 | unsigned size = 0; |
467 | struct dst_entry *dst = __sk_dst_get(sk); | ||
467 | 468 | ||
468 | #ifdef CONFIG_TCP_MD5SIG | 469 | #ifdef CONFIG_TCP_MD5SIG |
469 | *md5 = tp->af_specific->md5_lookup(sk, sk); | 470 | *md5 = tp->af_specific->md5_lookup(sk, sk); |
@@ -487,18 +488,22 @@ static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb, | |||
487 | opts->mss = tcp_advertise_mss(sk); | 488 | opts->mss = tcp_advertise_mss(sk); |
488 | size += TCPOLEN_MSS_ALIGNED; | 489 | size += TCPOLEN_MSS_ALIGNED; |
489 | 490 | ||
490 | if (likely(sysctl_tcp_timestamps && *md5 == NULL)) { | 491 | if (likely(sysctl_tcp_timestamps && |
492 | !dst_feature(dst, RTAX_FEATURE_NO_TSTAMP) && | ||
493 | *md5 == NULL)) { | ||
491 | opts->options |= OPTION_TS; | 494 | opts->options |= OPTION_TS; |
492 | opts->tsval = TCP_SKB_CB(skb)->when; | 495 | opts->tsval = TCP_SKB_CB(skb)->when; |
493 | opts->tsecr = tp->rx_opt.ts_recent; | 496 | opts->tsecr = tp->rx_opt.ts_recent; |
494 | size += TCPOLEN_TSTAMP_ALIGNED; | 497 | size += TCPOLEN_TSTAMP_ALIGNED; |
495 | } | 498 | } |
496 | if (likely(sysctl_tcp_window_scaling)) { | 499 | if (likely(sysctl_tcp_window_scaling && |
500 | !dst_feature(dst, RTAX_FEATURE_NO_WSCALE))) { | ||
497 | opts->ws = tp->rx_opt.rcv_wscale; | 501 | opts->ws = tp->rx_opt.rcv_wscale; |
498 | opts->options |= OPTION_WSCALE; | 502 | opts->options |= OPTION_WSCALE; |
499 | size += TCPOLEN_WSCALE_ALIGNED; | 503 | size += TCPOLEN_WSCALE_ALIGNED; |
500 | } | 504 | } |
501 | if (likely(sysctl_tcp_sack)) { | 505 | if (likely(sysctl_tcp_sack && |
506 | !dst_feature(dst, RTAX_FEATURE_NO_SACK))) { | ||
502 | opts->options |= OPTION_SACK_ADVERTISE; | 507 | opts->options |= OPTION_SACK_ADVERTISE; |
503 | if (unlikely(!(OPTION_TS & opts->options))) | 508 | if (unlikely(!(OPTION_TS & opts->options))) |
504 | size += TCPOLEN_SACKPERM_ALIGNED; | 509 | size += TCPOLEN_SACKPERM_ALIGNED; |
@@ -661,8 +666,8 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, | |||
661 | 666 | ||
662 | /* Build TCP header and checksum it. */ | 667 | /* Build TCP header and checksum it. */ |
663 | th = tcp_hdr(skb); | 668 | th = tcp_hdr(skb); |
664 | th->source = inet->sport; | 669 | th->source = inet->inet_sport; |
665 | th->dest = inet->dport; | 670 | th->dest = inet->inet_dport; |
666 | th->seq = htonl(tcb->seq); | 671 | th->seq = htonl(tcb->seq); |
667 | th->ack_seq = htonl(tp->rcv_nxt); | 672 | th->ack_seq = htonl(tp->rcv_nxt); |
668 | *(((__be16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) | | 673 | *(((__be16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) | |
@@ -2315,7 +2320,9 @@ static void tcp_connect_init(struct sock *sk) | |||
2315 | * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT. | 2320 | * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT. |
2316 | */ | 2321 | */ |
2317 | tp->tcp_header_len = sizeof(struct tcphdr) + | 2322 | tp->tcp_header_len = sizeof(struct tcphdr) + |
2318 | (sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0); | 2323 | (sysctl_tcp_timestamps && |
2324 | (!dst_feature(dst, RTAX_FEATURE_NO_TSTAMP) ? | ||
2325 | TCPOLEN_TSTAMP_ALIGNED : 0)); | ||
2319 | 2326 | ||
2320 | #ifdef CONFIG_TCP_MD5SIG | 2327 | #ifdef CONFIG_TCP_MD5SIG |
2321 | if (tp->af_specific->md5_lookup(sk, sk) != NULL) | 2328 | if (tp->af_specific->md5_lookup(sk, sk) != NULL) |
@@ -2341,7 +2348,8 @@ static void tcp_connect_init(struct sock *sk) | |||
2341 | tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0), | 2348 | tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0), |
2342 | &tp->rcv_wnd, | 2349 | &tp->rcv_wnd, |
2343 | &tp->window_clamp, | 2350 | &tp->window_clamp, |
2344 | sysctl_tcp_window_scaling, | 2351 | (sysctl_tcp_window_scaling && |
2352 | !dst_feature(dst, RTAX_FEATURE_NO_WSCALE)), | ||
2345 | &rcv_wscale); | 2353 | &rcv_wscale); |
2346 | 2354 | ||
2347 | tp->rx_opt.rcv_wscale = rcv_wscale; | 2355 | tp->rx_opt.rcv_wscale = rcv_wscale; |
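Several hunks in tcp_input.c and tcp_output.c above gate a global sysctl on a per-route dst_feature() flag such as RTAX_FEATURE_NO_SACK. A small standalone sketch of that gating pattern; the flag values and names below are illustrative stand-ins, not the kernel's definitions:

#include <stdio.h>

/* Stand-in feature bits, in the spirit of RTAX_FEATURE_NO_TSTAMP etc. */
#define EXAMPLE_FEATURE_NO_TSTAMP	0x1u
#define EXAMPLE_FEATURE_NO_SACK		0x2u

static int example_sysctl_tcp_timestamps = 1;	/* global knob */

/* An option is used only if enabled globally and not disabled on this route. */
static int example_use_timestamps(unsigned int route_features)
{
	return example_sysctl_tcp_timestamps &&
	       !(route_features & EXAMPLE_FEATURE_NO_TSTAMP);
}

int main(void)
{
	printf("plain route: %d\n", example_use_timestamps(0));
	printf("NO_TSTAMP route: %d\n",
	       example_use_timestamps(EXAMPLE_FEATURE_NO_TSTAMP));
	return 0;
}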
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c index 59f5b5e7c566..7a3cc2ffad84 100644 --- a/net/ipv4/tcp_probe.c +++ b/net/ipv4/tcp_probe.c | |||
@@ -94,7 +94,8 @@ static int jtcp_rcv_established(struct sock *sk, struct sk_buff *skb, | |||
94 | const struct inet_sock *inet = inet_sk(sk); | 94 | const struct inet_sock *inet = inet_sk(sk); |
95 | 95 | ||
96 | /* Only update if port matches */ | 96 | /* Only update if port matches */ |
97 | if ((port == 0 || ntohs(inet->dport) == port || ntohs(inet->sport) == port) | 97 | if ((port == 0 || ntohs(inet->inet_dport) == port || |
98 | ntohs(inet->inet_sport) == port) | ||
98 | && (full || tp->snd_cwnd != tcp_probe.lastcwnd)) { | 99 | && (full || tp->snd_cwnd != tcp_probe.lastcwnd)) { |
99 | 100 | ||
100 | spin_lock(&tcp_probe.lock); | 101 | spin_lock(&tcp_probe.lock); |
@@ -103,10 +104,10 @@ static int jtcp_rcv_established(struct sock *sk, struct sk_buff *skb, | |||
103 | struct tcp_log *p = tcp_probe.log + tcp_probe.head; | 104 | struct tcp_log *p = tcp_probe.log + tcp_probe.head; |
104 | 105 | ||
105 | p->tstamp = ktime_get(); | 106 | p->tstamp = ktime_get(); |
106 | p->saddr = inet->saddr; | 107 | p->saddr = inet->inet_saddr; |
107 | p->sport = inet->sport; | 108 | p->sport = inet->inet_sport; |
108 | p->daddr = inet->daddr; | 109 | p->daddr = inet->inet_daddr; |
109 | p->dport = inet->dport; | 110 | p->dport = inet->inet_dport; |
110 | p->length = skb->len; | 111 | p->length = skb->len; |
111 | p->snd_nxt = tp->snd_nxt; | 112 | p->snd_nxt = tp->snd_nxt; |
112 | p->snd_una = tp->snd_una; | 113 | p->snd_una = tp->snd_una; |
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index cdb2ca7684d4..8353a538cd4c 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c | |||
@@ -141,14 +141,14 @@ static int tcp_write_timeout(struct sock *sk) | |||
141 | 141 | ||
142 | if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { | 142 | if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { |
143 | if (icsk->icsk_retransmits) | 143 | if (icsk->icsk_retransmits) |
144 | dst_negative_advice(&sk->sk_dst_cache); | 144 | dst_negative_advice(&sk->sk_dst_cache, sk); |
145 | retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries; | 145 | retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries; |
146 | } else { | 146 | } else { |
147 | if (retransmits_timed_out(sk, sysctl_tcp_retries1)) { | 147 | if (retransmits_timed_out(sk, sysctl_tcp_retries1)) { |
148 | /* Black hole detection */ | 148 | /* Black hole detection */ |
149 | tcp_mtu_probing(icsk, sk); | 149 | tcp_mtu_probing(icsk, sk); |
150 | 150 | ||
151 | dst_negative_advice(&sk->sk_dst_cache); | 151 | dst_negative_advice(&sk->sk_dst_cache, sk); |
152 | } | 152 | } |
153 | 153 | ||
154 | retry_until = sysctl_tcp_retries2; | 154 | retry_until = sysctl_tcp_retries2; |
@@ -303,15 +303,15 @@ void tcp_retransmit_timer(struct sock *sk) | |||
303 | struct inet_sock *inet = inet_sk(sk); | 303 | struct inet_sock *inet = inet_sk(sk); |
304 | if (sk->sk_family == AF_INET) { | 304 | if (sk->sk_family == AF_INET) { |
305 | LIMIT_NETDEBUG(KERN_DEBUG "TCP: Peer %pI4:%u/%u unexpectedly shrunk window %u:%u (repaired)\n", | 305 | LIMIT_NETDEBUG(KERN_DEBUG "TCP: Peer %pI4:%u/%u unexpectedly shrunk window %u:%u (repaired)\n", |
306 | &inet->daddr, ntohs(inet->dport), | 306 | &inet->inet_daddr, ntohs(inet->inet_dport), |
307 | inet->num, tp->snd_una, tp->snd_nxt); | 307 | inet->inet_num, tp->snd_una, tp->snd_nxt); |
308 | } | 308 | } |
309 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | 309 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) |
310 | else if (sk->sk_family == AF_INET6) { | 310 | else if (sk->sk_family == AF_INET6) { |
311 | struct ipv6_pinfo *np = inet6_sk(sk); | 311 | struct ipv6_pinfo *np = inet6_sk(sk); |
312 | LIMIT_NETDEBUG(KERN_DEBUG "TCP: Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n", | 312 | LIMIT_NETDEBUG(KERN_DEBUG "TCP: Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n", |
313 | &np->daddr, ntohs(inet->dport), | 313 | &np->daddr, ntohs(inet->inet_dport), |
314 | inet->num, tp->snd_una, tp->snd_nxt); | 314 | inet->inet_num, tp->snd_una, tp->snd_nxt); |
315 | } | 315 | } |
316 | #endif | 316 | #endif |
317 | #endif | 317 | #endif |
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index d0d436d6216c..4274c1cc78fd 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
@@ -106,7 +106,7 @@ | |||
106 | #include <net/xfrm.h> | 106 | #include <net/xfrm.h> |
107 | #include "udp_impl.h" | 107 | #include "udp_impl.h" |
108 | 108 | ||
109 | struct udp_table udp_table; | 109 | struct udp_table udp_table __read_mostly; |
110 | EXPORT_SYMBOL(udp_table); | 110 | EXPORT_SYMBOL(udp_table); |
111 | 111 | ||
112 | int sysctl_udp_mem[3] __read_mostly; | 112 | int sysctl_udp_mem[3] __read_mostly; |
@@ -121,14 +121,16 @@ EXPORT_SYMBOL(sysctl_udp_wmem_min); | |||
121 | atomic_t udp_memory_allocated; | 121 | atomic_t udp_memory_allocated; |
122 | EXPORT_SYMBOL(udp_memory_allocated); | 122 | EXPORT_SYMBOL(udp_memory_allocated); |
123 | 123 | ||
124 | #define PORTS_PER_CHAIN (65536 / UDP_HTABLE_SIZE) | 124 | #define MAX_UDP_PORTS 65536 |
125 | #define PORTS_PER_CHAIN (MAX_UDP_PORTS / UDP_HTABLE_SIZE_MIN) | ||
125 | 126 | ||
126 | static int udp_lib_lport_inuse(struct net *net, __u16 num, | 127 | static int udp_lib_lport_inuse(struct net *net, __u16 num, |
127 | const struct udp_hslot *hslot, | 128 | const struct udp_hslot *hslot, |
128 | unsigned long *bitmap, | 129 | unsigned long *bitmap, |
129 | struct sock *sk, | 130 | struct sock *sk, |
130 | int (*saddr_comp)(const struct sock *sk1, | 131 | int (*saddr_comp)(const struct sock *sk1, |
131 | const struct sock *sk2)) | 132 | const struct sock *sk2), |
133 | unsigned int log) | ||
132 | { | 134 | { |
133 | struct sock *sk2; | 135 | struct sock *sk2; |
134 | struct hlist_nulls_node *node; | 136 | struct hlist_nulls_node *node; |
@@ -142,8 +144,7 @@ static int udp_lib_lport_inuse(struct net *net, __u16 num, | |||
142 | || sk2->sk_bound_dev_if == sk->sk_bound_dev_if) && | 144 | || sk2->sk_bound_dev_if == sk->sk_bound_dev_if) && |
143 | (*saddr_comp)(sk, sk2)) { | 145 | (*saddr_comp)(sk, sk2)) { |
144 | if (bitmap) | 146 | if (bitmap) |
145 | __set_bit(sk2->sk_hash / UDP_HTABLE_SIZE, | 147 | __set_bit(sk2->sk_hash >> log, bitmap); |
146 | bitmap); | ||
147 | else | 148 | else |
148 | return 1; | 149 | return 1; |
149 | } | 150 | } |
@@ -180,13 +181,15 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum, | |||
180 | /* | 181 | /* |
181 | * force rand to be an odd multiple of UDP_HTABLE_SIZE | 182 | * force rand to be an odd multiple of UDP_HTABLE_SIZE |
182 | */ | 183 | */ |
183 | rand = (rand | 1) * UDP_HTABLE_SIZE; | 184 | rand = (rand | 1) * (udptable->mask + 1); |
184 | for (last = first + UDP_HTABLE_SIZE; first != last; first++) { | 185 | for (last = first + udptable->mask + 1; |
185 | hslot = &udptable->hash[udp_hashfn(net, first)]; | 186 | first != last; |
187 | first++) { | ||
188 | hslot = udp_hashslot(udptable, net, first); | ||
186 | bitmap_zero(bitmap, PORTS_PER_CHAIN); | 189 | bitmap_zero(bitmap, PORTS_PER_CHAIN); |
187 | spin_lock_bh(&hslot->lock); | 190 | spin_lock_bh(&hslot->lock); |
188 | udp_lib_lport_inuse(net, snum, hslot, bitmap, sk, | 191 | udp_lib_lport_inuse(net, snum, hslot, bitmap, sk, |
189 | saddr_comp); | 192 | saddr_comp, udptable->log); |
190 | 193 | ||
191 | snum = first; | 194 | snum = first; |
192 | /* | 195 | /* |
@@ -196,7 +199,7 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum, | |||
196 | */ | 199 | */ |
197 | do { | 200 | do { |
198 | if (low <= snum && snum <= high && | 201 | if (low <= snum && snum <= high && |
199 | !test_bit(snum / UDP_HTABLE_SIZE, bitmap)) | 202 | !test_bit(snum >> udptable->log, bitmap)) |
200 | goto found; | 203 | goto found; |
201 | snum += rand; | 204 | snum += rand; |
202 | } while (snum != first); | 205 | } while (snum != first); |
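The udp.c hunks above index a per-slot port bitmap with sk_hash >> log and snum >> udptable->log. With 2^log hash slots, every port that lands in a given slot is congruent modulo the slot count, so the right shift yields a dense bitmap index. A standalone sketch with illustrative numbers only:

#include <stdio.h>

/* Two ports that fall in the same slot of a 2^log table differ only in
 * their upper bits, so (port >> log) gives each one a distinct bit. */
int main(void)
{
	unsigned int log = 7;			/* 128 slots, example value */
	unsigned int slots = 1U << log;
	unsigned int p1 = 1024, p2 = 1024 + slots;

	printf("slots: %u and %u\n", p1 & (slots - 1), p2 & (slots - 1));
	printf("bitmap bits: %u and %u\n", p1 >> log, p2 >> log);
	return 0;
}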
@@ -204,13 +207,14 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum, | |||
204 | } | 207 | } |
205 | goto fail; | 208 | goto fail; |
206 | } else { | 209 | } else { |
207 | hslot = &udptable->hash[udp_hashfn(net, snum)]; | 210 | hslot = udp_hashslot(udptable, net, snum); |
208 | spin_lock_bh(&hslot->lock); | 211 | spin_lock_bh(&hslot->lock); |
209 | if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk, saddr_comp)) | 212 | if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk, |
213 | saddr_comp, 0)) | ||
210 | goto fail_unlock; | 214 | goto fail_unlock; |
211 | } | 215 | } |
212 | found: | 216 | found: |
213 | inet_sk(sk)->num = snum; | 217 | inet_sk(sk)->inet_num = snum; |
214 | sk->sk_hash = snum; | 218 | sk->sk_hash = snum; |
215 | if (sk_unhashed(sk)) { | 219 | if (sk_unhashed(sk)) { |
216 | sk_nulls_add_node_rcu(sk, &hslot->head); | 220 | sk_nulls_add_node_rcu(sk, &hslot->head); |
@@ -229,8 +233,8 @@ static int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2) | |||
229 | struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2); | 233 | struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2); |
230 | 234 | ||
231 | return (!ipv6_only_sock(sk2) && | 235 | return (!ipv6_only_sock(sk2) && |
232 | (!inet1->rcv_saddr || !inet2->rcv_saddr || | 236 | (!inet1->inet_rcv_saddr || !inet2->inet_rcv_saddr || |
233 | inet1->rcv_saddr == inet2->rcv_saddr)); | 237 | inet1->inet_rcv_saddr == inet2->inet_rcv_saddr)); |
234 | } | 238 | } |
235 | 239 | ||
236 | int udp_v4_get_port(struct sock *sk, unsigned short snum) | 240 | int udp_v4_get_port(struct sock *sk, unsigned short snum) |
@@ -249,18 +253,18 @@ static inline int compute_score(struct sock *sk, struct net *net, __be32 saddr, | |||
249 | struct inet_sock *inet = inet_sk(sk); | 253 | struct inet_sock *inet = inet_sk(sk); |
250 | 254 | ||
251 | score = (sk->sk_family == PF_INET ? 1 : 0); | 255 | score = (sk->sk_family == PF_INET ? 1 : 0); |
252 | if (inet->rcv_saddr) { | 256 | if (inet->inet_rcv_saddr) { |
253 | if (inet->rcv_saddr != daddr) | 257 | if (inet->inet_rcv_saddr != daddr) |
254 | return -1; | 258 | return -1; |
255 | score += 2; | 259 | score += 2; |
256 | } | 260 | } |
257 | if (inet->daddr) { | 261 | if (inet->inet_daddr) { |
258 | if (inet->daddr != saddr) | 262 | if (inet->inet_daddr != saddr) |
259 | return -1; | 263 | return -1; |
260 | score += 2; | 264 | score += 2; |
261 | } | 265 | } |
262 | if (inet->dport) { | 266 | if (inet->inet_dport) { |
263 | if (inet->dport != sport) | 267 | if (inet->inet_dport != sport) |
264 | return -1; | 268 | return -1; |
265 | score += 2; | 269 | score += 2; |
266 | } | 270 | } |
@@ -283,7 +287,7 @@ static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, | |||
283 | struct sock *sk, *result; | 287 | struct sock *sk, *result; |
284 | struct hlist_nulls_node *node; | 288 | struct hlist_nulls_node *node; |
285 | unsigned short hnum = ntohs(dport); | 289 | unsigned short hnum = ntohs(dport); |
286 | unsigned int hash = udp_hashfn(net, hnum); | 290 | unsigned int hash = udp_hashfn(net, hnum, udptable->mask); |
287 | struct udp_hslot *hslot = &udptable->hash[hash]; | 291 | struct udp_hslot *hslot = &udptable->hash[hash]; |
288 | int score, badness; | 292 | int score, badness; |
289 | 293 | ||
@@ -356,9 +360,10 @@ static inline struct sock *udp_v4_mcast_next(struct net *net, struct sock *sk, | |||
356 | 360 | ||
357 | if (!net_eq(sock_net(s), net) || | 361 | if (!net_eq(sock_net(s), net) || |
358 | s->sk_hash != hnum || | 362 | s->sk_hash != hnum || |
359 | (inet->daddr && inet->daddr != rmt_addr) || | 363 | (inet->inet_daddr && inet->inet_daddr != rmt_addr) || |
360 | (inet->dport != rmt_port && inet->dport) || | 364 | (inet->inet_dport != rmt_port && inet->inet_dport) || |
361 | (inet->rcv_saddr && inet->rcv_saddr != loc_addr) || | 365 | (inet->inet_rcv_saddr && |
366 | inet->inet_rcv_saddr != loc_addr) || | ||
362 | ipv6_only_sock(s) || | 367 | ipv6_only_sock(s) || |
363 | (s->sk_bound_dev_if && s->sk_bound_dev_if != dif)) | 368 | (s->sk_bound_dev_if && s->sk_bound_dev_if != dif)) |
364 | continue; | 369 | continue; |
@@ -642,14 +647,14 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
642 | } else { | 647 | } else { |
643 | if (sk->sk_state != TCP_ESTABLISHED) | 648 | if (sk->sk_state != TCP_ESTABLISHED) |
644 | return -EDESTADDRREQ; | 649 | return -EDESTADDRREQ; |
645 | daddr = inet->daddr; | 650 | daddr = inet->inet_daddr; |
646 | dport = inet->dport; | 651 | dport = inet->inet_dport; |
647 | /* Open fast path for connected socket. | 652 | /* Open fast path for connected socket. |
648 | Route will not be used, if at least one option is set. | 653 | Route will not be used, if at least one option is set. |
649 | */ | 654 | */ |
650 | connected = 1; | 655 | connected = 1; |
651 | } | 656 | } |
652 | ipc.addr = inet->saddr; | 657 | ipc.addr = inet->inet_saddr; |
653 | 658 | ||
654 | ipc.oif = sk->sk_bound_dev_if; | 659 | ipc.oif = sk->sk_bound_dev_if; |
655 | err = sock_tx_timestamp(msg, sk, &ipc.shtx); | 660 | err = sock_tx_timestamp(msg, sk, &ipc.shtx); |
@@ -704,7 +709,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
704 | .proto = sk->sk_protocol, | 709 | .proto = sk->sk_protocol, |
705 | .flags = inet_sk_flowi_flags(sk), | 710 | .flags = inet_sk_flowi_flags(sk), |
706 | .uli_u = { .ports = | 711 | .uli_u = { .ports = |
707 | { .sport = inet->sport, | 712 | { .sport = inet->inet_sport, |
708 | .dport = dport } } }; | 713 | .dport = dport } } }; |
709 | struct net *net = sock_net(sk); | 714 | struct net *net = sock_net(sk); |
710 | 715 | ||
@@ -748,7 +753,7 @@ back_from_confirm: | |||
748 | inet->cork.fl.fl4_dst = daddr; | 753 | inet->cork.fl.fl4_dst = daddr; |
749 | inet->cork.fl.fl_ip_dport = dport; | 754 | inet->cork.fl.fl_ip_dport = dport; |
750 | inet->cork.fl.fl4_src = saddr; | 755 | inet->cork.fl.fl4_src = saddr; |
751 | inet->cork.fl.fl_ip_sport = inet->sport; | 756 | inet->cork.fl.fl_ip_sport = inet->inet_sport; |
752 | up->pending = AF_INET; | 757 | up->pending = AF_INET; |
753 | 758 | ||
754 | do_append_data: | 759 | do_append_data: |
@@ -862,6 +867,7 @@ static unsigned int first_packet_length(struct sock *sk) | |||
862 | udp_lib_checksum_complete(skb)) { | 867 | udp_lib_checksum_complete(skb)) { |
863 | UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, | 868 | UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, |
864 | IS_UDPLITE(sk)); | 869 | IS_UDPLITE(sk)); |
870 | atomic_inc(&sk->sk_drops); | ||
865 | __skb_unlink(skb, rcvq); | 871 | __skb_unlink(skb, rcvq); |
866 | __skb_queue_tail(&list_kill, skb); | 872 | __skb_queue_tail(&list_kill, skb); |
867 | } | 873 | } |
@@ -982,7 +988,7 @@ try_again: | |||
982 | UDP_INC_STATS_USER(sock_net(sk), | 988 | UDP_INC_STATS_USER(sock_net(sk), |
983 | UDP_MIB_INDATAGRAMS, is_udplite); | 989 | UDP_MIB_INDATAGRAMS, is_udplite); |
984 | 990 | ||
985 | sock_recv_timestamp(msg, sk, skb); | 991 | sock_recv_ts_and_drops(msg, sk, skb); |
986 | 992 | ||
987 | /* Copy the address. */ | 993 | /* Copy the address. */ |
988 | if (sin) { | 994 | if (sin) { |
@@ -1025,15 +1031,15 @@ int udp_disconnect(struct sock *sk, int flags) | |||
1025 | */ | 1031 | */ |
1026 | 1032 | ||
1027 | sk->sk_state = TCP_CLOSE; | 1033 | sk->sk_state = TCP_CLOSE; |
1028 | inet->daddr = 0; | 1034 | inet->inet_daddr = 0; |
1029 | inet->dport = 0; | 1035 | inet->inet_dport = 0; |
1030 | sk->sk_bound_dev_if = 0; | 1036 | sk->sk_bound_dev_if = 0; |
1031 | if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) | 1037 | if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) |
1032 | inet_reset_saddr(sk); | 1038 | inet_reset_saddr(sk); |
1033 | 1039 | ||
1034 | if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) { | 1040 | if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) { |
1035 | sk->sk_prot->unhash(sk); | 1041 | sk->sk_prot->unhash(sk); |
1036 | inet->sport = 0; | 1042 | inet->inet_sport = 0; |
1037 | } | 1043 | } |
1038 | sk_dst_reset(sk); | 1044 | sk_dst_reset(sk); |
1039 | return 0; | 1045 | return 0; |
@@ -1044,12 +1050,12 @@ void udp_lib_unhash(struct sock *sk) | |||
1044 | { | 1050 | { |
1045 | if (sk_hashed(sk)) { | 1051 | if (sk_hashed(sk)) { |
1046 | struct udp_table *udptable = sk->sk_prot->h.udp_table; | 1052 | struct udp_table *udptable = sk->sk_prot->h.udp_table; |
1047 | unsigned int hash = udp_hashfn(sock_net(sk), sk->sk_hash); | 1053 | struct udp_hslot *hslot = udp_hashslot(udptable, sock_net(sk), |
1048 | struct udp_hslot *hslot = &udptable->hash[hash]; | 1054 | sk->sk_hash); |
1049 | 1055 | ||
1050 | spin_lock_bh(&hslot->lock); | 1056 | spin_lock_bh(&hslot->lock); |
1051 | if (sk_nulls_del_node_init_rcu(sk)) { | 1057 | if (sk_nulls_del_node_init_rcu(sk)) { |
1052 | inet_sk(sk)->num = 0; | 1058 | inet_sk(sk)->inet_num = 0; |
1053 | sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); | 1059 | sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); |
1054 | } | 1060 | } |
1055 | spin_unlock_bh(&hslot->lock); | 1061 | spin_unlock_bh(&hslot->lock); |
@@ -1059,25 +1065,22 @@ EXPORT_SYMBOL(udp_lib_unhash); | |||
1059 | 1065 | ||
1060 | static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) | 1066 | static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) |
1061 | { | 1067 | { |
1062 | int is_udplite = IS_UDPLITE(sk); | 1068 | int rc = sock_queue_rcv_skb(sk, skb); |
1063 | int rc; | 1069 | |
1070 | if (rc < 0) { | ||
1071 | int is_udplite = IS_UDPLITE(sk); | ||
1064 | 1072 | ||
1065 | if ((rc = sock_queue_rcv_skb(sk, skb)) < 0) { | ||
1066 | /* Note that an ENOMEM error is charged twice */ | 1073 | /* Note that an ENOMEM error is charged twice */ |
1067 | if (rc == -ENOMEM) { | 1074 | if (rc == -ENOMEM) |
1068 | UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS, | 1075 | UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS, |
1069 | is_udplite); | 1076 | is_udplite); |
1070 | atomic_inc(&sk->sk_drops); | 1077 | UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); |
1071 | } | 1078 | kfree_skb(skb); |
1072 | goto drop; | 1079 | return -1; |
1073 | } | 1080 | } |
1074 | 1081 | ||
1075 | return 0; | 1082 | return 0; |
1076 | 1083 | ||
1077 | drop: | ||
1078 | UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); | ||
1079 | kfree_skb(skb); | ||
1080 | return -1; | ||
1081 | } | 1084 | } |
1082 | 1085 | ||
1083 | /* returns: | 1086 | /* returns: |
@@ -1184,6 +1187,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) | |||
1184 | 1187 | ||
1185 | drop: | 1188 | drop: |
1186 | UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); | 1189 | UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); |
1190 | atomic_inc(&sk->sk_drops); | ||
1187 | kfree_skb(skb); | 1191 | kfree_skb(skb); |
1188 | return -1; | 1192 | return -1; |
1189 | } | 1193 | } |
@@ -1200,7 +1204,7 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb, | |||
1200 | struct udp_table *udptable) | 1204 | struct udp_table *udptable) |
1201 | { | 1205 | { |
1202 | struct sock *sk; | 1206 | struct sock *sk; |
1203 | struct udp_hslot *hslot = &udptable->hash[udp_hashfn(net, ntohs(uh->dest))]; | 1207 | struct udp_hslot *hslot = udp_hashslot(udptable, net, ntohs(uh->dest)); |
1204 | int dif; | 1208 | int dif; |
1205 | 1209 | ||
1206 | spin_lock(&hslot->lock); | 1210 | spin_lock(&hslot->lock); |
@@ -1622,9 +1626,14 @@ static struct sock *udp_get_first(struct seq_file *seq, int start) | |||
1622 | struct udp_iter_state *state = seq->private; | 1626 | struct udp_iter_state *state = seq->private; |
1623 | struct net *net = seq_file_net(seq); | 1627 | struct net *net = seq_file_net(seq); |
1624 | 1628 | ||
1625 | for (state->bucket = start; state->bucket < UDP_HTABLE_SIZE; ++state->bucket) { | 1629 | for (state->bucket = start; state->bucket <= state->udp_table->mask; |
1630 | ++state->bucket) { | ||
1626 | struct hlist_nulls_node *node; | 1631 | struct hlist_nulls_node *node; |
1627 | struct udp_hslot *hslot = &state->udp_table->hash[state->bucket]; | 1632 | struct udp_hslot *hslot = &state->udp_table->hash[state->bucket]; |
1633 | |||
1634 | if (hlist_nulls_empty(&hslot->head)) | ||
1635 | continue; | ||
1636 | |||
1628 | spin_lock_bh(&hslot->lock); | 1637 | spin_lock_bh(&hslot->lock); |
1629 | sk_nulls_for_each(sk, node, &hslot->head) { | 1638 | sk_nulls_for_each(sk, node, &hslot->head) { |
1630 | if (!net_eq(sock_net(sk), net)) | 1639 | if (!net_eq(sock_net(sk), net)) |
@@ -1649,7 +1658,7 @@ static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk) | |||
1649 | } while (sk && (!net_eq(sock_net(sk), net) || sk->sk_family != state->family)); | 1658 | } while (sk && (!net_eq(sock_net(sk), net) || sk->sk_family != state->family)); |
1650 | 1659 | ||
1651 | if (!sk) { | 1660 | if (!sk) { |
1652 | if (state->bucket < UDP_HTABLE_SIZE) | 1661 | if (state->bucket <= state->udp_table->mask) |
1653 | spin_unlock_bh(&state->udp_table->hash[state->bucket].lock); | 1662 | spin_unlock_bh(&state->udp_table->hash[state->bucket].lock); |
1654 | return udp_get_first(seq, state->bucket + 1); | 1663 | return udp_get_first(seq, state->bucket + 1); |
1655 | } | 1664 | } |
@@ -1669,7 +1678,7 @@ static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos) | |||
1669 | static void *udp_seq_start(struct seq_file *seq, loff_t *pos) | 1678 | static void *udp_seq_start(struct seq_file *seq, loff_t *pos) |
1670 | { | 1679 | { |
1671 | struct udp_iter_state *state = seq->private; | 1680 | struct udp_iter_state *state = seq->private; |
1672 | state->bucket = UDP_HTABLE_SIZE; | 1681 | state->bucket = MAX_UDP_PORTS; |
1673 | 1682 | ||
1674 | return *pos ? udp_get_idx(seq, *pos-1) : SEQ_START_TOKEN; | 1683 | return *pos ? udp_get_idx(seq, *pos-1) : SEQ_START_TOKEN; |
1675 | } | 1684 | } |
@@ -1691,7 +1700,7 @@ static void udp_seq_stop(struct seq_file *seq, void *v) | |||
1691 | { | 1700 | { |
1692 | struct udp_iter_state *state = seq->private; | 1701 | struct udp_iter_state *state = seq->private; |
1693 | 1702 | ||
1694 | if (state->bucket < UDP_HTABLE_SIZE) | 1703 | if (state->bucket <= state->udp_table->mask) |
1695 | spin_unlock_bh(&state->udp_table->hash[state->bucket].lock); | 1704 | spin_unlock_bh(&state->udp_table->hash[state->bucket].lock); |
1696 | } | 1705 | } |
1697 | 1706 | ||
@@ -1746,12 +1755,12 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f, | |||
1746 | int bucket, int *len) | 1755 | int bucket, int *len) |
1747 | { | 1756 | { |
1748 | struct inet_sock *inet = inet_sk(sp); | 1757 | struct inet_sock *inet = inet_sk(sp); |
1749 | __be32 dest = inet->daddr; | 1758 | __be32 dest = inet->inet_daddr; |
1750 | __be32 src = inet->rcv_saddr; | 1759 | __be32 src = inet->inet_rcv_saddr; |
1751 | __u16 destp = ntohs(inet->dport); | 1760 | __u16 destp = ntohs(inet->inet_dport); |
1752 | __u16 srcp = ntohs(inet->sport); | 1761 | __u16 srcp = ntohs(inet->inet_sport); |
1753 | 1762 | ||
1754 | seq_printf(f, "%4d: %08X:%04X %08X:%04X" | 1763 | seq_printf(f, "%5d: %08X:%04X %08X:%04X" |
1755 | " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d%n", | 1764 | " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d%n", |
1756 | bucket, src, srcp, dest, destp, sp->sk_state, | 1765 | bucket, src, srcp, dest, destp, sp->sk_state, |
1757 | sk_wmem_alloc_get(sp), | 1766 | sk_wmem_alloc_get(sp), |
@@ -1817,11 +1826,43 @@ void udp4_proc_exit(void) | |||
1817 | } | 1826 | } |
1818 | #endif /* CONFIG_PROC_FS */ | 1827 | #endif /* CONFIG_PROC_FS */ |
1819 | 1828 | ||
1820 | void __init udp_table_init(struct udp_table *table) | 1829 | static __initdata unsigned long uhash_entries; |
1830 | static int __init set_uhash_entries(char *str) | ||
1821 | { | 1831 | { |
1822 | int i; | 1832 | if (!str) |
1833 | return 0; | ||
1834 | uhash_entries = simple_strtoul(str, &str, 0); | ||
1835 | if (uhash_entries && uhash_entries < UDP_HTABLE_SIZE_MIN) | ||
1836 | uhash_entries = UDP_HTABLE_SIZE_MIN; | ||
1837 | return 1; | ||
1838 | } | ||
1839 | __setup("uhash_entries=", set_uhash_entries); | ||
1823 | 1840 | ||
1824 | for (i = 0; i < UDP_HTABLE_SIZE; i++) { | 1841 | void __init udp_table_init(struct udp_table *table, const char *name) |
1842 | { | ||
1843 | unsigned int i; | ||
1844 | |||
1845 | if (!CONFIG_BASE_SMALL) | ||
1846 | table->hash = alloc_large_system_hash(name, | ||
1847 | sizeof(struct udp_hslot), | ||
1848 | uhash_entries, | ||
1849 | 21, /* one slot per 2 MB */ | ||
1850 | 0, | ||
1851 | &table->log, | ||
1852 | &table->mask, | ||
1853 | 64 * 1024); | ||
1854 | /* | ||
1855 | * Make sure hash table has the minimum size | ||
1856 | */ | ||
1857 | if (CONFIG_BASE_SMALL || table->mask < UDP_HTABLE_SIZE_MIN - 1) { | ||
1858 | table->hash = kmalloc(UDP_HTABLE_SIZE_MIN * | ||
1859 | sizeof(struct udp_hslot), GFP_KERNEL); | ||
1860 | if (!table->hash) | ||
1861 | panic(name); | ||
1862 | table->log = ilog2(UDP_HTABLE_SIZE_MIN); | ||
1863 | table->mask = UDP_HTABLE_SIZE_MIN - 1; | ||
1864 | } | ||
1865 | for (i = 0; i <= table->mask; i++) { | ||
1825 | INIT_HLIST_NULLS_HEAD(&table->hash[i].head, i); | 1866 | INIT_HLIST_NULLS_HEAD(&table->hash[i].head, i); |
1826 | spin_lock_init(&table->hash[i].lock); | 1867 | spin_lock_init(&table->hash[i].lock); |
1827 | } | 1868 | } |
@@ -1831,7 +1872,7 @@ void __init udp_init(void) | |||
1831 | { | 1872 | { |
1832 | unsigned long nr_pages, limit; | 1873 | unsigned long nr_pages, limit; |
1833 | 1874 | ||
1834 | udp_table_init(&udp_table); | 1875 | udp_table_init(&udp_table, "UDP"); |
1835 | /* Set the pressure threshold up by the same strategy of TCP. It is a | 1876 | /* Set the pressure threshold up by the same strategy of TCP. It is a |
1836 | * fraction of global memory that is up to 1/2 at 256 MB, decreasing | 1877 | * fraction of global memory that is up to 1/2 at 256 MB, decreasing |
1837 | * toward zero with the amount of memory, with a floor of 128 pages. | 1878 | * toward zero with the amount of memory, with a floor of 128 pages. |
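The udp.c changes above replace the fixed, UDP_HTABLE_SIZE-slot hash table with one sized at boot (tunable via the uhash_entries= parameter handled in udp_table_init) and addressed through udptable->mask and udptable->log. Because the table size stays a power of two, the old "% UDP_HTABLE_SIZE" and "/ UDP_HTABLE_SIZE" operations become a mask and a shift, and the random port step is forced to an odd multiple of the table size so the search stays inside one slot chain. A minimal user-space sketch of that arithmetic follows; the struct and helper names are made up for illustration and are not the kernel's.

/*
 * Illustrative sketch only: with a power-of-two table, slot selection and
 * the per-slot bitmap index reduce to a mask and a shift.
 */
#include <stdio.h>

struct demo_table {
	unsigned int mask;	/* table size - 1, size is a power of two */
	unsigned int log;	/* log2(table size) */
};

/* Slot index for a hashed port: the old "% UDP_HTABLE_SIZE" step. */
static unsigned int demo_slot(const struct demo_table *t, unsigned int hash)
{
	return hash & t->mask;
}

/* Bit inside the per-slot bitmap: the old "/ UDP_HTABLE_SIZE" step. */
static unsigned int demo_bitmap_bit(const struct demo_table *t, unsigned int port)
{
	return port >> t->log;
}

int main(void)
{
	struct demo_table t = { .mask = 1023, .log = 10 };	/* 1024 slots */
	unsigned int port = 5353;

	printf("port %u -> slot %u, bitmap bit %u\n",
	       port, demo_slot(&t, port), demo_bitmap_bit(&t, port));
	return 0;
}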
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c index 95248d7f75ec..470c504b9554 100644 --- a/net/ipv4/udplite.c +++ b/net/ipv4/udplite.c | |||
@@ -12,7 +12,7 @@ | |||
12 | */ | 12 | */ |
13 | #include "udp_impl.h" | 13 | #include "udp_impl.h" |
14 | 14 | ||
15 | struct udp_table udplite_table; | 15 | struct udp_table udplite_table __read_mostly; |
16 | EXPORT_SYMBOL(udplite_table); | 16 | EXPORT_SYMBOL(udplite_table); |
17 | 17 | ||
18 | static int udplite_rcv(struct sk_buff *skb) | 18 | static int udplite_rcv(struct sk_buff *skb) |
@@ -110,7 +110,7 @@ static inline int udplite4_proc_init(void) | |||
110 | 110 | ||
111 | void __init udplite4_register(void) | 111 | void __init udplite4_register(void) |
112 | { | 112 | { |
113 | udp_table_init(&udplite_table); | 113 | udp_table_init(&udplite_table, "UDP-Lite"); |
114 | if (proto_register(&udplite_prot, 1)) | 114 | if (proto_register(&udplite_prot, 1)) |
115 | goto out_register_err; | 115 | goto out_register_err; |
116 | 116 | ||
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig index ead6c7a42f44..a578096152ab 100644 --- a/net/ipv6/Kconfig +++ b/net/ipv6/Kconfig | |||
@@ -170,6 +170,25 @@ config IPV6_SIT | |||
170 | 170 | ||
171 | Saying M here will produce a module called sit. If unsure, say Y. | 171 | Saying M here will produce a module called sit. If unsure, say Y. |
172 | 172 | ||
173 | config IPV6_SIT_6RD | ||
174 | bool "IPv6: IPv6 Rapid Deployment (6RD) (EXPERIMENTAL)" | ||
175 | depends on IPV6_SIT && EXPERIMENTAL | ||
176 | default n | ||
177 | ---help--- | ||
178 | IPv6 Rapid Deployment (6rd; draft-ietf-softwire-ipv6-6rd) builds upon | ||
179 | mechanisms of 6to4 (RFC3056) to enable a service provider to rapidly | ||
180 | deploy IPv6 unicast service to IPv4 sites to which it provides | ||
181 | customer premises equipment. Like 6to4, it utilizes stateless IPv6 in | ||
182 | IPv4 encapsulation in order to transit IPv4-only network | ||
183 | infrastructure. Unlike 6to4, a 6rd service provider uses an IPv6 | ||
184 | prefix of its own in place of the fixed 6to4 prefix. | ||
185 | |||
186 | With this option enabled, the SIT driver offers 6rd functionality by | ||
187 | providing an additional ioctl API to configure the IPv6 prefix used | ||
188 | instead of the static 2002::/16 prefix of 6to4. | ||
189 | |||
190 | If unsure, say N. | ||
191 | |||
173 | config IPV6_NDISC_NODETYPE | 192 | config IPV6_NDISC_NODETYPE |
174 | bool | 193 | bool |
175 | 194 | ||
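The help text above refers to an additional ioctl API in the sit driver for setting the 6rd prefix; the companion sit.c/if_tunnel.h changes are not part of this section, so the user-space sketch below is an assumption about that interface. The SIOCADD6RD request and the struct ip_tunnel_6rd layout are taken from the 6rd patch set and should be verified against include/linux/if_tunnel.h; the device name "sit1" and the 2001:db8::/32 prefix are example values only.

/*
 * Hedged sketch: configure a 6rd prefix on an existing sit device.
 * SIOCADD6RD and struct ip_tunnel_6rd are assumptions about the companion
 * sit patch (not shown in this diff); check your headers before relying
 * on the exact names and layout.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/if_tunnel.h>

int main(void)
{
	struct ip_tunnel_6rd p6rd;
	struct ifreq ifr;
	int fd, err;

	memset(&p6rd, 0, sizeof(p6rd));
	/* 2001:db8::/32 as the provider's 6rd prefix (example value). */
	p6rd.prefix.s6_addr[0] = 0x20;
	p6rd.prefix.s6_addr[1] = 0x01;
	p6rd.prefix.s6_addr[2] = 0x0d;
	p6rd.prefix.s6_addr[3] = 0xb8;
	p6rd.prefixlen = 32;
	p6rd.relay_prefix = 0;		/* 0.0.0.0/0: embed the full IPv4 address */
	p6rd.relay_prefixlen = 0;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "sit1", IFNAMSIZ - 1);	/* hypothetical device */
	ifr.ifr_data = (void *)&p6rd;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}
	err = ioctl(fd, SIOCADD6RD, &ifr);
	if (err < 0)
		perror("SIOCADD6RD");
	close(fd);
	return err < 0;
}

On systems whose iproute2 carries 6rd support, the equivalent configuration is usually done with something like "ip tunnel 6rd dev sit1 6rd-prefix 2001:db8::/32".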
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 1fd0a3d775d2..918648409612 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -3708,6 +3708,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf, | |||
3708 | #endif | 3708 | #endif |
3709 | array[DEVCONF_DISABLE_IPV6] = cnf->disable_ipv6; | 3709 | array[DEVCONF_DISABLE_IPV6] = cnf->disable_ipv6; |
3710 | array[DEVCONF_ACCEPT_DAD] = cnf->accept_dad; | 3710 | array[DEVCONF_ACCEPT_DAD] = cnf->accept_dad; |
3711 | array[DEVCONF_FORCE_TLLAO] = cnf->force_tllao; | ||
3711 | } | 3712 | } |
3712 | 3713 | ||
3713 | static inline size_t inet6_if_nlmsg_size(void) | 3714 | static inline size_t inet6_if_nlmsg_size(void) |
@@ -4353,6 +4354,14 @@ static struct addrconf_sysctl_table | |||
4353 | .proc_handler = proc_dointvec, | 4354 | .proc_handler = proc_dointvec, |
4354 | }, | 4355 | }, |
4355 | { | 4356 | { |
4357 | .ctl_name = CTL_UNNUMBERED, | ||
4358 | .procname = "force_tllao", | ||
4359 | .data = &ipv6_devconf.force_tllao, | ||
4360 | .maxlen = sizeof(int), | ||
4361 | .mode = 0644, | ||
4362 | .proc_handler = proc_dointvec | ||
4363 | }, | ||
4364 | { | ||
4356 | .ctl_name = 0, /* sentinel */ | 4365 | .ctl_name = 0, /* sentinel */ |
4357 | } | 4366 | } |
4358 | }, | 4367 | }, |
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index e127a32f9540..b6d058818673 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c | |||
@@ -185,7 +185,7 @@ lookup_protocol: | |||
185 | inet->is_icsk = (INET_PROTOSW_ICSK & answer_flags) != 0; | 185 | inet->is_icsk = (INET_PROTOSW_ICSK & answer_flags) != 0; |
186 | 186 | ||
187 | if (SOCK_RAW == sock->type) { | 187 | if (SOCK_RAW == sock->type) { |
188 | inet->num = protocol; | 188 | inet->inet_num = protocol; |
189 | if (IPPROTO_RAW == protocol) | 189 | if (IPPROTO_RAW == protocol) |
190 | inet->hdrincl = 1; | 190 | inet->hdrincl = 1; |
191 | } | 191 | } |
@@ -228,12 +228,12 @@ lookup_protocol: | |||
228 | */ | 228 | */ |
229 | sk_refcnt_debug_inc(sk); | 229 | sk_refcnt_debug_inc(sk); |
230 | 230 | ||
231 | if (inet->num) { | 231 | if (inet->inet_num) { |
232 | /* It assumes that any protocol which allows | 232 | /* It assumes that any protocol which allows |
233 | * the user to assign a number at socket | 233 | * the user to assign a number at socket |
234 | * creation time automatically shares. | 234 | * creation time automatically shares. |
235 | */ | 235 | */ |
236 | inet->sport = htons(inet->num); | 236 | inet->inet_sport = htons(inet->inet_num); |
237 | sk->sk_prot->hash(sk); | 237 | sk->sk_prot->hash(sk); |
238 | } | 238 | } |
239 | if (sk->sk_prot->init) { | 239 | if (sk->sk_prot->init) { |
@@ -281,7 +281,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |||
281 | lock_sock(sk); | 281 | lock_sock(sk); |
282 | 282 | ||
283 | /* Check these errors (active socket, double bind). */ | 283 | /* Check these errors (active socket, double bind). */ |
284 | if (sk->sk_state != TCP_CLOSE || inet->num) { | 284 | if (sk->sk_state != TCP_CLOSE || inet->inet_num) { |
285 | err = -EINVAL; | 285 | err = -EINVAL; |
286 | goto out; | 286 | goto out; |
287 | } | 287 | } |
@@ -353,8 +353,8 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |||
353 | } | 353 | } |
354 | } | 354 | } |
355 | 355 | ||
356 | inet->rcv_saddr = v4addr; | 356 | inet->inet_rcv_saddr = v4addr; |
357 | inet->saddr = v4addr; | 357 | inet->inet_saddr = v4addr; |
358 | 358 | ||
359 | ipv6_addr_copy(&np->rcv_saddr, &addr->sin6_addr); | 359 | ipv6_addr_copy(&np->rcv_saddr, &addr->sin6_addr); |
360 | 360 | ||
@@ -375,9 +375,9 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |||
375 | } | 375 | } |
376 | if (snum) | 376 | if (snum) |
377 | sk->sk_userlocks |= SOCK_BINDPORT_LOCK; | 377 | sk->sk_userlocks |= SOCK_BINDPORT_LOCK; |
378 | inet->sport = htons(inet->num); | 378 | inet->inet_sport = htons(inet->inet_num); |
379 | inet->dport = 0; | 379 | inet->inet_dport = 0; |
380 | inet->daddr = 0; | 380 | inet->inet_daddr = 0; |
381 | out: | 381 | out: |
382 | release_sock(sk); | 382 | release_sock(sk); |
383 | return err; | 383 | return err; |
@@ -441,12 +441,12 @@ int inet6_getname(struct socket *sock, struct sockaddr *uaddr, | |||
441 | sin->sin6_flowinfo = 0; | 441 | sin->sin6_flowinfo = 0; |
442 | sin->sin6_scope_id = 0; | 442 | sin->sin6_scope_id = 0; |
443 | if (peer) { | 443 | if (peer) { |
444 | if (!inet->dport) | 444 | if (!inet->inet_dport) |
445 | return -ENOTCONN; | 445 | return -ENOTCONN; |
446 | if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)) && | 446 | if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)) && |
447 | peer == 1) | 447 | peer == 1) |
448 | return -ENOTCONN; | 448 | return -ENOTCONN; |
449 | sin->sin6_port = inet->dport; | 449 | sin->sin6_port = inet->inet_dport; |
450 | ipv6_addr_copy(&sin->sin6_addr, &np->daddr); | 450 | ipv6_addr_copy(&sin->sin6_addr, &np->daddr); |
451 | if (np->sndflow) | 451 | if (np->sndflow) |
452 | sin->sin6_flowinfo = np->flow_label; | 452 | sin->sin6_flowinfo = np->flow_label; |
@@ -456,7 +456,7 @@ int inet6_getname(struct socket *sock, struct sockaddr *uaddr, | |||
456 | else | 456 | else |
457 | ipv6_addr_copy(&sin->sin6_addr, &np->rcv_saddr); | 457 | ipv6_addr_copy(&sin->sin6_addr, &np->rcv_saddr); |
458 | 458 | ||
459 | sin->sin6_port = inet->sport; | 459 | sin->sin6_port = inet->inet_sport; |
460 | } | 460 | } |
461 | if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL) | 461 | if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL) |
462 | sin->sin6_scope_id = sk->sk_bound_dev_if; | 462 | sin->sin6_scope_id = sk->sk_bound_dev_if; |
@@ -552,7 +552,7 @@ const struct proto_ops inet6_dgram_ops = { | |||
552 | #endif | 552 | #endif |
553 | }; | 553 | }; |
554 | 554 | ||
555 | static struct net_proto_family inet6_family_ops = { | 555 | static const struct net_proto_family inet6_family_ops = { |
556 | .family = PF_INET6, | 556 | .family = PF_INET6, |
557 | .create = inet6_create, | 557 | .create = inet6_create, |
558 | .owner = THIS_MODULE, | 558 | .owner = THIS_MODULE, |
@@ -654,8 +654,9 @@ int inet6_sk_rebuild_header(struct sock *sk) | |||
654 | ipv6_addr_copy(&fl.fl6_src, &np->saddr); | 654 | ipv6_addr_copy(&fl.fl6_src, &np->saddr); |
655 | fl.fl6_flowlabel = np->flow_label; | 655 | fl.fl6_flowlabel = np->flow_label; |
656 | fl.oif = sk->sk_bound_dev_if; | 656 | fl.oif = sk->sk_bound_dev_if; |
657 | fl.fl_ip_dport = inet->dport; | 657 | fl.mark = sk->sk_mark; |
658 | fl.fl_ip_sport = inet->sport; | 658 | fl.fl_ip_dport = inet->inet_dport; |
659 | fl.fl_ip_sport = inet->inet_sport; | ||
659 | security_sk_classify_flow(sk, &fl); | 660 | security_sk_classify_flow(sk, &fl); |
660 | 661 | ||
661 | if (np->opt && np->opt->srcrt) { | 662 | if (np->opt && np->opt->srcrt) { |
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c index c1589e2f1dc9..0f526f8ea518 100644 --- a/net/ipv6/ah6.c +++ b/net/ipv6/ah6.c | |||
@@ -24,18 +24,92 @@ | |||
24 | * This file is derived from net/ipv4/ah.c. | 24 | * This file is derived from net/ipv4/ah.c. |
25 | */ | 25 | */ |
26 | 26 | ||
27 | #include <crypto/hash.h> | ||
27 | #include <linux/module.h> | 28 | #include <linux/module.h> |
28 | #include <net/ip.h> | 29 | #include <net/ip.h> |
29 | #include <net/ah.h> | 30 | #include <net/ah.h> |
30 | #include <linux/crypto.h> | 31 | #include <linux/crypto.h> |
31 | #include <linux/pfkeyv2.h> | 32 | #include <linux/pfkeyv2.h> |
32 | #include <linux/spinlock.h> | ||
33 | #include <linux/string.h> | 33 | #include <linux/string.h> |
34 | #include <linux/scatterlist.h> | ||
34 | #include <net/icmp.h> | 35 | #include <net/icmp.h> |
35 | #include <net/ipv6.h> | 36 | #include <net/ipv6.h> |
36 | #include <net/protocol.h> | 37 | #include <net/protocol.h> |
37 | #include <net/xfrm.h> | 38 | #include <net/xfrm.h> |
38 | 39 | ||
40 | #define IPV6HDR_BASELEN 8 | ||
41 | |||
42 | struct tmp_ext { | ||
43 | #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) | ||
44 | struct in6_addr saddr; | ||
45 | #endif | ||
46 | struct in6_addr daddr; | ||
47 | char hdrs[0]; | ||
48 | }; | ||
49 | |||
50 | struct ah_skb_cb { | ||
51 | struct xfrm_skb_cb xfrm; | ||
52 | void *tmp; | ||
53 | }; | ||
54 | |||
55 | #define AH_SKB_CB(__skb) ((struct ah_skb_cb *)&((__skb)->cb[0])) | ||
56 | |||
57 | static void *ah_alloc_tmp(struct crypto_ahash *ahash, int nfrags, | ||
58 | unsigned int size) | ||
59 | { | ||
60 | unsigned int len; | ||
61 | |||
62 | len = size + crypto_ahash_digestsize(ahash) + | ||
63 | (crypto_ahash_alignmask(ahash) & | ||
64 | ~(crypto_tfm_ctx_alignment() - 1)); | ||
65 | |||
66 | len = ALIGN(len, crypto_tfm_ctx_alignment()); | ||
67 | |||
68 | len += sizeof(struct ahash_request) + crypto_ahash_reqsize(ahash); | ||
69 | len = ALIGN(len, __alignof__(struct scatterlist)); | ||
70 | |||
71 | len += sizeof(struct scatterlist) * nfrags; | ||
72 | |||
73 | return kmalloc(len, GFP_ATOMIC); | ||
74 | } | ||
75 | |||
76 | static inline struct tmp_ext *ah_tmp_ext(void *base) | ||
77 | { | ||
78 | return base + IPV6HDR_BASELEN; | ||
79 | } | ||
80 | |||
81 | static inline u8 *ah_tmp_auth(u8 *tmp, unsigned int offset) | ||
82 | { | ||
83 | return tmp + offset; | ||
84 | } | ||
85 | |||
86 | static inline u8 *ah_tmp_icv(struct crypto_ahash *ahash, void *tmp, | ||
87 | unsigned int offset) | ||
88 | { | ||
89 | return PTR_ALIGN((u8 *)tmp + offset, crypto_ahash_alignmask(ahash) + 1); | ||
90 | } | ||
91 | |||
92 | static inline struct ahash_request *ah_tmp_req(struct crypto_ahash *ahash, | ||
93 | u8 *icv) | ||
94 | { | ||
95 | struct ahash_request *req; | ||
96 | |||
97 | req = (void *)PTR_ALIGN(icv + crypto_ahash_digestsize(ahash), | ||
98 | crypto_tfm_ctx_alignment()); | ||
99 | |||
100 | ahash_request_set_tfm(req, ahash); | ||
101 | |||
102 | return req; | ||
103 | } | ||
104 | |||
105 | static inline struct scatterlist *ah_req_sg(struct crypto_ahash *ahash, | ||
106 | struct ahash_request *req) | ||
107 | { | ||
108 | return (void *)ALIGN((unsigned long)(req + 1) + | ||
109 | crypto_ahash_reqsize(ahash), | ||
110 | __alignof__(struct scatterlist)); | ||
111 | } | ||
112 | |||
39 | static int zero_out_mutable_opts(struct ipv6_opt_hdr *opthdr) | 113 | static int zero_out_mutable_opts(struct ipv6_opt_hdr *opthdr) |
40 | { | 114 | { |
41 | u8 *opt = (u8 *)opthdr; | 115 | u8 *opt = (u8 *)opthdr; |
@@ -218,24 +292,85 @@ static int ipv6_clear_mutable_options(struct ipv6hdr *iph, int len, int dir) | |||
218 | return 0; | 292 | return 0; |
219 | } | 293 | } |
220 | 294 | ||
295 | static void ah6_output_done(struct crypto_async_request *base, int err) | ||
296 | { | ||
297 | int extlen; | ||
298 | u8 *iph_base; | ||
299 | u8 *icv; | ||
300 | struct sk_buff *skb = base->data; | ||
301 | struct xfrm_state *x = skb_dst(skb)->xfrm; | ||
302 | struct ah_data *ahp = x->data; | ||
303 | struct ipv6hdr *top_iph = ipv6_hdr(skb); | ||
304 | struct ip_auth_hdr *ah = ip_auth_hdr(skb); | ||
305 | struct tmp_ext *iph_ext; | ||
306 | |||
307 | extlen = skb_network_header_len(skb) - sizeof(struct ipv6hdr); | ||
308 | if (extlen) | ||
309 | extlen += sizeof(*iph_ext); | ||
310 | |||
311 | iph_base = AH_SKB_CB(skb)->tmp; | ||
312 | iph_ext = ah_tmp_ext(iph_base); | ||
313 | icv = ah_tmp_icv(ahp->ahash, iph_ext, extlen); | ||
314 | |||
315 | memcpy(ah->auth_data, icv, ahp->icv_trunc_len); | ||
316 | memcpy(top_iph, iph_base, IPV6HDR_BASELEN); | ||
317 | |||
318 | if (extlen) { | ||
319 | #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) | ||
320 | memcpy(&top_iph->saddr, iph_ext, extlen); | ||
321 | #else | ||
322 | memcpy(&top_iph->daddr, iph_ext, extlen); | ||
323 | #endif | ||
324 | } | ||
325 | |||
326 | err = ah->nexthdr; | ||
327 | |||
328 | kfree(AH_SKB_CB(skb)->tmp); | ||
329 | xfrm_output_resume(skb, err); | ||
330 | } | ||
331 | |||
221 | static int ah6_output(struct xfrm_state *x, struct sk_buff *skb) | 332 | static int ah6_output(struct xfrm_state *x, struct sk_buff *skb) |
222 | { | 333 | { |
223 | int err; | 334 | int err; |
335 | int nfrags; | ||
224 | int extlen; | 336 | int extlen; |
337 | u8 *iph_base; | ||
338 | u8 *icv; | ||
339 | u8 nexthdr; | ||
340 | struct sk_buff *trailer; | ||
341 | struct crypto_ahash *ahash; | ||
342 | struct ahash_request *req; | ||
343 | struct scatterlist *sg; | ||
225 | struct ipv6hdr *top_iph; | 344 | struct ipv6hdr *top_iph; |
226 | struct ip_auth_hdr *ah; | 345 | struct ip_auth_hdr *ah; |
227 | struct ah_data *ahp; | 346 | struct ah_data *ahp; |
228 | u8 nexthdr; | 347 | struct tmp_ext *iph_ext; |
229 | char tmp_base[8]; | 348 | |
230 | struct { | 349 | ahp = x->data; |
231 | #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) | 350 | ahash = ahp->ahash; |
232 | struct in6_addr saddr; | 351 | |
233 | #endif | 352 | if ((err = skb_cow_data(skb, 0, &trailer)) < 0) |
234 | struct in6_addr daddr; | 353 | goto out; |
235 | char hdrs[0]; | 354 | nfrags = err; |
236 | } *tmp_ext; | ||
237 | 355 | ||
238 | skb_push(skb, -skb_network_offset(skb)); | 356 | skb_push(skb, -skb_network_offset(skb)); |
357 | extlen = skb_network_header_len(skb) - sizeof(struct ipv6hdr); | ||
358 | if (extlen) | ||
359 | extlen += sizeof(*iph_ext); | ||
360 | |||
361 | err = -ENOMEM; | ||
362 | iph_base = ah_alloc_tmp(ahash, nfrags, IPV6HDR_BASELEN + extlen); | ||
363 | if (!iph_base) | ||
364 | goto out; | ||
365 | |||
366 | iph_ext = ah_tmp_ext(iph_base); | ||
367 | icv = ah_tmp_icv(ahash, iph_ext, extlen); | ||
368 | req = ah_tmp_req(ahash, icv); | ||
369 | sg = ah_req_sg(ahash, req); | ||
370 | |||
371 | ah = ip_auth_hdr(skb); | ||
372 | memset(ah->auth_data, 0, ahp->icv_trunc_len); | ||
373 | |||
239 | top_iph = ipv6_hdr(skb); | 374 | top_iph = ipv6_hdr(skb); |
240 | top_iph->payload_len = htons(skb->len - sizeof(*top_iph)); | 375 | top_iph->payload_len = htons(skb->len - sizeof(*top_iph)); |
241 | 376 | ||
@@ -245,31 +380,22 @@ static int ah6_output(struct xfrm_state *x, struct sk_buff *skb) | |||
245 | /* When there are no extension headers, we only need to save the first | 380 | /* When there are no extension headers, we only need to save the first |
246 | * 8 bytes of the base IP header. | 381 | * 8 bytes of the base IP header. |
247 | */ | 382 | */ |
248 | memcpy(tmp_base, top_iph, sizeof(tmp_base)); | 383 | memcpy(iph_base, top_iph, IPV6HDR_BASELEN); |
249 | 384 | ||
250 | tmp_ext = NULL; | ||
251 | extlen = skb_transport_offset(skb) - sizeof(struct ipv6hdr); | ||
252 | if (extlen) { | 385 | if (extlen) { |
253 | extlen += sizeof(*tmp_ext); | ||
254 | tmp_ext = kmalloc(extlen, GFP_ATOMIC); | ||
255 | if (!tmp_ext) { | ||
256 | err = -ENOMEM; | ||
257 | goto error; | ||
258 | } | ||
259 | #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) | 386 | #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) |
260 | memcpy(tmp_ext, &top_iph->saddr, extlen); | 387 | memcpy(iph_ext, &top_iph->saddr, extlen); |
261 | #else | 388 | #else |
262 | memcpy(tmp_ext, &top_iph->daddr, extlen); | 389 | memcpy(iph_ext, &top_iph->daddr, extlen); |
263 | #endif | 390 | #endif |
264 | err = ipv6_clear_mutable_options(top_iph, | 391 | err = ipv6_clear_mutable_options(top_iph, |
265 | extlen - sizeof(*tmp_ext) + | 392 | extlen - sizeof(*iph_ext) + |
266 | sizeof(*top_iph), | 393 | sizeof(*top_iph), |
267 | XFRM_POLICY_OUT); | 394 | XFRM_POLICY_OUT); |
268 | if (err) | 395 | if (err) |
269 | goto error_free_iph; | 396 | goto out_free; |
270 | } | 397 | } |
271 | 398 | ||
272 | ah = ip_auth_hdr(skb); | ||
273 | ah->nexthdr = nexthdr; | 399 | ah->nexthdr = nexthdr; |
274 | 400 | ||
275 | top_iph->priority = 0; | 401 | top_iph->priority = 0; |
@@ -278,36 +404,80 @@ static int ah6_output(struct xfrm_state *x, struct sk_buff *skb) | |||
278 | top_iph->flow_lbl[2] = 0; | 404 | top_iph->flow_lbl[2] = 0; |
279 | top_iph->hop_limit = 0; | 405 | top_iph->hop_limit = 0; |
280 | 406 | ||
281 | ahp = x->data; | ||
282 | ah->hdrlen = (XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2; | 407 | ah->hdrlen = (XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2; |
283 | 408 | ||
284 | ah->reserved = 0; | 409 | ah->reserved = 0; |
285 | ah->spi = x->id.spi; | 410 | ah->spi = x->id.spi; |
286 | ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output); | 411 | ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output); |
287 | 412 | ||
288 | spin_lock_bh(&x->lock); | 413 | sg_init_table(sg, nfrags); |
289 | err = ah_mac_digest(ahp, skb, ah->auth_data); | 414 | skb_to_sgvec(skb, sg, 0, skb->len); |
290 | memcpy(ah->auth_data, ahp->work_icv, ahp->icv_trunc_len); | ||
291 | spin_unlock_bh(&x->lock); | ||
292 | 415 | ||
293 | if (err) | 416 | ahash_request_set_crypt(req, sg, icv, skb->len); |
294 | goto error_free_iph; | 417 | ahash_request_set_callback(req, 0, ah6_output_done, skb); |
418 | |||
419 | AH_SKB_CB(skb)->tmp = iph_base; | ||
295 | 420 | ||
296 | memcpy(top_iph, tmp_base, sizeof(tmp_base)); | 421 | err = crypto_ahash_digest(req); |
297 | if (tmp_ext) { | 422 | if (err) { |
423 | if (err == -EINPROGRESS) | ||
424 | goto out; | ||
425 | |||
426 | if (err == -EBUSY) | ||
427 | err = NET_XMIT_DROP; | ||
428 | goto out_free; | ||
429 | } | ||
430 | |||
431 | memcpy(ah->auth_data, icv, ahp->icv_trunc_len); | ||
432 | memcpy(top_iph, iph_base, IPV6HDR_BASELEN); | ||
433 | |||
434 | if (extlen) { | ||
298 | #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) | 435 | #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) |
299 | memcpy(&top_iph->saddr, tmp_ext, extlen); | 436 | memcpy(&top_iph->saddr, iph_ext, extlen); |
300 | #else | 437 | #else |
301 | memcpy(&top_iph->daddr, tmp_ext, extlen); | 438 | memcpy(&top_iph->daddr, iph_ext, extlen); |
302 | #endif | 439 | #endif |
303 | error_free_iph: | ||
304 | kfree(tmp_ext); | ||
305 | } | 440 | } |
306 | 441 | ||
307 | error: | 442 | out_free: |
443 | kfree(iph_base); | ||
444 | out: | ||
308 | return err; | 445 | return err; |
309 | } | 446 | } |
310 | 447 | ||
448 | static void ah6_input_done(struct crypto_async_request *base, int err) | ||
449 | { | ||
450 | u8 *auth_data; | ||
451 | u8 *icv; | ||
452 | u8 *work_iph; | ||
453 | struct sk_buff *skb = base->data; | ||
454 | struct xfrm_state *x = xfrm_input_state(skb); | ||
455 | struct ah_data *ahp = x->data; | ||
456 | struct ip_auth_hdr *ah = ip_auth_hdr(skb); | ||
457 | int hdr_len = skb_network_header_len(skb); | ||
458 | int ah_hlen = (ah->hdrlen + 2) << 2; | ||
459 | |||
460 | work_iph = AH_SKB_CB(skb)->tmp; | ||
461 | auth_data = ah_tmp_auth(work_iph, hdr_len); | ||
462 | icv = ah_tmp_icv(ahp->ahash, auth_data, ahp->icv_trunc_len); | ||
463 | |||
464 | err = memcmp(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG: 0; | ||
465 | if (err) | ||
466 | goto out; | ||
467 | |||
468 | skb->network_header += ah_hlen; | ||
469 | memcpy(skb_network_header(skb), work_iph, hdr_len); | ||
470 | __skb_pull(skb, ah_hlen + hdr_len); | ||
471 | skb_set_transport_header(skb, -hdr_len); | ||
472 | |||
473 | err = ah->nexthdr; | ||
474 | out: | ||
475 | kfree(AH_SKB_CB(skb)->tmp); | ||
476 | xfrm_input_resume(skb, err); | ||
477 | } | ||
478 | |||
479 | |||
480 | |||
311 | static int ah6_input(struct xfrm_state *x, struct sk_buff *skb) | 481 | static int ah6_input(struct xfrm_state *x, struct sk_buff *skb) |
312 | { | 482 | { |
313 | /* | 483 | /* |
@@ -325,14 +495,21 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb) | |||
325 | * There is offset of AH before IPv6 header after the process. | 495 | * There is offset of AH before IPv6 header after the process. |
326 | */ | 496 | */ |
327 | 497 | ||
498 | u8 *auth_data; | ||
499 | u8 *icv; | ||
500 | u8 *work_iph; | ||
501 | struct sk_buff *trailer; | ||
502 | struct crypto_ahash *ahash; | ||
503 | struct ahash_request *req; | ||
504 | struct scatterlist *sg; | ||
328 | struct ip_auth_hdr *ah; | 505 | struct ip_auth_hdr *ah; |
329 | struct ipv6hdr *ip6h; | 506 | struct ipv6hdr *ip6h; |
330 | struct ah_data *ahp; | 507 | struct ah_data *ahp; |
331 | unsigned char *tmp_hdr = NULL; | ||
332 | u16 hdr_len; | 508 | u16 hdr_len; |
333 | u16 ah_hlen; | 509 | u16 ah_hlen; |
334 | int nexthdr; | 510 | int nexthdr; |
335 | int err = -EINVAL; | 511 | int nfrags; |
512 | int err = -ENOMEM; | ||
336 | 513 | ||
337 | if (!pskb_may_pull(skb, sizeof(struct ip_auth_hdr))) | 514 | if (!pskb_may_pull(skb, sizeof(struct ip_auth_hdr))) |
338 | goto out; | 515 | goto out; |
@@ -345,9 +522,11 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb) | |||
345 | 522 | ||
346 | skb->ip_summed = CHECKSUM_NONE; | 523 | skb->ip_summed = CHECKSUM_NONE; |
347 | 524 | ||
348 | hdr_len = skb->data - skb_network_header(skb); | 525 | hdr_len = skb_network_header_len(skb); |
349 | ah = (struct ip_auth_hdr *)skb->data; | 526 | ah = (struct ip_auth_hdr *)skb->data; |
350 | ahp = x->data; | 527 | ahp = x->data; |
528 | ahash = ahp->ahash; | ||
529 | |||
351 | nexthdr = ah->nexthdr; | 530 | nexthdr = ah->nexthdr; |
352 | ah_hlen = (ah->hdrlen + 2) << 2; | 531 | ah_hlen = (ah->hdrlen + 2) << 2; |
353 | 532 | ||
@@ -358,48 +537,67 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb) | |||
358 | if (!pskb_may_pull(skb, ah_hlen)) | 537 | if (!pskb_may_pull(skb, ah_hlen)) |
359 | goto out; | 538 | goto out; |
360 | 539 | ||
361 | tmp_hdr = kmemdup(skb_network_header(skb), hdr_len, GFP_ATOMIC); | ||
362 | if (!tmp_hdr) | ||
363 | goto out; | ||
364 | ip6h = ipv6_hdr(skb); | 540 | ip6h = ipv6_hdr(skb); |
541 | |||
542 | skb_push(skb, hdr_len); | ||
543 | |||
544 | if ((err = skb_cow_data(skb, 0, &trailer)) < 0) | ||
545 | goto out; | ||
546 | nfrags = err; | ||
547 | |||
548 | work_iph = ah_alloc_tmp(ahash, nfrags, hdr_len + ahp->icv_trunc_len); | ||
549 | if (!work_iph) | ||
550 | goto out; | ||
551 | |||
552 | auth_data = ah_tmp_auth(work_iph, hdr_len); | ||
553 | icv = ah_tmp_icv(ahash, auth_data, ahp->icv_trunc_len); | ||
554 | req = ah_tmp_req(ahash, icv); | ||
555 | sg = ah_req_sg(ahash, req); | ||
556 | |||
557 | memcpy(work_iph, ip6h, hdr_len); | ||
558 | memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len); | ||
559 | memset(ah->auth_data, 0, ahp->icv_trunc_len); | ||
560 | |||
365 | if (ipv6_clear_mutable_options(ip6h, hdr_len, XFRM_POLICY_IN)) | 561 | if (ipv6_clear_mutable_options(ip6h, hdr_len, XFRM_POLICY_IN)) |
366 | goto free_out; | 562 | goto out_free; |
563 | |||
367 | ip6h->priority = 0; | 564 | ip6h->priority = 0; |
368 | ip6h->flow_lbl[0] = 0; | 565 | ip6h->flow_lbl[0] = 0; |
369 | ip6h->flow_lbl[1] = 0; | 566 | ip6h->flow_lbl[1] = 0; |
370 | ip6h->flow_lbl[2] = 0; | 567 | ip6h->flow_lbl[2] = 0; |
371 | ip6h->hop_limit = 0; | 568 | ip6h->hop_limit = 0; |
372 | 569 | ||
373 | spin_lock(&x->lock); | 570 | sg_init_table(sg, nfrags); |
374 | { | 571 | skb_to_sgvec(skb, sg, 0, skb->len); |
375 | u8 auth_data[MAX_AH_AUTH_LEN]; | ||
376 | 572 | ||
377 | memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len); | 573 | ahash_request_set_crypt(req, sg, icv, skb->len); |
378 | memset(ah->auth_data, 0, ahp->icv_trunc_len); | 574 | ahash_request_set_callback(req, 0, ah6_input_done, skb); |
379 | skb_push(skb, hdr_len); | 575 | |
380 | err = ah_mac_digest(ahp, skb, ah->auth_data); | 576 | AH_SKB_CB(skb)->tmp = work_iph; |
381 | if (err) | 577 | |
382 | goto unlock; | 578 | err = crypto_ahash_digest(req); |
383 | if (memcmp(ahp->work_icv, auth_data, ahp->icv_trunc_len)) | 579 | if (err) { |
384 | err = -EBADMSG; | 580 | if (err == -EINPROGRESS) |
581 | goto out; | ||
582 | |||
583 | if (err == -EBUSY) | ||
584 | err = NET_XMIT_DROP; | ||
585 | goto out_free; | ||
385 | } | 586 | } |
386 | unlock: | ||
387 | spin_unlock(&x->lock); | ||
388 | 587 | ||
588 | err = memcmp(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG: 0; | ||
389 | if (err) | 589 | if (err) |
390 | goto free_out; | 590 | goto out_free; |
391 | 591 | ||
392 | skb->network_header += ah_hlen; | 592 | skb->network_header += ah_hlen; |
393 | memcpy(skb_network_header(skb), tmp_hdr, hdr_len); | 593 | memcpy(skb_network_header(skb), work_iph, hdr_len); |
394 | skb->transport_header = skb->network_header; | 594 | skb->transport_header = skb->network_header; |
395 | __skb_pull(skb, ah_hlen + hdr_len); | 595 | __skb_pull(skb, ah_hlen + hdr_len); |
396 | 596 | ||
397 | kfree(tmp_hdr); | 597 | err = nexthdr; |
398 | 598 | ||
399 | return nexthdr; | 599 | out_free: |
400 | 600 | kfree(work_iph); | |
401 | free_out: | ||
402 | kfree(tmp_hdr); | ||
403 | out: | 601 | out: |
404 | return err; | 602 | return err; |
405 | } | 603 | } |
@@ -430,7 +628,7 @@ static int ah6_init_state(struct xfrm_state *x) | |||
430 | { | 628 | { |
431 | struct ah_data *ahp = NULL; | 629 | struct ah_data *ahp = NULL; |
432 | struct xfrm_algo_desc *aalg_desc; | 630 | struct xfrm_algo_desc *aalg_desc; |
433 | struct crypto_hash *tfm; | 631 | struct crypto_ahash *ahash; |
434 | 632 | ||
435 | if (!x->aalg) | 633 | if (!x->aalg) |
436 | goto error; | 634 | goto error; |
@@ -442,12 +640,12 @@ static int ah6_init_state(struct xfrm_state *x) | |||
442 | if (ahp == NULL) | 640 | if (ahp == NULL) |
443 | return -ENOMEM; | 641 | return -ENOMEM; |
444 | 642 | ||
445 | tfm = crypto_alloc_hash(x->aalg->alg_name, 0, CRYPTO_ALG_ASYNC); | 643 | ahash = crypto_alloc_ahash(x->aalg->alg_name, 0, 0); |
446 | if (IS_ERR(tfm)) | 644 | if (IS_ERR(ahash)) |
447 | goto error; | 645 | goto error; |
448 | 646 | ||
449 | ahp->tfm = tfm; | 647 | ahp->ahash = ahash; |
450 | if (crypto_hash_setkey(tfm, x->aalg->alg_key, | 648 | if (crypto_ahash_setkey(ahash, x->aalg->alg_key, |
451 | (x->aalg->alg_key_len + 7) / 8)) | 649 | (x->aalg->alg_key_len + 7) / 8)) |
452 | goto error; | 650 | goto error; |
453 | 651 | ||
@@ -461,9 +659,9 @@ static int ah6_init_state(struct xfrm_state *x) | |||
461 | BUG_ON(!aalg_desc); | 659 | BUG_ON(!aalg_desc); |
462 | 660 | ||
463 | if (aalg_desc->uinfo.auth.icv_fullbits/8 != | 661 | if (aalg_desc->uinfo.auth.icv_fullbits/8 != |
464 | crypto_hash_digestsize(tfm)) { | 662 | crypto_ahash_digestsize(ahash)) { |
465 | printk(KERN_INFO "AH: %s digestsize %u != %hu\n", | 663 | printk(KERN_INFO "AH: %s digestsize %u != %hu\n", |
466 | x->aalg->alg_name, crypto_hash_digestsize(tfm), | 664 | x->aalg->alg_name, crypto_ahash_digestsize(ahash), |
467 | aalg_desc->uinfo.auth.icv_fullbits/8); | 665 | aalg_desc->uinfo.auth.icv_fullbits/8); |
468 | goto error; | 666 | goto error; |
469 | } | 667 | } |
@@ -473,10 +671,6 @@ static int ah6_init_state(struct xfrm_state *x) | |||
473 | 671 | ||
474 | BUG_ON(ahp->icv_trunc_len > MAX_AH_AUTH_LEN); | 672 | BUG_ON(ahp->icv_trunc_len > MAX_AH_AUTH_LEN); |
475 | 673 | ||
476 | ahp->work_icv = kmalloc(ahp->icv_full_len, GFP_KERNEL); | ||
477 | if (!ahp->work_icv) | ||
478 | goto error; | ||
479 | |||
480 | x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) + | 674 | x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) + |
481 | ahp->icv_trunc_len); | 675 | ahp->icv_trunc_len); |
482 | switch (x->props.mode) { | 676 | switch (x->props.mode) { |
@@ -495,8 +689,7 @@ static int ah6_init_state(struct xfrm_state *x) | |||
495 | 689 | ||
496 | error: | 690 | error: |
497 | if (ahp) { | 691 | if (ahp) { |
498 | kfree(ahp->work_icv); | 692 | crypto_free_ahash(ahp->ahash); |
499 | crypto_free_hash(ahp->tfm); | ||
500 | kfree(ahp); | 693 | kfree(ahp); |
501 | } | 694 | } |
502 | return -EINVAL; | 695 | return -EINVAL; |
@@ -509,8 +702,7 @@ static void ah6_destroy(struct xfrm_state *x) | |||
509 | if (!ahp) | 702 | if (!ahp) |
510 | return; | 703 | return; |
511 | 704 | ||
512 | kfree(ahp->work_icv); | 705 | crypto_free_ahash(ahp->ahash); |
513 | crypto_free_hash(ahp->tfm); | ||
514 | kfree(ahp); | 706 | kfree(ahp); |
515 | } | 707 | } |
516 | 708 | ||
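The ah6.c rewrite above moves from the synchronous crypto_hash interface to the asynchronous ahash API, where crypto_ahash_digest() may return -EINPROGRESS and deliver the result later through a callback (ah6_output_done/ah6_input_done resume the xfrm path). The fragment below is a generic, illustrative sketch of that submit-and-wait pattern in a kernel context, using a plain completion instead of xfrm resume; it is not the ah6.c implementation.

/*
 * Generic ahash usage sketch: submit a digest request with a completion
 * callback and treat -EINPROGRESS/-EBUSY as "the result arrives later".
 */
#include <crypto/hash.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/scatterlist.h>

struct demo_result {
	struct completion completion;
	int err;
};

/* Called by the crypto layer when an asynchronous request finishes. */
static void demo_digest_done(struct crypto_async_request *base, int err)
{
	struct demo_result *res = base->data;

	if (err == -EINPROGRESS)	/* backlogged request was started */
		return;
	res->err = err;
	complete(&res->completion);
}

static int demo_sha1(const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	struct demo_result res;
	int err;

	tfm = crypto_alloc_ahash("sha1", 0, 0);	/* async implementations allowed */
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto free_tfm;
	}

	init_completion(&res.completion);
	sg_init_one(&sg, data, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   demo_digest_done, &res);
	ahash_request_set_crypt(req, &sg, out, len);

	err = crypto_ahash_digest(req);
	if (err == -EINPROGRESS || err == -EBUSY) {
		/* Hardware or async software path: wait for the callback. */
		wait_for_completion(&res.completion);
		err = res.err;
	}

	ahash_request_free(req);
free_tfm:
	crypto_free_ahash(tfm);
	return err;
}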
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index e2bdc6d83a43..9f70452a69e7 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c | |||
@@ -98,17 +98,15 @@ ipv4_connected: | |||
98 | if (err) | 98 | if (err) |
99 | goto out; | 99 | goto out; |
100 | 100 | ||
101 | ipv6_addr_set(&np->daddr, 0, 0, htonl(0x0000ffff), inet->daddr); | 101 | ipv6_addr_set_v4mapped(inet->inet_daddr, &np->daddr); |
102 | 102 | ||
103 | if (ipv6_addr_any(&np->saddr)) { | 103 | if (ipv6_addr_any(&np->saddr)) |
104 | ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000ffff), | 104 | ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr); |
105 | inet->saddr); | 105 | |
106 | } | 106 | if (ipv6_addr_any(&np->rcv_saddr)) |
107 | ipv6_addr_set_v4mapped(inet->inet_rcv_saddr, | ||
108 | &np->rcv_saddr); | ||
107 | 109 | ||
108 | if (ipv6_addr_any(&np->rcv_saddr)) { | ||
109 | ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000ffff), | ||
110 | inet->rcv_saddr); | ||
111 | } | ||
112 | goto out; | 110 | goto out; |
113 | } | 111 | } |
114 | 112 | ||
@@ -136,7 +134,7 @@ ipv4_connected: | |||
136 | ipv6_addr_copy(&np->daddr, daddr); | 134 | ipv6_addr_copy(&np->daddr, daddr); |
137 | np->flow_label = fl.fl6_flowlabel; | 135 | np->flow_label = fl.fl6_flowlabel; |
138 | 136 | ||
139 | inet->dport = usin->sin6_port; | 137 | inet->inet_dport = usin->sin6_port; |
140 | 138 | ||
141 | /* | 139 | /* |
142 | * Check for a route to destination an obtain the | 140 | * Check for a route to destination an obtain the |
@@ -147,8 +145,9 @@ ipv4_connected: | |||
147 | ipv6_addr_copy(&fl.fl6_dst, &np->daddr); | 145 | ipv6_addr_copy(&fl.fl6_dst, &np->daddr); |
148 | ipv6_addr_copy(&fl.fl6_src, &np->saddr); | 146 | ipv6_addr_copy(&fl.fl6_src, &np->saddr); |
149 | fl.oif = sk->sk_bound_dev_if; | 147 | fl.oif = sk->sk_bound_dev_if; |
150 | fl.fl_ip_dport = inet->dport; | 148 | fl.mark = sk->sk_mark; |
151 | fl.fl_ip_sport = inet->sport; | 149 | fl.fl_ip_dport = inet->inet_dport; |
150 | fl.fl_ip_sport = inet->inet_sport; | ||
152 | 151 | ||
153 | if (!fl.oif && (addr_type&IPV6_ADDR_MULTICAST)) | 152 | if (!fl.oif && (addr_type&IPV6_ADDR_MULTICAST)) |
154 | fl.oif = np->mcast_oif; | 153 | fl.oif = np->mcast_oif; |
@@ -190,7 +189,7 @@ ipv4_connected: | |||
190 | 189 | ||
191 | if (ipv6_addr_any(&np->rcv_saddr)) { | 190 | if (ipv6_addr_any(&np->rcv_saddr)) { |
192 | ipv6_addr_copy(&np->rcv_saddr, &fl.fl6_src); | 191 | ipv6_addr_copy(&np->rcv_saddr, &fl.fl6_src); |
193 | inet->rcv_saddr = LOOPBACK4_IPV6; | 192 | inet->inet_rcv_saddr = LOOPBACK4_IPV6; |
194 | } | 193 | } |
195 | 194 | ||
196 | ip6_dst_store(sk, dst, | 195 | ip6_dst_store(sk, dst, |
@@ -329,9 +328,8 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len) | |||
329 | if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL) | 328 | if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL) |
330 | sin->sin6_scope_id = IP6CB(skb)->iif; | 329 | sin->sin6_scope_id = IP6CB(skb)->iif; |
331 | } else { | 330 | } else { |
332 | ipv6_addr_set(&sin->sin6_addr, 0, 0, | 331 | ipv6_addr_set_v4mapped(*(__be32 *)(nh + serr->addr_offset), |
333 | htonl(0xffff), | 332 | &sin->sin6_addr); |
334 | *(__be32 *)(nh + serr->addr_offset)); | ||
335 | } | 333 | } |
336 | } | 334 | } |
337 | 335 | ||
@@ -351,8 +349,8 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len) | |||
351 | } else { | 349 | } else { |
352 | struct inet_sock *inet = inet_sk(sk); | 350 | struct inet_sock *inet = inet_sk(sk); |
353 | 351 | ||
354 | ipv6_addr_set(&sin->sin6_addr, 0, 0, | 352 | ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr, |
355 | htonl(0xffff), ip_hdr(skb)->saddr); | 353 | &sin->sin6_addr); |
356 | if (inet->cmsg_flags) | 354 | if (inet->cmsg_flags) |
357 | ip_cmsg_recv(msg, skb); | 355 | ip_cmsg_recv(msg, skb); |
358 | } | 356 | } |
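The datagram.c hunks above replace the open-coded ipv6_addr_set(..., htonl(0x0000ffff), v4addr) calls with ipv6_addr_set_v4mapped(). The sketch below only illustrates the resulting IPv4-mapped layout (::ffff:a.b.c.d) in plain user-space C; it is not the kernel helper.

/*
 * Illustration of an IPv4-mapped IPv6 address: 80 zero bits, 16 one bits,
 * then the 32-bit IPv4 address.
 */
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

static void v4mapped(struct in6_addr *out, struct in_addr v4)
{
	memset(out, 0, sizeof(*out));
	out->s6_addr[10] = 0xff;		/* bytes 10-11 hold 0xffff */
	out->s6_addr[11] = 0xff;
	memcpy(&out->s6_addr[12], &v4, 4);	/* low 32 bits carry the IPv4 address */
}

int main(void)
{
	struct in_addr v4;
	struct in6_addr v6;
	char buf[INET6_ADDRSTRLEN];

	inet_pton(AF_INET, "192.0.2.1", &v4);
	v4mapped(&v6, v4);
	printf("%s\n", inet_ntop(AF_INET6, &v6, buf, sizeof(buf)));
	/* prints ::ffff:192.0.2.1 */
	return 0;
}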
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c index cc4797dd8325..3516e6fe2e56 100644 --- a/net/ipv6/inet6_connection_sock.c +++ b/net/ipv6/inet6_connection_sock.c | |||
@@ -132,7 +132,7 @@ void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr * uaddr) | |||
132 | 132 | ||
133 | sin6->sin6_family = AF_INET6; | 133 | sin6->sin6_family = AF_INET6; |
134 | ipv6_addr_copy(&sin6->sin6_addr, &np->daddr); | 134 | ipv6_addr_copy(&sin6->sin6_addr, &np->daddr); |
135 | sin6->sin6_port = inet_sk(sk)->dport; | 135 | sin6->sin6_port = inet_sk(sk)->inet_dport; |
136 | /* We do not store received flowlabel for TCP */ | 136 | /* We do not store received flowlabel for TCP */ |
137 | sin6->sin6_flowinfo = 0; | 137 | sin6->sin6_flowinfo = 0; |
138 | sin6->sin6_scope_id = 0; | 138 | sin6->sin6_scope_id = 0; |
@@ -168,8 +168,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie) | |||
168 | if (dst) { | 168 | if (dst) { |
169 | struct rt6_info *rt = (struct rt6_info *)dst; | 169 | struct rt6_info *rt = (struct rt6_info *)dst; |
170 | if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) { | 170 | if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) { |
171 | sk->sk_dst_cache = NULL; | 171 | __sk_dst_reset(sk); |
172 | dst_release(dst); | ||
173 | dst = NULL; | 172 | dst = NULL; |
174 | } | 173 | } |
175 | } | 174 | } |
@@ -194,8 +193,9 @@ int inet6_csk_xmit(struct sk_buff *skb, int ipfragok) | |||
194 | fl.fl6_flowlabel = np->flow_label; | 193 | fl.fl6_flowlabel = np->flow_label; |
195 | IP6_ECN_flow_xmit(sk, fl.fl6_flowlabel); | 194 | IP6_ECN_flow_xmit(sk, fl.fl6_flowlabel); |
196 | fl.oif = sk->sk_bound_dev_if; | 195 | fl.oif = sk->sk_bound_dev_if; |
197 | fl.fl_ip_sport = inet->sport; | 196 | fl.mark = sk->sk_mark; |
198 | fl.fl_ip_dport = inet->dport; | 197 | fl.fl_ip_sport = inet->inet_sport; |
198 | fl.fl_ip_dport = inet->inet_dport; | ||
199 | security_sk_classify_flow(sk, &fl); | 199 | security_sk_classify_flow(sk, &fl); |
200 | 200 | ||
201 | if (np->opt && np->opt->srcrt) { | 201 | if (np->opt && np->opt->srcrt) { |
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c index 1bcc3431859e..00c6a3e6cddf 100644 --- a/net/ipv6/inet6_hashtables.c +++ b/net/ipv6/inet6_hashtables.c | |||
@@ -73,7 +73,7 @@ struct sock *__inet6_lookup_established(struct net *net, | |||
73 | * have wildcards anyways. | 73 | * have wildcards anyways. |
74 | */ | 74 | */ |
75 | unsigned int hash = inet6_ehashfn(net, daddr, hnum, saddr, sport); | 75 | unsigned int hash = inet6_ehashfn(net, daddr, hnum, saddr, sport); |
76 | unsigned int slot = hash & (hashinfo->ehash_size - 1); | 76 | unsigned int slot = hash & hashinfo->ehash_mask; |
77 | struct inet_ehash_bucket *head = &hashinfo->ehash[slot]; | 77 | struct inet_ehash_bucket *head = &hashinfo->ehash[slot]; |
78 | 78 | ||
79 | 79 | ||
@@ -125,7 +125,7 @@ static int inline compute_score(struct sock *sk, struct net *net, | |||
125 | { | 125 | { |
126 | int score = -1; | 126 | int score = -1; |
127 | 127 | ||
128 | if (net_eq(sock_net(sk), net) && inet_sk(sk)->num == hnum && | 128 | if (net_eq(sock_net(sk), net) && inet_sk(sk)->inet_num == hnum && |
129 | sk->sk_family == PF_INET6) { | 129 | sk->sk_family == PF_INET6) { |
130 | const struct ipv6_pinfo *np = inet6_sk(sk); | 130 | const struct ipv6_pinfo *np = inet6_sk(sk); |
131 | 131 | ||
@@ -214,10 +214,10 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row, | |||
214 | const struct in6_addr *daddr = &np->rcv_saddr; | 214 | const struct in6_addr *daddr = &np->rcv_saddr; |
215 | const struct in6_addr *saddr = &np->daddr; | 215 | const struct in6_addr *saddr = &np->daddr; |
216 | const int dif = sk->sk_bound_dev_if; | 216 | const int dif = sk->sk_bound_dev_if; |
217 | const __portpair ports = INET_COMBINED_PORTS(inet->dport, lport); | 217 | const __portpair ports = INET_COMBINED_PORTS(inet->inet_dport, lport); |
218 | struct net *net = sock_net(sk); | 218 | struct net *net = sock_net(sk); |
219 | const unsigned int hash = inet6_ehashfn(net, daddr, lport, saddr, | 219 | const unsigned int hash = inet6_ehashfn(net, daddr, lport, saddr, |
220 | inet->dport); | 220 | inet->inet_dport); |
221 | struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash); | 221 | struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash); |
222 | spinlock_t *lock = inet_ehash_lockp(hinfo, hash); | 222 | spinlock_t *lock = inet_ehash_lockp(hinfo, hash); |
223 | struct sock *sk2; | 223 | struct sock *sk2; |
@@ -248,8 +248,8 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row, | |||
248 | unique: | 248 | unique: |
249 | /* Must record num and sport now. Otherwise we will see | 249 | /* Must record num and sport now. Otherwise we will see |
250 | * in hash table socket with a funny identity. */ | 250 | * in hash table socket with a funny identity. */ |
251 | inet->num = lport; | 251 | inet->inet_num = lport; |
252 | inet->sport = htons(lport); | 252 | inet->inet_sport = htons(lport); |
253 | WARN_ON(!sk_unhashed(sk)); | 253 | WARN_ON(!sk_unhashed(sk)); |
254 | __sk_nulls_add_node_rcu(sk, &head->chain); | 254 | __sk_nulls_add_node_rcu(sk, &head->chain); |
255 | sk->sk_hash = hash; | 255 | sk->sk_hash = hash; |
@@ -279,7 +279,7 @@ static inline u32 inet6_sk_port_offset(const struct sock *sk) | |||
279 | const struct ipv6_pinfo *np = inet6_sk(sk); | 279 | const struct ipv6_pinfo *np = inet6_sk(sk); |
280 | return secure_ipv6_port_ephemeral(np->rcv_saddr.s6_addr32, | 280 | return secure_ipv6_port_ephemeral(np->rcv_saddr.s6_addr32, |
281 | np->daddr.s6_addr32, | 281 | np->daddr.s6_addr32, |
282 | inet->dport); | 282 | inet->inet_dport); |
283 | } | 283 | } |
284 | 284 | ||
285 | int inet6_hash_connect(struct inet_timewait_death_row *death_row, | 285 | int inet6_hash_connect(struct inet_timewait_death_row *death_row, |
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index c595bbe1ed99..6c1b5c98e818 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c | |||
@@ -88,8 +88,10 @@ struct ip6_tnl_net { | |||
88 | struct ip6_tnl **tnls[2]; | 88 | struct ip6_tnl **tnls[2]; |
89 | }; | 89 | }; |
90 | 90 | ||
91 | /* lock for the tunnel lists */ | 91 | /* |
92 | static DEFINE_RWLOCK(ip6_tnl_lock); | 92 | * Locking : hash tables are protected by RCU and a spinlock |
93 | */ | ||
94 | static DEFINE_SPINLOCK(ip6_tnl_lock); | ||
93 | 95 | ||
94 | static inline struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t) | 96 | static inline struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t) |
95 | { | 97 | { |
@@ -130,6 +132,9 @@ static inline void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst) | |||
130 | * else %NULL | 132 | * else %NULL |
131 | **/ | 133 | **/ |
132 | 134 | ||
135 | #define for_each_ip6_tunnel_rcu(start) \ | ||
136 | for (t = rcu_dereference(start); t; t = rcu_dereference(t->next)) | ||
137 | |||
133 | static struct ip6_tnl * | 138 | static struct ip6_tnl * |
134 | ip6_tnl_lookup(struct net *net, struct in6_addr *remote, struct in6_addr *local) | 139 | ip6_tnl_lookup(struct net *net, struct in6_addr *remote, struct in6_addr *local) |
135 | { | 140 | { |
@@ -138,13 +143,14 @@ ip6_tnl_lookup(struct net *net, struct in6_addr *remote, struct in6_addr *local) | |||
138 | struct ip6_tnl *t; | 143 | struct ip6_tnl *t; |
139 | struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); | 144 | struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); |
140 | 145 | ||
141 | for (t = ip6n->tnls_r_l[h0 ^ h1]; t; t = t->next) { | 146 | for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[h0 ^ h1]) { |
142 | if (ipv6_addr_equal(local, &t->parms.laddr) && | 147 | if (ipv6_addr_equal(local, &t->parms.laddr) && |
143 | ipv6_addr_equal(remote, &t->parms.raddr) && | 148 | ipv6_addr_equal(remote, &t->parms.raddr) && |
144 | (t->dev->flags & IFF_UP)) | 149 | (t->dev->flags & IFF_UP)) |
145 | return t; | 150 | return t; |
146 | } | 151 | } |
147 | if ((t = ip6n->tnls_wc[0]) != NULL && (t->dev->flags & IFF_UP)) | 152 | t = rcu_dereference(ip6n->tnls_wc[0]); |
153 | if (t && (t->dev->flags & IFF_UP)) | ||
148 | return t; | 154 | return t; |
149 | 155 | ||
150 | return NULL; | 156 | return NULL; |
@@ -186,10 +192,10 @@ ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t) | |||
186 | { | 192 | { |
187 | struct ip6_tnl **tp = ip6_tnl_bucket(ip6n, &t->parms); | 193 | struct ip6_tnl **tp = ip6_tnl_bucket(ip6n, &t->parms); |
188 | 194 | ||
195 | spin_lock_bh(&ip6_tnl_lock); | ||
189 | t->next = *tp; | 196 | t->next = *tp; |
190 | write_lock_bh(&ip6_tnl_lock); | 197 | rcu_assign_pointer(*tp, t); |
191 | *tp = t; | 198 | spin_unlock_bh(&ip6_tnl_lock); |
192 | write_unlock_bh(&ip6_tnl_lock); | ||
193 | } | 199 | } |
194 | 200 | ||
195 | /** | 201 | /** |
@@ -204,9 +210,9 @@ ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t) | |||
204 | 210 | ||
205 | for (tp = ip6_tnl_bucket(ip6n, &t->parms); *tp; tp = &(*tp)->next) { | 211 | for (tp = ip6_tnl_bucket(ip6n, &t->parms); *tp; tp = &(*tp)->next) { |
206 | if (t == *tp) { | 212 | if (t == *tp) { |
207 | write_lock_bh(&ip6_tnl_lock); | 213 | spin_lock_bh(&ip6_tnl_lock); |
208 | *tp = t->next; | 214 | *tp = t->next; |
209 | write_unlock_bh(&ip6_tnl_lock); | 215 | spin_unlock_bh(&ip6_tnl_lock); |
210 | break; | 216 | break; |
211 | } | 217 | } |
212 | } | 218 | } |
@@ -313,9 +319,9 @@ ip6_tnl_dev_uninit(struct net_device *dev) | |||
313 | struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); | 319 | struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); |
314 | 320 | ||
315 | if (dev == ip6n->fb_tnl_dev) { | 321 | if (dev == ip6n->fb_tnl_dev) { |
316 | write_lock_bh(&ip6_tnl_lock); | 322 | spin_lock_bh(&ip6_tnl_lock); |
317 | ip6n->tnls_wc[0] = NULL; | 323 | ip6n->tnls_wc[0] = NULL; |
318 | write_unlock_bh(&ip6_tnl_lock); | 324 | spin_unlock_bh(&ip6_tnl_lock); |
319 | } else { | 325 | } else { |
320 | ip6_tnl_unlink(ip6n, t); | 326 | ip6_tnl_unlink(ip6n, t); |
321 | } | 327 | } |
@@ -409,7 +415,7 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt, | |||
409 | in trouble since we might need the source address for further | 415 | in trouble since we might need the source address for further |
410 | processing of the error. */ | 416 | processing of the error. */ |
411 | 417 | ||
412 | read_lock(&ip6_tnl_lock); | 418 | rcu_read_lock(); |
413 | if ((t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->daddr, | 419 | if ((t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->daddr, |
414 | &ipv6h->saddr)) == NULL) | 420 | &ipv6h->saddr)) == NULL) |
415 | goto out; | 421 | goto out; |
@@ -482,7 +488,7 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt, | |||
482 | *msg = rel_msg; | 488 | *msg = rel_msg; |
483 | 489 | ||
484 | out: | 490 | out: |
485 | read_unlock(&ip6_tnl_lock); | 491 | rcu_read_unlock(); |
486 | return err; | 492 | return err; |
487 | } | 493 | } |
488 | 494 | ||
@@ -693,23 +699,23 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol, | |||
693 | struct ip6_tnl *t; | 699 | struct ip6_tnl *t; |
694 | struct ipv6hdr *ipv6h = ipv6_hdr(skb); | 700 | struct ipv6hdr *ipv6h = ipv6_hdr(skb); |
695 | 701 | ||
696 | read_lock(&ip6_tnl_lock); | 702 | rcu_read_lock(); |
697 | 703 | ||
698 | if ((t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr, | 704 | if ((t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr, |
699 | &ipv6h->daddr)) != NULL) { | 705 | &ipv6h->daddr)) != NULL) { |
700 | if (t->parms.proto != ipproto && t->parms.proto != 0) { | 706 | if (t->parms.proto != ipproto && t->parms.proto != 0) { |
701 | read_unlock(&ip6_tnl_lock); | 707 | rcu_read_unlock(); |
702 | goto discard; | 708 | goto discard; |
703 | } | 709 | } |
704 | 710 | ||
705 | if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) { | 711 | if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) { |
706 | read_unlock(&ip6_tnl_lock); | 712 | rcu_read_unlock(); |
707 | goto discard; | 713 | goto discard; |
708 | } | 714 | } |
709 | 715 | ||
710 | if (!ip6_tnl_rcv_ctl(t)) { | 716 | if (!ip6_tnl_rcv_ctl(t)) { |
711 | t->dev->stats.rx_dropped++; | 717 | t->dev->stats.rx_dropped++; |
712 | read_unlock(&ip6_tnl_lock); | 718 | rcu_read_unlock(); |
713 | goto discard; | 719 | goto discard; |
714 | } | 720 | } |
715 | secpath_reset(skb); | 721 | secpath_reset(skb); |
@@ -727,10 +733,10 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol, | |||
727 | t->dev->stats.rx_packets++; | 733 | t->dev->stats.rx_packets++; |
728 | t->dev->stats.rx_bytes += skb->len; | 734 | t->dev->stats.rx_bytes += skb->len; |
729 | netif_rx(skb); | 735 | netif_rx(skb); |
730 | read_unlock(&ip6_tnl_lock); | 736 | rcu_read_unlock(); |
731 | return 0; | 737 | return 0; |
732 | } | 738 | } |
733 | read_unlock(&ip6_tnl_lock); | 739 | rcu_read_unlock(); |
734 | return 1; | 740 | return 1; |
735 | 741 | ||
736 | discard: | 742 | discard: |
@@ -1387,14 +1393,19 @@ static void ip6_tnl_destroy_tunnels(struct ip6_tnl_net *ip6n) | |||
1387 | { | 1393 | { |
1388 | int h; | 1394 | int h; |
1389 | struct ip6_tnl *t; | 1395 | struct ip6_tnl *t; |
1396 | LIST_HEAD(list); | ||
1390 | 1397 | ||
1391 | for (h = 0; h < HASH_SIZE; h++) { | 1398 | for (h = 0; h < HASH_SIZE; h++) { |
1392 | while ((t = ip6n->tnls_r_l[h]) != NULL) | 1399 | t = ip6n->tnls_r_l[h]; |
1393 | unregister_netdevice(t->dev); | 1400 | while (t != NULL) { |
1401 | unregister_netdevice_queue(t->dev, &list); | ||
1402 | t = t->next; | ||
1403 | } | ||
1394 | } | 1404 | } |
1395 | 1405 | ||
1396 | t = ip6n->tnls_wc[0]; | 1406 | t = ip6n->tnls_wc[0]; |
1397 | unregister_netdevice(t->dev); | 1407 | unregister_netdevice_queue(t->dev, &list); |
1408 | unregister_netdevice_many(&list); | ||
1398 | } | 1409 | } |
1399 | 1410 | ||
1400 | static int ip6_tnl_init_net(struct net *net) | 1411 | static int ip6_tnl_init_net(struct net *net) |
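The ip6_tunnel hunks above swap the rwlock for RCU: receive-path lookups now run under rcu_read_lock() and walk the chain through rcu_dereference() (the for_each_ip6_tunnel_rcu macro), while link/unlink serialize on a spinlock and publish with rcu_assign_pointer(). A rough userspace analogue of that pattern, assuming liburcu's classic <urcu.h> API (rcu_register_thread, rcu_read_lock, rcu_dereference, rcu_assign_pointer, synchronize_rcu) and a pthread mutex standing in for the kernel spinlock; struct tnl and the helper names are invented for the sketch.

/* build (assumption): cc sketch.c -lurcu -lpthread */
#include <urcu.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct tnl {
    int remote;                  /* stands in for the tunnel endpoint keys */
    struct tnl *next;
};

static struct tnl *head;                         /* one hash bucket */
static pthread_mutex_t writer_lock = PTHREAD_MUTEX_INITIALIZER;

/* reader side: no lock taken, pointers loaded through rcu_dereference() */
static struct tnl *tnl_lookup(int remote)
{
    struct tnl *t;

    for (t = rcu_dereference(head); t; t = rcu_dereference(t->next))
        if (t->remote == remote)
            return t;
    return NULL;
}

/* writer side: serialize on the lock, publish with rcu_assign_pointer() */
static void tnl_link(struct tnl *t)
{
    pthread_mutex_lock(&writer_lock);
    t->next = head;
    rcu_assign_pointer(head, t);
    pthread_mutex_unlock(&writer_lock);
}

static void tnl_unlink_and_free(struct tnl *t)
{
    struct tnl **tp;

    pthread_mutex_lock(&writer_lock);
    for (tp = &head; *tp; tp = &(*tp)->next)
        if (*tp == t) {
            *tp = t->next;       /* unlink, as in ip6_tnl_unlink() */
            break;
        }
    pthread_mutex_unlock(&writer_lock);

    synchronize_rcu();           /* wait out readers that may still see t */
    free(t);
}

int main(void)
{
    struct tnl *t;

    rcu_register_thread();

    t = calloc(1, sizeof(*t));
    if (!t)
        return 1;
    t->remote = 42;
    tnl_link(t);

    rcu_read_lock();
    printf("lookup(42): %s\n", tnl_lookup(42) ? "hit" : "miss");
    rcu_read_unlock();

    tnl_unlink_and_free(t);
    rcu_unregister_thread();
    return 0;
}

The point of the conversion is that the hot receive path no longer bounces a lock cacheline; only configuration changes pay for serialization.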
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 716153941fc4..52e0f74fdfe0 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c | |||
@@ -477,7 +477,7 @@ failure: | |||
477 | * Delete a VIF entry | 477 | * Delete a VIF entry |
478 | */ | 478 | */ |
479 | 479 | ||
480 | static int mif6_delete(struct net *net, int vifi) | 480 | static int mif6_delete(struct net *net, int vifi, struct list_head *head) |
481 | { | 481 | { |
482 | struct mif_device *v; | 482 | struct mif_device *v; |
483 | struct net_device *dev; | 483 | struct net_device *dev; |
@@ -519,7 +519,7 @@ static int mif6_delete(struct net *net, int vifi) | |||
519 | in6_dev->cnf.mc_forwarding--; | 519 | in6_dev->cnf.mc_forwarding--; |
520 | 520 | ||
521 | if (v->flags & MIFF_REGISTER) | 521 | if (v->flags & MIFF_REGISTER) |
522 | unregister_netdevice(dev); | 522 | unregister_netdevice_queue(dev, head); |
523 | 523 | ||
524 | dev_put(dev); | 524 | dev_put(dev); |
525 | return 0; | 525 | return 0; |
@@ -976,6 +976,7 @@ static int ip6mr_device_event(struct notifier_block *this, | |||
976 | struct net *net = dev_net(dev); | 976 | struct net *net = dev_net(dev); |
977 | struct mif_device *v; | 977 | struct mif_device *v; |
978 | int ct; | 978 | int ct; |
979 | LIST_HEAD(list); | ||
979 | 980 | ||
980 | if (event != NETDEV_UNREGISTER) | 981 | if (event != NETDEV_UNREGISTER) |
981 | return NOTIFY_DONE; | 982 | return NOTIFY_DONE; |
@@ -983,8 +984,10 @@ static int ip6mr_device_event(struct notifier_block *this, | |||
983 | v = &net->ipv6.vif6_table[0]; | 984 | v = &net->ipv6.vif6_table[0]; |
984 | for (ct = 0; ct < net->ipv6.maxvif; ct++, v++) { | 985 | for (ct = 0; ct < net->ipv6.maxvif; ct++, v++) { |
985 | if (v->dev == dev) | 986 | if (v->dev == dev) |
986 | mif6_delete(net, ct); | 987 | mif6_delete(net, ct, &list); |
987 | } | 988 | } |
989 | unregister_netdevice_many(&list); | ||
990 | |||
988 | return NOTIFY_DONE; | 991 | return NOTIFY_DONE; |
989 | } | 992 | } |
990 | 993 | ||
@@ -1188,14 +1191,16 @@ static int ip6mr_mfc_add(struct net *net, struct mf6cctl *mfc, int mrtsock) | |||
1188 | static void mroute_clean_tables(struct net *net) | 1191 | static void mroute_clean_tables(struct net *net) |
1189 | { | 1192 | { |
1190 | int i; | 1193 | int i; |
1194 | LIST_HEAD(list); | ||
1191 | 1195 | ||
1192 | /* | 1196 | /* |
1193 | * Shut down all active vif entries | 1197 | * Shut down all active vif entries |
1194 | */ | 1198 | */ |
1195 | for (i = 0; i < net->ipv6.maxvif; i++) { | 1199 | for (i = 0; i < net->ipv6.maxvif; i++) { |
1196 | if (!(net->ipv6.vif6_table[i].flags & VIFF_STATIC)) | 1200 | if (!(net->ipv6.vif6_table[i].flags & VIFF_STATIC)) |
1197 | mif6_delete(net, i); | 1201 | mif6_delete(net, i, &list); |
1198 | } | 1202 | } |
1203 | unregister_netdevice_many(&list); | ||
1199 | 1204 | ||
1200 | /* | 1205 | /* |
1201 | * Wipe the cache | 1206 | * Wipe the cache |
@@ -1297,7 +1302,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns | |||
1297 | switch (optname) { | 1302 | switch (optname) { |
1298 | case MRT6_INIT: | 1303 | case MRT6_INIT: |
1299 | if (sk->sk_type != SOCK_RAW || | 1304 | if (sk->sk_type != SOCK_RAW || |
1300 | inet_sk(sk)->num != IPPROTO_ICMPV6) | 1305 | inet_sk(sk)->inet_num != IPPROTO_ICMPV6) |
1301 | return -EOPNOTSUPP; | 1306 | return -EOPNOTSUPP; |
1302 | if (optlen < sizeof(int)) | 1307 | if (optlen < sizeof(int)) |
1303 | return -EINVAL; | 1308 | return -EINVAL; |
@@ -1325,7 +1330,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns | |||
1325 | if (copy_from_user(&mifi, optval, sizeof(mifi_t))) | 1330 | if (copy_from_user(&mifi, optval, sizeof(mifi_t))) |
1326 | return -EFAULT; | 1331 | return -EFAULT; |
1327 | rtnl_lock(); | 1332 | rtnl_lock(); |
1328 | ret = mif6_delete(net, mifi); | 1333 | ret = mif6_delete(net, mifi, NULL); |
1329 | rtnl_unlock(); | 1334 | rtnl_unlock(); |
1330 | return ret; | 1335 | return ret; |
1331 | 1336 | ||
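mif6_delete() now takes an optional list head: the unregister notifier and mroute_clean_tables() pass a local list and flush it once through unregister_netdevice_many(), while the single-MIF delete path in ip6_mroute_setsockopt() passes NULL and keeps the immediate per-device teardown. A small standalone C sketch of that shape, queue when the caller supplies a list and finalize on the spot otherwise; struct dev and the finalize helpers are invented for illustration.

#include <stdio.h>
#include <stddef.h>

struct dev {
    const char *name;
    struct dev *next;            /* link used only while queued */
};

/* stands in for the expensive per-call work (synchronize + unregister) */
static void finalize_one(struct dev *d)
{
    printf("finalize %s (immediate)\n", d->name);
}

static void finalize_many(struct dev *queue)
{
    /* one pass, one (conceptual) synchronization for the whole batch */
    for (; queue; queue = queue->next)
        printf("finalize %s (batched)\n", queue->name);
}

/* delete one entry; queue it if the caller provided a list head */
static void delete_entry(struct dev *d, struct dev **head)
{
    if (head) {
        d->next = *head;
        *head = d;
    } else {
        finalize_one(d);
    }
}

int main(void)
{
    struct dev a = { "mif0" }, b = { "mif1" }, c = { "mif2" };
    struct dev *queue = NULL;

    delete_entry(&a, &queue);        /* batched path */
    delete_entry(&b, &queue);
    finalize_many(queue);

    delete_entry(&c, NULL);          /* immediate, single-delete path */
    return 0;
}

Queueing matters because the expensive synchronization before freeing each device is then paid once per batch rather than once per VIF.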
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 4f7aaf6996a3..430454ee5ead 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c | |||
@@ -64,7 +64,7 @@ int ip6_ra_control(struct sock *sk, int sel) | |||
64 | struct ip6_ra_chain *ra, *new_ra, **rap; | 64 | struct ip6_ra_chain *ra, *new_ra, **rap; |
65 | 65 | ||
66 | /* RA packet may be delivered ONLY to IPPROTO_RAW socket */ | 66 | /* RA packet may be delivered ONLY to IPPROTO_RAW socket */ |
67 | if (sk->sk_type != SOCK_RAW || inet_sk(sk)->num != IPPROTO_RAW) | 67 | if (sk->sk_type != SOCK_RAW || inet_sk(sk)->inet_num != IPPROTO_RAW) |
68 | return -ENOPROTOOPT; | 68 | return -ENOPROTOOPT; |
69 | 69 | ||
70 | new_ra = (sel>=0) ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL; | 70 | new_ra = (sel>=0) ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL; |
@@ -106,7 +106,7 @@ struct ipv6_txoptions *ipv6_update_options(struct sock *sk, | |||
106 | if (inet_sk(sk)->is_icsk) { | 106 | if (inet_sk(sk)->is_icsk) { |
107 | if (opt && | 107 | if (opt && |
108 | !((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) && | 108 | !((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) && |
109 | inet_sk(sk)->daddr != LOOPBACK4_IPV6) { | 109 | inet_sk(sk)->inet_daddr != LOOPBACK4_IPV6) { |
110 | struct inet_connection_sock *icsk = inet_csk(sk); | 110 | struct inet_connection_sock *icsk = inet_csk(sk); |
111 | icsk->icsk_ext_hdr_len = opt->opt_flen + opt->opt_nflen; | 111 | icsk->icsk_ext_hdr_len = opt->opt_flen + opt->opt_nflen; |
112 | icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie); | 112 | icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie); |
@@ -234,7 +234,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, | |||
234 | 234 | ||
235 | case IPV6_V6ONLY: | 235 | case IPV6_V6ONLY: |
236 | if (optlen < sizeof(int) || | 236 | if (optlen < sizeof(int) || |
237 | inet_sk(sk)->num) | 237 | inet_sk(sk)->inet_num) |
238 | goto e_inval; | 238 | goto e_inval; |
239 | np->ipv6only = valbool; | 239 | np->ipv6only = valbool; |
240 | retv = 0; | 240 | retv = 0; |
@@ -424,6 +424,7 @@ sticky_done: | |||
424 | 424 | ||
425 | fl.fl6_flowlabel = 0; | 425 | fl.fl6_flowlabel = 0; |
426 | fl.oif = sk->sk_bound_dev_if; | 426 | fl.oif = sk->sk_bound_dev_if; |
427 | fl.mark = sk->sk_mark; | ||
427 | 428 | ||
428 | if (optlen == 0) | 429 | if (optlen == 0) |
429 | goto update; | 430 | goto update; |
@@ -665,7 +666,7 @@ done: | |||
665 | case IPV6_MTU_DISCOVER: | 666 | case IPV6_MTU_DISCOVER: |
666 | if (optlen < sizeof(int)) | 667 | if (optlen < sizeof(int)) |
667 | goto e_inval; | 668 | goto e_inval; |
668 | if (val<0 || val>3) | 669 | if (val < IP_PMTUDISC_DONT || val > IP_PMTUDISC_PROBE) |
669 | goto e_inval; | 670 | goto e_inval; |
670 | np->pmtudisc = val; | 671 | np->pmtudisc = val; |
671 | retv = 0; | 672 | retv = 0; |
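The IPV6_MTU_DISCOVER bounds check above now spells out the accepted range with the PMTU-discovery constants instead of bare 0 and 3; the range itself is unchanged, since IP_PMTUDISC_DONT is 0 and IP_PMTUDISC_PROBE is 3. From userspace the corresponding IPv6 option can be exercised as below; the #ifndef fallback value is an assumption for older headers that lack IPV6_PMTUDISC_PROBE.

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>

#ifndef IPV6_PMTUDISC_PROBE
#define IPV6_PMTUDISC_PROBE 3    /* assumption: mirrors the kernel value */
#endif

int main(void)
{
    int fd = socket(AF_INET6, SOCK_DGRAM, 0);
    int val = IPV6_PMTUDISC_PROBE;

    if (fd < 0) {
        perror("socket");
        return 1;
    }
    /* values outside the DONT..PROBE range are rejected with EINVAL by
     * the setsockopt handler shown in the hunk above */
    if (setsockopt(fd, IPPROTO_IPV6, IPV6_MTU_DISCOVER,
                   &val, sizeof(val)) < 0)
        perror("setsockopt(IPV6_MTU_DISCOVER)");
    else
        printf("PMTU discovery mode set to PROBE\n");
    return 0;
}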
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index f74e4e2cdd06..3507cfe1e7a2 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c | |||
@@ -598,6 +598,7 @@ static void ndisc_send_na(struct net_device *dev, struct neighbour *neigh, | |||
598 | icmp6h.icmp6_solicited = solicited; | 598 | icmp6h.icmp6_solicited = solicited; |
599 | icmp6h.icmp6_override = override; | 599 | icmp6h.icmp6_override = override; |
600 | 600 | ||
601 | inc_opt |= ifp->idev->cnf.force_tllao; | ||
601 | __ndisc_send(dev, neigh, daddr, src_addr, | 602 | __ndisc_send(dev, neigh, daddr, src_addr, |
602 | &icmp6h, solicited_addr, | 603 | &icmp6h, solicited_addr, |
603 | inc_opt ? ND_OPT_TARGET_LL_ADDR : 0); | 604 | inc_opt ? ND_OPT_TARGET_LL_ADDR : 0); |
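The ndisc change ORs the new per-interface force_tllao setting into inc_opt, so neighbour advertisements carry the target link-layer address option even when they otherwise would not. Assuming the knob is exposed the usual way for ipv6 devconf members, as a /proc/sys/net/ipv6/conf/<dev>/force_tllao file (the exact path is an inference from the field name, not something shown in this hunk), flipping it from C could look like this; "eth0" is a placeholder interface name.

#include <stdio.h>

int main(void)
{
    /* assumed sysctl path derived from the devconf field name */
    const char *path = "/proc/sys/net/ipv6/conf/eth0/force_tllao";
    FILE *f = fopen(path, "w");

    if (!f) {
        perror(path);
        return 1;
    }
    fputs("1\n", f);             /* always include the TLLAO in NAs */
    fclose(f);
    return 0;
}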
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 4f24570b0869..cb834ab7f071 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c | |||
@@ -72,7 +72,7 @@ static struct sock *__raw_v6_lookup(struct net *net, struct sock *sk, | |||
72 | int is_multicast = ipv6_addr_is_multicast(loc_addr); | 72 | int is_multicast = ipv6_addr_is_multicast(loc_addr); |
73 | 73 | ||
74 | sk_for_each_from(sk, node) | 74 | sk_for_each_from(sk, node) |
75 | if (inet_sk(sk)->num == num) { | 75 | if (inet_sk(sk)->inet_num == num) { |
76 | struct ipv6_pinfo *np = inet6_sk(sk); | 76 | struct ipv6_pinfo *np = inet6_sk(sk); |
77 | 77 | ||
78 | if (!net_eq(sock_net(sk), net)) | 78 | if (!net_eq(sock_net(sk), net)) |
@@ -298,7 +298,7 @@ static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
298 | dev_put(dev); | 298 | dev_put(dev); |
299 | } | 299 | } |
300 | 300 | ||
301 | inet->rcv_saddr = inet->saddr = v4addr; | 301 | inet->inet_rcv_saddr = inet->inet_saddr = v4addr; |
302 | ipv6_addr_copy(&np->rcv_saddr, &addr->sin6_addr); | 302 | ipv6_addr_copy(&np->rcv_saddr, &addr->sin6_addr); |
303 | if (!(addr_type & IPV6_ADDR_MULTICAST)) | 303 | if (!(addr_type & IPV6_ADDR_MULTICAST)) |
304 | ipv6_addr_copy(&np->saddr, &addr->sin6_addr); | 304 | ipv6_addr_copy(&np->saddr, &addr->sin6_addr); |
@@ -381,8 +381,7 @@ static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb) | |||
381 | } | 381 | } |
382 | 382 | ||
383 | /* Charge it to the socket. */ | 383 | /* Charge it to the socket. */ |
384 | if (sock_queue_rcv_skb(sk,skb)<0) { | 384 | if (sock_queue_rcv_skb(sk, skb) < 0) { |
385 | atomic_inc(&sk->sk_drops); | ||
386 | kfree_skb(skb); | 385 | kfree_skb(skb); |
387 | return NET_RX_DROP; | 386 | return NET_RX_DROP; |
388 | } | 387 | } |
@@ -416,14 +415,14 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb) | |||
416 | skb_network_header_len(skb)); | 415 | skb_network_header_len(skb)); |
417 | if (!csum_ipv6_magic(&ipv6_hdr(skb)->saddr, | 416 | if (!csum_ipv6_magic(&ipv6_hdr(skb)->saddr, |
418 | &ipv6_hdr(skb)->daddr, | 417 | &ipv6_hdr(skb)->daddr, |
419 | skb->len, inet->num, skb->csum)) | 418 | skb->len, inet->inet_num, skb->csum)) |
420 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 419 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
421 | } | 420 | } |
422 | if (!skb_csum_unnecessary(skb)) | 421 | if (!skb_csum_unnecessary(skb)) |
423 | skb->csum = ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr, | 422 | skb->csum = ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr, |
424 | &ipv6_hdr(skb)->daddr, | 423 | &ipv6_hdr(skb)->daddr, |
425 | skb->len, | 424 | skb->len, |
426 | inet->num, 0)); | 425 | inet->inet_num, 0)); |
427 | 426 | ||
428 | if (inet->hdrincl) { | 427 | if (inet->hdrincl) { |
429 | if (skb_checksum_complete(skb)) { | 428 | if (skb_checksum_complete(skb)) { |
@@ -497,7 +496,7 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk, | |||
497 | sin6->sin6_scope_id = IP6CB(skb)->iif; | 496 | sin6->sin6_scope_id = IP6CB(skb)->iif; |
498 | } | 497 | } |
499 | 498 | ||
500 | sock_recv_timestamp(msg, sk, skb); | 499 | sock_recv_ts_and_drops(msg, sk, skb); |
501 | 500 | ||
502 | if (np->rxopt.all) | 501 | if (np->rxopt.all) |
503 | datagram_recv_ctl(sk, msg, skb); | 502 | datagram_recv_ctl(sk, msg, skb); |
@@ -518,7 +517,6 @@ csum_copy_err: | |||
518 | as some normal condition. | 517 | as some normal condition. |
519 | */ | 518 | */ |
520 | err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH; | 519 | err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH; |
521 | atomic_inc(&sk->sk_drops); | ||
522 | goto out; | 520 | goto out; |
523 | } | 521 | } |
524 | 522 | ||
@@ -766,8 +764,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk, | |||
766 | proto = ntohs(sin6->sin6_port); | 764 | proto = ntohs(sin6->sin6_port); |
767 | 765 | ||
768 | if (!proto) | 766 | if (!proto) |
769 | proto = inet->num; | 767 | proto = inet->inet_num; |
770 | else if (proto != inet->num) | 768 | else if (proto != inet->inet_num) |
771 | return(-EINVAL); | 769 | return(-EINVAL); |
772 | 770 | ||
773 | if (proto > 255) | 771 | if (proto > 255) |
@@ -800,7 +798,7 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk, | |||
800 | if (sk->sk_state != TCP_ESTABLISHED) | 798 | if (sk->sk_state != TCP_ESTABLISHED) |
801 | return -EDESTADDRREQ; | 799 | return -EDESTADDRREQ; |
802 | 800 | ||
803 | proto = inet->num; | 801 | proto = inet->inet_num; |
804 | daddr = &np->daddr; | 802 | daddr = &np->daddr; |
805 | fl.fl6_flowlabel = np->flow_label; | 803 | fl.fl6_flowlabel = np->flow_label; |
806 | } | 804 | } |
@@ -967,7 +965,7 @@ static int do_rawv6_setsockopt(struct sock *sk, int level, int optname, | |||
967 | 965 | ||
968 | switch (optname) { | 966 | switch (optname) { |
969 | case IPV6_CHECKSUM: | 967 | case IPV6_CHECKSUM: |
970 | if (inet_sk(sk)->num == IPPROTO_ICMPV6 && | 968 | if (inet_sk(sk)->inet_num == IPPROTO_ICMPV6 && |
971 | level == IPPROTO_IPV6) { | 969 | level == IPPROTO_IPV6) { |
972 | /* | 970 | /* |
973 | * RFC3542 tells that IPV6_CHECKSUM socket | 971 | * RFC3542 tells that IPV6_CHECKSUM socket |
@@ -1007,7 +1005,7 @@ static int rawv6_setsockopt(struct sock *sk, int level, int optname, | |||
1007 | break; | 1005 | break; |
1008 | 1006 | ||
1009 | case SOL_ICMPV6: | 1007 | case SOL_ICMPV6: |
1010 | if (inet_sk(sk)->num != IPPROTO_ICMPV6) | 1008 | if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6) |
1011 | return -EOPNOTSUPP; | 1009 | return -EOPNOTSUPP; |
1012 | return rawv6_seticmpfilter(sk, level, optname, optval, | 1010 | return rawv6_seticmpfilter(sk, level, optname, optval, |
1013 | optlen); | 1011 | optlen); |
@@ -1030,7 +1028,7 @@ static int compat_rawv6_setsockopt(struct sock *sk, int level, int optname, | |||
1030 | case SOL_RAW: | 1028 | case SOL_RAW: |
1031 | break; | 1029 | break; |
1032 | case SOL_ICMPV6: | 1030 | case SOL_ICMPV6: |
1033 | if (inet_sk(sk)->num != IPPROTO_ICMPV6) | 1031 | if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6) |
1034 | return -EOPNOTSUPP; | 1032 | return -EOPNOTSUPP; |
1035 | return rawv6_seticmpfilter(sk, level, optname, optval, optlen); | 1033 | return rawv6_seticmpfilter(sk, level, optname, optval, optlen); |
1036 | case SOL_IPV6: | 1034 | case SOL_IPV6: |
@@ -1087,7 +1085,7 @@ static int rawv6_getsockopt(struct sock *sk, int level, int optname, | |||
1087 | break; | 1085 | break; |
1088 | 1086 | ||
1089 | case SOL_ICMPV6: | 1087 | case SOL_ICMPV6: |
1090 | if (inet_sk(sk)->num != IPPROTO_ICMPV6) | 1088 | if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6) |
1091 | return -EOPNOTSUPP; | 1089 | return -EOPNOTSUPP; |
1092 | return rawv6_geticmpfilter(sk, level, optname, optval, | 1090 | return rawv6_geticmpfilter(sk, level, optname, optval, |
1093 | optlen); | 1091 | optlen); |
@@ -1110,7 +1108,7 @@ static int compat_rawv6_getsockopt(struct sock *sk, int level, int optname, | |||
1110 | case SOL_RAW: | 1108 | case SOL_RAW: |
1111 | break; | 1109 | break; |
1112 | case SOL_ICMPV6: | 1110 | case SOL_ICMPV6: |
1113 | if (inet_sk(sk)->num != IPPROTO_ICMPV6) | 1111 | if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6) |
1114 | return -EOPNOTSUPP; | 1112 | return -EOPNOTSUPP; |
1115 | return rawv6_geticmpfilter(sk, level, optname, optval, optlen); | 1113 | return rawv6_geticmpfilter(sk, level, optname, optval, optlen); |
1116 | case SOL_IPV6: | 1114 | case SOL_IPV6: |
@@ -1157,7 +1155,7 @@ static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg) | |||
1157 | 1155 | ||
1158 | static void rawv6_close(struct sock *sk, long timeout) | 1156 | static void rawv6_close(struct sock *sk, long timeout) |
1159 | { | 1157 | { |
1160 | if (inet_sk(sk)->num == IPPROTO_RAW) | 1158 | if (inet_sk(sk)->inet_num == IPPROTO_RAW) |
1161 | ip6_ra_control(sk, -1); | 1159 | ip6_ra_control(sk, -1); |
1162 | ip6mr_sk_done(sk); | 1160 | ip6mr_sk_done(sk); |
1163 | sk_common_release(sk); | 1161 | sk_common_release(sk); |
@@ -1176,7 +1174,7 @@ static int rawv6_init_sk(struct sock *sk) | |||
1176 | { | 1174 | { |
1177 | struct raw6_sock *rp = raw6_sk(sk); | 1175 | struct raw6_sock *rp = raw6_sk(sk); |
1178 | 1176 | ||
1179 | switch (inet_sk(sk)->num) { | 1177 | switch (inet_sk(sk)->inet_num) { |
1180 | case IPPROTO_ICMPV6: | 1178 | case IPPROTO_ICMPV6: |
1181 | rp->checksum = 1; | 1179 | rp->checksum = 1; |
1182 | rp->offset = 2; | 1180 | rp->offset = 2; |
@@ -1226,7 +1224,7 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i) | |||
1226 | dest = &np->daddr; | 1224 | dest = &np->daddr; |
1227 | src = &np->rcv_saddr; | 1225 | src = &np->rcv_saddr; |
1228 | destp = 0; | 1226 | destp = 0; |
1229 | srcp = inet_sk(sp)->num; | 1227 | srcp = inet_sk(sp)->inet_num; |
1230 | seq_printf(seq, | 1228 | seq_printf(seq, |
1231 | "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " | 1229 | "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " |
1232 | "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d\n", | 1230 | "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d\n", |
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index d6fe7646a8ff..df9432a46ffc 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -1471,9 +1471,10 @@ static struct rt6_info *ip6_route_redirect(struct in6_addr *dest, | |||
1471 | }, | 1471 | }, |
1472 | }, | 1472 | }, |
1473 | }, | 1473 | }, |
1474 | .gateway = *gateway, | ||
1475 | }; | 1474 | }; |
1476 | 1475 | ||
1476 | ipv6_addr_copy(&rdfl.gateway, gateway); | ||
1477 | |||
1477 | if (rt6_need_strict(dest)) | 1478 | if (rt6_need_strict(dest)) |
1478 | flags |= RT6_LOOKUP_F_IFACE; | 1479 | flags |= RT6_LOOKUP_F_IFACE; |
1479 | 1480 | ||
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index dbd19a78ca73..2362a3397e91 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c | |||
@@ -77,8 +77,17 @@ struct sit_net { | |||
77 | struct net_device *fb_tunnel_dev; | 77 | struct net_device *fb_tunnel_dev; |
78 | }; | 78 | }; |
79 | 79 | ||
80 | static DEFINE_RWLOCK(ipip6_lock); | 80 | /* |
81 | * Locking : hash tables are protected by RCU and a spinlock | ||
82 | */ | ||
83 | static DEFINE_SPINLOCK(ipip6_lock); | ||
81 | 84 | ||
85 | #define for_each_ip_tunnel_rcu(start) \ | ||
86 | for (t = rcu_dereference(start); t; t = rcu_dereference(t->next)) | ||
87 | |||
88 | /* | ||
89 | * Must be invoked with rcu_read_lock | ||
90 | */ | ||
82 | static struct ip_tunnel * ipip6_tunnel_lookup(struct net *net, | 91 | static struct ip_tunnel * ipip6_tunnel_lookup(struct net *net, |
83 | struct net_device *dev, __be32 remote, __be32 local) | 92 | struct net_device *dev, __be32 remote, __be32 local) |
84 | { | 93 | { |
@@ -87,26 +96,26 @@ static struct ip_tunnel * ipip6_tunnel_lookup(struct net *net, | |||
87 | struct ip_tunnel *t; | 96 | struct ip_tunnel *t; |
88 | struct sit_net *sitn = net_generic(net, sit_net_id); | 97 | struct sit_net *sitn = net_generic(net, sit_net_id); |
89 | 98 | ||
90 | for (t = sitn->tunnels_r_l[h0^h1]; t; t = t->next) { | 99 | for_each_ip_tunnel_rcu(sitn->tunnels_r_l[h0 ^ h1]) { |
91 | if (local == t->parms.iph.saddr && | 100 | if (local == t->parms.iph.saddr && |
92 | remote == t->parms.iph.daddr && | 101 | remote == t->parms.iph.daddr && |
93 | (!dev || !t->parms.link || dev->iflink == t->parms.link) && | 102 | (!dev || !t->parms.link || dev->iflink == t->parms.link) && |
94 | (t->dev->flags & IFF_UP)) | 103 | (t->dev->flags & IFF_UP)) |
95 | return t; | 104 | return t; |
96 | } | 105 | } |
97 | for (t = sitn->tunnels_r[h0]; t; t = t->next) { | 106 | for_each_ip_tunnel_rcu(sitn->tunnels_r[h0]) { |
98 | if (remote == t->parms.iph.daddr && | 107 | if (remote == t->parms.iph.daddr && |
99 | (!dev || !t->parms.link || dev->iflink == t->parms.link) && | 108 | (!dev || !t->parms.link || dev->iflink == t->parms.link) && |
100 | (t->dev->flags & IFF_UP)) | 109 | (t->dev->flags & IFF_UP)) |
101 | return t; | 110 | return t; |
102 | } | 111 | } |
103 | for (t = sitn->tunnels_l[h1]; t; t = t->next) { | 112 | for_each_ip_tunnel_rcu(sitn->tunnels_l[h1]) { |
104 | if (local == t->parms.iph.saddr && | 113 | if (local == t->parms.iph.saddr && |
105 | (!dev || !t->parms.link || dev->iflink == t->parms.link) && | 114 | (!dev || !t->parms.link || dev->iflink == t->parms.link) && |
106 | (t->dev->flags & IFF_UP)) | 115 | (t->dev->flags & IFF_UP)) |
107 | return t; | 116 | return t; |
108 | } | 117 | } |
109 | t = sitn->tunnels_wc[0]; | 118 | t = rcu_dereference(sitn->tunnels_wc[0]); |
110 | if ((t != NULL) && (t->dev->flags & IFF_UP)) | 119 | if ((t != NULL) && (t->dev->flags & IFF_UP)) |
111 | return t; | 120 | return t; |
112 | return NULL; | 121 | return NULL; |
@@ -143,9 +152,9 @@ static void ipip6_tunnel_unlink(struct sit_net *sitn, struct ip_tunnel *t) | |||
143 | 152 | ||
144 | for (tp = ipip6_bucket(sitn, t); *tp; tp = &(*tp)->next) { | 153 | for (tp = ipip6_bucket(sitn, t); *tp; tp = &(*tp)->next) { |
145 | if (t == *tp) { | 154 | if (t == *tp) { |
146 | write_lock_bh(&ipip6_lock); | 155 | spin_lock_bh(&ipip6_lock); |
147 | *tp = t->next; | 156 | *tp = t->next; |
148 | write_unlock_bh(&ipip6_lock); | 157 | spin_unlock_bh(&ipip6_lock); |
149 | break; | 158 | break; |
150 | } | 159 | } |
151 | } | 160 | } |
@@ -155,10 +164,27 @@ static void ipip6_tunnel_link(struct sit_net *sitn, struct ip_tunnel *t) | |||
155 | { | 164 | { |
156 | struct ip_tunnel **tp = ipip6_bucket(sitn, t); | 165 | struct ip_tunnel **tp = ipip6_bucket(sitn, t); |
157 | 166 | ||
167 | spin_lock_bh(&ipip6_lock); | ||
158 | t->next = *tp; | 168 | t->next = *tp; |
159 | write_lock_bh(&ipip6_lock); | 169 | rcu_assign_pointer(*tp, t); |
160 | *tp = t; | 170 | spin_unlock_bh(&ipip6_lock); |
161 | write_unlock_bh(&ipip6_lock); | 171 | } |
172 | |||
173 | static void ipip6_tunnel_clone_6rd(struct net_device *dev, struct sit_net *sitn) | ||
174 | { | ||
175 | #ifdef CONFIG_IPV6_SIT_6RD | ||
176 | struct ip_tunnel *t = netdev_priv(dev); | ||
177 | |||
178 | if (t->dev == sitn->fb_tunnel_dev) { | ||
179 | ipv6_addr_set(&t->ip6rd.prefix, htonl(0x20020000), 0, 0, 0); | ||
180 | t->ip6rd.relay_prefix = 0; | ||
181 | t->ip6rd.prefixlen = 16; | ||
182 | t->ip6rd.relay_prefixlen = 0; | ||
183 | } else { | ||
184 | struct ip_tunnel *t0 = netdev_priv(sitn->fb_tunnel_dev); | ||
185 | memcpy(&t->ip6rd, &t0->ip6rd, sizeof(t->ip6rd)); | ||
186 | } | ||
187 | #endif | ||
162 | } | 188 | } |
163 | 189 | ||
164 | static struct ip_tunnel * ipip6_tunnel_locate(struct net *net, | 190 | static struct ip_tunnel * ipip6_tunnel_locate(struct net *net, |
@@ -204,6 +230,7 @@ static struct ip_tunnel * ipip6_tunnel_locate(struct net *net, | |||
204 | 230 | ||
205 | nt->parms = *parms; | 231 | nt->parms = *parms; |
206 | ipip6_tunnel_init(dev); | 232 | ipip6_tunnel_init(dev); |
233 | ipip6_tunnel_clone_6rd(dev, sitn); | ||
207 | 234 | ||
208 | if (parms->i_flags & SIT_ISATAP) | 235 | if (parms->i_flags & SIT_ISATAP) |
209 | dev->priv_flags |= IFF_ISATAP; | 236 | dev->priv_flags |= IFF_ISATAP; |
@@ -222,15 +249,22 @@ failed: | |||
222 | return NULL; | 249 | return NULL; |
223 | } | 250 | } |
224 | 251 | ||
252 | static DEFINE_SPINLOCK(ipip6_prl_lock); | ||
253 | |||
254 | #define for_each_prl_rcu(start) \ | ||
255 | for (prl = rcu_dereference(start); \ | ||
256 | prl; \ | ||
257 | prl = rcu_dereference(prl->next)) | ||
258 | |||
225 | static struct ip_tunnel_prl_entry * | 259 | static struct ip_tunnel_prl_entry * |
226 | __ipip6_tunnel_locate_prl(struct ip_tunnel *t, __be32 addr) | 260 | __ipip6_tunnel_locate_prl(struct ip_tunnel *t, __be32 addr) |
227 | { | 261 | { |
228 | struct ip_tunnel_prl_entry *p = (struct ip_tunnel_prl_entry *)NULL; | 262 | struct ip_tunnel_prl_entry *prl; |
229 | 263 | ||
230 | for (p = t->prl; p; p = p->next) | 264 | for_each_prl_rcu(t->prl) |
231 | if (p->addr == addr) | 265 | if (prl->addr == addr) |
232 | break; | 266 | break; |
233 | return p; | 267 | return prl; |
234 | 268 | ||
235 | } | 269 | } |
236 | 270 | ||
@@ -255,7 +289,7 @@ static int ipip6_tunnel_get_prl(struct ip_tunnel *t, | |||
255 | kcalloc(cmax, sizeof(*kp), GFP_KERNEL) : | 289 | kcalloc(cmax, sizeof(*kp), GFP_KERNEL) : |
256 | NULL; | 290 | NULL; |
257 | 291 | ||
258 | read_lock(&ipip6_lock); | 292 | rcu_read_lock(); |
259 | 293 | ||
260 | ca = t->prl_count < cmax ? t->prl_count : cmax; | 294 | ca = t->prl_count < cmax ? t->prl_count : cmax; |
261 | 295 | ||
@@ -273,7 +307,7 @@ static int ipip6_tunnel_get_prl(struct ip_tunnel *t, | |||
273 | } | 307 | } |
274 | 308 | ||
275 | c = 0; | 309 | c = 0; |
276 | for (prl = t->prl; prl; prl = prl->next) { | 310 | for_each_prl_rcu(t->prl) { |
277 | if (c >= cmax) | 311 | if (c >= cmax) |
278 | break; | 312 | break; |
279 | if (kprl.addr != htonl(INADDR_ANY) && prl->addr != kprl.addr) | 313 | if (kprl.addr != htonl(INADDR_ANY) && prl->addr != kprl.addr) |
@@ -285,7 +319,7 @@ static int ipip6_tunnel_get_prl(struct ip_tunnel *t, | |||
285 | break; | 319 | break; |
286 | } | 320 | } |
287 | out: | 321 | out: |
288 | read_unlock(&ipip6_lock); | 322 | rcu_read_unlock(); |
289 | 323 | ||
290 | len = sizeof(*kp) * c; | 324 | len = sizeof(*kp) * c; |
291 | ret = 0; | 325 | ret = 0; |
@@ -306,12 +340,14 @@ ipip6_tunnel_add_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a, int chg) | |||
306 | if (a->addr == htonl(INADDR_ANY)) | 340 | if (a->addr == htonl(INADDR_ANY)) |
307 | return -EINVAL; | 341 | return -EINVAL; |
308 | 342 | ||
309 | write_lock(&ipip6_lock); | 343 | spin_lock(&ipip6_prl_lock); |
310 | 344 | ||
311 | for (p = t->prl; p; p = p->next) { | 345 | for (p = t->prl; p; p = p->next) { |
312 | if (p->addr == a->addr) { | 346 | if (p->addr == a->addr) { |
313 | if (chg) | 347 | if (chg) { |
314 | goto update; | 348 | p->flags = a->flags; |
349 | goto out; | ||
350 | } | ||
315 | err = -EEXIST; | 351 | err = -EEXIST; |
316 | goto out; | 352 | goto out; |
317 | } | 353 | } |
@@ -328,46 +364,63 @@ ipip6_tunnel_add_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a, int chg) | |||
328 | goto out; | 364 | goto out; |
329 | } | 365 | } |
330 | 366 | ||
367 | INIT_RCU_HEAD(&p->rcu_head); | ||
331 | p->next = t->prl; | 368 | p->next = t->prl; |
332 | t->prl = p; | ||
333 | t->prl_count++; | ||
334 | update: | ||
335 | p->addr = a->addr; | 369 | p->addr = a->addr; |
336 | p->flags = a->flags; | 370 | p->flags = a->flags; |
371 | t->prl_count++; | ||
372 | rcu_assign_pointer(t->prl, p); | ||
337 | out: | 373 | out: |
338 | write_unlock(&ipip6_lock); | 374 | spin_unlock(&ipip6_prl_lock); |
339 | return err; | 375 | return err; |
340 | } | 376 | } |
341 | 377 | ||
378 | static void prl_entry_destroy_rcu(struct rcu_head *head) | ||
379 | { | ||
380 | kfree(container_of(head, struct ip_tunnel_prl_entry, rcu_head)); | ||
381 | } | ||
382 | |||
383 | static void prl_list_destroy_rcu(struct rcu_head *head) | ||
384 | { | ||
385 | struct ip_tunnel_prl_entry *p, *n; | ||
386 | |||
387 | p = container_of(head, struct ip_tunnel_prl_entry, rcu_head); | ||
388 | do { | ||
389 | n = p->next; | ||
390 | kfree(p); | ||
391 | p = n; | ||
392 | } while (p); | ||
393 | } | ||
394 | |||
342 | static int | 395 | static int |
343 | ipip6_tunnel_del_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a) | 396 | ipip6_tunnel_del_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a) |
344 | { | 397 | { |
345 | struct ip_tunnel_prl_entry *x, **p; | 398 | struct ip_tunnel_prl_entry *x, **p; |
346 | int err = 0; | 399 | int err = 0; |
347 | 400 | ||
348 | write_lock(&ipip6_lock); | 401 | spin_lock(&ipip6_prl_lock); |
349 | 402 | ||
350 | if (a && a->addr != htonl(INADDR_ANY)) { | 403 | if (a && a->addr != htonl(INADDR_ANY)) { |
351 | for (p = &t->prl; *p; p = &(*p)->next) { | 404 | for (p = &t->prl; *p; p = &(*p)->next) { |
352 | if ((*p)->addr == a->addr) { | 405 | if ((*p)->addr == a->addr) { |
353 | x = *p; | 406 | x = *p; |
354 | *p = x->next; | 407 | *p = x->next; |
355 | kfree(x); | 408 | call_rcu(&x->rcu_head, prl_entry_destroy_rcu); |
356 | t->prl_count--; | 409 | t->prl_count--; |
357 | goto out; | 410 | goto out; |
358 | } | 411 | } |
359 | } | 412 | } |
360 | err = -ENXIO; | 413 | err = -ENXIO; |
361 | } else { | 414 | } else { |
362 | while (t->prl) { | 415 | if (t->prl) { |
416 | t->prl_count = 0; | ||
363 | x = t->prl; | 417 | x = t->prl; |
364 | t->prl = t->prl->next; | 418 | call_rcu(&x->rcu_head, prl_list_destroy_rcu); |
365 | kfree(x); | 419 | t->prl = NULL; |
366 | t->prl_count--; | ||
367 | } | 420 | } |
368 | } | 421 | } |
369 | out: | 422 | out: |
370 | write_unlock(&ipip6_lock); | 423 | spin_unlock(&ipip6_prl_lock); |
371 | return err; | 424 | return err; |
372 | } | 425 | } |
373 | 426 | ||
@@ -377,7 +430,7 @@ isatap_chksrc(struct sk_buff *skb, struct iphdr *iph, struct ip_tunnel *t) | |||
377 | struct ip_tunnel_prl_entry *p; | 430 | struct ip_tunnel_prl_entry *p; |
378 | int ok = 1; | 431 | int ok = 1; |
379 | 432 | ||
380 | read_lock(&ipip6_lock); | 433 | rcu_read_lock(); |
381 | p = __ipip6_tunnel_locate_prl(t, iph->saddr); | 434 | p = __ipip6_tunnel_locate_prl(t, iph->saddr); |
382 | if (p) { | 435 | if (p) { |
383 | if (p->flags & PRL_DEFAULT) | 436 | if (p->flags & PRL_DEFAULT) |
@@ -393,7 +446,7 @@ isatap_chksrc(struct sk_buff *skb, struct iphdr *iph, struct ip_tunnel *t) | |||
393 | else | 446 | else |
394 | ok = 0; | 447 | ok = 0; |
395 | } | 448 | } |
396 | read_unlock(&ipip6_lock); | 449 | rcu_read_unlock(); |
397 | return ok; | 450 | return ok; |
398 | } | 451 | } |
399 | 452 | ||
@@ -403,9 +456,9 @@ static void ipip6_tunnel_uninit(struct net_device *dev) | |||
403 | struct sit_net *sitn = net_generic(net, sit_net_id); | 456 | struct sit_net *sitn = net_generic(net, sit_net_id); |
404 | 457 | ||
405 | if (dev == sitn->fb_tunnel_dev) { | 458 | if (dev == sitn->fb_tunnel_dev) { |
406 | write_lock_bh(&ipip6_lock); | 459 | spin_lock_bh(&ipip6_lock); |
407 | sitn->tunnels_wc[0] = NULL; | 460 | sitn->tunnels_wc[0] = NULL; |
408 | write_unlock_bh(&ipip6_lock); | 461 | spin_unlock_bh(&ipip6_lock); |
409 | dev_put(dev); | 462 | dev_put(dev); |
410 | } else { | 463 | } else { |
411 | ipip6_tunnel_unlink(sitn, netdev_priv(dev)); | 464 | ipip6_tunnel_unlink(sitn, netdev_priv(dev)); |
@@ -458,7 +511,7 @@ static int ipip6_err(struct sk_buff *skb, u32 info) | |||
458 | 511 | ||
459 | err = -ENOENT; | 512 | err = -ENOENT; |
460 | 513 | ||
461 | read_lock(&ipip6_lock); | 514 | rcu_read_lock(); |
462 | t = ipip6_tunnel_lookup(dev_net(skb->dev), | 515 | t = ipip6_tunnel_lookup(dev_net(skb->dev), |
463 | skb->dev, | 516 | skb->dev, |
464 | iph->daddr, | 517 | iph->daddr, |
@@ -476,7 +529,7 @@ static int ipip6_err(struct sk_buff *skb, u32 info) | |||
476 | t->err_count = 1; | 529 | t->err_count = 1; |
477 | t->err_time = jiffies; | 530 | t->err_time = jiffies; |
478 | out: | 531 | out: |
479 | read_unlock(&ipip6_lock); | 532 | rcu_read_unlock(); |
480 | return err; | 533 | return err; |
481 | } | 534 | } |
482 | 535 | ||
@@ -496,7 +549,7 @@ static int ipip6_rcv(struct sk_buff *skb) | |||
496 | 549 | ||
497 | iph = ip_hdr(skb); | 550 | iph = ip_hdr(skb); |
498 | 551 | ||
499 | read_lock(&ipip6_lock); | 552 | rcu_read_lock(); |
500 | tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev, | 553 | tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev, |
501 | iph->saddr, iph->daddr); | 554 | iph->saddr, iph->daddr); |
502 | if (tunnel != NULL) { | 555 | if (tunnel != NULL) { |
@@ -510,7 +563,7 @@ static int ipip6_rcv(struct sk_buff *skb) | |||
510 | if ((tunnel->dev->priv_flags & IFF_ISATAP) && | 563 | if ((tunnel->dev->priv_flags & IFF_ISATAP) && |
511 | !isatap_chksrc(skb, iph, tunnel)) { | 564 | !isatap_chksrc(skb, iph, tunnel)) { |
512 | tunnel->dev->stats.rx_errors++; | 565 | tunnel->dev->stats.rx_errors++; |
513 | read_unlock(&ipip6_lock); | 566 | rcu_read_unlock(); |
514 | kfree_skb(skb); | 567 | kfree_skb(skb); |
515 | return 0; | 568 | return 0; |
516 | } | 569 | } |
@@ -521,28 +574,52 @@ static int ipip6_rcv(struct sk_buff *skb) | |||
521 | nf_reset(skb); | 574 | nf_reset(skb); |
522 | ipip6_ecn_decapsulate(iph, skb); | 575 | ipip6_ecn_decapsulate(iph, skb); |
523 | netif_rx(skb); | 576 | netif_rx(skb); |
524 | read_unlock(&ipip6_lock); | 577 | rcu_read_unlock(); |
525 | return 0; | 578 | return 0; |
526 | } | 579 | } |
527 | 580 | ||
528 | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); | 581 | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); |
529 | read_unlock(&ipip6_lock); | 582 | rcu_read_unlock(); |
530 | out: | 583 | out: |
531 | kfree_skb(skb); | 584 | kfree_skb(skb); |
532 | return 0; | 585 | return 0; |
533 | } | 586 | } |
534 | 587 | ||
535 | /* Returns the embedded IPv4 address if the IPv6 address | 588 | /* |
536 | comes from 6to4 (RFC 3056) addr space */ | 589 | * Returns the embedded IPv4 address if the IPv6 address |
537 | 590 | * comes from 6rd / 6to4 (RFC 3056) addr space. | |
538 | static inline __be32 try_6to4(struct in6_addr *v6dst) | 591 | */ |
592 | static inline | ||
593 | __be32 try_6rd(struct in6_addr *v6dst, struct ip_tunnel *tunnel) | ||
539 | { | 594 | { |
540 | __be32 dst = 0; | 595 | __be32 dst = 0; |
541 | 596 | ||
597 | #ifdef CONFIG_IPV6_SIT_6RD | ||
598 | if (ipv6_prefix_equal(v6dst, &tunnel->ip6rd.prefix, | ||
599 | tunnel->ip6rd.prefixlen)) { | ||
600 | unsigned pbw0, pbi0; | ||
601 | int pbi1; | ||
602 | u32 d; | ||
603 | |||
604 | pbw0 = tunnel->ip6rd.prefixlen >> 5; | ||
605 | pbi0 = tunnel->ip6rd.prefixlen & 0x1f; | ||
606 | |||
607 | d = (ntohl(v6dst->s6_addr32[pbw0]) << pbi0) >> | ||
608 | tunnel->ip6rd.relay_prefixlen; | ||
609 | |||
610 | pbi1 = pbi0 - tunnel->ip6rd.relay_prefixlen; | ||
611 | if (pbi1 > 0) | ||
612 | d |= ntohl(v6dst->s6_addr32[pbw0 + 1]) >> | ||
613 | (32 - pbi1); | ||
614 | |||
615 | dst = tunnel->ip6rd.relay_prefix | htonl(d); | ||
616 | } | ||
617 | #else | ||
542 | if (v6dst->s6_addr16[0] == htons(0x2002)) { | 618 | if (v6dst->s6_addr16[0] == htons(0x2002)) { |
543 | /* 6to4 v6 addr has 16 bits prefix, 32 v4addr, 16 SLA, ... */ | 619 | /* 6to4 v6 addr has 16 bits prefix, 32 v4addr, 16 SLA, ... */ |
544 | memcpy(&dst, &v6dst->s6_addr16[1], 4); | 620 | memcpy(&dst, &v6dst->s6_addr16[1], 4); |
545 | } | 621 | } |
622 | #endif | ||
546 | return dst; | 623 | return dst; |
547 | } | 624 | } |
548 | 625 | ||
@@ -555,7 +632,8 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, | |||
555 | struct net_device *dev) | 632 | struct net_device *dev) |
556 | { | 633 | { |
557 | struct ip_tunnel *tunnel = netdev_priv(dev); | 634 | struct ip_tunnel *tunnel = netdev_priv(dev); |
558 | struct net_device_stats *stats = &tunnel->dev->stats; | 635 | struct net_device_stats *stats = &dev->stats; |
636 | struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); | ||
559 | struct iphdr *tiph = &tunnel->parms.iph; | 637 | struct iphdr *tiph = &tunnel->parms.iph; |
560 | struct ipv6hdr *iph6 = ipv6_hdr(skb); | 638 | struct ipv6hdr *iph6 = ipv6_hdr(skb); |
561 | u8 tos = tunnel->parms.iph.tos; | 639 | u8 tos = tunnel->parms.iph.tos; |
@@ -595,7 +673,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, | |||
595 | } | 673 | } |
596 | 674 | ||
597 | if (!dst) | 675 | if (!dst) |
598 | dst = try_6to4(&iph6->daddr); | 676 | dst = try_6rd(&iph6->daddr, tunnel); |
599 | 677 | ||
600 | if (!dst) { | 678 | if (!dst) { |
601 | struct neighbour *neigh = NULL; | 679 | struct neighbour *neigh = NULL; |
@@ -688,7 +766,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, | |||
688 | struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom); | 766 | struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom); |
689 | if (!new_skb) { | 767 | if (!new_skb) { |
690 | ip_rt_put(rt); | 768 | ip_rt_put(rt); |
691 | stats->tx_dropped++; | 769 | txq->tx_dropped++; |
692 | dev_kfree_skb(skb); | 770 | dev_kfree_skb(skb); |
693 | return NETDEV_TX_OK; | 771 | return NETDEV_TX_OK; |
694 | } | 772 | } |
@@ -785,9 +863,15 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) | |||
785 | struct ip_tunnel *t; | 863 | struct ip_tunnel *t; |
786 | struct net *net = dev_net(dev); | 864 | struct net *net = dev_net(dev); |
787 | struct sit_net *sitn = net_generic(net, sit_net_id); | 865 | struct sit_net *sitn = net_generic(net, sit_net_id); |
866 | #ifdef CONFIG_IPV6_SIT_6RD | ||
867 | struct ip_tunnel_6rd ip6rd; | ||
868 | #endif | ||
788 | 869 | ||
789 | switch (cmd) { | 870 | switch (cmd) { |
790 | case SIOCGETTUNNEL: | 871 | case SIOCGETTUNNEL: |
872 | #ifdef CONFIG_IPV6_SIT_6RD | ||
873 | case SIOCGET6RD: | ||
874 | #endif | ||
791 | t = NULL; | 875 | t = NULL; |
792 | if (dev == sitn->fb_tunnel_dev) { | 876 | if (dev == sitn->fb_tunnel_dev) { |
793 | if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) { | 877 | if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) { |
@@ -798,9 +882,25 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) | |||
798 | } | 882 | } |
799 | if (t == NULL) | 883 | if (t == NULL) |
800 | t = netdev_priv(dev); | 884 | t = netdev_priv(dev); |
801 | memcpy(&p, &t->parms, sizeof(p)); | 885 | |
802 | if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) | 886 | err = -EFAULT; |
803 | err = -EFAULT; | 887 | if (cmd == SIOCGETTUNNEL) { |
888 | memcpy(&p, &t->parms, sizeof(p)); | ||
889 | if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, | ||
890 | sizeof(p))) | ||
891 | goto done; | ||
892 | #ifdef CONFIG_IPV6_SIT_6RD | ||
893 | } else { | ||
894 | ipv6_addr_copy(&ip6rd.prefix, &t->ip6rd.prefix); | ||
895 | ip6rd.relay_prefix = t->ip6rd.relay_prefix; | ||
896 | ip6rd.prefixlen = t->ip6rd.prefixlen; | ||
897 | ip6rd.relay_prefixlen = t->ip6rd.relay_prefixlen; | ||
898 | if (copy_to_user(ifr->ifr_ifru.ifru_data, &ip6rd, | ||
899 | sizeof(ip6rd))) | ||
900 | goto done; | ||
901 | #endif | ||
902 | } | ||
903 | err = 0; | ||
804 | break; | 904 | break; |
805 | 905 | ||
806 | case SIOCADDTUNNEL: | 906 | case SIOCADDTUNNEL: |
@@ -921,6 +1021,54 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) | |||
921 | netdev_state_change(dev); | 1021 | netdev_state_change(dev); |
922 | break; | 1022 | break; |
923 | 1023 | ||
1024 | #ifdef CONFIG_IPV6_SIT_6RD | ||
1025 | case SIOCADD6RD: | ||
1026 | case SIOCCHG6RD: | ||
1027 | case SIOCDEL6RD: | ||
1028 | err = -EPERM; | ||
1029 | if (!capable(CAP_NET_ADMIN)) | ||
1030 | goto done; | ||
1031 | |||
1032 | err = -EFAULT; | ||
1033 | if (copy_from_user(&ip6rd, ifr->ifr_ifru.ifru_data, | ||
1034 | sizeof(ip6rd))) | ||
1035 | goto done; | ||
1036 | |||
1037 | t = netdev_priv(dev); | ||
1038 | |||
1039 | if (cmd != SIOCDEL6RD) { | ||
1040 | struct in6_addr prefix; | ||
1041 | __be32 relay_prefix; | ||
1042 | |||
1043 | err = -EINVAL; | ||
1044 | if (ip6rd.relay_prefixlen > 32 || | ||
1045 | ip6rd.prefixlen + (32 - ip6rd.relay_prefixlen) > 64) | ||
1046 | goto done; | ||
1047 | |||
1048 | ipv6_addr_prefix(&prefix, &ip6rd.prefix, | ||
1049 | ip6rd.prefixlen); | ||
1050 | if (!ipv6_addr_equal(&prefix, &ip6rd.prefix)) | ||
1051 | goto done; | ||
1052 | if (ip6rd.relay_prefixlen) | ||
1053 | relay_prefix = ip6rd.relay_prefix & | ||
1054 | htonl(0xffffffffUL << | ||
1055 | (32 - ip6rd.relay_prefixlen)); | ||
1056 | else | ||
1057 | relay_prefix = 0; | ||
1058 | if (relay_prefix != ip6rd.relay_prefix) | ||
1059 | goto done; | ||
1060 | |||
1061 | ipv6_addr_copy(&t->ip6rd.prefix, &prefix); | ||
1062 | t->ip6rd.relay_prefix = relay_prefix; | ||
1063 | t->ip6rd.prefixlen = ip6rd.prefixlen; | ||
1064 | t->ip6rd.relay_prefixlen = ip6rd.relay_prefixlen; | ||
1065 | } else | ||
1066 | ipip6_tunnel_clone_6rd(dev, sitn); | ||
1067 | |||
1068 | err = 0; | ||
1069 | break; | ||
1070 | #endif | ||
1071 | |||
924 | default: | 1072 | default: |
925 | err = -EINVAL; | 1073 | err = -EINVAL; |
926 | } | 1074 | } |
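The SIOCADD6RD/SIOCCHG6RD branch above rejects a relay prefix longer than 32 bits, any combination where the 6rd prefix plus the embedded IPv4 suffix would exceed 64 bits, and prefixes that carry bits beyond their stated lengths. A userspace replica of just those checks; valid6rd() and the sample parameters are invented for the sketch.

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

/* Return 1 if the 6rd parameters would pass the ioctl's sanity checks. */
static int valid6rd(const struct in6_addr *prefix, unsigned prefixlen,
                    uint32_t relay_prefix /* network order */,
                    unsigned relay_prefixlen)
{
    unsigned i;
    uint32_t relay_mask;

    if (relay_prefixlen > 32 ||
        prefixlen + (32 - relay_prefixlen) > 64)
        return 0;

    /* the IPv6 prefix must be clean beyond prefixlen bits */
    for (i = 0; i < 16; i++) {
        uint8_t keep = 0;

        if (prefixlen >= (i + 1) * 8)
            keep = 0xff;
        else if (prefixlen > i * 8)
            keep = (uint8_t)(0xff << (8 - (prefixlen - i * 8)));
        if ((prefix->s6_addr[i] & ~keep) != 0)
            return 0;
    }

    /* the relay prefix must be clean beyond relay_prefixlen bits */
    relay_mask = relay_prefixlen ?
        htonl(0xffffffffUL << (32 - relay_prefixlen)) : 0;
    return (relay_prefix & ~relay_mask) == 0;
}

int main(void)
{
    struct in6_addr p;
    struct in_addr relay;

    inet_pton(AF_INET6, "2001:db8::", &p);
    inet_pton(AF_INET, "192.0.2.0", &relay);

    printf("2001:db8::/32, relay 0/0          -> %d\n",
           valid6rd(&p, 32, 0, 0));
    printf("2001:db8::/48, relay 0/0          -> %d\n",
           valid6rd(&p, 48, 0, 0));
    printf("2001:db8::/56, relay 192.0.2.0/24 -> %d\n",
           valid6rd(&p, 56, relay.s_addr, 24));
    return 0;
}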
@@ -997,16 +1145,19 @@ static struct xfrm_tunnel sit_handler = { | |||
997 | .priority = 1, | 1145 | .priority = 1, |
998 | }; | 1146 | }; |
999 | 1147 | ||
1000 | static void sit_destroy_tunnels(struct sit_net *sitn) | 1148 | static void sit_destroy_tunnels(struct sit_net *sitn, struct list_head *head) |
1001 | { | 1149 | { |
1002 | int prio; | 1150 | int prio; |
1003 | 1151 | ||
1004 | for (prio = 1; prio < 4; prio++) { | 1152 | for (prio = 1; prio < 4; prio++) { |
1005 | int h; | 1153 | int h; |
1006 | for (h = 0; h < HASH_SIZE; h++) { | 1154 | for (h = 0; h < HASH_SIZE; h++) { |
1007 | struct ip_tunnel *t; | 1155 | struct ip_tunnel *t = sitn->tunnels[prio][h]; |
1008 | while ((t = sitn->tunnels[prio][h]) != NULL) | 1156 | |
1009 | unregister_netdevice(t->dev); | 1157 | while (t != NULL) { |
1158 | unregister_netdevice_queue(t->dev, head); | ||
1159 | t = t->next; | ||
1160 | } | ||
1010 | } | 1161 | } |
1011 | } | 1162 | } |
1012 | } | 1163 | } |
@@ -1039,6 +1190,7 @@ static int sit_init_net(struct net *net) | |||
1039 | dev_net_set(sitn->fb_tunnel_dev, net); | 1190 | dev_net_set(sitn->fb_tunnel_dev, net); |
1040 | 1191 | ||
1041 | ipip6_fb_tunnel_init(sitn->fb_tunnel_dev); | 1192 | ipip6_fb_tunnel_init(sitn->fb_tunnel_dev); |
1193 | ipip6_tunnel_clone_6rd(sitn->fb_tunnel_dev, sitn); | ||
1042 | 1194 | ||
1043 | if ((err = register_netdev(sitn->fb_tunnel_dev))) | 1195 | if ((err = register_netdev(sitn->fb_tunnel_dev))) |
1044 | goto err_reg_dev; | 1196 | goto err_reg_dev; |
@@ -1059,11 +1211,13 @@ err_alloc: | |||
1059 | static void sit_exit_net(struct net *net) | 1211 | static void sit_exit_net(struct net *net) |
1060 | { | 1212 | { |
1061 | struct sit_net *sitn; | 1213 | struct sit_net *sitn; |
1214 | LIST_HEAD(list); | ||
1062 | 1215 | ||
1063 | sitn = net_generic(net, sit_net_id); | 1216 | sitn = net_generic(net, sit_net_id); |
1064 | rtnl_lock(); | 1217 | rtnl_lock(); |
1065 | sit_destroy_tunnels(sitn); | 1218 | sit_destroy_tunnels(sitn, &list); |
1066 | unregister_netdevice(sitn->fb_tunnel_dev); | 1219 | unregister_netdevice_queue(sitn->fb_tunnel_dev, &list); |
1220 | unregister_netdevice_many(&list); | ||
1067 | rtnl_unlock(); | 1221 | rtnl_unlock(); |
1068 | kfree(sitn); | 1222 | kfree(sitn); |
1069 | } | 1223 | } |
@@ -1078,6 +1232,7 @@ static void __exit sit_cleanup(void) | |||
1078 | xfrm4_tunnel_deregister(&sit_handler, AF_INET6); | 1232 | xfrm4_tunnel_deregister(&sit_handler, AF_INET6); |
1079 | 1233 | ||
1080 | unregister_pernet_gen_device(sit_net_id, &sit_net_ops); | 1234 | unregister_pernet_gen_device(sit_net_id, &sit_net_ops); |
1235 | rcu_barrier(); /* Wait for completion of call_rcu()'s */ | ||
1081 | } | 1236 | } |
1082 | 1237 | ||
1083 | static int __init sit_init(void) | 1238 | static int __init sit_init(void) |
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c index 6b6ae913b5d4..612fc53e0bb9 100644 --- a/net/ipv6/syncookies.c +++ b/net/ipv6/syncookies.c | |||
@@ -184,13 +184,6 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) | |||
184 | 184 | ||
185 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV); | 185 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV); |
186 | 186 | ||
187 | /* check for timestamp cookie support */ | ||
188 | memset(&tcp_opt, 0, sizeof(tcp_opt)); | ||
189 | tcp_parse_options(skb, &tcp_opt, 0); | ||
190 | |||
191 | if (tcp_opt.saw_tstamp) | ||
192 | cookie_check_timestamp(&tcp_opt); | ||
193 | |||
194 | ret = NULL; | 187 | ret = NULL; |
195 | req = inet6_reqsk_alloc(&tcp6_request_sock_ops); | 188 | req = inet6_reqsk_alloc(&tcp6_request_sock_ops); |
196 | if (!req) | 189 | if (!req) |
@@ -224,12 +217,6 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) | |||
224 | req->expires = 0UL; | 217 | req->expires = 0UL; |
225 | req->retrans = 0; | 218 | req->retrans = 0; |
226 | ireq->ecn_ok = 0; | 219 | ireq->ecn_ok = 0; |
227 | ireq->snd_wscale = tcp_opt.snd_wscale; | ||
228 | ireq->rcv_wscale = tcp_opt.rcv_wscale; | ||
229 | ireq->sack_ok = tcp_opt.sack_ok; | ||
230 | ireq->wscale_ok = tcp_opt.wscale_ok; | ||
231 | ireq->tstamp_ok = tcp_opt.saw_tstamp; | ||
232 | req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0; | ||
233 | treq->rcv_isn = ntohl(th->seq) - 1; | 220 | treq->rcv_isn = ntohl(th->seq) - 1; |
234 | treq->snt_isn = cookie; | 221 | treq->snt_isn = cookie; |
235 | 222 | ||
@@ -252,8 +239,9 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) | |||
252 | } | 239 | } |
253 | ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr); | 240 | ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr); |
254 | fl.oif = sk->sk_bound_dev_if; | 241 | fl.oif = sk->sk_bound_dev_if; |
242 | fl.mark = sk->sk_mark; | ||
255 | fl.fl_ip_dport = inet_rsk(req)->rmt_port; | 243 | fl.fl_ip_dport = inet_rsk(req)->rmt_port; |
256 | fl.fl_ip_sport = inet_sk(sk)->sport; | 244 | fl.fl_ip_sport = inet_sk(sk)->inet_sport; |
257 | security_req_classify_flow(req, &fl); | 245 | security_req_classify_flow(req, &fl); |
258 | if (ip6_dst_lookup(sk, &dst, &fl)) | 246 | if (ip6_dst_lookup(sk, &dst, &fl)) |
259 | goto out_free; | 247 | goto out_free; |
@@ -264,6 +252,21 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) | |||
264 | goto out_free; | 252 | goto out_free; |
265 | } | 253 | } |
266 | 254 | ||
255 | /* check for timestamp cookie support */ | ||
256 | memset(&tcp_opt, 0, sizeof(tcp_opt)); | ||
257 | tcp_parse_options(skb, &tcp_opt, 0, dst); | ||
258 | |||
259 | if (tcp_opt.saw_tstamp) | ||
260 | cookie_check_timestamp(&tcp_opt); | ||
261 | |||
262 | req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0; | ||
263 | |||
264 | ireq->snd_wscale = tcp_opt.snd_wscale; | ||
265 | ireq->rcv_wscale = tcp_opt.rcv_wscale; | ||
266 | ireq->sack_ok = tcp_opt.sack_ok; | ||
267 | ireq->wscale_ok = tcp_opt.wscale_ok; | ||
268 | ireq->tstamp_ok = tcp_opt.saw_tstamp; | ||
269 | |||
267 | req->window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW); | 270 | req->window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW); |
268 | tcp_select_initial_window(tcp_full_space(sk), req->mss, | 271 | tcp_select_initial_window(tcp_full_space(sk), req->mss, |
269 | &req->rcv_wnd, &req->window_clamp, | 272 | &req->rcv_wnd, &req->window_clamp, |
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 21d100b68b19..34925f089e07 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -226,10 +226,9 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, | |||
226 | #endif | 226 | #endif |
227 | goto failure; | 227 | goto failure; |
228 | } else { | 228 | } else { |
229 | ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF), | 229 | ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr); |
230 | inet->saddr); | 230 | ipv6_addr_set_v4mapped(inet->inet_rcv_saddr, |
231 | ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF), | 231 | &np->rcv_saddr); |
232 | inet->rcv_saddr); | ||
233 | } | 232 | } |
234 | 233 | ||
235 | return err; | 234 | return err; |
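tcp_v6_connect() now calls ipv6_addr_set_v4mapped() instead of open-coding ipv6_addr_set(..., htonl(0x0000FFFF), addr); both produce the ::ffff:a.b.c.d mapped form that lets an AF_INET6 socket describe an IPv4 peer. A small standalone illustration of what that mapping looks like:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

/* Build ::ffff:a.b.c.d from an IPv4 address in network byte order. */
static void v4mapped(uint32_t v4, struct in6_addr *out)
{
    memset(out, 0, sizeof(*out));
    out->s6_addr[10] = 0xff;
    out->s6_addr[11] = 0xff;
    memcpy(&out->s6_addr[12], &v4, 4);
}

int main(void)
{
    struct in6_addr mapped;
    struct in_addr v4;
    char buf[INET6_ADDRSTRLEN];

    inet_pton(AF_INET, "192.0.2.1", &v4);
    v4mapped(v4.s_addr, &mapped);
    printf("%s\n", inet_ntop(AF_INET6, &mapped, buf, sizeof(buf)));
    return 0;
}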
@@ -243,8 +242,9 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, | |||
243 | ipv6_addr_copy(&fl.fl6_src, | 242 | ipv6_addr_copy(&fl.fl6_src, |
244 | (saddr ? saddr : &np->saddr)); | 243 | (saddr ? saddr : &np->saddr)); |
245 | fl.oif = sk->sk_bound_dev_if; | 244 | fl.oif = sk->sk_bound_dev_if; |
245 | fl.mark = sk->sk_mark; | ||
246 | fl.fl_ip_dport = usin->sin6_port; | 246 | fl.fl_ip_dport = usin->sin6_port; |
247 | fl.fl_ip_sport = inet->sport; | 247 | fl.fl_ip_sport = inet->inet_sport; |
248 | 248 | ||
249 | if (np->opt && np->opt->srcrt) { | 249 | if (np->opt && np->opt->srcrt) { |
250 | struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt; | 250 | struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt; |
@@ -276,7 +276,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, | |||
276 | 276 | ||
277 | /* set the source address */ | 277 | /* set the source address */ |
278 | ipv6_addr_copy(&np->saddr, saddr); | 278 | ipv6_addr_copy(&np->saddr, saddr); |
279 | inet->rcv_saddr = LOOPBACK4_IPV6; | 279 | inet->inet_rcv_saddr = LOOPBACK4_IPV6; |
280 | 280 | ||
281 | sk->sk_gso_type = SKB_GSO_TCPV6; | 281 | sk->sk_gso_type = SKB_GSO_TCPV6; |
282 | __ip6_dst_store(sk, dst, NULL, NULL); | 282 | __ip6_dst_store(sk, dst, NULL, NULL); |
@@ -288,7 +288,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, | |||
288 | 288 | ||
289 | tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr); | 289 | tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr); |
290 | 290 | ||
291 | inet->dport = usin->sin6_port; | 291 | inet->inet_dport = usin->sin6_port; |
292 | 292 | ||
293 | tcp_set_state(sk, TCP_SYN_SENT); | 293 | tcp_set_state(sk, TCP_SYN_SENT); |
294 | err = inet6_hash_connect(&tcp_death_row, sk); | 294 | err = inet6_hash_connect(&tcp_death_row, sk); |
@@ -298,8 +298,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, | |||
298 | if (!tp->write_seq) | 298 | if (!tp->write_seq) |
299 | tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32, | 299 | tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32, |
300 | np->daddr.s6_addr32, | 300 | np->daddr.s6_addr32, |
301 | inet->sport, | 301 | inet->inet_sport, |
302 | inet->dport); | 302 | inet->inet_dport); |
303 | 303 | ||
304 | err = tcp_connect(sk); | 304 | err = tcp_connect(sk); |
305 | if (err) | 305 | if (err) |
@@ -311,7 +311,7 @@ late_failure: | |||
311 | tcp_set_state(sk, TCP_CLOSE); | 311 | tcp_set_state(sk, TCP_CLOSE); |
312 | __sk_dst_reset(sk); | 312 | __sk_dst_reset(sk); |
313 | failure: | 313 | failure: |
314 | inet->dport = 0; | 314 | inet->inet_dport = 0; |
315 | sk->sk_route_caps = 0; | 315 | sk->sk_route_caps = 0; |
316 | return err; | 316 | return err; |
317 | } | 317 | } |
@@ -383,8 +383,9 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
383 | ipv6_addr_copy(&fl.fl6_dst, &np->daddr); | 383 | ipv6_addr_copy(&fl.fl6_dst, &np->daddr); |
384 | ipv6_addr_copy(&fl.fl6_src, &np->saddr); | 384 | ipv6_addr_copy(&fl.fl6_src, &np->saddr); |
385 | fl.oif = sk->sk_bound_dev_if; | 385 | fl.oif = sk->sk_bound_dev_if; |
386 | fl.fl_ip_dport = inet->dport; | 386 | fl.mark = sk->sk_mark; |
387 | fl.fl_ip_sport = inet->sport; | 387 | fl.fl_ip_dport = inet->inet_dport; |
388 | fl.fl_ip_sport = inet->inet_sport; | ||
388 | security_skb_classify_flow(skb, &fl); | 389 | security_skb_classify_flow(skb, &fl); |
389 | 390 | ||
390 | if ((err = ip6_dst_lookup(sk, &dst, &fl))) { | 391 | if ((err = ip6_dst_lookup(sk, &dst, &fl))) { |
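Several hunks in this file, and the matching ones in syncookies.c and ipv6_sockglue.c, copy sk->sk_mark into fl.mark before the route lookup, so fwmark-based policy routing also sees these internally generated IPv6 flows. The mark itself comes from the SO_MARK socket option, which needs CAP_NET_ADMIN; a minimal sketch with a made-up mark value follows (the #ifndef fallback value is an assumption for headers that do not expose SO_MARK).

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>

#ifndef SO_MARK
#define SO_MARK 36               /* assumption: common asm-generic value */
#endif

int main(void)
{
    int fd = socket(AF_INET6, SOCK_STREAM, 0);
    unsigned int mark = 0x2a;    /* example fwmark value */

    if (fd < 0) {
        perror("socket");
        return 1;
    }
    /* requires CAP_NET_ADMIN; the mark is what fl.mark carries into the
     * route lookups shown above */
    if (setsockopt(fd, SOL_SOCKET, SO_MARK, &mark, sizeof(mark)) < 0)
        perror("setsockopt(SO_MARK)");
    else
        printf("socket mark set to %#x\n", mark);
    return 0;
}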
@@ -477,6 +478,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req) | |||
477 | ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr); | 478 | ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr); |
478 | fl.fl6_flowlabel = 0; | 479 | fl.fl6_flowlabel = 0; |
479 | fl.oif = treq->iif; | 480 | fl.oif = treq->iif; |
481 | fl.mark = sk->sk_mark; | ||
480 | fl.fl_ip_dport = inet_rsk(req)->rmt_port; | 482 | fl.fl_ip_dport = inet_rsk(req)->rmt_port; |
481 | fl.fl_ip_sport = inet_rsk(req)->loc_port; | 483 | fl.fl_ip_sport = inet_rsk(req)->loc_port; |
482 | security_req_classify_flow(req, &fl); | 484 | security_req_classify_flow(req, &fl); |
@@ -1165,6 +1167,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) | |||
1165 | struct tcp_sock *tp = tcp_sk(sk); | 1167 | struct tcp_sock *tp = tcp_sk(sk); |
1166 | struct request_sock *req = NULL; | 1168 | struct request_sock *req = NULL; |
1167 | __u32 isn = TCP_SKB_CB(skb)->when; | 1169 | __u32 isn = TCP_SKB_CB(skb)->when; |
1170 | struct dst_entry *dst = __sk_dst_get(sk); | ||
1168 | #ifdef CONFIG_SYN_COOKIES | 1171 | #ifdef CONFIG_SYN_COOKIES |
1169 | int want_cookie = 0; | 1172 | int want_cookie = 0; |
1170 | #else | 1173 | #else |
@@ -1203,7 +1206,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) | |||
1203 | tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr); | 1206 | tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr); |
1204 | tmp_opt.user_mss = tp->rx_opt.user_mss; | 1207 | tmp_opt.user_mss = tp->rx_opt.user_mss; |
1205 | 1208 | ||
1206 | tcp_parse_options(skb, &tmp_opt, 0); | 1209 | tcp_parse_options(skb, &tmp_opt, 0, dst); |
1207 | 1210 | ||
1208 | if (want_cookie && !tmp_opt.saw_tstamp) | 1211 | if (want_cookie && !tmp_opt.saw_tstamp) |
1209 | tcp_clear_options(&tmp_opt); | 1212 | tcp_clear_options(&tmp_opt); |
@@ -1290,11 +1293,9 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, | |||
1290 | 1293 | ||
1291 | memcpy(newnp, np, sizeof(struct ipv6_pinfo)); | 1294 | memcpy(newnp, np, sizeof(struct ipv6_pinfo)); |
1292 | 1295 | ||
1293 | ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF), | 1296 | ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr); |
1294 | newinet->daddr); | ||
1295 | 1297 | ||
1296 | ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF), | 1298 | ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr); |
1297 | newinet->saddr); | ||
1298 | 1299 | ||
1299 | ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr); | 1300 | ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr); |
1300 | 1301 | ||
@@ -1345,6 +1346,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, | |||
1345 | } | 1346 | } |
1346 | ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr); | 1347 | ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr); |
1347 | fl.oif = sk->sk_bound_dev_if; | 1348 | fl.oif = sk->sk_bound_dev_if; |
1349 | fl.mark = sk->sk_mark; | ||
1348 | fl.fl_ip_dport = inet_rsk(req)->rmt_port; | 1350 | fl.fl_ip_dport = inet_rsk(req)->rmt_port; |
1349 | fl.fl_ip_sport = inet_rsk(req)->loc_port; | 1351 | fl.fl_ip_sport = inet_rsk(req)->loc_port; |
1350 | security_req_classify_flow(req, &fl); | 1352 | security_req_classify_flow(req, &fl); |
@@ -1431,7 +1433,8 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, | |||
1431 | newtp->advmss = dst_metric(dst, RTAX_ADVMSS); | 1433 | newtp->advmss = dst_metric(dst, RTAX_ADVMSS); |
1432 | tcp_initialize_rcv_mss(newsk); | 1434 | tcp_initialize_rcv_mss(newsk); |
1433 | 1435 | ||
1434 | newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6; | 1436 | newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6; |
1437 | newinet->inet_rcv_saddr = LOOPBACK4_IPV6; | ||
1435 | 1438 | ||
1436 | #ifdef CONFIG_TCP_MD5SIG | 1439 | #ifdef CONFIG_TCP_MD5SIG |
1437 | /* Copy over the MD5 key from the original socket */ | 1440 | /* Copy over the MD5 key from the original socket */ |
@@ -1931,8 +1934,8 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i) | |||
1931 | 1934 | ||
1932 | dest = &np->daddr; | 1935 | dest = &np->daddr; |
1933 | src = &np->rcv_saddr; | 1936 | src = &np->rcv_saddr; |
1934 | destp = ntohs(inet->dport); | 1937 | destp = ntohs(inet->inet_dport); |
1935 | srcp = ntohs(inet->sport); | 1938 | srcp = ntohs(inet->inet_sport); |
1936 | 1939 | ||
1937 | if (icsk->icsk_pending == ICSK_TIME_RETRANS) { | 1940 | if (icsk->icsk_pending == ICSK_TIME_RETRANS) { |
1938 | timer_active = 1; | 1941 | timer_active = 1; |
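The tcp_ipv6.c hunks above replace open-coded construction of IPv4-mapped IPv6 addresses (the 0, 0, htonl(0x0000FFFF), v4 pattern) with the ipv6_addr_set_v4mapped() helper. As a reference point, here is a minimal userspace sketch of the same ::ffff:a.b.c.d mapping using only standard socket headers; the helper name v4_to_v4mapped() is invented for the demo and is not a kernel or libc function.

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <netinet/in.h>
#include <arpa/inet.h>

/* Build ::ffff:a.b.c.d from an IPv4 address given in network byte order. */
static void v4_to_v4mapped(uint32_t v4, struct in6_addr *v6)
{
	memset(v6, 0, sizeof(*v6));
	v6->s6_addr[10] = 0xff;
	v6->s6_addr[11] = 0xff;
	memcpy(&v6->s6_addr[12], &v4, 4);	/* keep network byte order */
}

int main(void)
{
	struct in_addr v4;
	struct in6_addr v6;
	char buf[INET6_ADDRSTRLEN];

	inet_pton(AF_INET, "192.0.2.1", &v4);
	v4_to_v4mapped(v4.s_addr, &v6);
	inet_ntop(AF_INET6, &v6, buf, sizeof(buf));
	printf("%s\n", buf);	/* prints ::ffff:192.0.2.1 */
	return 0;
}

Folding the byte layout into one helper is what lets the diff above drop the repeated two-line ipv6_addr_set() calls.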
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 3a60f12b34ed..d3b59d73f507 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
@@ -53,7 +53,7 @@ int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2) | |||
53 | { | 53 | { |
54 | const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr; | 54 | const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr; |
55 | const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2); | 55 | const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2); |
56 | __be32 sk_rcv_saddr = inet_sk(sk)->rcv_saddr; | 56 | __be32 sk1_rcv_saddr = inet_sk(sk)->inet_rcv_saddr; |
57 | __be32 sk2_rcv_saddr = inet_rcv_saddr(sk2); | 57 | __be32 sk2_rcv_saddr = inet_rcv_saddr(sk2); |
58 | int sk_ipv6only = ipv6_only_sock(sk); | 58 | int sk_ipv6only = ipv6_only_sock(sk); |
59 | int sk2_ipv6only = inet_v6_ipv6only(sk2); | 59 | int sk2_ipv6only = inet_v6_ipv6only(sk2); |
@@ -63,8 +63,8 @@ int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2) | |||
63 | /* if both are mapped, treat as IPv4 */ | 63 | /* if both are mapped, treat as IPv4 */ |
64 | if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED) | 64 | if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED) |
65 | return (!sk2_ipv6only && | 65 | return (!sk2_ipv6only && |
66 | (!sk_rcv_saddr || !sk2_rcv_saddr || | 66 | (!sk1_rcv_saddr || !sk2_rcv_saddr || |
67 | sk_rcv_saddr == sk2_rcv_saddr)); | 67 | sk1_rcv_saddr == sk2_rcv_saddr)); |
68 | 68 | ||
69 | if (addr_type2 == IPV6_ADDR_ANY && | 69 | if (addr_type2 == IPV6_ADDR_ANY && |
70 | !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED)) | 70 | !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED)) |
@@ -100,8 +100,8 @@ static inline int compute_score(struct sock *sk, struct net *net, | |||
100 | struct inet_sock *inet = inet_sk(sk); | 100 | struct inet_sock *inet = inet_sk(sk); |
101 | 101 | ||
102 | score = 0; | 102 | score = 0; |
103 | if (inet->dport) { | 103 | if (inet->inet_dport) { |
104 | if (inet->dport != sport) | 104 | if (inet->inet_dport != sport) |
105 | return -1; | 105 | return -1; |
106 | score++; | 106 | score++; |
107 | } | 107 | } |
@@ -132,7 +132,7 @@ static struct sock *__udp6_lib_lookup(struct net *net, | |||
132 | struct sock *sk, *result; | 132 | struct sock *sk, *result; |
133 | struct hlist_nulls_node *node; | 133 | struct hlist_nulls_node *node; |
134 | unsigned short hnum = ntohs(dport); | 134 | unsigned short hnum = ntohs(dport); |
135 | unsigned int hash = udp_hashfn(net, hnum); | 135 | unsigned int hash = udp_hashfn(net, hnum, udptable->mask); |
136 | struct udp_hslot *hslot = &udptable->hash[hash]; | 136 | struct udp_hslot *hslot = &udptable->hash[hash]; |
137 | int score, badness; | 137 | int score, badness; |
138 | 138 | ||
@@ -252,7 +252,7 @@ try_again: | |||
252 | UDP_MIB_INDATAGRAMS, is_udplite); | 252 | UDP_MIB_INDATAGRAMS, is_udplite); |
253 | } | 253 | } |
254 | 254 | ||
255 | sock_recv_timestamp(msg, sk, skb); | 255 | sock_recv_ts_and_drops(msg, sk, skb); |
256 | 256 | ||
257 | /* Copy the address. */ | 257 | /* Copy the address. */ |
258 | if (msg->msg_name) { | 258 | if (msg->msg_name) { |
@@ -265,8 +265,8 @@ try_again: | |||
265 | sin6->sin6_scope_id = 0; | 265 | sin6->sin6_scope_id = 0; |
266 | 266 | ||
267 | if (is_udp4) | 267 | if (is_udp4) |
268 | ipv6_addr_set(&sin6->sin6_addr, 0, 0, | 268 | ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr, |
269 | htonl(0xffff), ip_hdr(skb)->saddr); | 269 | &sin6->sin6_addr); |
270 | else { | 270 | else { |
271 | ipv6_addr_copy(&sin6->sin6_addr, | 271 | ipv6_addr_copy(&sin6->sin6_addr, |
272 | &ipv6_hdr(skb)->saddr); | 272 | &ipv6_hdr(skb)->saddr); |
@@ -385,18 +385,18 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb) | |||
385 | goto drop; | 385 | goto drop; |
386 | } | 386 | } |
387 | 387 | ||
388 | if ((rc = sock_queue_rcv_skb(sk,skb)) < 0) { | 388 | if ((rc = sock_queue_rcv_skb(sk, skb)) < 0) { |
389 | /* Note that an ENOMEM error is charged twice */ | 389 | /* Note that an ENOMEM error is charged twice */ |
390 | if (rc == -ENOMEM) { | 390 | if (rc == -ENOMEM) |
391 | UDP6_INC_STATS_BH(sock_net(sk), | 391 | UDP6_INC_STATS_BH(sock_net(sk), |
392 | UDP_MIB_RCVBUFERRORS, is_udplite); | 392 | UDP_MIB_RCVBUFERRORS, is_udplite); |
393 | atomic_inc(&sk->sk_drops); | 393 | goto drop_no_sk_drops_inc; |
394 | } | ||
395 | goto drop; | ||
396 | } | 394 | } |
397 | 395 | ||
398 | return 0; | 396 | return 0; |
399 | drop: | 397 | drop: |
398 | atomic_inc(&sk->sk_drops); | ||
399 | drop_no_sk_drops_inc: | ||
400 | UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); | 400 | UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); |
401 | kfree_skb(skb); | 401 | kfree_skb(skb); |
402 | return -1; | 402 | return -1; |
@@ -419,8 +419,8 @@ static struct sock *udp_v6_mcast_next(struct net *net, struct sock *sk, | |||
419 | 419 | ||
420 | if (s->sk_hash == num && s->sk_family == PF_INET6) { | 420 | if (s->sk_hash == num && s->sk_family == PF_INET6) { |
421 | struct ipv6_pinfo *np = inet6_sk(s); | 421 | struct ipv6_pinfo *np = inet6_sk(s); |
422 | if (inet->dport) { | 422 | if (inet->inet_dport) { |
423 | if (inet->dport != rmt_port) | 423 | if (inet->inet_dport != rmt_port) |
424 | continue; | 424 | continue; |
425 | } | 425 | } |
426 | if (!ipv6_addr_any(&np->daddr) && | 426 | if (!ipv6_addr_any(&np->daddr) && |
@@ -452,7 +452,7 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb, | |||
452 | { | 452 | { |
453 | struct sock *sk, *sk2; | 453 | struct sock *sk, *sk2; |
454 | const struct udphdr *uh = udp_hdr(skb); | 454 | const struct udphdr *uh = udp_hdr(skb); |
455 | struct udp_hslot *hslot = &udptable->hash[udp_hashfn(net, ntohs(uh->dest))]; | 455 | struct udp_hslot *hslot = udp_hashslot(udptable, net, ntohs(uh->dest)); |
456 | int dif; | 456 | int dif; |
457 | 457 | ||
458 | spin_lock(&hslot->lock); | 458 | spin_lock(&hslot->lock); |
@@ -794,7 +794,7 @@ int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk, | |||
794 | if (ipv6_addr_v4mapped(daddr)) { | 794 | if (ipv6_addr_v4mapped(daddr)) { |
795 | struct sockaddr_in sin; | 795 | struct sockaddr_in sin; |
796 | sin.sin_family = AF_INET; | 796 | sin.sin_family = AF_INET; |
797 | sin.sin_port = sin6 ? sin6->sin6_port : inet->dport; | 797 | sin.sin_port = sin6 ? sin6->sin6_port : inet->inet_dport; |
798 | sin.sin_addr.s_addr = daddr->s6_addr32[3]; | 798 | sin.sin_addr.s_addr = daddr->s6_addr32[3]; |
799 | msg->msg_name = &sin; | 799 | msg->msg_name = &sin; |
800 | msg->msg_namelen = sizeof(sin); | 800 | msg->msg_namelen = sizeof(sin); |
@@ -867,7 +867,7 @@ do_udp_sendmsg: | |||
867 | if (sk->sk_state != TCP_ESTABLISHED) | 867 | if (sk->sk_state != TCP_ESTABLISHED) |
868 | return -EDESTADDRREQ; | 868 | return -EDESTADDRREQ; |
869 | 869 | ||
870 | fl.fl_ip_dport = inet->dport; | 870 | fl.fl_ip_dport = inet->inet_dport; |
871 | daddr = &np->daddr; | 871 | daddr = &np->daddr; |
872 | fl.fl6_flowlabel = np->flow_label; | 872 | fl.fl6_flowlabel = np->flow_label; |
873 | connected = 1; | 873 | connected = 1; |
@@ -879,6 +879,8 @@ do_udp_sendmsg: | |||
879 | if (!fl.oif) | 879 | if (!fl.oif) |
880 | fl.oif = np->sticky_pktinfo.ipi6_ifindex; | 880 | fl.oif = np->sticky_pktinfo.ipi6_ifindex; |
881 | 881 | ||
882 | fl.mark = sk->sk_mark; | ||
883 | |||
882 | if (msg->msg_controllen) { | 884 | if (msg->msg_controllen) { |
883 | opt = &opt_space; | 885 | opt = &opt_space; |
884 | memset(opt, 0, sizeof(struct ipv6_txoptions)); | 886 | memset(opt, 0, sizeof(struct ipv6_txoptions)); |
@@ -911,7 +913,7 @@ do_udp_sendmsg: | |||
911 | fl.fl6_dst.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */ | 913 | fl.fl6_dst.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */ |
912 | if (ipv6_addr_any(&fl.fl6_src) && !ipv6_addr_any(&np->saddr)) | 914 | if (ipv6_addr_any(&fl.fl6_src) && !ipv6_addr_any(&np->saddr)) |
913 | ipv6_addr_copy(&fl.fl6_src, &np->saddr); | 915 | ipv6_addr_copy(&fl.fl6_src, &np->saddr); |
914 | fl.fl_ip_sport = inet->sport; | 916 | fl.fl_ip_sport = inet->inet_sport; |
915 | 917 | ||
916 | /* merge ip6_build_xmit from ip6_output */ | 918 | /* merge ip6_build_xmit from ip6_output */ |
917 | if (opt && opt->srcrt) { | 919 | if (opt && opt->srcrt) { |
@@ -1192,10 +1194,10 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket | |||
1192 | 1194 | ||
1193 | dest = &np->daddr; | 1195 | dest = &np->daddr; |
1194 | src = &np->rcv_saddr; | 1196 | src = &np->rcv_saddr; |
1195 | destp = ntohs(inet->dport); | 1197 | destp = ntohs(inet->inet_dport); |
1196 | srcp = ntohs(inet->sport); | 1198 | srcp = ntohs(inet->inet_sport); |
1197 | seq_printf(seq, | 1199 | seq_printf(seq, |
1198 | "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " | 1200 | "%5d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " |
1199 | "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d\n", | 1201 | "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d\n", |
1200 | bucket, | 1202 | bucket, |
1201 | src->s6_addr32[0], src->s6_addr32[1], | 1203 | src->s6_addr32[0], src->s6_addr32[1], |
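The udp.c lookup paths above now pass the table's mask into the hash function (udp_hashfn(net, hnum, udptable->mask)) and use udp_hashslot() instead of assuming a fixed table size. The sketch below only illustrates the general idea of masking a hash into a power-of-two slot array; the table size and the mix function are made-up demo values, not the kernel's jhash-based udp_hashfn().

#include <stdio.h>
#include <stdint.h>

#define SLOT_BITS 7				/* 128 slots, a power of two */
#define SLOT_MASK ((1u << SLOT_BITS) - 1)	/* analogous to udptable->mask */

/* Toy mix function standing in for the kernel's hash of (net, port). */
static unsigned int port_to_slot(uint16_t port)
{
	uint32_t h = port * 2654435761u;	/* Knuth multiplicative hash */
	return (h >> (32 - SLOT_BITS)) & SLOT_MASK;
}

int main(void)
{
	uint16_t ports[] = { 53, 123, 4500, 51820 };

	for (unsigned int i = 0; i < sizeof(ports) / sizeof(ports[0]); i++)
		printf("port %5u -> slot %u\n", ports[i], port_to_slot(ports[i]));
	return 0;
}

Passing the mask in, rather than hard-coding it in the hash function, is what allows the slot count to be chosen at table-allocation time.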
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c index 81a95c00e503..438831d33593 100644 --- a/net/ipv6/xfrm6_tunnel.c +++ b/net/ipv6/xfrm6_tunnel.c | |||
@@ -23,7 +23,7 @@ | |||
23 | */ | 23 | */ |
24 | #include <linux/module.h> | 24 | #include <linux/module.h> |
25 | #include <linux/xfrm.h> | 25 | #include <linux/xfrm.h> |
26 | #include <linux/list.h> | 26 | #include <linux/rculist.h> |
27 | #include <net/ip.h> | 27 | #include <net/ip.h> |
28 | #include <net/xfrm.h> | 28 | #include <net/xfrm.h> |
29 | #include <net/ipv6.h> | 29 | #include <net/ipv6.h> |
@@ -36,14 +36,15 @@ | |||
36 | * per xfrm_address_t. | 36 | * per xfrm_address_t. |
37 | */ | 37 | */ |
38 | struct xfrm6_tunnel_spi { | 38 | struct xfrm6_tunnel_spi { |
39 | struct hlist_node list_byaddr; | 39 | struct hlist_node list_byaddr; |
40 | struct hlist_node list_byspi; | 40 | struct hlist_node list_byspi; |
41 | xfrm_address_t addr; | 41 | xfrm_address_t addr; |
42 | u32 spi; | 42 | u32 spi; |
43 | atomic_t refcnt; | 43 | atomic_t refcnt; |
44 | struct rcu_head rcu_head; | ||
44 | }; | 45 | }; |
45 | 46 | ||
46 | static DEFINE_RWLOCK(xfrm6_tunnel_spi_lock); | 47 | static DEFINE_SPINLOCK(xfrm6_tunnel_spi_lock); |
47 | 48 | ||
48 | static u32 xfrm6_tunnel_spi; | 49 | static u32 xfrm6_tunnel_spi; |
49 | 50 | ||
@@ -107,6 +108,7 @@ static void xfrm6_tunnel_spi_fini(void) | |||
107 | if (!hlist_empty(&xfrm6_tunnel_spi_byspi[i])) | 108 | if (!hlist_empty(&xfrm6_tunnel_spi_byspi[i])) |
108 | return; | 109 | return; |
109 | } | 110 | } |
111 | rcu_barrier(); | ||
110 | kmem_cache_destroy(xfrm6_tunnel_spi_kmem); | 112 | kmem_cache_destroy(xfrm6_tunnel_spi_kmem); |
111 | xfrm6_tunnel_spi_kmem = NULL; | 113 | xfrm6_tunnel_spi_kmem = NULL; |
112 | } | 114 | } |
@@ -116,7 +118,7 @@ static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr) | |||
116 | struct xfrm6_tunnel_spi *x6spi; | 118 | struct xfrm6_tunnel_spi *x6spi; |
117 | struct hlist_node *pos; | 119 | struct hlist_node *pos; |
118 | 120 | ||
119 | hlist_for_each_entry(x6spi, pos, | 121 | hlist_for_each_entry_rcu(x6spi, pos, |
120 | &xfrm6_tunnel_spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)], | 122 | &xfrm6_tunnel_spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)], |
121 | list_byaddr) { | 123 | list_byaddr) { |
122 | if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) | 124 | if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) |
@@ -131,10 +133,10 @@ __be32 xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr) | |||
131 | struct xfrm6_tunnel_spi *x6spi; | 133 | struct xfrm6_tunnel_spi *x6spi; |
132 | u32 spi; | 134 | u32 spi; |
133 | 135 | ||
134 | read_lock_bh(&xfrm6_tunnel_spi_lock); | 136 | rcu_read_lock_bh(); |
135 | x6spi = __xfrm6_tunnel_spi_lookup(saddr); | 137 | x6spi = __xfrm6_tunnel_spi_lookup(saddr); |
136 | spi = x6spi ? x6spi->spi : 0; | 138 | spi = x6spi ? x6spi->spi : 0; |
137 | read_unlock_bh(&xfrm6_tunnel_spi_lock); | 139 | rcu_read_unlock_bh(); |
138 | return htonl(spi); | 140 | return htonl(spi); |
139 | } | 141 | } |
140 | 142 | ||
@@ -185,14 +187,15 @@ alloc_spi: | |||
185 | if (!x6spi) | 187 | if (!x6spi) |
186 | goto out; | 188 | goto out; |
187 | 189 | ||
190 | INIT_RCU_HEAD(&x6spi->rcu_head); | ||
188 | memcpy(&x6spi->addr, saddr, sizeof(x6spi->addr)); | 191 | memcpy(&x6spi->addr, saddr, sizeof(x6spi->addr)); |
189 | x6spi->spi = spi; | 192 | x6spi->spi = spi; |
190 | atomic_set(&x6spi->refcnt, 1); | 193 | atomic_set(&x6spi->refcnt, 1); |
191 | 194 | ||
192 | hlist_add_head(&x6spi->list_byspi, &xfrm6_tunnel_spi_byspi[index]); | 195 | hlist_add_head_rcu(&x6spi->list_byspi, &xfrm6_tunnel_spi_byspi[index]); |
193 | 196 | ||
194 | index = xfrm6_tunnel_spi_hash_byaddr(saddr); | 197 | index = xfrm6_tunnel_spi_hash_byaddr(saddr); |
195 | hlist_add_head(&x6spi->list_byaddr, &xfrm6_tunnel_spi_byaddr[index]); | 198 | hlist_add_head_rcu(&x6spi->list_byaddr, &xfrm6_tunnel_spi_byaddr[index]); |
196 | out: | 199 | out: |
197 | return spi; | 200 | return spi; |
198 | } | 201 | } |
@@ -202,26 +205,32 @@ __be32 xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr) | |||
202 | struct xfrm6_tunnel_spi *x6spi; | 205 | struct xfrm6_tunnel_spi *x6spi; |
203 | u32 spi; | 206 | u32 spi; |
204 | 207 | ||
205 | write_lock_bh(&xfrm6_tunnel_spi_lock); | 208 | spin_lock_bh(&xfrm6_tunnel_spi_lock); |
206 | x6spi = __xfrm6_tunnel_spi_lookup(saddr); | 209 | x6spi = __xfrm6_tunnel_spi_lookup(saddr); |
207 | if (x6spi) { | 210 | if (x6spi) { |
208 | atomic_inc(&x6spi->refcnt); | 211 | atomic_inc(&x6spi->refcnt); |
209 | spi = x6spi->spi; | 212 | spi = x6spi->spi; |
210 | } else | 213 | } else |
211 | spi = __xfrm6_tunnel_alloc_spi(saddr); | 214 | spi = __xfrm6_tunnel_alloc_spi(saddr); |
212 | write_unlock_bh(&xfrm6_tunnel_spi_lock); | 215 | spin_unlock_bh(&xfrm6_tunnel_spi_lock); |
213 | 216 | ||
214 | return htonl(spi); | 217 | return htonl(spi); |
215 | } | 218 | } |
216 | 219 | ||
217 | EXPORT_SYMBOL(xfrm6_tunnel_alloc_spi); | 220 | EXPORT_SYMBOL(xfrm6_tunnel_alloc_spi); |
218 | 221 | ||
222 | static void x6spi_destroy_rcu(struct rcu_head *head) | ||
223 | { | ||
224 | kmem_cache_free(xfrm6_tunnel_spi_kmem, | ||
225 | container_of(head, struct xfrm6_tunnel_spi, rcu_head)); | ||
226 | } | ||
227 | |||
219 | void xfrm6_tunnel_free_spi(xfrm_address_t *saddr) | 228 | void xfrm6_tunnel_free_spi(xfrm_address_t *saddr) |
220 | { | 229 | { |
221 | struct xfrm6_tunnel_spi *x6spi; | 230 | struct xfrm6_tunnel_spi *x6spi; |
222 | struct hlist_node *pos, *n; | 231 | struct hlist_node *pos, *n; |
223 | 232 | ||
224 | write_lock_bh(&xfrm6_tunnel_spi_lock); | 233 | spin_lock_bh(&xfrm6_tunnel_spi_lock); |
225 | 234 | ||
226 | hlist_for_each_entry_safe(x6spi, pos, n, | 235 | hlist_for_each_entry_safe(x6spi, pos, n, |
227 | &xfrm6_tunnel_spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)], | 236 | &xfrm6_tunnel_spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)], |
@@ -229,14 +238,14 @@ void xfrm6_tunnel_free_spi(xfrm_address_t *saddr) | |||
229 | { | 238 | { |
230 | if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) { | 239 | if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) { |
231 | if (atomic_dec_and_test(&x6spi->refcnt)) { | 240 | if (atomic_dec_and_test(&x6spi->refcnt)) { |
232 | hlist_del(&x6spi->list_byaddr); | 241 | hlist_del_rcu(&x6spi->list_byaddr); |
233 | hlist_del(&x6spi->list_byspi); | 242 | hlist_del_rcu(&x6spi->list_byspi); |
234 | kmem_cache_free(xfrm6_tunnel_spi_kmem, x6spi); | 243 | call_rcu(&x6spi->rcu_head, x6spi_destroy_rcu); |
235 | break; | 244 | break; |
236 | } | 245 | } |
237 | } | 246 | } |
238 | } | 247 | } |
239 | write_unlock_bh(&xfrm6_tunnel_spi_lock); | 248 | spin_unlock_bh(&xfrm6_tunnel_spi_lock); |
240 | } | 249 | } |
241 | 250 | ||
242 | EXPORT_SYMBOL(xfrm6_tunnel_free_spi); | 251 | EXPORT_SYMBOL(xfrm6_tunnel_free_spi); |
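The xfrm6_tunnel.c hunks above are a textbook rwlock-to-RCU conversion: readers switch to rcu_read_lock(), writers keep a plain spinlock, unlinked entries are freed through call_rcu(), and rcu_barrier() is added before the kmem_cache is destroyed so no callback is still in flight. Below is a minimal userspace sketch of that lifecycle, assuming liburcu's legacy <urcu.h> API is available (build roughly as gcc -pthread sketch.c -lurcu; header and link details vary by liburcu version). All struct and function names in the sketch are invented for the demo and the pointer publication primitives are only noted in comments.

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <urcu.h>	/* rcu_read_lock(), call_rcu(), rcu_barrier(), ... */

struct spi_entry {
	struct rcu_head rcu_head;	/* first member, so the cast below is valid */
	unsigned int spi;
};

static struct spi_entry *current_entry;	/* RCU-protected shared pointer */
static pthread_mutex_t update_lock = PTHREAD_MUTEX_INITIALIZER;

static void entry_free_rcu(struct rcu_head *head)
{
	/* Runs only after every reader that could still see the old entry
	 * has left its read-side critical section. */
	free((struct spi_entry *)head);
}

static unsigned int lookup_spi(void)
{
	struct spi_entry *e;
	unsigned int spi = 0;

	rcu_read_lock();		/* readers never block the writer */
	e = current_entry;		/* real code would use rcu_dereference() */
	if (e)
		spi = e->spi;
	rcu_read_unlock();
	return spi;
}

static void replace_entry(unsigned int spi)
{
	struct spi_entry *e = malloc(sizeof(*e));
	struct spi_entry *old;

	if (!e)
		return;
	e->spi = spi;
	pthread_mutex_lock(&update_lock);	/* writers still serialize, as with the new spinlock */
	old = current_entry;
	current_entry = e;	/* real code would use rcu_assign_pointer() */
	pthread_mutex_unlock(&update_lock);
	if (old)
		call_rcu(&old->rcu_head, entry_free_rcu);	/* deferred free */
}

int main(void)
{
	rcu_register_thread();
	replace_entry(42);
	printf("spi = %u\n", lookup_spi());
	replace_entry(43);
	printf("spi = %u\n", lookup_spi());
	rcu_barrier();	/* same reason as the rcu_barrier() added before kmem_cache_destroy() */
	rcu_unregister_thread();
	return 0;
}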
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c index 66c7a20011f3..6481ee4bdf72 100644 --- a/net/ipx/af_ipx.c +++ b/net/ipx/af_ipx.c | |||
@@ -1927,7 +1927,7 @@ static int ipx_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long | |||
1927 | * Socket family declarations | 1927 | * Socket family declarations |
1928 | */ | 1928 | */ |
1929 | 1929 | ||
1930 | static struct net_proto_family ipx_family_ops = { | 1930 | static const struct net_proto_family ipx_family_ops = { |
1931 | .family = PF_IPX, | 1931 | .family = PF_IPX, |
1932 | .create = ipx_create, | 1932 | .create = ipx_create, |
1933 | .owner = THIS_MODULE, | 1933 | .owner = THIS_MODULE, |
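This is the first of many hunks in this series that constify a static net_proto_family table (irda, iucv, pfkey, llc, netlink, netrom, packet and phonet follow below). The standalone example here only illustrates why const on a function-pointer table is worthwhile: the object can live in read-only memory, so it cannot be overwritten at run time, and writes through its name no longer compile. The struct and names are invented for the demo, not the kernel's definitions.

#include <stdio.h>

/* A vtable-like ops structure, loosely analogous to net_proto_family. */
struct proto_family_demo {
	int family;
	int (*create)(void);
};

static int demo_create(void)
{
	return 0;
}

/* 'const' lets the toolchain place the table in .rodata; an accidental
 * "demo_family_ops.create = evil" is now a compile error. */
static const struct proto_family_demo demo_family_ops = {
	.family	= 42,
	.create	= demo_create,
};

int main(void)
{
	printf("family %d -> create() = %d\n",
	       demo_family_ops.family, demo_family_ops.create());
	return 0;
}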
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c index dd35641835f4..9429e4002bca 100644 --- a/net/irda/af_irda.c +++ b/net/irda/af_irda.c | |||
@@ -2463,7 +2463,7 @@ bed: | |||
2463 | return 0; | 2463 | return 0; |
2464 | } | 2464 | } |
2465 | 2465 | ||
2466 | static struct net_proto_family irda_family_ops = { | 2466 | static const struct net_proto_family irda_family_ops = { |
2467 | .family = PF_IRDA, | 2467 | .family = PF_IRDA, |
2468 | .create = irda_create, | 2468 | .create = irda_create, |
2469 | .owner = THIS_MODULE, | 2469 | .owner = THIS_MODULE, |
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index bada1b9c670b..3aebabb158a8 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c | |||
@@ -428,7 +428,6 @@ static void iucv_sock_close(struct sock *sk) | |||
428 | break; | 428 | break; |
429 | 429 | ||
430 | default: | 430 | default: |
431 | sock_set_flag(sk, SOCK_ZAPPED); | ||
432 | /* nothing to do here */ | 431 | /* nothing to do here */ |
433 | break; | 432 | break; |
434 | } | 433 | } |
@@ -536,7 +535,7 @@ void iucv_accept_enqueue(struct sock *parent, struct sock *sk) | |||
536 | list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q); | 535 | list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q); |
537 | spin_unlock_irqrestore(&par->accept_q_lock, flags); | 536 | spin_unlock_irqrestore(&par->accept_q_lock, flags); |
538 | iucv_sk(sk)->parent = parent; | 537 | iucv_sk(sk)->parent = parent; |
539 | parent->sk_ack_backlog++; | 538 | sk_acceptq_added(parent); |
540 | } | 539 | } |
541 | 540 | ||
542 | void iucv_accept_unlink(struct sock *sk) | 541 | void iucv_accept_unlink(struct sock *sk) |
@@ -547,7 +546,7 @@ void iucv_accept_unlink(struct sock *sk) | |||
547 | spin_lock_irqsave(&par->accept_q_lock, flags); | 546 | spin_lock_irqsave(&par->accept_q_lock, flags); |
548 | list_del_init(&iucv_sk(sk)->accept_q); | 547 | list_del_init(&iucv_sk(sk)->accept_q); |
549 | spin_unlock_irqrestore(&par->accept_q_lock, flags); | 548 | spin_unlock_irqrestore(&par->accept_q_lock, flags); |
550 | iucv_sk(sk)->parent->sk_ack_backlog--; | 549 | sk_acceptq_removed(iucv_sk(sk)->parent); |
551 | iucv_sk(sk)->parent = NULL; | 550 | iucv_sk(sk)->parent = NULL; |
552 | sock_put(sk); | 551 | sock_put(sk); |
553 | } | 552 | } |
@@ -1715,7 +1714,7 @@ static const struct proto_ops iucv_sock_ops = { | |||
1715 | .getsockopt = iucv_sock_getsockopt, | 1714 | .getsockopt = iucv_sock_getsockopt, |
1716 | }; | 1715 | }; |
1717 | 1716 | ||
1718 | static struct net_proto_family iucv_sock_family_ops = { | 1717 | static const struct net_proto_family iucv_sock_family_ops = { |
1719 | .family = AF_IUCV, | 1718 | .family = AF_IUCV, |
1720 | .owner = THIS_MODULE, | 1719 | .owner = THIS_MODULE, |
1721 | .create = iucv_sock_create, | 1720 | .create = iucv_sock_create, |
diff --git a/net/key/af_key.c b/net/key/af_key.c index 4e98193dfa0f..472f6594184a 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c | |||
@@ -3606,7 +3606,7 @@ static int pfkey_recvmsg(struct kiocb *kiocb, | |||
3606 | if (err) | 3606 | if (err) |
3607 | goto out_free; | 3607 | goto out_free; |
3608 | 3608 | ||
3609 | sock_recv_timestamp(msg, sk, skb); | 3609 | sock_recv_ts_and_drops(msg, sk, skb); |
3610 | 3610 | ||
3611 | err = (flags & MSG_TRUNC) ? skb->len : copied; | 3611 | err = (flags & MSG_TRUNC) ? skb->len : copied; |
3612 | 3612 | ||
@@ -3644,7 +3644,7 @@ static const struct proto_ops pfkey_ops = { | |||
3644 | .recvmsg = pfkey_recvmsg, | 3644 | .recvmsg = pfkey_recvmsg, |
3645 | }; | 3645 | }; |
3646 | 3646 | ||
3647 | static struct net_proto_family pfkey_family_ops = { | 3647 | static const struct net_proto_family pfkey_family_ops = { |
3648 | .family = PF_KEY, | 3648 | .family = PF_KEY, |
3649 | .create = pfkey_create, | 3649 | .create = pfkey_create, |
3650 | .owner = THIS_MODULE, | 3650 | .owner = THIS_MODULE, |
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index 7aa4fd170104..4866b4fb0c27 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c | |||
@@ -1092,7 +1092,7 @@ out: | |||
1092 | return rc; | 1092 | return rc; |
1093 | } | 1093 | } |
1094 | 1094 | ||
1095 | static struct net_proto_family llc_ui_family_ops = { | 1095 | static const struct net_proto_family llc_ui_family_ops = { |
1096 | .family = PF_LLC, | 1096 | .family = PF_LLC, |
1097 | .create = llc_ui_create, | 1097 | .create = llc_ui_create, |
1098 | .owner = THIS_MODULE, | 1098 | .owner = THIS_MODULE, |
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index b8295cbd7e8f..14f10eb91c5c 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c | |||
@@ -184,10 +184,12 @@ static int ieee80211_open(struct net_device *dev) | |||
184 | * No need to check netif_running since we do not allow | 184 | * No need to check netif_running since we do not allow |
185 | * it to start up with this invalid address. | 185 | * it to start up with this invalid address. |
186 | */ | 186 | */ |
187 | if (compare_ether_addr(null_addr, ndev->dev_addr) == 0) | 187 | if (compare_ether_addr(null_addr, ndev->dev_addr) == 0) { |
188 | memcpy(ndev->dev_addr, | 188 | memcpy(ndev->dev_addr, |
189 | local->hw.wiphy->perm_addr, | 189 | local->hw.wiphy->perm_addr, |
190 | ETH_ALEN); | 190 | ETH_ALEN); |
191 | memcpy(ndev->perm_addr, ndev->dev_addr, ETH_ALEN); | ||
192 | } | ||
191 | } | 193 | } |
192 | 194 | ||
193 | /* | 195 | /* |
@@ -312,7 +314,7 @@ static int ieee80211_open(struct net_device *dev) | |||
312 | if (sdata->vif.type == NL80211_IFTYPE_STATION) | 314 | if (sdata->vif.type == NL80211_IFTYPE_STATION) |
313 | ieee80211_queue_work(&local->hw, &sdata->u.mgd.work); | 315 | ieee80211_queue_work(&local->hw, &sdata->u.mgd.work); |
314 | 316 | ||
315 | netif_tx_start_all_queues(dev); | 317 | netif_start_queue(dev); |
316 | 318 | ||
317 | return 0; | 319 | return 0; |
318 | err_del_interface: | 320 | err_del_interface: |
@@ -341,7 +343,7 @@ static int ieee80211_stop(struct net_device *dev) | |||
341 | /* | 343 | /* |
342 | * Stop TX on this interface first. | 344 | * Stop TX on this interface first. |
343 | */ | 345 | */ |
344 | netif_tx_stop_all_queues(dev); | 346 | netif_stop_queue(dev); |
345 | 347 | ||
346 | /* | 348 | /* |
347 | * Now delete all active aggregation sessions. | 349 | * Now delete all active aggregation sessions. |
@@ -754,10 +756,6 @@ int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata, | |||
754 | return 0; | 756 | return 0; |
755 | } | 757 | } |
756 | 758 | ||
757 | static struct device_type wiphy_type = { | ||
758 | .name = "wlan", | ||
759 | }; | ||
760 | |||
761 | int ieee80211_if_add(struct ieee80211_local *local, const char *name, | 759 | int ieee80211_if_add(struct ieee80211_local *local, const char *name, |
762 | struct net_device **new_dev, enum nl80211_iftype type, | 760 | struct net_device **new_dev, enum nl80211_iftype type, |
763 | struct vif_params *params) | 761 | struct vif_params *params) |
@@ -788,8 +786,8 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name, | |||
788 | goto fail; | 786 | goto fail; |
789 | 787 | ||
790 | memcpy(ndev->dev_addr, local->hw.wiphy->perm_addr, ETH_ALEN); | 788 | memcpy(ndev->dev_addr, local->hw.wiphy->perm_addr, ETH_ALEN); |
789 | memcpy(ndev->perm_addr, ndev->dev_addr, ETH_ALEN); | ||
791 | SET_NETDEV_DEV(ndev, wiphy_dev(local->hw.wiphy)); | 790 | SET_NETDEV_DEV(ndev, wiphy_dev(local->hw.wiphy)); |
792 | SET_NETDEV_DEVTYPE(ndev, &wiphy_type); | ||
793 | 791 | ||
794 | /* don't use IEEE80211_DEV_TO_SUB_IF because it checks too much */ | 792 | /* don't use IEEE80211_DEV_TO_SUB_IF because it checks too much */ |
795 | sdata = netdev_priv(ndev); | 793 | sdata = netdev_priv(ndev); |
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index f7364e56f1ee..9a733890eb47 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c | |||
@@ -50,7 +50,7 @@ static void ieee80211_mesh_housekeeping_timer(unsigned long data) | |||
50 | struct ieee80211_local *local = sdata->local; | 50 | struct ieee80211_local *local = sdata->local; |
51 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; | 51 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; |
52 | 52 | ||
53 | ifmsh->wrkq_flags |= MESH_WORK_HOUSEKEEPING; | 53 | set_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags); |
54 | 54 | ||
55 | if (local->quiescing) { | 55 | if (local->quiescing) { |
56 | set_bit(TMR_RUNNING_HK, &ifmsh->timers_running); | 56 | set_bit(TMR_RUNNING_HK, &ifmsh->timers_running); |
@@ -480,7 +480,7 @@ void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata) | |||
480 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; | 480 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; |
481 | struct ieee80211_local *local = sdata->local; | 481 | struct ieee80211_local *local = sdata->local; |
482 | 482 | ||
483 | ifmsh->wrkq_flags |= MESH_WORK_HOUSEKEEPING; | 483 | set_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags); |
484 | ieee80211_queue_work(&local->hw, &ifmsh->work); | 484 | ieee80211_queue_work(&local->hw, &ifmsh->work); |
485 | sdata->vif.bss_conf.beacon_int = MESH_DEFAULT_BEACON_INTERVAL; | 485 | sdata->vif.bss_conf.beacon_int = MESH_DEFAULT_BEACON_INTERVAL; |
486 | ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON | | 486 | ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON | |
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index dc5049d58c51..dcc14e99227c 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c | |||
@@ -458,9 +458,15 @@ static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata, | |||
458 | mgmt->u.deauth.reason_code = cpu_to_le16(reason); | 458 | mgmt->u.deauth.reason_code = cpu_to_le16(reason); |
459 | 459 | ||
460 | if (stype == IEEE80211_STYPE_DEAUTH) | 460 | if (stype == IEEE80211_STYPE_DEAUTH) |
461 | cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len, cookie); | 461 | if (cookie) |
462 | __cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len); | ||
463 | else | ||
464 | cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len); | ||
462 | else | 465 | else |
463 | cfg80211_send_disassoc(sdata->dev, (u8 *)mgmt, skb->len, cookie); | 466 | if (cookie) |
467 | __cfg80211_send_disassoc(sdata->dev, (u8 *)mgmt, skb->len); | ||
468 | else | ||
469 | cfg80211_send_disassoc(sdata->dev, (u8 *)mgmt, skb->len); | ||
464 | ieee80211_tx_skb(sdata, skb, ifmgd->flags & IEEE80211_STA_MFP_ENABLED); | 470 | ieee80211_tx_skb(sdata, skb, ifmgd->flags & IEEE80211_STA_MFP_ENABLED); |
465 | } | 471 | } |
466 | 472 | ||
@@ -923,7 +929,7 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata, | |||
923 | ieee80211_recalc_ps(local, -1); | 929 | ieee80211_recalc_ps(local, -1); |
924 | mutex_unlock(&local->iflist_mtx); | 930 | mutex_unlock(&local->iflist_mtx); |
925 | 931 | ||
926 | netif_tx_start_all_queues(sdata->dev); | 932 | netif_start_queue(sdata->dev); |
927 | netif_carrier_on(sdata->dev); | 933 | netif_carrier_on(sdata->dev); |
928 | } | 934 | } |
929 | 935 | ||
@@ -1055,7 +1061,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, | |||
1055 | * time -- we don't want the scan code to enable queues. | 1061 | * time -- we don't want the scan code to enable queues. |
1056 | */ | 1062 | */ |
1057 | 1063 | ||
1058 | netif_tx_stop_all_queues(sdata->dev); | 1064 | netif_stop_queue(sdata->dev); |
1059 | netif_carrier_off(sdata->dev); | 1065 | netif_carrier_off(sdata->dev); |
1060 | 1066 | ||
1061 | rcu_read_lock(); | 1067 | rcu_read_lock(); |
@@ -1958,12 +1964,10 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, | |||
1958 | /* no action */ | 1964 | /* no action */ |
1959 | break; | 1965 | break; |
1960 | case RX_MGMT_CFG80211_DEAUTH: | 1966 | case RX_MGMT_CFG80211_DEAUTH: |
1961 | cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len, | 1967 | cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len); |
1962 | NULL); | ||
1963 | break; | 1968 | break; |
1964 | case RX_MGMT_CFG80211_DISASSOC: | 1969 | case RX_MGMT_CFG80211_DISASSOC: |
1965 | cfg80211_send_disassoc(sdata->dev, (u8 *)mgmt, skb->len, | 1970 | cfg80211_send_disassoc(sdata->dev, (u8 *)mgmt, skb->len); |
1966 | NULL); | ||
1967 | break; | 1971 | break; |
1968 | default: | 1972 | default: |
1969 | WARN(1, "unexpected: %d", rma); | 1973 | WARN(1, "unexpected: %d", rma); |
@@ -2018,7 +2022,7 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, | |||
2018 | cfg80211_send_rx_assoc(sdata->dev, (u8 *) mgmt, skb->len); | 2022 | cfg80211_send_rx_assoc(sdata->dev, (u8 *) mgmt, skb->len); |
2019 | break; | 2023 | break; |
2020 | case RX_MGMT_CFG80211_DEAUTH: | 2024 | case RX_MGMT_CFG80211_DEAUTH: |
2021 | cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len, NULL); | 2025 | cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len); |
2022 | break; | 2026 | break; |
2023 | default: | 2027 | default: |
2024 | WARN(1, "unexpected: %d", rma); | 2028 | WARN(1, "unexpected: %d", rma); |
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 7170bf4565a8..5c385e3c1d1f 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
@@ -1504,19 +1504,28 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) | |||
1504 | /* illegal frame */ | 1504 | /* illegal frame */ |
1505 | return RX_DROP_MONITOR; | 1505 | return RX_DROP_MONITOR; |
1506 | 1506 | ||
1507 | if (!is_multicast_ether_addr(hdr->addr1) && | 1507 | if (mesh_hdr->flags & MESH_FLAGS_AE) { |
1508 | (mesh_hdr->flags & MESH_FLAGS_AE_A5_A6)) { | ||
1509 | struct mesh_path *mppath; | 1508 | struct mesh_path *mppath; |
1509 | char *proxied_addr; | ||
1510 | char *mpp_addr; | ||
1511 | |||
1512 | if (is_multicast_ether_addr(hdr->addr1)) { | ||
1513 | mpp_addr = hdr->addr3; | ||
1514 | proxied_addr = mesh_hdr->eaddr1; | ||
1515 | } else { | ||
1516 | mpp_addr = hdr->addr4; | ||
1517 | proxied_addr = mesh_hdr->eaddr2; | ||
1518 | } | ||
1510 | 1519 | ||
1511 | rcu_read_lock(); | 1520 | rcu_read_lock(); |
1512 | mppath = mpp_path_lookup(mesh_hdr->eaddr2, sdata); | 1521 | mppath = mpp_path_lookup(proxied_addr, sdata); |
1513 | if (!mppath) { | 1522 | if (!mppath) { |
1514 | mpp_path_add(mesh_hdr->eaddr2, hdr->addr4, sdata); | 1523 | mpp_path_add(proxied_addr, mpp_addr, sdata); |
1515 | } else { | 1524 | } else { |
1516 | spin_lock_bh(&mppath->state_lock); | 1525 | spin_lock_bh(&mppath->state_lock); |
1517 | mppath->exp_time = jiffies; | 1526 | mppath->exp_time = jiffies; |
1518 | if (compare_ether_addr(mppath->mpp, hdr->addr4) != 0) | 1527 | if (compare_ether_addr(mppath->mpp, mpp_addr) != 0) |
1519 | memcpy(mppath->mpp, hdr->addr4, ETH_ALEN); | 1528 | memcpy(mppath->mpp, mpp_addr, ETH_ALEN); |
1520 | spin_unlock_bh(&mppath->state_lock); | 1529 | spin_unlock_bh(&mppath->state_lock); |
1521 | } | 1530 | } |
1522 | rcu_read_unlock(); | 1531 | rcu_read_unlock(); |
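The mesh forwarding hunk above branches on is_multicast_ether_addr(hdr->addr1) to decide which mesh extended address pair to use. In the kernel that helper just tests the I/G bit of the first address octet; the standalone equivalent below uses a function name of my own, not the kernel helper.

#include <stdio.h>
#include <stdint.h>

/* An Ethernet address is group/multicast when bit 0 of octet 0 is set. */
static int eth_addr_is_multicast(const uint8_t addr[6])
{
	return addr[0] & 0x01;
}

int main(void)
{
	const uint8_t ucast[6] = { 0x00, 0x1b, 0x21, 0x00, 0x00, 0x01 };
	const uint8_t mcast[6] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x01 };	/* IPv6 multicast prefix */

	printf("unicast sample   -> %d\n", eth_addr_is_multicast(ucast));	/* 0 */
	printf("multicast sample -> %d\n", eth_addr_is_multicast(mcast));	/* 1 */
	return 0;
}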
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index 71e10cabf811..7a350d2690a0 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c | |||
@@ -306,10 +306,10 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted) | |||
306 | if (sdata->vif.type == NL80211_IFTYPE_STATION) { | 306 | if (sdata->vif.type == NL80211_IFTYPE_STATION) { |
307 | if (sdata->u.mgd.associated) { | 307 | if (sdata->u.mgd.associated) { |
308 | ieee80211_scan_ps_disable(sdata); | 308 | ieee80211_scan_ps_disable(sdata); |
309 | netif_tx_wake_all_queues(sdata->dev); | 309 | netif_wake_queue(sdata->dev); |
310 | } | 310 | } |
311 | } else | 311 | } else |
312 | netif_tx_wake_all_queues(sdata->dev); | 312 | netif_wake_queue(sdata->dev); |
313 | 313 | ||
314 | /* re-enable beaconing */ | 314 | /* re-enable beaconing */ |
315 | if (sdata->vif.type == NL80211_IFTYPE_AP || | 315 | if (sdata->vif.type == NL80211_IFTYPE_AP || |
@@ -364,7 +364,7 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local) | |||
364 | * are handled in the scan state machine | 364 | * are handled in the scan state machine |
365 | */ | 365 | */ |
366 | if (sdata->vif.type != NL80211_IFTYPE_STATION) | 366 | if (sdata->vif.type != NL80211_IFTYPE_STATION) |
367 | netif_tx_stop_all_queues(sdata->dev); | 367 | netif_stop_queue(sdata->dev); |
368 | } | 368 | } |
369 | mutex_unlock(&local->iflist_mtx); | 369 | mutex_unlock(&local->iflist_mtx); |
370 | 370 | ||
@@ -523,7 +523,7 @@ static void ieee80211_scan_state_leave_oper_channel(struct ieee80211_local *loca | |||
523 | continue; | 523 | continue; |
524 | 524 | ||
525 | if (sdata->vif.type == NL80211_IFTYPE_STATION) { | 525 | if (sdata->vif.type == NL80211_IFTYPE_STATION) { |
526 | netif_tx_stop_all_queues(sdata->dev); | 526 | netif_stop_queue(sdata->dev); |
527 | if (sdata->u.mgd.associated) | 527 | if (sdata->u.mgd.associated) |
528 | ieee80211_scan_ps_enable(sdata); | 528 | ieee80211_scan_ps_enable(sdata); |
529 | } | 529 | } |
@@ -558,7 +558,7 @@ static void ieee80211_scan_state_enter_oper_channel(struct ieee80211_local *loca | |||
558 | if (sdata->vif.type == NL80211_IFTYPE_STATION) { | 558 | if (sdata->vif.type == NL80211_IFTYPE_STATION) { |
559 | if (sdata->u.mgd.associated) | 559 | if (sdata->u.mgd.associated) |
560 | ieee80211_scan_ps_disable(sdata); | 560 | ieee80211_scan_ps_disable(sdata); |
561 | netif_tx_wake_all_queues(sdata->dev); | 561 | netif_wake_queue(sdata->dev); |
562 | } | 562 | } |
563 | } | 563 | } |
564 | mutex_unlock(&local->iflist_mtx); | 564 | mutex_unlock(&local->iflist_mtx); |
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c index ebf00ad5b194..362afbd60a96 100644 --- a/net/netfilter/xt_socket.c +++ b/net/netfilter/xt_socket.c | |||
@@ -149,7 +149,7 @@ socket_match(const struct sk_buff *skb, const struct xt_match_param *par, | |||
149 | 149 | ||
150 | /* Ignore sockets listening on INADDR_ANY */ | 150 | /* Ignore sockets listening on INADDR_ANY */ |
151 | wildcard = (sk->sk_state != TCP_TIME_WAIT && | 151 | wildcard = (sk->sk_state != TCP_TIME_WAIT && |
152 | inet_sk(sk)->rcv_saddr == 0); | 152 | inet_sk(sk)->inet_rcv_saddr == 0); |
153 | 153 | ||
154 | /* Ignore non-transparent sockets, | 154 | /* Ignore non-transparent sockets, |
155 | if XT_SOCKET_TRANSPARENT is used */ | 155 | if XT_SOCKET_TRANSPARENT is used */ |
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 19e98007691c..0cd2d8829313 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
@@ -2050,7 +2050,7 @@ static const struct proto_ops netlink_ops = { | |||
2050 | .sendpage = sock_no_sendpage, | 2050 | .sendpage = sock_no_sendpage, |
2051 | }; | 2051 | }; |
2052 | 2052 | ||
2053 | static struct net_proto_family netlink_family_ops = { | 2053 | static const struct net_proto_family netlink_family_ops = { |
2054 | .family = PF_NETLINK, | 2054 | .family = PF_NETLINK, |
2055 | .create = netlink_create, | 2055 | .create = netlink_create, |
2056 | .owner = THIS_MODULE, /* for consistency 8) */ | 2056 | .owner = THIS_MODULE, /* for consistency 8) */ |
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c index 44ff3f3810fa..d07ecda0a92d 100644 --- a/net/netlink/genetlink.c +++ b/net/netlink/genetlink.c | |||
@@ -97,25 +97,17 @@ static struct genl_ops *genl_get_cmd(u8 cmd, struct genl_family *family) | |||
97 | */ | 97 | */ |
98 | static inline u16 genl_generate_id(void) | 98 | static inline u16 genl_generate_id(void) |
99 | { | 99 | { |
100 | static u16 id_gen_idx; | 100 | static u16 id_gen_idx = GENL_MIN_ID; |
101 | int overflowed = 0; | 101 | int i; |
102 | 102 | ||
103 | do { | 103 | for (i = 0; i <= GENL_MAX_ID - GENL_MIN_ID; i++) { |
104 | if (id_gen_idx == 0) | 104 | if (!genl_family_find_byid(id_gen_idx)) |
105 | return id_gen_idx; | ||
106 | if (++id_gen_idx > GENL_MAX_ID) | ||
105 | id_gen_idx = GENL_MIN_ID; | 107 | id_gen_idx = GENL_MIN_ID; |
108 | } | ||
106 | 109 | ||
107 | if (++id_gen_idx > GENL_MAX_ID) { | 110 | return 0; |
108 | if (!overflowed) { | ||
109 | overflowed = 1; | ||
110 | id_gen_idx = 0; | ||
111 | continue; | ||
112 | } else | ||
113 | return 0; | ||
114 | } | ||
115 | |||
116 | } while (genl_family_find_byid(id_gen_idx)); | ||
117 | |||
118 | return id_gen_idx; | ||
119 | } | 111 | } |
120 | 112 | ||
121 | static struct genl_multicast_group notify_grp; | 113 | static struct genl_multicast_group notify_grp; |
@@ -374,11 +366,6 @@ int genl_register_family(struct genl_family *family) | |||
374 | goto errout_locked; | 366 | goto errout_locked; |
375 | } | 367 | } |
376 | 368 | ||
377 | if (genl_family_find_byid(family->id)) { | ||
378 | err = -EEXIST; | ||
379 | goto errout_locked; | ||
380 | } | ||
381 | |||
382 | if (family->id == GENL_ID_GENERATE) { | 369 | if (family->id == GENL_ID_GENERATE) { |
383 | u16 newid = genl_generate_id(); | 370 | u16 newid = genl_generate_id(); |
384 | 371 | ||
@@ -388,6 +375,9 @@ int genl_register_family(struct genl_family *family) | |||
388 | } | 375 | } |
389 | 376 | ||
390 | family->id = newid; | 377 | family->id = newid; |
378 | } else if (genl_family_find_byid(family->id)) { | ||
379 | err = -EEXIST; | ||
380 | goto errout_locked; | ||
391 | } | 381 | } |
392 | 382 | ||
393 | if (family->maxattr) { | 383 | if (family->maxattr) { |
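The rewritten genl_generate_id() above is a bounded scan: try each candidate ID at most once, wrap from GENL_MAX_ID back to GENL_MIN_ID, and return 0 when the whole range is occupied. The same logic in a self-contained form is sketched below, with a bitmap standing in for genl_family_find_byid() and arbitrary demo range limits in place of the GENL_* constants.

#include <stdio.h>
#include <stdbool.h>

#define ID_MIN 16
#define ID_MAX 31

static bool id_in_use[ID_MAX + 1];	/* stand-in for the family-by-id lookup */

/* Return the next free ID in [ID_MIN, ID_MAX], or 0 if all are taken. */
static unsigned int generate_id(void)
{
	static unsigned int next = ID_MIN;	/* remembers where to resume */

	for (int i = 0; i <= ID_MAX - ID_MIN; i++) {
		if (!id_in_use[next])
			return next;
		if (++next > ID_MAX)
			next = ID_MIN;
	}
	return 0;	/* range exhausted */
}

int main(void)
{
	for (int n = 0; n < 3; n++) {
		unsigned int id = generate_id();

		if (id)
			id_in_use[id] = true;	/* caller registers the family */
		printf("allocated id %u\n", id);
	}
	return 0;
}

Compared with the old do/while with an overflow flag, the counted loop makes the "every ID tried exactly once" guarantee obvious.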
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c index 7a834952f67f..281fa597cae5 100644 --- a/net/netrom/af_netrom.c +++ b/net/netrom/af_netrom.c | |||
@@ -1372,7 +1372,7 @@ static const struct file_operations nr_info_fops = { | |||
1372 | }; | 1372 | }; |
1373 | #endif /* CONFIG_PROC_FS */ | 1373 | #endif /* CONFIG_PROC_FS */ |
1374 | 1374 | ||
1375 | static struct net_proto_family nr_family_ops = { | 1375 | static const struct net_proto_family nr_family_ops = { |
1376 | .family = PF_NETROM, | 1376 | .family = PF_NETROM, |
1377 | .create = nr_create, | 1377 | .create = nr_create, |
1378 | .owner = THIS_MODULE, | 1378 | .owner = THIS_MODULE, |
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 48b18dad6763..95ef64e4189a 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
@@ -79,6 +79,7 @@ | |||
79 | #include <linux/module.h> | 79 | #include <linux/module.h> |
80 | #include <linux/init.h> | 80 | #include <linux/init.h> |
81 | #include <linux/mutex.h> | 81 | #include <linux/mutex.h> |
82 | #include <linux/if_vlan.h> | ||
82 | 83 | ||
83 | #ifdef CONFIG_INET | 84 | #ifdef CONFIG_INET |
84 | #include <net/inet_common.h> | 85 | #include <net/inet_common.h> |
@@ -188,7 +189,6 @@ struct packet_sock { | |||
188 | struct packet_ring_buffer tx_ring; | 189 | struct packet_ring_buffer tx_ring; |
189 | int copy_thresh; | 190 | int copy_thresh; |
190 | #endif | 191 | #endif |
191 | struct packet_type prot_hook; | ||
192 | spinlock_t bind_lock; | 192 | spinlock_t bind_lock; |
193 | struct mutex pg_vec_lock; | 193 | struct mutex pg_vec_lock; |
194 | unsigned int running:1, /* prot_hook is attached*/ | 194 | unsigned int running:1, /* prot_hook is attached*/ |
@@ -204,6 +204,7 @@ struct packet_sock { | |||
204 | unsigned int tp_reserve; | 204 | unsigned int tp_reserve; |
205 | unsigned int tp_loss:1; | 205 | unsigned int tp_loss:1; |
206 | #endif | 206 | #endif |
207 | struct packet_type prot_hook ____cacheline_aligned_in_smp; | ||
207 | }; | 208 | }; |
208 | 209 | ||
209 | struct packet_skb_cb { | 210 | struct packet_skb_cb { |
@@ -490,6 +491,7 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock, | |||
490 | skb->protocol = proto; | 491 | skb->protocol = proto; |
491 | skb->dev = dev; | 492 | skb->dev = dev; |
492 | skb->priority = sk->sk_priority; | 493 | skb->priority = sk->sk_priority; |
494 | skb->mark = sk->sk_mark; | ||
493 | if (err) | 495 | if (err) |
494 | goto out_free; | 496 | goto out_free; |
495 | 497 | ||
@@ -626,15 +628,14 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev, | |||
626 | 628 | ||
627 | spin_lock(&sk->sk_receive_queue.lock); | 629 | spin_lock(&sk->sk_receive_queue.lock); |
628 | po->stats.tp_packets++; | 630 | po->stats.tp_packets++; |
631 | skb->dropcount = atomic_read(&sk->sk_drops); | ||
629 | __skb_queue_tail(&sk->sk_receive_queue, skb); | 632 | __skb_queue_tail(&sk->sk_receive_queue, skb); |
630 | spin_unlock(&sk->sk_receive_queue.lock); | 633 | spin_unlock(&sk->sk_receive_queue.lock); |
631 | sk->sk_data_ready(sk, skb->len); | 634 | sk->sk_data_ready(sk, skb->len); |
632 | return 0; | 635 | return 0; |
633 | 636 | ||
634 | drop_n_acct: | 637 | drop_n_acct: |
635 | spin_lock(&sk->sk_receive_queue.lock); | 638 | po->stats.tp_drops = atomic_inc_return(&sk->sk_drops); |
636 | po->stats.tp_drops++; | ||
637 | spin_unlock(&sk->sk_receive_queue.lock); | ||
638 | 639 | ||
639 | drop_n_restore: | 640 | drop_n_restore: |
640 | if (skb_head != skb->data && skb_shared(skb)) { | 641 | if (skb_head != skb->data && skb_shared(skb)) { |
@@ -766,7 +767,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, | |||
766 | getnstimeofday(&ts); | 767 | getnstimeofday(&ts); |
767 | h.h2->tp_sec = ts.tv_sec; | 768 | h.h2->tp_sec = ts.tv_sec; |
768 | h.h2->tp_nsec = ts.tv_nsec; | 769 | h.h2->tp_nsec = ts.tv_nsec; |
769 | h.h2->tp_vlan_tci = skb->vlan_tci; | 770 | h.h2->tp_vlan_tci = vlan_tx_tag_get(skb); |
770 | hdrlen = sizeof(*h.h2); | 771 | hdrlen = sizeof(*h.h2); |
771 | break; | 772 | break; |
772 | default: | 773 | default: |
@@ -856,6 +857,7 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, | |||
856 | skb->protocol = proto; | 857 | skb->protocol = proto; |
857 | skb->dev = dev; | 858 | skb->dev = dev; |
858 | skb->priority = po->sk.sk_priority; | 859 | skb->priority = po->sk.sk_priority; |
860 | skb->mark = po->sk.sk_mark; | ||
859 | skb_shinfo(skb)->destructor_arg = ph.raw; | 861 | skb_shinfo(skb)->destructor_arg = ph.raw; |
860 | 862 | ||
861 | switch (po->tp_version) { | 863 | switch (po->tp_version) { |
@@ -1122,6 +1124,7 @@ static int packet_snd(struct socket *sock, | |||
1122 | skb->protocol = proto; | 1124 | skb->protocol = proto; |
1123 | skb->dev = dev; | 1125 | skb->dev = dev; |
1124 | skb->priority = sk->sk_priority; | 1126 | skb->priority = sk->sk_priority; |
1127 | skb->mark = sk->sk_mark; | ||
1125 | 1128 | ||
1126 | /* | 1129 | /* |
1127 | * Now send it | 1130 | * Now send it |
@@ -1472,7 +1475,7 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1472 | if (err) | 1475 | if (err) |
1473 | goto out_free; | 1476 | goto out_free; |
1474 | 1477 | ||
1475 | sock_recv_timestamp(msg, sk, skb); | 1478 | sock_recv_ts_and_drops(msg, sk, skb); |
1476 | 1479 | ||
1477 | if (msg->msg_name) | 1480 | if (msg->msg_name) |
1478 | memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, | 1481 | memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, |
@@ -1488,7 +1491,7 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1488 | aux.tp_snaplen = skb->len; | 1491 | aux.tp_snaplen = skb->len; |
1489 | aux.tp_mac = 0; | 1492 | aux.tp_mac = 0; |
1490 | aux.tp_net = skb_network_offset(skb); | 1493 | aux.tp_net = skb_network_offset(skb); |
1491 | aux.tp_vlan_tci = skb->vlan_tci; | 1494 | aux.tp_vlan_tci = vlan_tx_tag_get(skb); |
1492 | 1495 | ||
1493 | put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux); | 1496 | put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux); |
1494 | } | 1497 | } |
@@ -1659,11 +1662,9 @@ static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq) | |||
1659 | if (--ml->count == 0) { | 1662 | if (--ml->count == 0) { |
1660 | struct net_device *dev; | 1663 | struct net_device *dev; |
1661 | *mlp = ml->next; | 1664 | *mlp = ml->next; |
1662 | dev = dev_get_by_index(sock_net(sk), ml->ifindex); | 1665 | dev = __dev_get_by_index(sock_net(sk), ml->ifindex); |
1663 | if (dev) { | 1666 | if (dev) |
1664 | packet_dev_mc(dev, ml, -1); | 1667 | packet_dev_mc(dev, ml, -1); |
1665 | dev_put(dev); | ||
1666 | } | ||
1667 | kfree(ml); | 1668 | kfree(ml); |
1668 | } | 1669 | } |
1669 | rtnl_unlock(); | 1670 | rtnl_unlock(); |
@@ -1687,11 +1688,9 @@ static void packet_flush_mclist(struct sock *sk) | |||
1687 | struct net_device *dev; | 1688 | struct net_device *dev; |
1688 | 1689 | ||
1689 | po->mclist = ml->next; | 1690 | po->mclist = ml->next; |
1690 | dev = dev_get_by_index(sock_net(sk), ml->ifindex); | 1691 | dev = __dev_get_by_index(sock_net(sk), ml->ifindex); |
1691 | if (dev != NULL) { | 1692 | if (dev != NULL) |
1692 | packet_dev_mc(dev, ml, -1); | 1693 | packet_dev_mc(dev, ml, -1); |
1693 | dev_put(dev); | ||
1694 | } | ||
1695 | kfree(ml); | 1694 | kfree(ml); |
1696 | } | 1695 | } |
1697 | rtnl_unlock(); | 1696 | rtnl_unlock(); |
@@ -2081,7 +2080,7 @@ static void packet_mm_close(struct vm_area_struct *vma) | |||
2081 | atomic_dec(&pkt_sk(sk)->mapped); | 2080 | atomic_dec(&pkt_sk(sk)->mapped); |
2082 | } | 2081 | } |
2083 | 2082 | ||
2084 | static struct vm_operations_struct packet_mmap_ops = { | 2083 | static const struct vm_operations_struct packet_mmap_ops = { |
2085 | .open = packet_mm_open, | 2084 | .open = packet_mm_open, |
2086 | .close = packet_mm_close, | 2085 | .close = packet_mm_close, |
2087 | }; | 2086 | }; |
@@ -2360,7 +2359,7 @@ static const struct proto_ops packet_ops = { | |||
2360 | .sendpage = sock_no_sendpage, | 2359 | .sendpage = sock_no_sendpage, |
2361 | }; | 2360 | }; |
2362 | 2361 | ||
2363 | static struct net_proto_family packet_family_ops = { | 2362 | static const struct net_proto_family packet_family_ops = { |
2364 | .family = PF_PACKET, | 2363 | .family = PF_PACKET, |
2365 | .create = packet_create, | 2364 | .create = packet_create, |
2366 | .owner = THIS_MODULE, | 2365 | .owner = THIS_MODULE, |
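The af_packet.c hunks above stop taking the receive-queue lock just to bump a drop counter: drops go through an atomic counter (atomic_inc_return(&sk->sk_drops)) and each queued skb carries a snapshot in skb->dropcount, so sock_recv_ts_and_drops() can report losses to the receiver. The C11-atomics sketch below shows the same bookkeeping shape only; every name in it is invented for the example.

#include <stdio.h>
#include <stdatomic.h>

static atomic_uint sk_drops;	/* shared drop counter, no queue lock needed */

struct demo_pkt {
	int payload;
	unsigned int dropcount;	/* drops already seen when this packet was queued */
};

static void queue_packet(struct demo_pkt *p, int payload)
{
	p->payload = payload;
	p->dropcount = atomic_load(&sk_drops);	/* snapshot for the reader */
}

static unsigned int drop_packet(void)
{
	/* Userspace equivalent of atomic_inc_return(): returns the new value. */
	return atomic_fetch_add(&sk_drops, 1) + 1;
}

int main(void)
{
	struct demo_pkt p;

	queue_packet(&p, 1);
	printf("pkt %d queued, %u drops so far\n", p.payload, p.dropcount);
	printf("drop -> counter now %u\n", drop_packet());
	queue_packet(&p, 2);
	printf("pkt %d queued, %u drops so far\n", p.payload, p.dropcount);
	return 0;
}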
diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c index f60c0c2aacba..66737aa995ea 100644 --- a/net/phonet/af_phonet.c +++ b/net/phonet/af_phonet.c | |||
@@ -118,7 +118,7 @@ out: | |||
118 | return err; | 118 | return err; |
119 | } | 119 | } |
120 | 120 | ||
121 | static struct net_proto_family phonet_proto_family = { | 121 | static const struct net_proto_family phonet_proto_family = { |
122 | .family = PF_PHONET, | 122 | .family = PF_PHONET, |
123 | .create = pn_socket_create, | 123 | .create = pn_socket_create, |
124 | .owner = THIS_MODULE, | 124 | .owner = THIS_MODULE, |
@@ -190,9 +190,8 @@ static int pn_send(struct sk_buff *skb, struct net_device *dev, | |||
190 | skb->priority = 0; | 190 | skb->priority = 0; |
191 | skb->dev = dev; | 191 | skb->dev = dev; |
192 | 192 | ||
193 | if (pn_addr(src) == pn_addr(dst)) { | 193 | if (skb->pkt_type == PACKET_LOOPBACK) { |
194 | skb_reset_mac_header(skb); | 194 | skb_reset_mac_header(skb); |
195 | skb->pkt_type = PACKET_LOOPBACK; | ||
196 | skb_orphan(skb); | 195 | skb_orphan(skb); |
197 | if (irq) | 196 | if (irq) |
198 | netif_rx(skb); | 197 | netif_rx(skb); |
@@ -222,6 +221,9 @@ static int pn_raw_send(const void *data, int len, struct net_device *dev, | |||
222 | if (skb == NULL) | 221 | if (skb == NULL) |
223 | return -ENOMEM; | 222 | return -ENOMEM; |
224 | 223 | ||
224 | if (phonet_address_lookup(dev_net(dev), pn_addr(dst)) == 0) | ||
225 | skb->pkt_type = PACKET_LOOPBACK; | ||
226 | |||
225 | skb_reserve(skb, MAX_PHONET_HEADER); | 227 | skb_reserve(skb, MAX_PHONET_HEADER); |
226 | __skb_put(skb, len); | 228 | __skb_put(skb, len); |
227 | skb_copy_to_linear_data(skb, data, len); | 229 | skb_copy_to_linear_data(skb, data, len); |
@@ -235,6 +237,7 @@ static int pn_raw_send(const void *data, int len, struct net_device *dev, | |||
235 | int pn_skb_send(struct sock *sk, struct sk_buff *skb, | 237 | int pn_skb_send(struct sock *sk, struct sk_buff *skb, |
236 | const struct sockaddr_pn *target) | 238 | const struct sockaddr_pn *target) |
237 | { | 239 | { |
240 | struct net *net = sock_net(sk); | ||
238 | struct net_device *dev; | 241 | struct net_device *dev; |
239 | struct pn_sock *pn = pn_sk(sk); | 242 | struct pn_sock *pn = pn_sk(sk); |
240 | int err; | 243 | int err; |
@@ -243,9 +246,13 @@ int pn_skb_send(struct sock *sk, struct sk_buff *skb, | |||
243 | 246 | ||
244 | err = -EHOSTUNREACH; | 247 | err = -EHOSTUNREACH; |
245 | if (sk->sk_bound_dev_if) | 248 | if (sk->sk_bound_dev_if) |
246 | dev = dev_get_by_index(sock_net(sk), sk->sk_bound_dev_if); | 249 | dev = dev_get_by_index(net, sk->sk_bound_dev_if); |
247 | else | 250 | else if (phonet_address_lookup(net, daddr) == 0) { |
248 | dev = phonet_device_get(sock_net(sk)); | 251 | dev = phonet_device_get(net); |
252 | skb->pkt_type = PACKET_LOOPBACK; | ||
253 | } else | ||
254 | dev = phonet_route_output(net, daddr); | ||
255 | |||
249 | if (!dev || !(dev->flags & IFF_UP)) | 256 | if (!dev || !(dev->flags & IFF_UP)) |
250 | goto drop; | 257 | goto drop; |
251 | 258 | ||
@@ -369,6 +376,12 @@ static int phonet_rcv(struct sk_buff *skb, struct net_device *dev, | |||
369 | 376 | ||
370 | pn_skb_get_dst_sockaddr(skb, &sa); | 377 | pn_skb_get_dst_sockaddr(skb, &sa); |
371 | 378 | ||
379 | /* check if this is broadcasted */ | ||
380 | if (pn_sockaddr_get_addr(&sa) == PNADDR_BROADCAST) { | ||
381 | pn_deliver_sock_broadcast(net, skb); | ||
382 | goto out; | ||
383 | } | ||
384 | |||
372 | /* check if we are the destination */ | 385 | /* check if we are the destination */ |
373 | if (phonet_address_lookup(net, pn_sockaddr_get_addr(&sa)) == 0) { | 386 | if (phonet_address_lookup(net, pn_sockaddr_get_addr(&sa)) == 0) { |
374 | /* Phonet packet input */ | 387 | /* Phonet packet input */ |
@@ -381,6 +394,38 @@ static int phonet_rcv(struct sk_buff *skb, struct net_device *dev, | |||
381 | send_obj_unreachable(skb); | 394 | send_obj_unreachable(skb); |
382 | send_reset_indications(skb); | 395 | send_reset_indications(skb); |
383 | } | 396 | } |
397 | } else if (unlikely(skb->pkt_type == PACKET_LOOPBACK)) | ||
398 | goto out; /* Race between address deletion and loopback */ | ||
399 | else { | ||
400 | /* Phonet packet routing */ | ||
401 | struct net_device *out_dev; | ||
402 | |||
403 | out_dev = phonet_route_output(net, pn_sockaddr_get_addr(&sa)); | ||
404 | if (!out_dev) { | ||
405 | LIMIT_NETDEBUG(KERN_WARNING"No Phonet route to %02X\n", | ||
406 | pn_sockaddr_get_addr(&sa)); | ||
407 | goto out; | ||
408 | } | ||
409 | |||
410 | __skb_push(skb, sizeof(struct phonethdr)); | ||
411 | skb->dev = out_dev; | ||
412 | if (out_dev == dev) { | ||
413 | LIMIT_NETDEBUG(KERN_ERR"Phonet loop to %02X on %s\n", | ||
414 | pn_sockaddr_get_addr(&sa), dev->name); | ||
415 | goto out_dev; | ||
416 | } | ||
417 | /* Some drivers (e.g. TUN) do not allocate HW header space */ | ||
418 | if (skb_cow_head(skb, out_dev->hard_header_len)) | ||
419 | goto out_dev; | ||
420 | |||
421 | if (dev_hard_header(skb, out_dev, ETH_P_PHONET, NULL, NULL, | ||
422 | skb->len) < 0) | ||
423 | goto out_dev; | ||
424 | dev_queue_xmit(skb); | ||
425 | dev_put(out_dev); | ||
426 | return NET_RX_SUCCESS; | ||
427 | out_dev: | ||
428 | dev_put(out_dev); | ||
384 | } | 429 | } |
385 | 430 | ||
386 | out: | 431 | out: |
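The routing branch added to phonet_rcv() above forwards packets whose destination is not local by asking phonet_route_output() for an egress device; the pn_dev.c hunks below add the backing table (64 slots under a spinlock). The userspace sketch here is only a rough analogue of such a fixed-size route table: strings stand in for net_device pointers, a mutex stands in for the spinlock, and the addr >> 2 indexing is an assumption made for the demo rather than a statement about the kernel code.

#include <stdio.h>
#include <pthread.h>

#define ROUTE_SLOTS 64

static const char *route_table[ROUTE_SLOTS];	/* stand-in for net_device *table[64] */
static pthread_mutex_t route_lock = PTHREAD_MUTEX_INITIALIZER;

static unsigned int route_slot(unsigned char addr)
{
	return (addr >> 2) & (ROUTE_SLOTS - 1);
}

static void route_add(unsigned char daddr, const char *dev)
{
	pthread_mutex_lock(&route_lock);
	route_table[route_slot(daddr)] = dev;
	pthread_mutex_unlock(&route_lock);
}

/* Analogue of phonet_route_output(): NULL means "no route, drop". */
static const char *route_output(unsigned char daddr)
{
	const char *dev;

	pthread_mutex_lock(&route_lock);
	dev = route_table[route_slot(daddr)];
	pthread_mutex_unlock(&route_lock);
	return dev;
}

int main(void)
{
	const char *dev;

	route_add(0x60, "usbpn0");
	dev = route_output(0x60);
	printf("0x60 -> %s\n", dev ? dev : "(no route)");
	dev = route_output(0x64);
	printf("0x64 -> %s\n", dev ? dev : "(no route)");
	return 0;
}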
diff --git a/net/phonet/datagram.c b/net/phonet/datagram.c index ef5c75c372e4..67f072e94d00 100644 --- a/net/phonet/datagram.c +++ b/net/phonet/datagram.c | |||
@@ -159,11 +159,9 @@ out_nofree: | |||
159 | static int pn_backlog_rcv(struct sock *sk, struct sk_buff *skb) | 159 | static int pn_backlog_rcv(struct sock *sk, struct sk_buff *skb) |
160 | { | 160 | { |
161 | int err = sock_queue_rcv_skb(sk, skb); | 161 | int err = sock_queue_rcv_skb(sk, skb); |
162 | if (err < 0) { | 162 | |
163 | if (err < 0) | ||
163 | kfree_skb(skb); | 164 | kfree_skb(skb); |
164 | if (err == -ENOMEM) | ||
165 | atomic_inc(&sk->sk_drops); | ||
166 | } | ||
167 | return err ? NET_RX_DROP : NET_RX_SUCCESS; | 165 | return err ? NET_RX_DROP : NET_RX_SUCCESS; |
168 | } | 166 | } |
169 | 167 | ||
diff --git a/net/phonet/pep.c b/net/phonet/pep.c index 5f32d217535b..cbaa1d67d77b 100644 --- a/net/phonet/pep.c +++ b/net/phonet/pep.c | |||
@@ -360,8 +360,6 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb) | |||
360 | err = sock_queue_rcv_skb(sk, skb); | 360 | err = sock_queue_rcv_skb(sk, skb); |
361 | if (!err) | 361 | if (!err) |
362 | return 0; | 362 | return 0; |
363 | if (err == -ENOMEM) | ||
364 | atomic_inc(&sk->sk_drops); | ||
365 | break; | 363 | break; |
366 | } | 364 | } |
367 | 365 | ||
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c index 5f42f30dd168..6d64fda1afc9 100644 --- a/net/phonet/pn_dev.c +++ b/net/phonet/pn_dev.c | |||
@@ -33,8 +33,14 @@ | |||
33 | #include <net/netns/generic.h> | 33 | #include <net/netns/generic.h> |
34 | #include <net/phonet/pn_dev.h> | 34 | #include <net/phonet/pn_dev.h> |
35 | 35 | ||
36 | struct phonet_routes { | ||
37 | spinlock_t lock; | ||
38 | struct net_device *table[64]; | ||
39 | }; | ||
40 | |||
36 | struct phonet_net { | 41 | struct phonet_net { |
37 | struct phonet_device_list pndevs; | 42 | struct phonet_device_list pndevs; |
43 | struct phonet_routes routes; | ||
38 | }; | 44 | }; |
39 | 45 | ||
40 | int phonet_net_id; | 46 | int phonet_net_id; |
@@ -154,10 +160,11 @@ int phonet_address_del(struct net_device *dev, u8 addr) | |||
154 | } | 160 | } |
155 | 161 | ||
156 | /* Gets a source address toward a destination, through a interface. */ | 162 | /* Gets a source address toward a destination, through a interface. */ |
157 | u8 phonet_address_get(struct net_device *dev, u8 addr) | 163 | u8 phonet_address_get(struct net_device *dev, u8 daddr) |
158 | { | 164 | { |
159 | struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev)); | 165 | struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev)); |
160 | struct phonet_device *pnd; | 166 | struct phonet_device *pnd; |
167 | u8 saddr; | ||
161 | 168 | ||
162 | spin_lock_bh(&pndevs->lock); | 169 | spin_lock_bh(&pndevs->lock); |
163 | pnd = __phonet_get(dev); | 170 | pnd = __phonet_get(dev); |
@@ -165,12 +172,26 @@ u8 phonet_address_get(struct net_device *dev, u8 addr) | |||
165 | BUG_ON(bitmap_empty(pnd->addrs, 64)); | 172 | BUG_ON(bitmap_empty(pnd->addrs, 64)); |
166 | 173 | ||
167 | /* Use same source address as destination, if possible */ | 174 | /* Use same source address as destination, if possible */ |
168 | if (!test_bit(addr >> 2, pnd->addrs)) | 175 | if (test_bit(daddr >> 2, pnd->addrs)) |
169 | addr = find_first_bit(pnd->addrs, 64) << 2; | 176 | saddr = daddr; |
177 | else | ||
178 | saddr = find_first_bit(pnd->addrs, 64) << 2; | ||
170 | } else | 179 | } else |
171 | addr = PN_NO_ADDR; | 180 | saddr = PN_NO_ADDR; |
172 | spin_unlock_bh(&pndevs->lock); | 181 | spin_unlock_bh(&pndevs->lock); |
173 | return addr; | 182 | |
183 | if (saddr == PN_NO_ADDR) { | ||
184 | /* Fallback to another device */ | ||
185 | struct net_device *def_dev; | ||
186 | |||
187 | def_dev = phonet_device_get(dev_net(dev)); | ||
188 | if (def_dev) { | ||
189 | if (def_dev != dev) | ||
190 | saddr = phonet_address_get(def_dev, daddr); | ||
191 | dev_put(def_dev); | ||
192 | } | ||
193 | } | ||
194 | return saddr; | ||
174 | } | 195 | } |
175 | 196 | ||
176 | int phonet_address_lookup(struct net *net, u8 addr) | 197 | int phonet_address_lookup(struct net *net, u8 addr) |
@@ -219,6 +240,27 @@ static int phonet_device_autoconf(struct net_device *dev) | |||
219 | return 0; | 240 | return 0; |
220 | } | 241 | } |
221 | 242 | ||
243 | static void phonet_route_autodel(struct net_device *dev) | ||
244 | { | ||
245 | struct phonet_net *pnn = net_generic(dev_net(dev), phonet_net_id); | ||
246 | unsigned i; | ||
247 | DECLARE_BITMAP(deleted, 64); | ||
248 | |||
249 | /* Remove left-over Phonet routes */ | ||
250 | bitmap_zero(deleted, 64); | ||
251 | spin_lock_bh(&pnn->routes.lock); | ||
252 | for (i = 0; i < 64; i++) | ||
253 | if (dev == pnn->routes.table[i]) { | ||
254 | set_bit(i, deleted); | ||
255 | pnn->routes.table[i] = NULL; | ||
256 | dev_put(dev); | ||
257 | } | ||
258 | spin_unlock_bh(&pnn->routes.lock); | ||
259 | for (i = find_first_bit(deleted, 64); i < 64; | ||
260 | i = find_next_bit(deleted, 64, i + 1)) | ||
261 | rtm_phonet_notify(RTM_DELROUTE, dev, i); | ||
262 | } | ||
263 | |||
222 | /* notify Phonet of device events */ | 264 | /* notify Phonet of device events */ |
223 | static int phonet_device_notify(struct notifier_block *me, unsigned long what, | 265 | static int phonet_device_notify(struct notifier_block *me, unsigned long what, |
224 | void *arg) | 266 | void *arg) |
@@ -232,6 +274,7 @@ static int phonet_device_notify(struct notifier_block *me, unsigned long what, | |||
232 | break; | 274 | break; |
233 | case NETDEV_UNREGISTER: | 275 | case NETDEV_UNREGISTER: |
234 | phonet_device_destroy(dev); | 276 | phonet_device_destroy(dev); |
277 | phonet_route_autodel(dev); | ||
235 | break; | 278 | break; |
236 | } | 279 | } |
237 | return 0; | 280 | return 0; |
@@ -246,7 +289,7 @@ static struct notifier_block phonet_device_notifier = { | |||
246 | /* Per-namespace Phonet devices handling */ | 289 | /* Per-namespace Phonet devices handling */ |
247 | static int phonet_init_net(struct net *net) | 290 | static int phonet_init_net(struct net *net) |
248 | { | 291 | { |
249 | struct phonet_net *pnn = kmalloc(sizeof(*pnn), GFP_KERNEL); | 292 | struct phonet_net *pnn = kzalloc(sizeof(*pnn), GFP_KERNEL); |
250 | if (!pnn) | 293 | if (!pnn) |
251 | return -ENOMEM; | 294 | return -ENOMEM; |
252 | 295 | ||
@@ -257,6 +300,7 @@ static int phonet_init_net(struct net *net) | |||
257 | 300 | ||
258 | INIT_LIST_HEAD(&pnn->pndevs.list); | 301 | INIT_LIST_HEAD(&pnn->pndevs.list); |
259 | spin_lock_init(&pnn->pndevs.lock); | 302 | spin_lock_init(&pnn->pndevs.lock); |
303 | spin_lock_init(&pnn->routes.lock); | ||
260 | net_assign_generic(net, phonet_net_id, pnn); | 304 | net_assign_generic(net, phonet_net_id, pnn); |
261 | return 0; | 305 | return 0; |
262 | } | 306 | } |
@@ -265,10 +309,19 @@ static void phonet_exit_net(struct net *net) | |||
265 | { | 309 | { |
266 | struct phonet_net *pnn = net_generic(net, phonet_net_id); | 310 | struct phonet_net *pnn = net_generic(net, phonet_net_id); |
267 | struct net_device *dev; | 311 | struct net_device *dev; |
312 | unsigned i; | ||
268 | 313 | ||
269 | rtnl_lock(); | 314 | rtnl_lock(); |
270 | for_each_netdev(net, dev) | 315 | for_each_netdev(net, dev) |
271 | phonet_device_destroy(dev); | 316 | phonet_device_destroy(dev); |
317 | |||
318 | for (i = 0; i < 64; i++) { | ||
319 | dev = pnn->routes.table[i]; | ||
320 | if (dev) { | ||
321 | rtm_phonet_notify(RTM_DELROUTE, dev, i); | ||
322 | dev_put(dev); | ||
323 | } | ||
324 | } | ||
272 | rtnl_unlock(); | 325 | rtnl_unlock(); |
273 | 326 | ||
274 | proc_net_remove(net, "phonet"); | 327 | proc_net_remove(net, "phonet"); |
@@ -300,3 +353,69 @@ void phonet_device_exit(void) | |||
300 | unregister_netdevice_notifier(&phonet_device_notifier); | 353 | unregister_netdevice_notifier(&phonet_device_notifier); |
301 | unregister_pernet_gen_device(phonet_net_id, &phonet_net_ops); | 354 | unregister_pernet_gen_device(phonet_net_id, &phonet_net_ops); |
302 | } | 355 | } |
356 | |||
357 | int phonet_route_add(struct net_device *dev, u8 daddr) | ||
358 | { | ||
359 | struct phonet_net *pnn = net_generic(dev_net(dev), phonet_net_id); | ||
360 | struct phonet_routes *routes = &pnn->routes; | ||
361 | int err = -EEXIST; | ||
362 | |||
363 | daddr = daddr >> 2; | ||
364 | spin_lock_bh(&routes->lock); | ||
365 | if (routes->table[daddr] == NULL) { | ||
366 | routes->table[daddr] = dev; | ||
367 | dev_hold(dev); | ||
368 | err = 0; | ||
369 | } | ||
370 | spin_unlock_bh(&routes->lock); | ||
371 | return err; | ||
372 | } | ||
373 | |||
374 | int phonet_route_del(struct net_device *dev, u8 daddr) | ||
375 | { | ||
376 | struct phonet_net *pnn = net_generic(dev_net(dev), phonet_net_id); | ||
377 | struct phonet_routes *routes = &pnn->routes; | ||
378 | int err = -ENOENT; | ||
379 | |||
380 | daddr = daddr >> 2; | ||
381 | spin_lock_bh(&routes->lock); | ||
382 | if (dev == routes->table[daddr]) { | ||
383 | routes->table[daddr] = NULL; | ||
384 | dev_put(dev); | ||
385 | err = 0; | ||
386 | } | ||
387 | spin_unlock_bh(&routes->lock); | ||
388 | return err; | ||
389 | } | ||
390 | |||
391 | struct net_device *phonet_route_get(struct net *net, u8 daddr) | ||
392 | { | ||
393 | struct phonet_net *pnn = net_generic(net, phonet_net_id); | ||
394 | struct phonet_routes *routes = &pnn->routes; | ||
395 | struct net_device *dev; | ||
396 | |||
397 | ASSERT_RTNL(); /* no need to hold the device */ | ||
398 | |||
399 | daddr >>= 2; | ||
400 | spin_lock_bh(&routes->lock); | ||
401 | dev = routes->table[daddr]; | ||
402 | spin_unlock_bh(&routes->lock); | ||
403 | return dev; | ||
404 | } | ||
405 | |||
406 | struct net_device *phonet_route_output(struct net *net, u8 daddr) | ||
407 | { | ||
408 | struct phonet_net *pnn = net_generic(net, phonet_net_id); | ||
409 | struct phonet_routes *routes = &pnn->routes; | ||
410 | struct net_device *dev; | ||
411 | |||
412 | spin_lock_bh(&routes->lock); | ||
413 | dev = routes->table[daddr >> 2]; | ||
414 | if (dev) | ||
415 | dev_hold(dev); | ||
416 | spin_unlock_bh(&routes->lock); | ||
417 | |||
418 | if (!dev) | ||
419 | dev = phonet_device_get(net); /* Default route */ | ||
420 | return dev; | ||
421 | } | ||
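
Note on the indexing convention used throughout pn_dev.c above: the 64-entry route table is keyed by the 6 high-order bits of a Phonet device address, which is why every lookup shifts the destination right by two and why route_doit() later rejects addresses with the two low bits set. A minimal user-space sketch of that convention (illustrative only; pn_route_index is not a kernel function):

#include <stdint.h>
#include <stdio.h>

/* Model of the 64-slot Phonet route table index:
 * a device address keeps its significant bits in bits 7..2,
 * so slot = addr >> 2 and the two low bits must be zero. */
static int pn_route_index(uint8_t daddr)
{
	if (daddr & 3)		/* mirrors the RTA_DST check in route_doit() */
		return -1;	/* not a valid device address */
	return daddr >> 2;	/* 0..63 */
}

int main(void)
{
	printf("0x6C -> slot %d\n", pn_route_index(0x6C));	/* 27 */
	printf("0x6D -> slot %d\n", pn_route_index(0x6D));	/* rejected */
	return 0;
}
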
diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c index d21fd3576610..d8f5d3fb9ee2 100644 --- a/net/phonet/pn_netlink.c +++ b/net/phonet/pn_netlink.c | |||
@@ -29,6 +29,8 @@ | |||
29 | #include <net/sock.h> | 29 | #include <net/sock.h> |
30 | #include <net/phonet/pn_dev.h> | 30 | #include <net/phonet/pn_dev.h> |
31 | 31 | ||
32 | /* Device address handling */ | ||
33 | |||
32 | static int fill_addr(struct sk_buff *skb, struct net_device *dev, u8 addr, | 34 | static int fill_addr(struct sk_buff *skb, struct net_device *dev, u8 addr, |
33 | u32 pid, u32 seq, int event); | 35 | u32 pid, u32 seq, int event); |
34 | 36 | ||
@@ -160,6 +162,131 @@ out: | |||
160 | return skb->len; | 162 | return skb->len; |
161 | } | 163 | } |
162 | 164 | ||
165 | /* Routes handling */ | ||
166 | |||
167 | static int fill_route(struct sk_buff *skb, struct net_device *dev, u8 dst, | ||
168 | u32 pid, u32 seq, int event) | ||
169 | { | ||
170 | struct rtmsg *rtm; | ||
171 | struct nlmsghdr *nlh; | ||
172 | |||
173 | nlh = nlmsg_put(skb, pid, seq, event, sizeof(*rtm), 0); | ||
174 | if (nlh == NULL) | ||
175 | return -EMSGSIZE; | ||
176 | |||
177 | rtm = nlmsg_data(nlh); | ||
178 | rtm->rtm_family = AF_PHONET; | ||
179 | rtm->rtm_dst_len = 6; | ||
180 | rtm->rtm_src_len = 0; | ||
181 | rtm->rtm_tos = 0; | ||
182 | rtm->rtm_table = RT_TABLE_MAIN; | ||
183 | rtm->rtm_protocol = RTPROT_STATIC; | ||
184 | rtm->rtm_scope = RT_SCOPE_UNIVERSE; | ||
185 | rtm->rtm_type = RTN_UNICAST; | ||
186 | rtm->rtm_flags = 0; | ||
187 | NLA_PUT_U8(skb, RTA_DST, dst); | ||
188 | NLA_PUT_U32(skb, RTA_OIF, dev->ifindex); | ||
189 | return nlmsg_end(skb, nlh); | ||
190 | |||
191 | nla_put_failure: | ||
192 | nlmsg_cancel(skb, nlh); | ||
193 | return -EMSGSIZE; | ||
194 | } | ||
195 | |||
196 | void rtm_phonet_notify(int event, struct net_device *dev, u8 dst) | ||
197 | { | ||
198 | struct sk_buff *skb; | ||
199 | int err = -ENOBUFS; | ||
200 | |||
201 | skb = nlmsg_new(NLMSG_ALIGN(sizeof(struct ifaddrmsg)) + | ||
202 | nla_total_size(1) + nla_total_size(4), GFP_KERNEL); | ||
203 | if (skb == NULL) | ||
204 | goto errout; | ||
205 | err = fill_route(skb, dev, dst, 0, 0, event); | ||
206 | if (err < 0) { | ||
207 | WARN_ON(err == -EMSGSIZE); | ||
208 | kfree_skb(skb); | ||
209 | goto errout; | ||
210 | } | ||
211 | rtnl_notify(skb, dev_net(dev), 0, | ||
212 | RTNLGRP_PHONET_ROUTE, NULL, GFP_KERNEL); | ||
213 | return; | ||
214 | errout: | ||
215 | if (err < 0) | ||
216 | rtnl_set_sk_err(dev_net(dev), RTNLGRP_PHONET_ROUTE, err); | ||
217 | } | ||
218 | |||
219 | static const struct nla_policy rtm_phonet_policy[RTA_MAX+1] = { | ||
220 | [RTA_DST] = { .type = NLA_U8 }, | ||
221 | [RTA_OIF] = { .type = NLA_U32 }, | ||
222 | }; | ||
223 | |||
224 | static int route_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *attr) | ||
225 | { | ||
226 | struct net *net = sock_net(skb->sk); | ||
227 | struct nlattr *tb[RTA_MAX+1]; | ||
228 | struct net_device *dev; | ||
229 | struct rtmsg *rtm; | ||
230 | int err; | ||
231 | u8 dst; | ||
232 | |||
233 | if (!capable(CAP_SYS_ADMIN)) | ||
234 | return -EPERM; | ||
235 | |||
236 | ASSERT_RTNL(); | ||
237 | |||
238 | err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_phonet_policy); | ||
239 | if (err < 0) | ||
240 | return err; | ||
241 | |||
242 | rtm = nlmsg_data(nlh); | ||
243 | if (rtm->rtm_table != RT_TABLE_MAIN || rtm->rtm_type != RTN_UNICAST) | ||
244 | return -EINVAL; | ||
245 | if (tb[RTA_DST] == NULL || tb[RTA_OIF] == NULL) | ||
246 | return -EINVAL; | ||
247 | dst = nla_get_u8(tb[RTA_DST]); | ||
248 | if (dst & 3) /* Phonet addresses only have 6 high-order bits */ | ||
249 | return -EINVAL; | ||
250 | |||
251 | dev = __dev_get_by_index(net, nla_get_u32(tb[RTA_OIF])); | ||
252 | if (dev == NULL) | ||
253 | return -ENODEV; | ||
254 | |||
255 | if (nlh->nlmsg_type == RTM_NEWROUTE) | ||
256 | err = phonet_route_add(dev, dst); | ||
257 | else | ||
258 | err = phonet_route_del(dev, dst); | ||
259 | if (!err) | ||
260 | rtm_phonet_notify(nlh->nlmsg_type, dev, dst); | ||
261 | return err; | ||
262 | } | ||
263 | |||
264 | static int route_dumpit(struct sk_buff *skb, struct netlink_callback *cb) | ||
265 | { | ||
266 | struct net *net = sock_net(skb->sk); | ||
267 | u8 addr, addr_idx = 0, addr_start_idx = cb->args[0]; | ||
268 | |||
269 | for (addr = 0; addr < 64; addr++) { | ||
270 | struct net_device *dev; | ||
271 | |||
272 | dev = phonet_route_get(net, addr << 2); | ||
273 | if (!dev) | ||
274 | continue; | ||
275 | |||
276 | if (addr_idx++ < addr_start_idx) | ||
277 | continue; | ||
278 | if (fill_route(skb, dev, addr << 2, NETLINK_CB(cb->skb).pid, | ||
279 | cb->nlh->nlmsg_seq, RTM_NEWROUTE)) | ||
280 | goto out; | ||
281 | } | ||
282 | |||
283 | out: | ||
284 | cb->args[0] = addr_idx; | ||
285 | cb->args[1] = 0; | ||
286 | |||
287 | return skb->len; | ||
288 | } | ||
289 | |||
163 | int __init phonet_netlink_register(void) | 290 | int __init phonet_netlink_register(void) |
164 | { | 291 | { |
165 | int err = __rtnl_register(PF_PHONET, RTM_NEWADDR, addr_doit, NULL); | 292 | int err = __rtnl_register(PF_PHONET, RTM_NEWADDR, addr_doit, NULL); |
@@ -169,5 +296,8 @@ int __init phonet_netlink_register(void) | |||
169 | /* Further __rtnl_register() cannot fail */ | 296 | /* Further __rtnl_register() cannot fail */ |
170 | __rtnl_register(PF_PHONET, RTM_DELADDR, addr_doit, NULL); | 297 | __rtnl_register(PF_PHONET, RTM_DELADDR, addr_doit, NULL); |
171 | __rtnl_register(PF_PHONET, RTM_GETADDR, NULL, getaddr_dumpit); | 298 | __rtnl_register(PF_PHONET, RTM_GETADDR, NULL, getaddr_dumpit); |
299 | __rtnl_register(PF_PHONET, RTM_NEWROUTE, route_doit, NULL); | ||
300 | __rtnl_register(PF_PHONET, RTM_DELROUTE, route_doit, NULL); | ||
301 | __rtnl_register(PF_PHONET, RTM_GETROUTE, NULL, route_dumpit); | ||
172 | return 0; | 302 | return 0; |
173 | } | 303 | } |
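
With RTM_NEWROUTE/RTM_DELROUTE/RTM_GETROUTE now registered for PF_PHONET, a route can be installed from user space with an ordinary rtnetlink request carrying a one-byte RTA_DST and a four-byte RTA_OIF. A hedged sketch under stated assumptions: the destination 0x6C and interface index 3 are placeholders, error handling is trimmed, AF_PHONET must be provided by the installed headers, and CAP_SYS_ADMIN is required as enforced by route_doit() above.

#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct {
		struct nlmsghdr nh;
		struct rtmsg rtm;
		char attrs[64];
	} req;
	struct sockaddr_nl nladdr = { .nl_family = AF_NETLINK };
	struct rtattr *rta;
	int fd = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_ROUTE);

	memset(&req, 0, sizeof(req));
	req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtmsg));
	req.nh.nlmsg_type = RTM_NEWROUTE;
	req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE;
	req.rtm.rtm_family = AF_PHONET;		/* 35, assumed available */
	req.rtm.rtm_dst_len = 6;
	req.rtm.rtm_table = RT_TABLE_MAIN;	/* required by route_doit() */
	req.rtm.rtm_protocol = RTPROT_STATIC;
	req.rtm.rtm_scope = RT_SCOPE_UNIVERSE;
	req.rtm.rtm_type = RTN_UNICAST;		/* required by route_doit() */

	/* RTA_DST: one byte, low two bits must be clear */
	rta = (struct rtattr *)((char *)&req + NLMSG_ALIGN(req.nh.nlmsg_len));
	rta->rta_type = RTA_DST;
	rta->rta_len = RTA_LENGTH(1);
	*(unsigned char *)RTA_DATA(rta) = 0x6C;	/* placeholder destination */
	req.nh.nlmsg_len = NLMSG_ALIGN(req.nh.nlmsg_len) + RTA_ALIGN(rta->rta_len);

	/* RTA_OIF: output interface index (placeholder value) */
	rta = (struct rtattr *)((char *)&req + NLMSG_ALIGN(req.nh.nlmsg_len));
	rta->rta_type = RTA_OIF;
	rta->rta_len = RTA_LENGTH(4);
	*(unsigned int *)RTA_DATA(rta) = 3;	/* placeholder ifindex */
	req.nh.nlmsg_len = NLMSG_ALIGN(req.nh.nlmsg_len) + RTA_ALIGN(rta->rta_len);

	sendto(fd, &req, req.nh.nlmsg_len, 0,
	       (struct sockaddr *)&nladdr, sizeof(nladdr));
	close(fd);
	return 0;
}
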
diff --git a/net/phonet/socket.c b/net/phonet/socket.c index aa5b5a972bff..0412beb59a05 100644 --- a/net/phonet/socket.c +++ b/net/phonet/socket.c | |||
@@ -94,7 +94,30 @@ struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *spn) | |||
94 | spin_unlock_bh(&pnsocks.lock); | 94 | spin_unlock_bh(&pnsocks.lock); |
95 | 95 | ||
96 | return rval; | 96 | return rval; |
97 | } | ||
98 | |||
99 | /* Deliver a broadcast packet (only in bottom-half) */ | ||
100 | void pn_deliver_sock_broadcast(struct net *net, struct sk_buff *skb) | ||
101 | { | ||
102 | struct hlist_node *node; | ||
103 | struct sock *sknode; | ||
104 | |||
105 | spin_lock(&pnsocks.lock); | ||
106 | sk_for_each(sknode, node, &pnsocks.hlist) { | ||
107 | struct sk_buff *clone; | ||
97 | 108 | ||
109 | if (!net_eq(sock_net(sknode), net)) | ||
110 | continue; | ||
111 | if (!sock_flag(sknode, SOCK_BROADCAST)) | ||
112 | continue; | ||
113 | |||
114 | clone = skb_clone(skb, GFP_ATOMIC); | ||
115 | if (clone) { | ||
116 | sock_hold(sknode); | ||
117 | sk_receive_skb(sknode, clone, 0); | ||
118 | } | ||
119 | } | ||
120 | spin_unlock(&pnsocks.lock); | ||
98 | } | 121 | } |
99 | 122 | ||
100 | void pn_sock_hash(struct sock *sk) | 123 | void pn_sock_hash(struct sock *sk) |
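
pn_deliver_sock_broadcast() above clones the packet to every Phonet socket in the namespace that has SOCK_BROADCAST set, so a receiver opts in with the ordinary SO_BROADCAST socket option. A minimal sketch, assuming AF_PHONET (35) and linux/phonet.h are available from the installed headers; binding to a local Phonet object is omitted:

#include <linux/phonet.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int one = 1;
	/* Phonet datagram socket; protocol 0 is PN_PROTO_PHONET */
	int fd = socket(AF_PHONET, SOCK_DGRAM, 0);

	/* bind() to a local sockaddr_pn as usual (omitted in this sketch) */

	/* Opt in to broadcast delivery: sets SOCK_BROADCAST, which
	 * pn_deliver_sock_broadcast() checks before cloning the skb. */
	setsockopt(fd, SOL_SOCKET, SO_BROADCAST, &one, sizeof(one));

	/* ... recv() loop would go here ... */
	close(fd);
	return 0;
}
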
diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c index 98e05382fd3c..a202e5b36079 100644 --- a/net/rds/af_rds.c +++ b/net/rds/af_rds.c | |||
@@ -431,7 +431,7 @@ void rds_sock_put(struct rds_sock *rs) | |||
431 | sock_put(rds_rs_to_sk(rs)); | 431 | sock_put(rds_rs_to_sk(rs)); |
432 | } | 432 | } |
433 | 433 | ||
434 | static struct net_proto_family rds_family_ops = { | 434 | static const struct net_proto_family rds_family_ops = { |
435 | .family = AF_RDS, | 435 | .family = AF_RDS, |
436 | .create = rds_create, | 436 | .create = rds_create, |
437 | .owner = THIS_MODULE, | 437 | .owner = THIS_MODULE, |
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c index 24b743eb0b1b..45474a436862 100644 --- a/net/rds/tcp_listen.c +++ b/net/rds/tcp_listen.c | |||
@@ -67,11 +67,11 @@ static int rds_tcp_accept_one(struct socket *sock) | |||
67 | inet = inet_sk(new_sock->sk); | 67 | inet = inet_sk(new_sock->sk); |
68 | 68 | ||
69 | rdsdebug("accepted tcp %u.%u.%u.%u:%u -> %u.%u.%u.%u:%u\n", | 69 | rdsdebug("accepted tcp %u.%u.%u.%u:%u -> %u.%u.%u.%u:%u\n", |
70 | NIPQUAD(inet->saddr), ntohs(inet->sport), | 70 | NIPQUAD(inet->inet_saddr), ntohs(inet->inet_sport), |
71 | NIPQUAD(inet->daddr), ntohs(inet->dport)); | 71 | NIPQUAD(inet->inet_daddr), ntohs(inet->inet_dport)); |
72 | 72 | ||
73 | conn = rds_conn_create(inet->saddr, inet->daddr, &rds_tcp_transport, | 73 | conn = rds_conn_create(inet->inet_saddr, inet->inet_daddr, |
74 | GFP_KERNEL); | 74 | &rds_tcp_transport, GFP_KERNEL); |
75 | if (IS_ERR(conn)) { | 75 | if (IS_ERR(conn)) { |
76 | ret = PTR_ERR(conn); | 76 | ret = PTR_ERR(conn); |
77 | goto out; | 77 | goto out; |
diff --git a/net/rfkill/core.c b/net/rfkill/core.c index dbeaf2983822..ba2efb960c60 100644 --- a/net/rfkill/core.c +++ b/net/rfkill/core.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <linux/list.h> | 27 | #include <linux/list.h> |
28 | #include <linux/mutex.h> | 28 | #include <linux/mutex.h> |
29 | #include <linux/rfkill.h> | 29 | #include <linux/rfkill.h> |
30 | #include <linux/sched.h> | ||
30 | #include <linux/spinlock.h> | 31 | #include <linux/spinlock.h> |
31 | #include <linux/miscdevice.h> | 32 | #include <linux/miscdevice.h> |
32 | #include <linux/wait.h> | 33 | #include <linux/wait.h> |
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index 502cce76621d..c17734c2ce89 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c | |||
@@ -1509,7 +1509,7 @@ static const struct file_operations rose_info_fops = { | |||
1509 | }; | 1509 | }; |
1510 | #endif /* CONFIG_PROC_FS */ | 1510 | #endif /* CONFIG_PROC_FS */ |
1511 | 1511 | ||
1512 | static struct net_proto_family rose_family_ops = { | 1512 | static const struct net_proto_family rose_family_ops = { |
1513 | .family = PF_ROSE, | 1513 | .family = PF_ROSE, |
1514 | .create = rose_create, | 1514 | .create = rose_create, |
1515 | .owner = THIS_MODULE, | 1515 | .owner = THIS_MODULE, |
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index a86afceaa94f..6817c9781ef3 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c | |||
@@ -777,7 +777,7 @@ static struct proto rxrpc_proto = { | |||
777 | .max_header = sizeof(struct rxrpc_header), | 777 | .max_header = sizeof(struct rxrpc_header), |
778 | }; | 778 | }; |
779 | 779 | ||
780 | static struct net_proto_family rxrpc_family_ops = { | 780 | static const struct net_proto_family rxrpc_family_ops = { |
781 | .family = PF_RXRPC, | 781 | .family = PF_RXRPC, |
782 | .create = rxrpc_create, | 782 | .create = rxrpc_create, |
783 | .owner = THIS_MODULE, | 783 | .owner = THIS_MODULE, |
diff --git a/net/rxrpc/ar-recvmsg.c b/net/rxrpc/ar-recvmsg.c index a39bf97f8830..60c2b94e6b54 100644 --- a/net/rxrpc/ar-recvmsg.c +++ b/net/rxrpc/ar-recvmsg.c | |||
@@ -146,7 +146,7 @@ int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
146 | memcpy(msg->msg_name, | 146 | memcpy(msg->msg_name, |
147 | &call->conn->trans->peer->srx, | 147 | &call->conn->trans->peer->srx, |
148 | sizeof(call->conn->trans->peer->srx)); | 148 | sizeof(call->conn->trans->peer->srx)); |
149 | sock_recv_timestamp(msg, &rx->sk, skb); | 149 | sock_recv_ts_and_drops(msg, &rx->sk, skb); |
150 | } | 150 | } |
151 | 151 | ||
152 | /* receive the message */ | 152 | /* receive the message */ |
diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 2dfb3e7a040d..ca2e1fd2bf69 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c | |||
@@ -618,7 +618,8 @@ int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a, | |||
618 | goto errout; | 618 | goto errout; |
619 | 619 | ||
620 | if (gnet_stats_copy_basic(&d, &h->tcf_bstats) < 0 || | 620 | if (gnet_stats_copy_basic(&d, &h->tcf_bstats) < 0 || |
621 | gnet_stats_copy_rate_est(&d, &h->tcf_rate_est) < 0 || | 621 | gnet_stats_copy_rate_est(&d, &h->tcf_bstats, |
622 | &h->tcf_rate_est) < 0 || | ||
622 | gnet_stats_copy_queue(&d, &h->tcf_qstats) < 0) | 623 | gnet_stats_copy_queue(&d, &h->tcf_qstats) < 0) |
623 | goto errout; | 624 | goto errout; |
624 | 625 | ||
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c index 4ab916b8074b..e9607fe55b58 100644 --- a/net/sched/act_skbedit.c +++ b/net/sched/act_skbedit.c | |||
@@ -54,6 +54,8 @@ static int tcf_skbedit(struct sk_buff *skb, struct tc_action *a, | |||
54 | if (d->flags & SKBEDIT_F_QUEUE_MAPPING && | 54 | if (d->flags & SKBEDIT_F_QUEUE_MAPPING && |
55 | skb->dev->real_num_tx_queues > d->queue_mapping) | 55 | skb->dev->real_num_tx_queues > d->queue_mapping) |
56 | skb_set_queue_mapping(skb, d->queue_mapping); | 56 | skb_set_queue_mapping(skb, d->queue_mapping); |
57 | if (d->flags & SKBEDIT_F_MARK) | ||
58 | skb->mark = d->mark; | ||
57 | 59 | ||
58 | spin_unlock(&d->tcf_lock); | 60 | spin_unlock(&d->tcf_lock); |
59 | return d->tcf_action; | 61 | return d->tcf_action; |
@@ -63,6 +65,7 @@ static const struct nla_policy skbedit_policy[TCA_SKBEDIT_MAX + 1] = { | |||
63 | [TCA_SKBEDIT_PARMS] = { .len = sizeof(struct tc_skbedit) }, | 65 | [TCA_SKBEDIT_PARMS] = { .len = sizeof(struct tc_skbedit) }, |
64 | [TCA_SKBEDIT_PRIORITY] = { .len = sizeof(u32) }, | 66 | [TCA_SKBEDIT_PRIORITY] = { .len = sizeof(u32) }, |
65 | [TCA_SKBEDIT_QUEUE_MAPPING] = { .len = sizeof(u16) }, | 67 | [TCA_SKBEDIT_QUEUE_MAPPING] = { .len = sizeof(u16) }, |
68 | [TCA_SKBEDIT_MARK] = { .len = sizeof(u32) }, | ||
66 | }; | 69 | }; |
67 | 70 | ||
68 | static int tcf_skbedit_init(struct nlattr *nla, struct nlattr *est, | 71 | static int tcf_skbedit_init(struct nlattr *nla, struct nlattr *est, |
@@ -72,7 +75,7 @@ static int tcf_skbedit_init(struct nlattr *nla, struct nlattr *est, | |||
72 | struct tc_skbedit *parm; | 75 | struct tc_skbedit *parm; |
73 | struct tcf_skbedit *d; | 76 | struct tcf_skbedit *d; |
74 | struct tcf_common *pc; | 77 | struct tcf_common *pc; |
75 | u32 flags = 0, *priority = NULL; | 78 | u32 flags = 0, *priority = NULL, *mark = NULL; |
76 | u16 *queue_mapping = NULL; | 79 | u16 *queue_mapping = NULL; |
77 | int ret = 0, err; | 80 | int ret = 0, err; |
78 | 81 | ||
@@ -95,6 +98,12 @@ static int tcf_skbedit_init(struct nlattr *nla, struct nlattr *est, | |||
95 | flags |= SKBEDIT_F_QUEUE_MAPPING; | 98 | flags |= SKBEDIT_F_QUEUE_MAPPING; |
96 | queue_mapping = nla_data(tb[TCA_SKBEDIT_QUEUE_MAPPING]); | 99 | queue_mapping = nla_data(tb[TCA_SKBEDIT_QUEUE_MAPPING]); |
97 | } | 100 | } |
101 | |||
102 | if (tb[TCA_SKBEDIT_MARK] != NULL) { | ||
103 | flags |= SKBEDIT_F_MARK; | ||
104 | mark = nla_data(tb[TCA_SKBEDIT_MARK]); | ||
105 | } | ||
106 | |||
98 | if (!flags) | 107 | if (!flags) |
99 | return -EINVAL; | 108 | return -EINVAL; |
100 | 109 | ||
@@ -124,6 +133,9 @@ static int tcf_skbedit_init(struct nlattr *nla, struct nlattr *est, | |||
124 | d->priority = *priority; | 133 | d->priority = *priority; |
125 | if (flags & SKBEDIT_F_QUEUE_MAPPING) | 134 | if (flags & SKBEDIT_F_QUEUE_MAPPING) |
126 | d->queue_mapping = *queue_mapping; | 135 | d->queue_mapping = *queue_mapping; |
136 | if (flags & SKBEDIT_F_MARK) | ||
137 | d->mark = *mark; | ||
138 | |||
127 | d->tcf_action = parm->action; | 139 | d->tcf_action = parm->action; |
128 | 140 | ||
129 | spin_unlock_bh(&d->tcf_lock); | 141 | spin_unlock_bh(&d->tcf_lock); |
@@ -161,6 +173,9 @@ static inline int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a, | |||
161 | if (d->flags & SKBEDIT_F_QUEUE_MAPPING) | 173 | if (d->flags & SKBEDIT_F_QUEUE_MAPPING) |
162 | NLA_PUT(skb, TCA_SKBEDIT_QUEUE_MAPPING, | 174 | NLA_PUT(skb, TCA_SKBEDIT_QUEUE_MAPPING, |
163 | sizeof(d->queue_mapping), &d->queue_mapping); | 175 | sizeof(d->queue_mapping), &d->queue_mapping); |
176 | if (d->flags & SKBEDIT_F_MARK) | ||
177 | NLA_PUT(skb, TCA_SKBEDIT_MARK, sizeof(d->mark), | ||
178 | &d->mark); | ||
164 | t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install); | 179 | t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install); |
165 | t.lastuse = jiffies_to_clock_t(jiffies - d->tcf_tm.lastuse); | 180 | t.lastuse = jiffies_to_clock_t(jiffies - d->tcf_tm.lastuse); |
166 | t.expires = jiffies_to_clock_t(d->tcf_tm.expires); | 181 | t.expires = jiffies_to_clock_t(d->tcf_tm.expires); |
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 903e4188b6ca..1acfd29cc826 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c | |||
@@ -1179,7 +1179,7 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid, | |||
1179 | goto nla_put_failure; | 1179 | goto nla_put_failure; |
1180 | 1180 | ||
1181 | if (gnet_stats_copy_basic(&d, &q->bstats) < 0 || | 1181 | if (gnet_stats_copy_basic(&d, &q->bstats) < 0 || |
1182 | gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 || | 1182 | gnet_stats_copy_rate_est(&d, &q->bstats, &q->rate_est) < 0 || |
1183 | gnet_stats_copy_queue(&d, &q->qstats) < 0) | 1183 | gnet_stats_copy_queue(&d, &q->qstats) < 0) |
1184 | goto nla_put_failure; | 1184 | goto nla_put_failure; |
1185 | 1185 | ||
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index 5b132c473264..3846d65bc03e 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c | |||
@@ -1609,7 +1609,7 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg, | |||
1609 | cl->xstats.undertime = cl->undertime - q->now; | 1609 | cl->xstats.undertime = cl->undertime - q->now; |
1610 | 1610 | ||
1611 | if (gnet_stats_copy_basic(d, &cl->bstats) < 0 || | 1611 | if (gnet_stats_copy_basic(d, &cl->bstats) < 0 || |
1612 | gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 || | 1612 | gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 || |
1613 | gnet_stats_copy_queue(d, &cl->qstats) < 0) | 1613 | gnet_stats_copy_queue(d, &cl->qstats) < 0) |
1614 | return -1; | 1614 | return -1; |
1615 | 1615 | ||
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c index 5a888af7e5da..a65604f8f2b8 100644 --- a/net/sched/sch_drr.c +++ b/net/sched/sch_drr.c | |||
@@ -280,7 +280,7 @@ static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg, | |||
280 | } | 280 | } |
281 | 281 | ||
282 | if (gnet_stats_copy_basic(d, &cl->bstats) < 0 || | 282 | if (gnet_stats_copy_basic(d, &cl->bstats) < 0 || |
283 | gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 || | 283 | gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 || |
284 | gnet_stats_copy_queue(d, &cl->qdisc->qstats) < 0) | 284 | gnet_stats_copy_queue(d, &cl->qdisc->qstats) < 0) |
285 | return -1; | 285 | return -1; |
286 | 286 | ||
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index 2c5c76be18f8..b38b39c60752 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c | |||
@@ -1375,7 +1375,7 @@ hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg, | |||
1375 | xstats.rtwork = cl->cl_cumul; | 1375 | xstats.rtwork = cl->cl_cumul; |
1376 | 1376 | ||
1377 | if (gnet_stats_copy_basic(d, &cl->bstats) < 0 || | 1377 | if (gnet_stats_copy_basic(d, &cl->bstats) < 0 || |
1378 | gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 || | 1378 | gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 || |
1379 | gnet_stats_copy_queue(d, &cl->qstats) < 0) | 1379 | gnet_stats_copy_queue(d, &cl->qstats) < 0) |
1380 | return -1; | 1380 | return -1; |
1381 | 1381 | ||
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 85acab9dc6fd..2e38d1abd830 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c | |||
@@ -1105,7 +1105,7 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d) | |||
1105 | cl->xstats.ctokens = cl->ctokens; | 1105 | cl->xstats.ctokens = cl->ctokens; |
1106 | 1106 | ||
1107 | if (gnet_stats_copy_basic(d, &cl->bstats) < 0 || | 1107 | if (gnet_stats_copy_basic(d, &cl->bstats) < 0 || |
1108 | gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 || | 1108 | gnet_stats_copy_rate_est(d, NULL, &cl->rate_est) < 0 || |
1109 | gnet_stats_copy_queue(d, &cl->qstats) < 0) | 1109 | gnet_stats_copy_queue(d, &cl->qstats) < 0) |
1110 | return -1; | 1110 | return -1; |
1111 | 1111 | ||
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 612dc878e05c..d9f4cc2c7869 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c | |||
@@ -296,19 +296,19 @@ static void sctp_v4_from_sk(union sctp_addr *addr, struct sock *sk) | |||
296 | { | 296 | { |
297 | addr->v4.sin_family = AF_INET; | 297 | addr->v4.sin_family = AF_INET; |
298 | addr->v4.sin_port = 0; | 298 | addr->v4.sin_port = 0; |
299 | addr->v4.sin_addr.s_addr = inet_sk(sk)->rcv_saddr; | 299 | addr->v4.sin_addr.s_addr = inet_sk(sk)->inet_rcv_saddr; |
300 | } | 300 | } |
301 | 301 | ||
302 | /* Initialize sk->sk_rcv_saddr from sctp_addr. */ | 302 | /* Initialize sk->sk_rcv_saddr from sctp_addr. */ |
303 | static void sctp_v4_to_sk_saddr(union sctp_addr *addr, struct sock *sk) | 303 | static void sctp_v4_to_sk_saddr(union sctp_addr *addr, struct sock *sk) |
304 | { | 304 | { |
305 | inet_sk(sk)->rcv_saddr = addr->v4.sin_addr.s_addr; | 305 | inet_sk(sk)->inet_rcv_saddr = addr->v4.sin_addr.s_addr; |
306 | } | 306 | } |
307 | 307 | ||
308 | /* Initialize sk->sk_daddr from sctp_addr. */ | 308 | /* Initialize sk->sk_daddr from sctp_addr. */ |
309 | static void sctp_v4_to_sk_daddr(union sctp_addr *addr, struct sock *sk) | 309 | static void sctp_v4_to_sk_daddr(union sctp_addr *addr, struct sock *sk) |
310 | { | 310 | { |
311 | inet_sk(sk)->daddr = addr->v4.sin_addr.s_addr; | 311 | inet_sk(sk)->inet_daddr = addr->v4.sin_addr.s_addr; |
312 | } | 312 | } |
313 | 313 | ||
314 | /* Initialize a sctp_addr from an address parameter. */ | 314 | /* Initialize a sctp_addr from an address parameter. */ |
@@ -598,7 +598,7 @@ static struct sock *sctp_v4_create_accept_sk(struct sock *sk, | |||
598 | 598 | ||
599 | newinet = inet_sk(newsk); | 599 | newinet = inet_sk(newsk); |
600 | 600 | ||
601 | newinet->daddr = asoc->peer.primary_addr.v4.sin_addr.s_addr; | 601 | newinet->inet_daddr = asoc->peer.primary_addr.v4.sin_addr.s_addr; |
602 | 602 | ||
603 | sk_refcnt_debug_inc(newsk); | 603 | sk_refcnt_debug_inc(newsk); |
604 | 604 | ||
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index c8d05758661d..4085db99033d 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
@@ -394,7 +394,7 @@ SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len) | |||
394 | 394 | ||
395 | /* Refresh ephemeral port. */ | 395 | /* Refresh ephemeral port. */ |
396 | if (!bp->port) | 396 | if (!bp->port) |
397 | bp->port = inet_sk(sk)->num; | 397 | bp->port = inet_sk(sk)->inet_num; |
398 | 398 | ||
399 | /* Add the address to the bind address list. | 399 | /* Add the address to the bind address list. |
400 | * Use GFP_ATOMIC since BHs will be disabled. | 400 | * Use GFP_ATOMIC since BHs will be disabled. |
@@ -403,7 +403,7 @@ SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len) | |||
403 | 403 | ||
404 | /* Copy back into socket for getsockname() use. */ | 404 | /* Copy back into socket for getsockname() use. */ |
405 | if (!ret) { | 405 | if (!ret) { |
406 | inet_sk(sk)->sport = htons(inet_sk(sk)->num); | 406 | inet_sk(sk)->inet_sport = htons(inet_sk(sk)->inet_num); |
407 | af->to_sk_saddr(addr, sk); | 407 | af->to_sk_saddr(addr, sk); |
408 | } | 408 | } |
409 | 409 | ||
@@ -1115,7 +1115,7 @@ static int __sctp_connect(struct sock* sk, | |||
1115 | } | 1115 | } |
1116 | 1116 | ||
1117 | /* Initialize sk's dport and daddr for getpeername() */ | 1117 | /* Initialize sk's dport and daddr for getpeername() */ |
1118 | inet_sk(sk)->dport = htons(asoc->peer.port); | 1118 | inet_sk(sk)->inet_dport = htons(asoc->peer.port); |
1119 | af = sctp_get_af_specific(sa_addr->sa.sa_family); | 1119 | af = sctp_get_af_specific(sa_addr->sa.sa_family); |
1120 | af->to_sk_daddr(sa_addr, sk); | 1120 | af->to_sk_daddr(sa_addr, sk); |
1121 | sk->sk_err = 0; | 1121 | sk->sk_err = 0; |
@@ -1958,7 +1958,7 @@ SCTP_STATIC int sctp_recvmsg(struct kiocb *iocb, struct sock *sk, | |||
1958 | if (err) | 1958 | if (err) |
1959 | goto out_free; | 1959 | goto out_free; |
1960 | 1960 | ||
1961 | sock_recv_timestamp(msg, sk, skb); | 1961 | sock_recv_ts_and_drops(msg, sk, skb); |
1962 | if (sctp_ulpevent_is_notification(event)) { | 1962 | if (sctp_ulpevent_is_notification(event)) { |
1963 | msg->msg_flags |= MSG_NOTIFICATION; | 1963 | msg->msg_flags |= MSG_NOTIFICATION; |
1964 | sp->pf->event_msgname(event, msg->msg_name, addr_len); | 1964 | sp->pf->event_msgname(event, msg->msg_name, addr_len); |
@@ -5851,7 +5851,7 @@ pp_not_found: | |||
5851 | */ | 5851 | */ |
5852 | success: | 5852 | success: |
5853 | if (!sctp_sk(sk)->bind_hash) { | 5853 | if (!sctp_sk(sk)->bind_hash) { |
5854 | inet_sk(sk)->num = snum; | 5854 | inet_sk(sk)->inet_num = snum; |
5855 | sk_add_bind_node(sk, &pp->owner); | 5855 | sk_add_bind_node(sk, &pp->owner); |
5856 | sctp_sk(sk)->bind_hash = pp; | 5856 | sctp_sk(sk)->bind_hash = pp; |
5857 | } | 5857 | } |
@@ -5923,7 +5923,7 @@ SCTP_STATIC int sctp_listen_start(struct sock *sk, int backlog) | |||
5923 | if (sctp_autobind(sk)) | 5923 | if (sctp_autobind(sk)) |
5924 | return -EAGAIN; | 5924 | return -EAGAIN; |
5925 | } else { | 5925 | } else { |
5926 | if (sctp_get_port(sk, inet_sk(sk)->num)) { | 5926 | if (sctp_get_port(sk, inet_sk(sk)->inet_num)) { |
5927 | sk->sk_state = SCTP_SS_CLOSED; | 5927 | sk->sk_state = SCTP_SS_CLOSED; |
5928 | return -EADDRINUSE; | 5928 | return -EADDRINUSE; |
5929 | } | 5929 | } |
@@ -6094,14 +6094,14 @@ static void sctp_bucket_destroy(struct sctp_bind_bucket *pp) | |||
6094 | static inline void __sctp_put_port(struct sock *sk) | 6094 | static inline void __sctp_put_port(struct sock *sk) |
6095 | { | 6095 | { |
6096 | struct sctp_bind_hashbucket *head = | 6096 | struct sctp_bind_hashbucket *head = |
6097 | &sctp_port_hashtable[sctp_phashfn(inet_sk(sk)->num)]; | 6097 | &sctp_port_hashtable[sctp_phashfn(inet_sk(sk)->inet_num)]; |
6098 | struct sctp_bind_bucket *pp; | 6098 | struct sctp_bind_bucket *pp; |
6099 | 6099 | ||
6100 | sctp_spin_lock(&head->lock); | 6100 | sctp_spin_lock(&head->lock); |
6101 | pp = sctp_sk(sk)->bind_hash; | 6101 | pp = sctp_sk(sk)->bind_hash; |
6102 | __sk_del_bind_node(sk); | 6102 | __sk_del_bind_node(sk); |
6103 | sctp_sk(sk)->bind_hash = NULL; | 6103 | sctp_sk(sk)->bind_hash = NULL; |
6104 | inet_sk(sk)->num = 0; | 6104 | inet_sk(sk)->inet_num = 0; |
6105 | sctp_bucket_destroy(pp); | 6105 | sctp_bucket_destroy(pp); |
6106 | sctp_spin_unlock(&head->lock); | 6106 | sctp_spin_unlock(&head->lock); |
6107 | } | 6107 | } |
@@ -6128,7 +6128,7 @@ static int sctp_autobind(struct sock *sk) | |||
6128 | /* Initialize a local sockaddr structure to INADDR_ANY. */ | 6128 | /* Initialize a local sockaddr structure to INADDR_ANY. */ |
6129 | af = sctp_sk(sk)->pf->af; | 6129 | af = sctp_sk(sk)->pf->af; |
6130 | 6130 | ||
6131 | port = htons(inet_sk(sk)->num); | 6131 | port = htons(inet_sk(sk)->inet_num); |
6132 | af->inaddr_any(&autoaddr, port); | 6132 | af->inaddr_any(&autoaddr, port); |
6133 | 6133 | ||
6134 | return sctp_do_bind(sk, &autoaddr, af->sockaddr_len); | 6134 | return sctp_do_bind(sk, &autoaddr, af->sockaddr_len); |
@@ -6697,12 +6697,12 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk, | |||
6697 | /* Initialize sk's sport, dport, rcv_saddr and daddr for | 6697 | /* Initialize sk's sport, dport, rcv_saddr and daddr for |
6698 | * getsockname() and getpeername() | 6698 | * getsockname() and getpeername() |
6699 | */ | 6699 | */ |
6700 | newinet->sport = inet->sport; | 6700 | newinet->inet_sport = inet->inet_sport; |
6701 | newinet->saddr = inet->saddr; | 6701 | newinet->inet_saddr = inet->inet_saddr; |
6702 | newinet->rcv_saddr = inet->rcv_saddr; | 6702 | newinet->inet_rcv_saddr = inet->inet_rcv_saddr; |
6703 | newinet->dport = htons(asoc->peer.port); | 6703 | newinet->inet_dport = htons(asoc->peer.port); |
6704 | newinet->pmtudisc = inet->pmtudisc; | 6704 | newinet->pmtudisc = inet->pmtudisc; |
6705 | newinet->id = asoc->next_tsn ^ jiffies; | 6705 | newinet->inet_id = asoc->next_tsn ^ jiffies; |
6706 | 6706 | ||
6707 | newinet->uc_ttl = inet->uc_ttl; | 6707 | newinet->uc_ttl = inet->uc_ttl; |
6708 | newinet->mc_loop = 1; | 6708 | newinet->mc_loop = 1; |
@@ -6741,13 +6741,13 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk, | |||
6741 | newsp->hmac = NULL; | 6741 | newsp->hmac = NULL; |
6742 | 6742 | ||
6743 | /* Hook this new socket in to the bind_hash list. */ | 6743 | /* Hook this new socket in to the bind_hash list. */ |
6744 | head = &sctp_port_hashtable[sctp_phashfn(inet_sk(oldsk)->num)]; | 6744 | head = &sctp_port_hashtable[sctp_phashfn(inet_sk(oldsk)->inet_num)]; |
6745 | sctp_local_bh_disable(); | 6745 | sctp_local_bh_disable(); |
6746 | sctp_spin_lock(&head->lock); | 6746 | sctp_spin_lock(&head->lock); |
6747 | pp = sctp_sk(oldsk)->bind_hash; | 6747 | pp = sctp_sk(oldsk)->bind_hash; |
6748 | sk_add_bind_node(newsk, &pp->owner); | 6748 | sk_add_bind_node(newsk, &pp->owner); |
6749 | sctp_sk(newsk)->bind_hash = pp; | 6749 | sctp_sk(newsk)->bind_hash = pp; |
6750 | inet_sk(newsk)->num = inet_sk(oldsk)->num; | 6750 | inet_sk(newsk)->inet_num = inet_sk(oldsk)->inet_num; |
6751 | sctp_spin_unlock(&head->lock); | 6751 | sctp_spin_unlock(&head->lock); |
6752 | sctp_local_bh_enable(); | 6752 | sctp_local_bh_enable(); |
6753 | 6753 | ||
diff --git a/net/socket.c b/net/socket.c index 75655365b5fd..9dff31c9b799 100644 --- a/net/socket.c +++ b/net/socket.c | |||
@@ -668,10 +668,24 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk, | |||
668 | 668 | ||
669 | EXPORT_SYMBOL_GPL(__sock_recv_timestamp); | 669 | EXPORT_SYMBOL_GPL(__sock_recv_timestamp); |
670 | 670 | ||
671 | static inline int __sock_recvmsg(struct kiocb *iocb, struct socket *sock, | 671 | inline void sock_recv_drops(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) |
672 | struct msghdr *msg, size_t size, int flags) | 672 | { |
673 | if (sock_flag(sk, SOCK_RXQ_OVFL) && skb && skb->dropcount) | ||
674 | put_cmsg(msg, SOL_SOCKET, SO_RXQ_OVFL, | ||
675 | sizeof(__u32), &skb->dropcount); | ||
676 | } | ||
677 | |||
678 | void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk, | ||
679 | struct sk_buff *skb) | ||
680 | { | ||
681 | sock_recv_timestamp(msg, sk, skb); | ||
682 | sock_recv_drops(msg, sk, skb); | ||
683 | } | ||
684 | EXPORT_SYMBOL_GPL(sock_recv_ts_and_drops); | ||
685 | |||
686 | static inline int __sock_recvmsg_nosec(struct kiocb *iocb, struct socket *sock, | ||
687 | struct msghdr *msg, size_t size, int flags) | ||
673 | { | 688 | { |
674 | int err; | ||
675 | struct sock_iocb *si = kiocb_to_siocb(iocb); | 689 | struct sock_iocb *si = kiocb_to_siocb(iocb); |
676 | 690 | ||
677 | si->sock = sock; | 691 | si->sock = sock; |
@@ -680,13 +694,17 @@ static inline int __sock_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
680 | si->size = size; | 694 | si->size = size; |
681 | si->flags = flags; | 695 | si->flags = flags; |
682 | 696 | ||
683 | err = security_socket_recvmsg(sock, msg, size, flags); | ||
684 | if (err) | ||
685 | return err; | ||
686 | |||
687 | return sock->ops->recvmsg(iocb, sock, msg, size, flags); | 697 | return sock->ops->recvmsg(iocb, sock, msg, size, flags); |
688 | } | 698 | } |
689 | 699 | ||
700 | static inline int __sock_recvmsg(struct kiocb *iocb, struct socket *sock, | ||
701 | struct msghdr *msg, size_t size, int flags) | ||
702 | { | ||
703 | int err = security_socket_recvmsg(sock, msg, size, flags); | ||
704 | |||
705 | return err ?: __sock_recvmsg_nosec(iocb, sock, msg, size, flags); | ||
706 | } | ||
707 | |||
690 | int sock_recvmsg(struct socket *sock, struct msghdr *msg, | 708 | int sock_recvmsg(struct socket *sock, struct msghdr *msg, |
691 | size_t size, int flags) | 709 | size_t size, int flags) |
692 | { | 710 | { |
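
sock_recv_drops() above exports the socket's receive-queue overflow counter as a SOL_SOCKET/SO_RXQ_OVFL control message; the SO_RXQ_OVFL option and the skb->dropcount field it relies on come from elsewhere in this series. A hedged user-space sketch (UDP is used only as an example, the port is a placeholder, and SO_RXQ_OVFL must be exposed by the installed headers):

#include <netinet/in.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/socket.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct sockaddr_in sin = { .sin_family = AF_INET,
				   .sin_port = htons(5000) };	/* example port */
	int one = 1;
	char data[2048], cbuf[CMSG_SPACE(sizeof(uint32_t))];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
			      .msg_control = cbuf,
			      .msg_controllen = sizeof(cbuf) };
	struct cmsghdr *cmsg;

	bind(fd, (struct sockaddr *)&sin, sizeof(sin));
	/* Sets SOCK_RXQ_OVFL so sock_recv_drops() attaches the cmsg */
	setsockopt(fd, SOL_SOCKET, SO_RXQ_OVFL, &one, sizeof(one));

	if (recvmsg(fd, &msg, 0) < 0)
		return 1;
	/* The cmsg is only present once drops have actually occurred */
	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
		if (cmsg->cmsg_level == SOL_SOCKET &&
		    cmsg->cmsg_type == SO_RXQ_OVFL)
			printf("dropped so far: %u\n",
			       *(uint32_t *)CMSG_DATA(cmsg));
	return 0;
}
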
@@ -702,6 +720,21 @@ int sock_recvmsg(struct socket *sock, struct msghdr *msg, | |||
702 | return ret; | 720 | return ret; |
703 | } | 721 | } |
704 | 722 | ||
723 | static int sock_recvmsg_nosec(struct socket *sock, struct msghdr *msg, | ||
724 | size_t size, int flags) | ||
725 | { | ||
726 | struct kiocb iocb; | ||
727 | struct sock_iocb siocb; | ||
728 | int ret; | ||
729 | |||
730 | init_sync_kiocb(&iocb, NULL); | ||
731 | iocb.private = &siocb; | ||
732 | ret = __sock_recvmsg_nosec(&iocb, sock, msg, size, flags); | ||
733 | if (-EIOCBQUEUED == ret) | ||
734 | ret = wait_on_sync_kiocb(&iocb); | ||
735 | return ret; | ||
736 | } | ||
737 | |||
705 | int kernel_recvmsg(struct socket *sock, struct msghdr *msg, | 738 | int kernel_recvmsg(struct socket *sock, struct msghdr *msg, |
706 | struct kvec *vec, size_t num, size_t size, int flags) | 739 | struct kvec *vec, size_t num, size_t size, int flags) |
707 | { | 740 | { |
@@ -905,11 +938,11 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg) | |||
905 | if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15)) { | 938 | if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15)) { |
906 | err = dev_ioctl(net, cmd, argp); | 939 | err = dev_ioctl(net, cmd, argp); |
907 | } else | 940 | } else |
908 | #ifdef CONFIG_WIRELESS_EXT | 941 | #ifdef CONFIG_WEXT_CORE |
909 | if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) { | 942 | if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) { |
910 | err = dev_ioctl(net, cmd, argp); | 943 | err = dev_ioctl(net, cmd, argp); |
911 | } else | 944 | } else |
912 | #endif /* CONFIG_WIRELESS_EXT */ | 945 | #endif |
913 | switch (cmd) { | 946 | switch (cmd) { |
914 | case FIOSETOWN: | 947 | case FIOSETOWN: |
915 | case SIOCSPGRP: | 948 | case SIOCSPGRP: |
@@ -1100,11 +1133,14 @@ static int sock_fasync(int fd, struct file *filp, int on) | |||
1100 | fna->fa_next = sock->fasync_list; | 1133 | fna->fa_next = sock->fasync_list; |
1101 | write_lock_bh(&sk->sk_callback_lock); | 1134 | write_lock_bh(&sk->sk_callback_lock); |
1102 | sock->fasync_list = fna; | 1135 | sock->fasync_list = fna; |
1136 | sock_set_flag(sk, SOCK_FASYNC); | ||
1103 | write_unlock_bh(&sk->sk_callback_lock); | 1137 | write_unlock_bh(&sk->sk_callback_lock); |
1104 | } else { | 1138 | } else { |
1105 | if (fa != NULL) { | 1139 | if (fa != NULL) { |
1106 | write_lock_bh(&sk->sk_callback_lock); | 1140 | write_lock_bh(&sk->sk_callback_lock); |
1107 | *prev = fa->fa_next; | 1141 | *prev = fa->fa_next; |
1142 | if (!sock->fasync_list) | ||
1143 | sock_reset_flag(sk, SOCK_FASYNC); | ||
1108 | write_unlock_bh(&sk->sk_callback_lock); | 1144 | write_unlock_bh(&sk->sk_callback_lock); |
1109 | kfree(fa); | 1145 | kfree(fa); |
1110 | } | 1146 | } |
@@ -1965,22 +2001,15 @@ out: | |||
1965 | return err; | 2001 | return err; |
1966 | } | 2002 | } |
1967 | 2003 | ||
1968 | /* | 2004 | static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg, |
1969 | * BSD recvmsg interface | 2005 | struct msghdr *msg_sys, unsigned flags, int nosec) |
1970 | */ | ||
1971 | |||
1972 | SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg, | ||
1973 | unsigned int, flags) | ||
1974 | { | 2006 | { |
1975 | struct compat_msghdr __user *msg_compat = | 2007 | struct compat_msghdr __user *msg_compat = |
1976 | (struct compat_msghdr __user *)msg; | 2008 | (struct compat_msghdr __user *)msg; |
1977 | struct socket *sock; | ||
1978 | struct iovec iovstack[UIO_FASTIOV]; | 2009 | struct iovec iovstack[UIO_FASTIOV]; |
1979 | struct iovec *iov = iovstack; | 2010 | struct iovec *iov = iovstack; |
1980 | struct msghdr msg_sys; | ||
1981 | unsigned long cmsg_ptr; | 2011 | unsigned long cmsg_ptr; |
1982 | int err, iov_size, total_len, len; | 2012 | int err, iov_size, total_len, len; |
1983 | int fput_needed; | ||
1984 | 2013 | ||
1985 | /* kernel mode address */ | 2014 | /* kernel mode address */ |
1986 | struct sockaddr_storage addr; | 2015 | struct sockaddr_storage addr; |
@@ -1990,27 +2019,23 @@ SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg, | |||
1990 | int __user *uaddr_len; | 2019 | int __user *uaddr_len; |
1991 | 2020 | ||
1992 | if (MSG_CMSG_COMPAT & flags) { | 2021 | if (MSG_CMSG_COMPAT & flags) { |
1993 | if (get_compat_msghdr(&msg_sys, msg_compat)) | 2022 | if (get_compat_msghdr(msg_sys, msg_compat)) |
1994 | return -EFAULT; | 2023 | return -EFAULT; |
1995 | } | 2024 | } |
1996 | else if (copy_from_user(&msg_sys, msg, sizeof(struct msghdr))) | 2025 | else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr))) |
1997 | return -EFAULT; | 2026 | return -EFAULT; |
1998 | 2027 | ||
1999 | sock = sockfd_lookup_light(fd, &err, &fput_needed); | ||
2000 | if (!sock) | ||
2001 | goto out; | ||
2002 | |||
2003 | err = -EMSGSIZE; | 2028 | err = -EMSGSIZE; |
2004 | if (msg_sys.msg_iovlen > UIO_MAXIOV) | 2029 | if (msg_sys->msg_iovlen > UIO_MAXIOV) |
2005 | goto out_put; | 2030 | goto out; |
2006 | 2031 | ||
2007 | /* Check whether to allocate the iovec area */ | 2032 | /* Check whether to allocate the iovec area */ |
2008 | err = -ENOMEM; | 2033 | err = -ENOMEM; |
2009 | iov_size = msg_sys.msg_iovlen * sizeof(struct iovec); | 2034 | iov_size = msg_sys->msg_iovlen * sizeof(struct iovec); |
2010 | if (msg_sys.msg_iovlen > UIO_FASTIOV) { | 2035 | if (msg_sys->msg_iovlen > UIO_FASTIOV) { |
2011 | iov = sock_kmalloc(sock->sk, iov_size, GFP_KERNEL); | 2036 | iov = sock_kmalloc(sock->sk, iov_size, GFP_KERNEL); |
2012 | if (!iov) | 2037 | if (!iov) |
2013 | goto out_put; | 2038 | goto out; |
2014 | } | 2039 | } |
2015 | 2040 | ||
2016 | /* | 2041 | /* |
@@ -2018,46 +2043,47 @@ SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg, | |||
2018 | * kernel msghdr to use the kernel address space) | 2043 | * kernel msghdr to use the kernel address space) |
2019 | */ | 2044 | */ |
2020 | 2045 | ||
2021 | uaddr = (__force void __user *)msg_sys.msg_name; | 2046 | uaddr = (__force void __user *)msg_sys->msg_name; |
2022 | uaddr_len = COMPAT_NAMELEN(msg); | 2047 | uaddr_len = COMPAT_NAMELEN(msg); |
2023 | if (MSG_CMSG_COMPAT & flags) { | 2048 | if (MSG_CMSG_COMPAT & flags) { |
2024 | err = verify_compat_iovec(&msg_sys, iov, | 2049 | err = verify_compat_iovec(msg_sys, iov, |
2025 | (struct sockaddr *)&addr, | 2050 | (struct sockaddr *)&addr, |
2026 | VERIFY_WRITE); | 2051 | VERIFY_WRITE); |
2027 | } else | 2052 | } else |
2028 | err = verify_iovec(&msg_sys, iov, | 2053 | err = verify_iovec(msg_sys, iov, |
2029 | (struct sockaddr *)&addr, | 2054 | (struct sockaddr *)&addr, |
2030 | VERIFY_WRITE); | 2055 | VERIFY_WRITE); |
2031 | if (err < 0) | 2056 | if (err < 0) |
2032 | goto out_freeiov; | 2057 | goto out_freeiov; |
2033 | total_len = err; | 2058 | total_len = err; |
2034 | 2059 | ||
2035 | cmsg_ptr = (unsigned long)msg_sys.msg_control; | 2060 | cmsg_ptr = (unsigned long)msg_sys->msg_control; |
2036 | msg_sys.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT); | 2061 | msg_sys->msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT); |
2037 | 2062 | ||
2038 | if (sock->file->f_flags & O_NONBLOCK) | 2063 | if (sock->file->f_flags & O_NONBLOCK) |
2039 | flags |= MSG_DONTWAIT; | 2064 | flags |= MSG_DONTWAIT; |
2040 | err = sock_recvmsg(sock, &msg_sys, total_len, flags); | 2065 | err = (nosec ? sock_recvmsg_nosec : sock_recvmsg)(sock, msg_sys, |
2066 | total_len, flags); | ||
2041 | if (err < 0) | 2067 | if (err < 0) |
2042 | goto out_freeiov; | 2068 | goto out_freeiov; |
2043 | len = err; | 2069 | len = err; |
2044 | 2070 | ||
2045 | if (uaddr != NULL) { | 2071 | if (uaddr != NULL) { |
2046 | err = move_addr_to_user((struct sockaddr *)&addr, | 2072 | err = move_addr_to_user((struct sockaddr *)&addr, |
2047 | msg_sys.msg_namelen, uaddr, | 2073 | msg_sys->msg_namelen, uaddr, |
2048 | uaddr_len); | 2074 | uaddr_len); |
2049 | if (err < 0) | 2075 | if (err < 0) |
2050 | goto out_freeiov; | 2076 | goto out_freeiov; |
2051 | } | 2077 | } |
2052 | err = __put_user((msg_sys.msg_flags & ~MSG_CMSG_COMPAT), | 2078 | err = __put_user((msg_sys->msg_flags & ~MSG_CMSG_COMPAT), |
2053 | COMPAT_FLAGS(msg)); | 2079 | COMPAT_FLAGS(msg)); |
2054 | if (err) | 2080 | if (err) |
2055 | goto out_freeiov; | 2081 | goto out_freeiov; |
2056 | if (MSG_CMSG_COMPAT & flags) | 2082 | if (MSG_CMSG_COMPAT & flags) |
2057 | err = __put_user((unsigned long)msg_sys.msg_control - cmsg_ptr, | 2083 | err = __put_user((unsigned long)msg_sys->msg_control - cmsg_ptr, |
2058 | &msg_compat->msg_controllen); | 2084 | &msg_compat->msg_controllen); |
2059 | else | 2085 | else |
2060 | err = __put_user((unsigned long)msg_sys.msg_control - cmsg_ptr, | 2086 | err = __put_user((unsigned long)msg_sys->msg_control - cmsg_ptr, |
2061 | &msg->msg_controllen); | 2087 | &msg->msg_controllen); |
2062 | if (err) | 2088 | if (err) |
2063 | goto out_freeiov; | 2089 | goto out_freeiov; |
@@ -2066,21 +2092,150 @@ SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg, | |||
2066 | out_freeiov: | 2092 | out_freeiov: |
2067 | if (iov != iovstack) | 2093 | if (iov != iovstack) |
2068 | sock_kfree_s(sock->sk, iov, iov_size); | 2094 | sock_kfree_s(sock->sk, iov, iov_size); |
2069 | out_put: | 2095 | out: |
2096 | return err; | ||
2097 | } | ||
2098 | |||
2099 | /* | ||
2100 | * BSD recvmsg interface | ||
2101 | */ | ||
2102 | |||
2103 | SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg, | ||
2104 | unsigned int, flags) | ||
2105 | { | ||
2106 | int fput_needed, err; | ||
2107 | struct msghdr msg_sys; | ||
2108 | struct socket *sock = sockfd_lookup_light(fd, &err, &fput_needed); | ||
2109 | |||
2110 | if (!sock) | ||
2111 | goto out; | ||
2112 | |||
2113 | err = __sys_recvmsg(sock, msg, &msg_sys, flags, 0); | ||
2114 | |||
2070 | fput_light(sock->file, fput_needed); | 2115 | fput_light(sock->file, fput_needed); |
2071 | out: | 2116 | out: |
2072 | return err; | 2117 | return err; |
2073 | } | 2118 | } |
2074 | 2119 | ||
2075 | #ifdef __ARCH_WANT_SYS_SOCKETCALL | 2120 | /* |
2121 | * Linux recvmmsg interface | ||
2122 | */ | ||
2123 | |||
2124 | int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, | ||
2125 | unsigned int flags, struct timespec *timeout) | ||
2126 | { | ||
2127 | int fput_needed, err, datagrams; | ||
2128 | struct socket *sock; | ||
2129 | struct mmsghdr __user *entry; | ||
2130 | struct msghdr msg_sys; | ||
2131 | struct timespec end_time; | ||
2132 | |||
2133 | if (timeout && | ||
2134 | poll_select_set_timeout(&end_time, timeout->tv_sec, | ||
2135 | timeout->tv_nsec)) | ||
2136 | return -EINVAL; | ||
2137 | |||
2138 | datagrams = 0; | ||
2139 | |||
2140 | sock = sockfd_lookup_light(fd, &err, &fput_needed); | ||
2141 | if (!sock) | ||
2142 | return err; | ||
2143 | |||
2144 | err = sock_error(sock->sk); | ||
2145 | if (err) | ||
2146 | goto out_put; | ||
2147 | |||
2148 | entry = mmsg; | ||
2149 | |||
2150 | while (datagrams < vlen) { | ||
2151 | /* | ||
2152 | * No need to ask LSM for more than the first datagram. | ||
2153 | */ | ||
2154 | err = __sys_recvmsg(sock, (struct msghdr __user *)entry, | ||
2155 | &msg_sys, flags, datagrams); | ||
2156 | if (err < 0) | ||
2157 | break; | ||
2158 | err = put_user(err, &entry->msg_len); | ||
2159 | if (err) | ||
2160 | break; | ||
2161 | ++entry; | ||
2162 | ++datagrams; | ||
2163 | |||
2164 | if (timeout) { | ||
2165 | ktime_get_ts(timeout); | ||
2166 | *timeout = timespec_sub(end_time, *timeout); | ||
2167 | if (timeout->tv_sec < 0) { | ||
2168 | timeout->tv_sec = timeout->tv_nsec = 0; | ||
2169 | break; | ||
2170 | } | ||
2171 | |||
2172 | /* Timeout, return less than vlen datagrams */ | ||
2173 | if (timeout->tv_nsec == 0 && timeout->tv_sec == 0) | ||
2174 | break; | ||
2175 | } | ||
2176 | |||
2177 | /* Out of band data, return right away */ | ||
2178 | if (msg_sys.msg_flags & MSG_OOB) | ||
2179 | break; | ||
2180 | } | ||
2181 | |||
2182 | out_put: | ||
2183 | fput_light(sock->file, fput_needed); | ||
2184 | |||
2185 | if (err == 0) | ||
2186 | return datagrams; | ||
2187 | |||
2188 | if (datagrams != 0) { | ||
2189 | /* | ||
2190 | * We may return less entries than requested (vlen) if the | ||
2191 | * sock is non block and there aren't enough datagrams... | ||
2192 | */ | ||
2193 | if (err != -EAGAIN) { | ||
2194 | /* | ||
2195 | * ... or if recvmsg returns an error after we | ||
2196 | * received some datagrams, where we record the | ||
2197 | * error to return on the next call or if the | ||
2198 | * app asks about it using getsockopt(SO_ERROR). | ||
2199 | */ | ||
2200 | sock->sk->sk_err = -err; | ||
2201 | } | ||
2076 | 2202 | ||
2203 | return datagrams; | ||
2204 | } | ||
2205 | |||
2206 | return err; | ||
2207 | } | ||
2208 | |||
2209 | SYSCALL_DEFINE5(recvmmsg, int, fd, struct mmsghdr __user *, mmsg, | ||
2210 | unsigned int, vlen, unsigned int, flags, | ||
2211 | struct timespec __user *, timeout) | ||
2212 | { | ||
2213 | int datagrams; | ||
2214 | struct timespec timeout_sys; | ||
2215 | |||
2216 | if (!timeout) | ||
2217 | return __sys_recvmmsg(fd, mmsg, vlen, flags, NULL); | ||
2218 | |||
2219 | if (copy_from_user(&timeout_sys, timeout, sizeof(timeout_sys))) | ||
2220 | return -EFAULT; | ||
2221 | |||
2222 | datagrams = __sys_recvmmsg(fd, mmsg, vlen, flags, &timeout_sys); | ||
2223 | |||
2224 | if (datagrams > 0 && | ||
2225 | copy_to_user(timeout, &timeout_sys, sizeof(timeout_sys))) | ||
2226 | datagrams = -EFAULT; | ||
2227 | |||
2228 | return datagrams; | ||
2229 | } | ||
2230 | |||
2231 | #ifdef __ARCH_WANT_SYS_SOCKETCALL | ||
2077 | /* Argument list sizes for sys_socketcall */ | 2232 | /* Argument list sizes for sys_socketcall */ |
2078 | #define AL(x) ((x) * sizeof(unsigned long)) | 2233 | #define AL(x) ((x) * sizeof(unsigned long)) |
2079 | static const unsigned char nargs[19]={ | 2234 | static const unsigned char nargs[20] = { |
2080 | AL(0),AL(3),AL(3),AL(3),AL(2),AL(3), | 2235 | AL(0),AL(3),AL(3),AL(3),AL(2),AL(3), |
2081 | AL(3),AL(3),AL(4),AL(4),AL(4),AL(6), | 2236 | AL(3),AL(3),AL(4),AL(4),AL(4),AL(6), |
2082 | AL(6),AL(2),AL(5),AL(5),AL(3),AL(3), | 2237 | AL(6),AL(2),AL(5),AL(5),AL(3),AL(3), |
2083 | AL(4) | 2238 | AL(4),AL(5) |
2084 | }; | 2239 | }; |
2085 | 2240 | ||
2086 | #undef AL | 2241 | #undef AL |
@@ -2100,7 +2255,7 @@ SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args) | |||
2100 | int err; | 2255 | int err; |
2101 | unsigned int len; | 2256 | unsigned int len; |
2102 | 2257 | ||
2103 | if (call < 1 || call > SYS_ACCEPT4) | 2258 | if (call < 1 || call > SYS_RECVMMSG) |
2104 | return -EINVAL; | 2259 | return -EINVAL; |
2105 | 2260 | ||
2106 | len = nargs[call]; | 2261 | len = nargs[call]; |
@@ -2178,6 +2333,10 @@ SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args) | |||
2178 | case SYS_RECVMSG: | 2333 | case SYS_RECVMSG: |
2179 | err = sys_recvmsg(a0, (struct msghdr __user *)a1, a[2]); | 2334 | err = sys_recvmsg(a0, (struct msghdr __user *)a1, a[2]); |
2180 | break; | 2335 | break; |
2336 | case SYS_RECVMMSG: | ||
2337 | err = sys_recvmmsg(a0, (struct mmsghdr __user *)a1, a[2], a[3], | ||
2338 | (struct timespec __user *)a[4]); | ||
2339 | break; | ||
2181 | case SYS_ACCEPT4: | 2340 | case SYS_ACCEPT4: |
2182 | err = sys_accept4(a0, (struct sockaddr __user *)a1, | 2341 | err = sys_accept4(a0, (struct sockaddr __user *)a1, |
2183 | (int __user *)a[2], a[3]); | 2342 | (int __user *)a[2], a[3]); |
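
The new recvmmsg() path above (dispatched via SYS_RECVMMSG, with nargs grown to 20 entries) receives up to vlen datagrams in one call, applies an optional overall timeout, and returns each datagram's length in mmsg[i].msg_len. A hedged user-space sketch, assuming a C library wrapper for recvmmsg is available (otherwise syscall(__NR_recvmmsg, ...) would be needed); the port is a placeholder and error handling is trimmed:

#define _GNU_SOURCE
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <time.h>

#define VLEN 8

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct sockaddr_in sin = { .sin_family = AF_INET,
				   .sin_port = htons(5000) };	/* example port */
	struct mmsghdr msgs[VLEN];
	struct iovec iovs[VLEN];
	char bufs[VLEN][1500];
	struct timespec timeout = { .tv_sec = 1, .tv_nsec = 0 };
	int i, n;

	bind(fd, (struct sockaddr *)&sin, sizeof(sin));
	memset(msgs, 0, sizeof(msgs));
	for (i = 0; i < VLEN; i++) {
		iovs[i].iov_base = bufs[i];
		iovs[i].iov_len = sizeof(bufs[i]);
		msgs[i].msg_hdr.msg_iov = &iovs[i];
		msgs[i].msg_hdr.msg_iovlen = 1;
	}

	/* Drain up to VLEN datagrams in a single system call */
	n = recvmmsg(fd, msgs, VLEN, 0, &timeout);
	if (n < 0)
		return 1;
	for (i = 0; i < n; i++)
		printf("datagram %d: %u bytes\n", i, msgs[i].msg_len);
	return 0;
}
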
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c index 117f68a8aa40..f4c7ff3a53e6 100644 --- a/net/sunrpc/svcauth_unix.c +++ b/net/sunrpc/svcauth_unix.c | |||
@@ -686,8 +686,7 @@ svcauth_unix_set_client(struct svc_rqst *rqstp) | |||
686 | case AF_INET: | 686 | case AF_INET: |
687 | sin = svc_addr_in(rqstp); | 687 | sin = svc_addr_in(rqstp); |
688 | sin6 = &sin6_storage; | 688 | sin6 = &sin6_storage; |
689 | ipv6_addr_set(&sin6->sin6_addr, 0, 0, | 689 | ipv6_addr_set_v4mapped(sin->sin_addr.s_addr, &sin6->sin6_addr); |
690 | htonl(0x0000FFFF), sin->sin_addr.s_addr); | ||
691 | break; | 690 | break; |
692 | case AF_INET6: | 691 | case AF_INET6: |
693 | sin6 = svc_addr_in6(rqstp); | 692 | sin6 = svc_addr_in6(rqstp); |
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index ccc5e83cae5d..c2a17876defd 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c | |||
@@ -272,14 +272,14 @@ static int svc_one_sock_name(struct svc_sock *svsk, char *buf, int remaining) | |||
272 | case PF_INET: | 272 | case PF_INET: |
273 | len = snprintf(buf, remaining, "ipv4 %s %pI4 %d\n", | 273 | len = snprintf(buf, remaining, "ipv4 %s %pI4 %d\n", |
274 | proto_name, | 274 | proto_name, |
275 | &inet_sk(sk)->rcv_saddr, | 275 | &inet_sk(sk)->inet_rcv_saddr, |
276 | inet_sk(sk)->num); | 276 | inet_sk(sk)->inet_num); |
277 | break; | 277 | break; |
278 | case PF_INET6: | 278 | case PF_INET6: |
279 | len = snprintf(buf, remaining, "ipv6 %s %pI6 %d\n", | 279 | len = snprintf(buf, remaining, "ipv6 %s %pI6 %d\n", |
280 | proto_name, | 280 | proto_name, |
281 | &inet6_sk(sk)->rcv_saddr, | 281 | &inet6_sk(sk)->rcv_saddr, |
282 | inet_sk(sk)->num); | 282 | inet_sk(sk)->inet_num); |
283 | break; | 283 | break; |
284 | default: | 284 | default: |
285 | len = snprintf(buf, remaining, "*unknown-%d*\n", | 285 | len = snprintf(buf, remaining, "*unknown-%d*\n", |
@@ -1311,7 +1311,7 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv, | |||
1311 | /* Register socket with portmapper */ | 1311 | /* Register socket with portmapper */ |
1312 | if (*errp >= 0 && pmap_register) | 1312 | if (*errp >= 0 && pmap_register) |
1313 | *errp = svc_register(serv, inet->sk_family, inet->sk_protocol, | 1313 | *errp = svc_register(serv, inet->sk_family, inet->sk_protocol, |
1314 | ntohs(inet_sk(inet)->sport)); | 1314 | ntohs(inet_sk(inet)->inet_sport)); |
1315 | 1315 | ||
1316 | if (*errp < 0) { | 1316 | if (*errp < 0) { |
1317 | kfree(svsk); | 1317 | kfree(svsk); |
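[Editor's note] The svcsock.c hunks are part of the tree-wide inet_sock field rename (rcv_saddr becomes inet_rcv_saddr, num becomes inet_num, sport becomes inet_sport); behaviour is unchanged. For reference, a hypothetical v4-only helper using the new names (not from this patch):

	/* Sketch: format "addr port" for a bound IPv4 service socket using
	 * the renamed inet_sock accessors. */
	static int sketch_sock_name(struct sock *sk, char *buf, size_t remaining)
	{
		return snprintf(buf, remaining, "%pI4 %d\n",
				&inet_sk(sk)->inet_rcv_saddr,
				inet_sk(sk)->inet_num);
	}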
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index fc820cd75453..3291902f0b88 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c | |||
@@ -2216,7 +2216,7 @@ static const struct file_operations unix_seq_fops = { | |||
2216 | 2216 | ||
2217 | #endif | 2217 | #endif |
2218 | 2218 | ||
2219 | static struct net_proto_family unix_family_ops = { | 2219 | static const struct net_proto_family unix_family_ops = { |
2220 | .family = PF_UNIX, | 2220 | .family = PF_UNIX, |
2221 | .create = unix_create, | 2221 | .create = unix_create, |
2222 | .owner = THIS_MODULE, | 2222 | .owner = THIS_MODULE, |
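[Editor's note] Marking unix_family_ops const works because sock_register() accepts a const struct net_proto_family * (otherwise this hunk would not compile), so the ops table can live in read-only data. A hedged sketch of the registration pattern, with an illustrative name rather than new code from this patch:

	/* Sketch only: register a protocol family from module init; the
	 * const table matches sock_register()'s const parameter. */
	static const struct net_proto_family example_family_ops = {
		.family = PF_UNIX,
		.create = unix_create,	/* the family's socket-create hook */
		.owner  = THIS_MODULE,
	};

	static int __init example_init(void)
	{
		return sock_register(&example_family_ops);
	}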
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig index abf7ca3f9ff9..614bdcec1c80 100644 --- a/net/wireless/Kconfig +++ b/net/wireless/Kconfig | |||
@@ -1,3 +1,21 @@ | |||
1 | config WIRELESS_EXT | ||
2 | bool | ||
3 | |||
4 | config WEXT_CORE | ||
5 | def_bool y | ||
6 | depends on CFG80211_WEXT || WIRELESS_EXT | ||
7 | |||
8 | config WEXT_PROC | ||
9 | def_bool y | ||
10 | depends on PROC_FS | ||
11 | depends on WEXT_CORE | ||
12 | |||
13 | config WEXT_SPY | ||
14 | bool | ||
15 | |||
16 | config WEXT_PRIV | ||
17 | bool | ||
18 | |||
1 | config CFG80211 | 19 | config CFG80211 |
2 | tristate "cfg80211 - wireless configuration API" | 20 | tristate "cfg80211 - wireless configuration API" |
3 | depends on RFKILL || !RFKILL | 21 | depends on RFKILL || !RFKILL |
@@ -56,6 +74,12 @@ config CFG80211_REG_DEBUG | |||
56 | 74 | ||
57 | If unsure, say N. | 75 | If unsure, say N. |
58 | 76 | ||
77 | config CFG80211_DEFAULT_PS_VALUE | ||
78 | int | ||
79 | default 1 if CFG80211_DEFAULT_PS | ||
80 | default 0 | ||
81 | depends on CFG80211 | ||
82 | |||
59 | config CFG80211_DEFAULT_PS | 83 | config CFG80211_DEFAULT_PS |
60 | bool "enable powersave by default" | 84 | bool "enable powersave by default" |
61 | depends on CFG80211 | 85 | depends on CFG80211 |
@@ -67,14 +91,10 @@ config CFG80211_DEFAULT_PS | |||
67 | applications instead -- they need to register their network | 91 | applications instead -- they need to register their network |
68 | latency requirement, see Documentation/power/pm_qos_interface.txt. | 92 | latency requirement, see Documentation/power/pm_qos_interface.txt. |
69 | 93 | ||
70 | config CFG80211_DEFAULT_PS_VALUE | ||
71 | int | ||
72 | default 1 if CFG80211_DEFAULT_PS | ||
73 | default 0 | ||
74 | |||
75 | config CFG80211_DEBUGFS | 94 | config CFG80211_DEBUGFS |
76 | bool "cfg80211 DebugFS entries" | 95 | bool "cfg80211 DebugFS entries" |
77 | depends on CFG80211 && DEBUG_FS | 96 | depends on CFG80211 |
97 | depends on DEBUG_FS | ||
78 | ---help--- | 98 | ---help--- |
79 | You can enable this if you want to debugfs entries for cfg80211. | 99 | You can enable this if you want to debugfs entries for cfg80211. |
80 | 100 | ||
@@ -83,6 +103,7 @@ config CFG80211_DEBUGFS | |||
83 | config WIRELESS_OLD_REGULATORY | 103 | config WIRELESS_OLD_REGULATORY |
84 | bool "Old wireless static regulatory definitions" | 104 | bool "Old wireless static regulatory definitions" |
85 | default n | 105 | default n |
106 | depends on CFG80211 | ||
86 | ---help--- | 107 | ---help--- |
87 | This option enables the old static regulatory information | 108 | This option enables the old static regulatory information |
88 | and uses it within the new framework. This option is available | 109 | and uses it within the new framework. This option is available |
@@ -94,20 +115,19 @@ config WIRELESS_OLD_REGULATORY | |||
94 | 115 | ||
95 | Say N and if you say Y, please tell us why. The default is N. | 116 | Say N and if you say Y, please tell us why. The default is N. |
96 | 117 | ||
97 | config WIRELESS_EXT | 118 | config CFG80211_WEXT |
98 | bool "Wireless extensions" | 119 | bool "cfg80211 wireless extensions compatibility" |
120 | depends on CFG80211 | ||
121 | select WEXT_CORE | ||
99 | default y | 122 | default y |
100 | ---help--- | 123 | help |
101 | This option enables the legacy wireless extensions | 124 | Enable this option if you need old userspace for wireless |
102 | (wireless network interface configuration via ioctls.) | 125 | extensions with cfg80211-based drivers. |
103 | |||
104 | Say Y unless you've upgraded all your userspace to use | ||
105 | nl80211 instead of wireless extensions. | ||
106 | 126 | ||
107 | config WIRELESS_EXT_SYSFS | 127 | config WIRELESS_EXT_SYSFS |
108 | bool "Wireless extensions sysfs files" | 128 | bool "Wireless extensions sysfs files" |
109 | default y | 129 | default y |
110 | depends on WIRELESS_EXT && SYSFS | 130 | depends on WEXT_CORE && SYSFS |
111 | help | 131 | help |
112 | This option enables the deprecated wireless statistics | 132 | This option enables the deprecated wireless statistics |
113 | files in /sys/class/net/*/wireless/. The same information | 133 | files in /sys/class/net/*/wireless/. The same information |
diff --git a/net/wireless/Makefile b/net/wireless/Makefile index 3ecaa9179977..f07c8dc7aab2 100644 --- a/net/wireless/Makefile +++ b/net/wireless/Makefile | |||
@@ -1,13 +1,17 @@ | |||
1 | obj-$(CONFIG_WIRELESS_EXT) += wext.o | ||
2 | obj-$(CONFIG_CFG80211) += cfg80211.o | 1 | obj-$(CONFIG_CFG80211) += cfg80211.o |
3 | obj-$(CONFIG_LIB80211) += lib80211.o | 2 | obj-$(CONFIG_LIB80211) += lib80211.o |
4 | obj-$(CONFIG_LIB80211_CRYPT_WEP) += lib80211_crypt_wep.o | 3 | obj-$(CONFIG_LIB80211_CRYPT_WEP) += lib80211_crypt_wep.o |
5 | obj-$(CONFIG_LIB80211_CRYPT_CCMP) += lib80211_crypt_ccmp.o | 4 | obj-$(CONFIG_LIB80211_CRYPT_CCMP) += lib80211_crypt_ccmp.o |
6 | obj-$(CONFIG_LIB80211_CRYPT_TKIP) += lib80211_crypt_tkip.o | 5 | obj-$(CONFIG_LIB80211_CRYPT_TKIP) += lib80211_crypt_tkip.o |
7 | 6 | ||
7 | obj-$(CONFIG_WEXT_CORE) += wext-core.o | ||
8 | obj-$(CONFIG_WEXT_PROC) += wext-proc.o | ||
9 | obj-$(CONFIG_WEXT_SPY) += wext-spy.o | ||
10 | obj-$(CONFIG_WEXT_PRIV) += wext-priv.o | ||
11 | |||
8 | cfg80211-y += core.o sysfs.o radiotap.o util.o reg.o scan.o nl80211.o | 12 | cfg80211-y += core.o sysfs.o radiotap.o util.o reg.o scan.o nl80211.o |
9 | cfg80211-y += mlme.o ibss.o sme.o chan.o | 13 | cfg80211-y += mlme.o ibss.o sme.o chan.o ethtool.o |
10 | cfg80211-$(CONFIG_CFG80211_DEBUGFS) += debugfs.o | 14 | cfg80211-$(CONFIG_CFG80211_DEBUGFS) += debugfs.o |
11 | cfg80211-$(CONFIG_WIRELESS_EXT) += wext-compat.o wext-sme.o | 15 | cfg80211-$(CONFIG_CFG80211_WEXT) += wext-compat.o wext-sme.o |
12 | 16 | ||
13 | ccflags-y += -D__CHECK_ENDIAN__ | 17 | ccflags-y += -D__CHECK_ENDIAN__ |
diff --git a/net/wireless/core.c b/net/wireless/core.c index 45b2be3274db..07252967be9c 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include "sysfs.h" | 21 | #include "sysfs.h" |
22 | #include "debugfs.h" | 22 | #include "debugfs.h" |
23 | #include "wext-compat.h" | 23 | #include "wext-compat.h" |
24 | #include "ethtool.h" | ||
24 | 25 | ||
25 | /* name for sysfs, %d is appended */ | 26 | /* name for sysfs, %d is appended */ |
26 | #define PHY_NAME "phy" | 27 | #define PHY_NAME "phy" |
@@ -358,6 +359,10 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv) | |||
358 | INIT_LIST_HEAD(&rdev->bss_list); | 359 | INIT_LIST_HEAD(&rdev->bss_list); |
359 | INIT_WORK(&rdev->scan_done_wk, __cfg80211_scan_done); | 360 | INIT_WORK(&rdev->scan_done_wk, __cfg80211_scan_done); |
360 | 361 | ||
362 | #ifdef CONFIG_CFG80211_WEXT | ||
363 | rdev->wiphy.wext = &cfg80211_wext_handler; | ||
364 | #endif | ||
365 | |||
361 | device_initialize(&rdev->wiphy.dev); | 366 | device_initialize(&rdev->wiphy.dev); |
362 | rdev->wiphy.dev.class = &ieee80211_class; | 367 | rdev->wiphy.dev.class = &ieee80211_class; |
363 | rdev->wiphy.dev.platform_data = rdev; | 368 | rdev->wiphy.dev.platform_data = rdev; |
@@ -625,6 +630,10 @@ static void wdev_cleanup_work(struct work_struct *work) | |||
625 | dev_put(wdev->netdev); | 630 | dev_put(wdev->netdev); |
626 | } | 631 | } |
627 | 632 | ||
633 | static struct device_type wiphy_type = { | ||
634 | .name = "wlan", | ||
635 | }; | ||
636 | |||
628 | static int cfg80211_netdev_notifier_call(struct notifier_block * nb, | 637 | static int cfg80211_netdev_notifier_call(struct notifier_block * nb, |
629 | unsigned long state, | 638 | unsigned long state, |
630 | void *ndev) | 639 | void *ndev) |
@@ -641,6 +650,9 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb, | |||
641 | WARN_ON(wdev->iftype == NL80211_IFTYPE_UNSPECIFIED); | 650 | WARN_ON(wdev->iftype == NL80211_IFTYPE_UNSPECIFIED); |
642 | 651 | ||
643 | switch (state) { | 652 | switch (state) { |
653 | case NETDEV_POST_INIT: | ||
654 | SET_NETDEV_DEVTYPE(dev, &wiphy_type); | ||
655 | break; | ||
644 | case NETDEV_REGISTER: | 656 | case NETDEV_REGISTER: |
645 | /* | 657 | /* |
646 | * NB: cannot take rdev->mtx here because this may be | 658 | * NB: cannot take rdev->mtx here because this may be |
@@ -665,9 +677,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb, | |||
665 | wdev->netdev = dev; | 677 | wdev->netdev = dev; |
666 | wdev->sme_state = CFG80211_SME_IDLE; | 678 | wdev->sme_state = CFG80211_SME_IDLE; |
667 | mutex_unlock(&rdev->devlist_mtx); | 679 | mutex_unlock(&rdev->devlist_mtx); |
668 | #ifdef CONFIG_WIRELESS_EXT | 680 | #ifdef CONFIG_CFG80211_WEXT |
669 | if (!dev->wireless_handlers) | ||
670 | dev->wireless_handlers = &cfg80211_wext_handler; | ||
671 | wdev->wext.default_key = -1; | 681 | wdev->wext.default_key = -1; |
672 | wdev->wext.default_mgmt_key = -1; | 682 | wdev->wext.default_mgmt_key = -1; |
673 | wdev->wext.connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC; | 683 | wdev->wext.connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC; |
@@ -681,6 +691,8 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb, | |||
681 | wdev->wext.ps = false; | 691 | wdev->wext.ps = false; |
682 | } | 692 | } |
683 | #endif | 693 | #endif |
694 | if (!dev->ethtool_ops) | ||
695 | dev->ethtool_ops = &cfg80211_ethtool_ops; | ||
684 | break; | 696 | break; |
685 | case NETDEV_GOING_DOWN: | 697 | case NETDEV_GOING_DOWN: |
686 | switch (wdev->iftype) { | 698 | switch (wdev->iftype) { |
@@ -689,7 +701,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb, | |||
689 | break; | 701 | break; |
690 | case NL80211_IFTYPE_STATION: | 702 | case NL80211_IFTYPE_STATION: |
691 | wdev_lock(wdev); | 703 | wdev_lock(wdev); |
692 | #ifdef CONFIG_WIRELESS_EXT | 704 | #ifdef CONFIG_CFG80211_WEXT |
693 | kfree(wdev->wext.ie); | 705 | kfree(wdev->wext.ie); |
694 | wdev->wext.ie = NULL; | 706 | wdev->wext.ie = NULL; |
695 | wdev->wext.ie_len = 0; | 707 | wdev->wext.ie_len = 0; |
@@ -721,7 +733,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb, | |||
721 | mutex_unlock(&rdev->devlist_mtx); | 733 | mutex_unlock(&rdev->devlist_mtx); |
722 | dev_put(dev); | 734 | dev_put(dev); |
723 | } | 735 | } |
724 | #ifdef CONFIG_WIRELESS_EXT | 736 | #ifdef CONFIG_CFG80211_WEXT |
725 | cfg80211_lock_rdev(rdev); | 737 | cfg80211_lock_rdev(rdev); |
726 | mutex_lock(&rdev->devlist_mtx); | 738 | mutex_lock(&rdev->devlist_mtx); |
727 | wdev_lock(wdev); | 739 | wdev_lock(wdev); |
@@ -759,7 +771,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb, | |||
759 | sysfs_remove_link(&dev->dev.kobj, "phy80211"); | 771 | sysfs_remove_link(&dev->dev.kobj, "phy80211"); |
760 | list_del_init(&wdev->list); | 772 | list_del_init(&wdev->list); |
761 | rdev->devlist_generation++; | 773 | rdev->devlist_generation++; |
762 | #ifdef CONFIG_WIRELESS_EXT | 774 | #ifdef CONFIG_CFG80211_WEXT |
763 | kfree(wdev->wext.keys); | 775 | kfree(wdev->wext.keys); |
764 | #endif | 776 | #endif |
765 | } | 777 | } |
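[Editor's note] Beyond the CONFIG_WIRELESS_EXT to CONFIG_CFG80211_WEXT renames, core.c now (a) points each new wiphy at cfg80211_wext_handler instead of patching dev->wireless_handlers at NETDEV_REGISTER time, (b) tags wireless netdevs with a "wlan" device type at NETDEV_POST_INIT so the uevent carries DEVTYPE=wlan from the start, and (c) installs cfg80211_ethtool_ops as a fallback when the driver supplies none. A condensed sketch of that notifier shape, assuming (as in this kernel generation) the notifier receives the net_device pointer directly:

	/* Sketch, not the full cfg80211 notifier: tag the device type early
	 * and fall back to the generic ethtool ops at registration time. */
	static struct device_type sketch_wlan_type = { .name = "wlan" };

	static int sketch_netdev_event(struct notifier_block *nb,
				       unsigned long state, void *ndev)
	{
		struct net_device *dev = ndev;

		switch (state) {
		case NETDEV_POST_INIT:
			/* before the device becomes visible in sysfs */
			SET_NETDEV_DEVTYPE(dev, &sketch_wlan_type);
			break;
		case NETDEV_REGISTER:
			if (!dev->ethtool_ops)
				dev->ethtool_ops = &cfg80211_ethtool_ops;
			break;
		}
		return NOTIFY_DONE;
	}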
diff --git a/net/wireless/ethtool.c b/net/wireless/ethtool.c new file mode 100644 index 000000000000..ca4c825be93d --- /dev/null +++ b/net/wireless/ethtool.c | |||
@@ -0,0 +1,45 @@ | |||
1 | #include <linux/utsname.h> | ||
2 | #include <net/cfg80211.h> | ||
3 | #include "ethtool.h" | ||
4 | |||
5 | static void cfg80211_get_drvinfo(struct net_device *dev, | ||
6 | struct ethtool_drvinfo *info) | ||
7 | { | ||
8 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
9 | |||
10 | strlcpy(info->driver, wiphy_dev(wdev->wiphy)->driver->name, | ||
11 | sizeof(info->driver)); | ||
12 | |||
13 | strlcpy(info->version, init_utsname()->release, sizeof(info->version)); | ||
14 | |||
15 | if (wdev->wiphy->fw_version[0]) | ||
16 | strncpy(info->fw_version, wdev->wiphy->fw_version, | ||
17 | sizeof(info->fw_version)); | ||
18 | else | ||
19 | strncpy(info->fw_version, "N/A", sizeof(info->fw_version)); | ||
20 | |||
21 | strlcpy(info->bus_info, dev_name(wiphy_dev(wdev->wiphy)), | ||
22 | sizeof(info->bus_info)); | ||
23 | } | ||
24 | |||
25 | static int cfg80211_get_regs_len(struct net_device *dev) | ||
26 | { | ||
27 | /* For now, return 0... */ | ||
28 | return 0; | ||
29 | } | ||
30 | |||
31 | static void cfg80211_get_regs(struct net_device *dev, struct ethtool_regs *regs, | ||
32 | void *data) | ||
33 | { | ||
34 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
35 | |||
36 | regs->version = wdev->wiphy->hw_version; | ||
37 | regs->len = 0; | ||
38 | } | ||
39 | |||
40 | const struct ethtool_ops cfg80211_ethtool_ops = { | ||
41 | .get_drvinfo = cfg80211_get_drvinfo, | ||
42 | .get_regs_len = cfg80211_get_regs_len, | ||
43 | .get_regs = cfg80211_get_regs, | ||
44 | .get_link = ethtool_op_get_link, | ||
45 | }; | ||
diff --git a/net/wireless/ethtool.h b/net/wireless/ethtool.h new file mode 100644 index 000000000000..695ecad20bd6 --- /dev/null +++ b/net/wireless/ethtool.h | |||
@@ -0,0 +1,6 @@ | |||
1 | #ifndef __CFG80211_ETHTOOL__ | ||
2 | #define __CFG80211_ETHTOOL__ | ||
3 | |||
4 | extern const struct ethtool_ops cfg80211_ethtool_ops; | ||
5 | |||
6 | #endif /* __CFG80211_ETHTOOL__ */ | ||
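[Editor's note] The new net/wireless/ethtool.c gives every cfg80211 netdev a baseline ethtool implementation: driver name taken from the wiphy's parent device driver, the running kernel release as the version, the wiphy fw_version string, and the bus name. From userspace this surfaces through the usual SIOCETHTOOL ioctl; a rough illustration of what `ethtool -i wlan0` boils down to (the interface name "wlan0" is an assumption):

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	/* Ask the kernel for the drvinfo block that cfg80211_get_drvinfo()
	 * fills in for cfg80211-based interfaces. */
	int main(void)
	{
		struct ethtool_drvinfo info;
		struct ifreq ifr;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		memset(&info, 0, sizeof(info));
		info.cmd = ETHTOOL_GDRVINFO;
		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "wlan0", IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&info;

		if (fd >= 0 && ioctl(fd, SIOCETHTOOL, &ifr) == 0)
			printf("driver: %s\nfirmware-version: %s\nbus-info: %s\n",
			       info.driver, info.fw_version, info.bus_info);
		if (fd >= 0)
			close(fd);
		return 0;
	}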
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c index c88338911979..39b6d92e2828 100644 --- a/net/wireless/ibss.c +++ b/net/wireless/ibss.c | |||
@@ -15,7 +15,7 @@ void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid) | |||
15 | { | 15 | { |
16 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 16 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
17 | struct cfg80211_bss *bss; | 17 | struct cfg80211_bss *bss; |
18 | #ifdef CONFIG_WIRELESS_EXT | 18 | #ifdef CONFIG_CFG80211_WEXT |
19 | union iwreq_data wrqu; | 19 | union iwreq_data wrqu; |
20 | #endif | 20 | #endif |
21 | 21 | ||
@@ -44,7 +44,7 @@ void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid) | |||
44 | 44 | ||
45 | nl80211_send_ibss_bssid(wiphy_to_dev(wdev->wiphy), dev, bssid, | 45 | nl80211_send_ibss_bssid(wiphy_to_dev(wdev->wiphy), dev, bssid, |
46 | GFP_KERNEL); | 46 | GFP_KERNEL); |
47 | #ifdef CONFIG_WIRELESS_EXT | 47 | #ifdef CONFIG_CFG80211_WEXT |
48 | memset(&wrqu, 0, sizeof(wrqu)); | 48 | memset(&wrqu, 0, sizeof(wrqu)); |
49 | memcpy(wrqu.ap_addr.sa_data, bssid, ETH_ALEN); | 49 | memcpy(wrqu.ap_addr.sa_data, bssid, ETH_ALEN); |
50 | wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL); | 50 | wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL); |
@@ -96,7 +96,7 @@ int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev, | |||
96 | kfree(wdev->connect_keys); | 96 | kfree(wdev->connect_keys); |
97 | wdev->connect_keys = connkeys; | 97 | wdev->connect_keys = connkeys; |
98 | 98 | ||
99 | #ifdef CONFIG_WIRELESS_EXT | 99 | #ifdef CONFIG_CFG80211_WEXT |
100 | wdev->wext.ibss.channel = params->channel; | 100 | wdev->wext.ibss.channel = params->channel; |
101 | #endif | 101 | #endif |
102 | err = rdev->ops->join_ibss(&rdev->wiphy, dev, params); | 102 | err = rdev->ops->join_ibss(&rdev->wiphy, dev, params); |
@@ -154,7 +154,7 @@ static void __cfg80211_clear_ibss(struct net_device *dev, bool nowext) | |||
154 | 154 | ||
155 | wdev->current_bss = NULL; | 155 | wdev->current_bss = NULL; |
156 | wdev->ssid_len = 0; | 156 | wdev->ssid_len = 0; |
157 | #ifdef CONFIG_WIRELESS_EXT | 157 | #ifdef CONFIG_CFG80211_WEXT |
158 | if (!nowext) | 158 | if (!nowext) |
159 | wdev->wext.ibss.ssid_len = 0; | 159 | wdev->wext.ibss.ssid_len = 0; |
160 | #endif | 160 | #endif |
@@ -203,7 +203,7 @@ int cfg80211_leave_ibss(struct cfg80211_registered_device *rdev, | |||
203 | return err; | 203 | return err; |
204 | } | 204 | } |
205 | 205 | ||
206 | #ifdef CONFIG_WIRELESS_EXT | 206 | #ifdef CONFIG_CFG80211_WEXT |
207 | int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev, | 207 | int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev, |
208 | struct wireless_dev *wdev) | 208 | struct wireless_dev *wdev) |
209 | { | 209 | { |
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c index 0a6b7a0eca6b..83c2a288dc63 100644 --- a/net/wireless/mlme.c +++ b/net/wireless/mlme.c | |||
@@ -130,7 +130,7 @@ void cfg80211_send_rx_assoc(struct net_device *dev, const u8 *buf, size_t len) | |||
130 | } | 130 | } |
131 | EXPORT_SYMBOL(cfg80211_send_rx_assoc); | 131 | EXPORT_SYMBOL(cfg80211_send_rx_assoc); |
132 | 132 | ||
133 | static void __cfg80211_send_deauth(struct net_device *dev, | 133 | void __cfg80211_send_deauth(struct net_device *dev, |
134 | const u8 *buf, size_t len) | 134 | const u8 *buf, size_t len) |
135 | { | 135 | { |
136 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 136 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
@@ -139,7 +139,6 @@ static void __cfg80211_send_deauth(struct net_device *dev, | |||
139 | struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf; | 139 | struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf; |
140 | const u8 *bssid = mgmt->bssid; | 140 | const u8 *bssid = mgmt->bssid; |
141 | int i; | 141 | int i; |
142 | bool done = false; | ||
143 | 142 | ||
144 | ASSERT_WDEV_LOCK(wdev); | 143 | ASSERT_WDEV_LOCK(wdev); |
145 | 144 | ||
@@ -147,7 +146,6 @@ static void __cfg80211_send_deauth(struct net_device *dev, | |||
147 | 146 | ||
148 | if (wdev->current_bss && | 147 | if (wdev->current_bss && |
149 | memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0) { | 148 | memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0) { |
150 | done = true; | ||
151 | cfg80211_unhold_bss(wdev->current_bss); | 149 | cfg80211_unhold_bss(wdev->current_bss); |
152 | cfg80211_put_bss(&wdev->current_bss->pub); | 150 | cfg80211_put_bss(&wdev->current_bss->pub); |
153 | wdev->current_bss = NULL; | 151 | wdev->current_bss = NULL; |
@@ -157,7 +155,6 @@ static void __cfg80211_send_deauth(struct net_device *dev, | |||
157 | cfg80211_unhold_bss(wdev->auth_bsses[i]); | 155 | cfg80211_unhold_bss(wdev->auth_bsses[i]); |
158 | cfg80211_put_bss(&wdev->auth_bsses[i]->pub); | 156 | cfg80211_put_bss(&wdev->auth_bsses[i]->pub); |
159 | wdev->auth_bsses[i] = NULL; | 157 | wdev->auth_bsses[i] = NULL; |
160 | done = true; | ||
161 | break; | 158 | break; |
162 | } | 159 | } |
163 | if (wdev->authtry_bsses[i] && | 160 | if (wdev->authtry_bsses[i] && |
@@ -165,13 +162,10 @@ static void __cfg80211_send_deauth(struct net_device *dev, | |||
165 | cfg80211_unhold_bss(wdev->authtry_bsses[i]); | 162 | cfg80211_unhold_bss(wdev->authtry_bsses[i]); |
166 | cfg80211_put_bss(&wdev->authtry_bsses[i]->pub); | 163 | cfg80211_put_bss(&wdev->authtry_bsses[i]->pub); |
167 | wdev->authtry_bsses[i] = NULL; | 164 | wdev->authtry_bsses[i] = NULL; |
168 | done = true; | ||
169 | break; | 165 | break; |
170 | } | 166 | } |
171 | } | 167 | } |
172 | 168 | ||
173 | WARN_ON(!done); | ||
174 | |||
175 | if (wdev->sme_state == CFG80211_SME_CONNECTED) { | 169 | if (wdev->sme_state == CFG80211_SME_CONNECTED) { |
176 | u16 reason_code; | 170 | u16 reason_code; |
177 | bool from_ap; | 171 | bool from_ap; |
@@ -186,27 +180,19 @@ static void __cfg80211_send_deauth(struct net_device *dev, | |||
186 | false, NULL); | 180 | false, NULL); |
187 | } | 181 | } |
188 | } | 182 | } |
183 | EXPORT_SYMBOL(__cfg80211_send_deauth); | ||
189 | 184 | ||
190 | 185 | void cfg80211_send_deauth(struct net_device *dev, const u8 *buf, size_t len) | |
191 | void cfg80211_send_deauth(struct net_device *dev, const u8 *buf, size_t len, | ||
192 | void *cookie) | ||
193 | { | 186 | { |
194 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 187 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
195 | 188 | ||
196 | BUG_ON(cookie && wdev != cookie); | 189 | wdev_lock(wdev); |
197 | 190 | __cfg80211_send_deauth(dev, buf, len); | |
198 | if (cookie) { | 191 | wdev_unlock(wdev); |
199 | /* called within callback */ | ||
200 | __cfg80211_send_deauth(dev, buf, len); | ||
201 | } else { | ||
202 | wdev_lock(wdev); | ||
203 | __cfg80211_send_deauth(dev, buf, len); | ||
204 | wdev_unlock(wdev); | ||
205 | } | ||
206 | } | 192 | } |
207 | EXPORT_SYMBOL(cfg80211_send_deauth); | 193 | EXPORT_SYMBOL(cfg80211_send_deauth); |
208 | 194 | ||
209 | static void __cfg80211_send_disassoc(struct net_device *dev, | 195 | void __cfg80211_send_disassoc(struct net_device *dev, |
210 | const u8 *buf, size_t len) | 196 | const u8 *buf, size_t len) |
211 | { | 197 | { |
212 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 198 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
@@ -247,22 +233,15 @@ static void __cfg80211_send_disassoc(struct net_device *dev, | |||
247 | from_ap = memcmp(mgmt->sa, dev->dev_addr, ETH_ALEN) != 0; | 233 | from_ap = memcmp(mgmt->sa, dev->dev_addr, ETH_ALEN) != 0; |
248 | __cfg80211_disconnected(dev, NULL, 0, reason_code, from_ap); | 234 | __cfg80211_disconnected(dev, NULL, 0, reason_code, from_ap); |
249 | } | 235 | } |
236 | EXPORT_SYMBOL(__cfg80211_send_disassoc); | ||
250 | 237 | ||
251 | void cfg80211_send_disassoc(struct net_device *dev, const u8 *buf, size_t len, | 238 | void cfg80211_send_disassoc(struct net_device *dev, const u8 *buf, size_t len) |
252 | void *cookie) | ||
253 | { | 239 | { |
254 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 240 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
255 | 241 | ||
256 | BUG_ON(cookie && wdev != cookie); | 242 | wdev_lock(wdev); |
257 | 243 | __cfg80211_send_disassoc(dev, buf, len); | |
258 | if (cookie) { | 244 | wdev_unlock(wdev); |
259 | /* called within callback */ | ||
260 | __cfg80211_send_disassoc(dev, buf, len); | ||
261 | } else { | ||
262 | wdev_lock(wdev); | ||
263 | __cfg80211_send_disassoc(dev, buf, len); | ||
264 | wdev_unlock(wdev); | ||
265 | } | ||
266 | } | 245 | } |
267 | EXPORT_SYMBOL(cfg80211_send_disassoc); | 246 | EXPORT_SYMBOL(cfg80211_send_disassoc); |
268 | 247 | ||
@@ -340,7 +319,7 @@ void cfg80211_michael_mic_failure(struct net_device *dev, const u8 *addr, | |||
340 | { | 319 | { |
341 | struct wiphy *wiphy = dev->ieee80211_ptr->wiphy; | 320 | struct wiphy *wiphy = dev->ieee80211_ptr->wiphy; |
342 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); | 321 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); |
343 | #ifdef CONFIG_WIRELESS_EXT | 322 | #ifdef CONFIG_CFG80211_WEXT |
344 | union iwreq_data wrqu; | 323 | union iwreq_data wrqu; |
345 | char *buf = kmalloc(128, gfp); | 324 | char *buf = kmalloc(128, gfp); |
346 | 325 | ||
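[Editor's note] The mlme.c change exports the bare __cfg80211_send_deauth()/__cfg80211_send_disassoc() variants and drops the old cookie argument: callers that already hold the wdev lock use the double-underscore form, everyone else uses the wrapper that takes the lock, replacing the fragile "cookie tells us we are inside a callback" convention. The shape of that pattern, as a hedged sketch with illustrative names (ASSERT_WDEV_LOCK and wdev_lock are cfg80211-internal helpers):

	/* Sketch of the locked/unlocked split used above; names are
	 * illustrative.  __example_notify() requires the caller to hold
	 * the wdev lock. */
	static void __example_notify(struct wireless_dev *wdev)
	{
		ASSERT_WDEV_LOCK(wdev);
		/* ... update connection state, drop BSS references ... */
	}

	static void example_notify(struct wireless_dev *wdev)
	{
		wdev_lock(wdev);
		__example_notify(wdev);
		wdev_unlock(wdev);
	}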
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index ca3c92a0a14f..f48394126bf9 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -1264,7 +1264,7 @@ static int nl80211_set_key(struct sk_buff *skb, struct genl_info *info) | |||
1264 | if (!err) | 1264 | if (!err) |
1265 | err = func(&rdev->wiphy, dev, key.idx); | 1265 | err = func(&rdev->wiphy, dev, key.idx); |
1266 | 1266 | ||
1267 | #ifdef CONFIG_WIRELESS_EXT | 1267 | #ifdef CONFIG_CFG80211_WEXT |
1268 | if (!err) { | 1268 | if (!err) { |
1269 | if (func == rdev->ops->set_default_key) | 1269 | if (func == rdev->ops->set_default_key) |
1270 | dev->ieee80211_ptr->wext.default_key = key.idx; | 1270 | dev->ieee80211_ptr->wext.default_key = key.idx; |
@@ -1365,7 +1365,7 @@ static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info) | |||
1365 | if (!err) | 1365 | if (!err) |
1366 | err = rdev->ops->del_key(&rdev->wiphy, dev, key.idx, mac_addr); | 1366 | err = rdev->ops->del_key(&rdev->wiphy, dev, key.idx, mac_addr); |
1367 | 1367 | ||
1368 | #ifdef CONFIG_WIRELESS_EXT | 1368 | #ifdef CONFIG_CFG80211_WEXT |
1369 | if (!err) { | 1369 | if (!err) { |
1370 | if (key.idx == dev->ieee80211_ptr->wext.default_key) | 1370 | if (key.idx == dev->ieee80211_ptr->wext.default_key) |
1371 | dev->ieee80211_ptr->wext.default_key = -1; | 1371 | dev->ieee80211_ptr->wext.default_key = -1; |
@@ -3105,6 +3105,8 @@ static int nl80211_send_bss(struct sk_buff *msg, u32 pid, u32 seq, int flags, | |||
3105 | NLA_PUT_U16(msg, NL80211_BSS_BEACON_INTERVAL, res->beacon_interval); | 3105 | NLA_PUT_U16(msg, NL80211_BSS_BEACON_INTERVAL, res->beacon_interval); |
3106 | NLA_PUT_U16(msg, NL80211_BSS_CAPABILITY, res->capability); | 3106 | NLA_PUT_U16(msg, NL80211_BSS_CAPABILITY, res->capability); |
3107 | NLA_PUT_U32(msg, NL80211_BSS_FREQUENCY, res->channel->center_freq); | 3107 | NLA_PUT_U32(msg, NL80211_BSS_FREQUENCY, res->channel->center_freq); |
3108 | NLA_PUT_U32(msg, NL80211_BSS_SEEN_MS_AGO, | ||
3109 | jiffies_to_msecs(jiffies - intbss->ts)); | ||
3108 | 3110 | ||
3109 | switch (rdev->wiphy.signal_type) { | 3111 | switch (rdev->wiphy.signal_type) { |
3110 | case CFG80211_SIGNAL_TYPE_MBM: | 3112 | case CFG80211_SIGNAL_TYPE_MBM: |
diff --git a/net/wireless/scan.c b/net/wireless/scan.c index e5f92ee758f4..2e8c515f3c5c 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c | |||
@@ -22,7 +22,7 @@ void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak) | |||
22 | { | 22 | { |
23 | struct cfg80211_scan_request *request; | 23 | struct cfg80211_scan_request *request; |
24 | struct net_device *dev; | 24 | struct net_device *dev; |
25 | #ifdef CONFIG_WIRELESS_EXT | 25 | #ifdef CONFIG_CFG80211_WEXT |
26 | union iwreq_data wrqu; | 26 | union iwreq_data wrqu; |
27 | #endif | 27 | #endif |
28 | 28 | ||
@@ -47,7 +47,7 @@ void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak) | |||
47 | else | 47 | else |
48 | nl80211_send_scan_done(rdev, dev); | 48 | nl80211_send_scan_done(rdev, dev); |
49 | 49 | ||
50 | #ifdef CONFIG_WIRELESS_EXT | 50 | #ifdef CONFIG_CFG80211_WEXT |
51 | if (!request->aborted) { | 51 | if (!request->aborted) { |
52 | memset(&wrqu, 0, sizeof(wrqu)); | 52 | memset(&wrqu, 0, sizeof(wrqu)); |
53 | 53 | ||
@@ -592,7 +592,7 @@ void cfg80211_unlink_bss(struct wiphy *wiphy, struct cfg80211_bss *pub) | |||
592 | } | 592 | } |
593 | EXPORT_SYMBOL(cfg80211_unlink_bss); | 593 | EXPORT_SYMBOL(cfg80211_unlink_bss); |
594 | 594 | ||
595 | #ifdef CONFIG_WIRELESS_EXT | 595 | #ifdef CONFIG_CFG80211_WEXT |
596 | int cfg80211_wext_siwscan(struct net_device *dev, | 596 | int cfg80211_wext_siwscan(struct net_device *dev, |
597 | struct iw_request_info *info, | 597 | struct iw_request_info *info, |
598 | union iwreq_data *wrqu, char *extra) | 598 | union iwreq_data *wrqu, char *extra) |
diff --git a/net/wireless/sme.c b/net/wireless/sme.c index ece378d531ef..98a3b7efac4c 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c | |||
@@ -362,7 +362,7 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid, | |||
362 | { | 362 | { |
363 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 363 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
364 | u8 *country_ie; | 364 | u8 *country_ie; |
365 | #ifdef CONFIG_WIRELESS_EXT | 365 | #ifdef CONFIG_CFG80211_WEXT |
366 | union iwreq_data wrqu; | 366 | union iwreq_data wrqu; |
367 | #endif | 367 | #endif |
368 | 368 | ||
@@ -379,7 +379,7 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid, | |||
379 | resp_ie, resp_ie_len, | 379 | resp_ie, resp_ie_len, |
380 | status, GFP_KERNEL); | 380 | status, GFP_KERNEL); |
381 | 381 | ||
382 | #ifdef CONFIG_WIRELESS_EXT | 382 | #ifdef CONFIG_CFG80211_WEXT |
383 | if (wextev) { | 383 | if (wextev) { |
384 | if (req_ie && status == WLAN_STATUS_SUCCESS) { | 384 | if (req_ie && status == WLAN_STATUS_SUCCESS) { |
385 | memset(&wrqu, 0, sizeof(wrqu)); | 385 | memset(&wrqu, 0, sizeof(wrqu)); |
@@ -494,7 +494,7 @@ void __cfg80211_roamed(struct wireless_dev *wdev, const u8 *bssid, | |||
494 | const u8 *resp_ie, size_t resp_ie_len) | 494 | const u8 *resp_ie, size_t resp_ie_len) |
495 | { | 495 | { |
496 | struct cfg80211_bss *bss; | 496 | struct cfg80211_bss *bss; |
497 | #ifdef CONFIG_WIRELESS_EXT | 497 | #ifdef CONFIG_CFG80211_WEXT |
498 | union iwreq_data wrqu; | 498 | union iwreq_data wrqu; |
499 | #endif | 499 | #endif |
500 | 500 | ||
@@ -529,7 +529,7 @@ void __cfg80211_roamed(struct wireless_dev *wdev, const u8 *bssid, | |||
529 | req_ie, req_ie_len, resp_ie, resp_ie_len, | 529 | req_ie, req_ie_len, resp_ie, resp_ie_len, |
530 | GFP_KERNEL); | 530 | GFP_KERNEL); |
531 | 531 | ||
532 | #ifdef CONFIG_WIRELESS_EXT | 532 | #ifdef CONFIG_CFG80211_WEXT |
533 | if (req_ie) { | 533 | if (req_ie) { |
534 | memset(&wrqu, 0, sizeof(wrqu)); | 534 | memset(&wrqu, 0, sizeof(wrqu)); |
535 | wrqu.data.length = req_ie_len; | 535 | wrqu.data.length = req_ie_len; |
@@ -590,7 +590,7 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie, | |||
590 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 590 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
591 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); | 591 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); |
592 | int i; | 592 | int i; |
593 | #ifdef CONFIG_WIRELESS_EXT | 593 | #ifdef CONFIG_CFG80211_WEXT |
594 | union iwreq_data wrqu; | 594 | union iwreq_data wrqu; |
595 | #endif | 595 | #endif |
596 | 596 | ||
@@ -648,7 +648,7 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie, | |||
648 | for (i = 0; i < 6; i++) | 648 | for (i = 0; i < 6; i++) |
649 | rdev->ops->del_key(wdev->wiphy, dev, i, NULL); | 649 | rdev->ops->del_key(wdev->wiphy, dev, i, NULL); |
650 | 650 | ||
651 | #ifdef CONFIG_WIRELESS_EXT | 651 | #ifdef CONFIG_CFG80211_WEXT |
652 | memset(&wrqu, 0, sizeof(wrqu)); | 652 | memset(&wrqu, 0, sizeof(wrqu)); |
653 | wrqu.ap_addr.sa_family = ARPHRD_ETHER; | 653 | wrqu.ap_addr.sa_family = ARPHRD_ETHER; |
654 | wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL); | 654 | wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL); |
diff --git a/net/wireless/wext.c b/net/wireless/wext-core.c index 60fe57761ca9..a4e5ddc8d4f5 100644 --- a/net/wireless/wext.c +++ b/net/wireless/wext-core.c | |||
@@ -1,112 +1,28 @@ | |||
1 | /* | 1 | /* |
2 | * This file implement the Wireless Extensions APIs. | 2 | * This file implement the Wireless Extensions core API. |
3 | * | 3 | * |
4 | * Authors : Jean Tourrilhes - HPL - <jt@hpl.hp.com> | 4 | * Authors : Jean Tourrilhes - HPL - <jt@hpl.hp.com> |
5 | * Copyright (c) 1997-2007 Jean Tourrilhes, All Rights Reserved. | 5 | * Copyright (c) 1997-2007 Jean Tourrilhes, All Rights Reserved. |
6 | * Copyright 2009 Johannes Berg <johannes@sipsolutions.net> | ||
6 | * | 7 | * |
7 | * (As all part of the Linux kernel, this file is GPL) | 8 | * (As all part of the Linux kernel, this file is GPL) |
8 | */ | 9 | */ |
9 | 10 | #include <linux/kernel.h> | |
10 | /************************** DOCUMENTATION **************************/ | 11 | #include <linux/netdevice.h> |
11 | /* | 12 | #include <linux/rtnetlink.h> |
12 | * API definition : | 13 | #include <linux/wireless.h> |
13 | * -------------- | 14 | #include <linux/uaccess.h> |
14 | * See <linux/wireless.h> for details of the APIs and the rest. | 15 | #include <net/cfg80211.h> |
15 | * | 16 | #include <net/iw_handler.h> |
16 | * History : | ||
17 | * ------- | ||
18 | * | ||
19 | * v1 - 5.12.01 - Jean II | ||
20 | * o Created this file. | ||
21 | * | ||
22 | * v2 - 13.12.01 - Jean II | ||
23 | * o Move /proc/net/wireless stuff from net/core/dev.c to here | ||
24 | * o Make Wireless Extension IOCTLs go through here | ||
25 | * o Added iw_handler handling ;-) | ||
26 | * o Added standard ioctl description | ||
27 | * o Initial dumb commit strategy based on orinoco.c | ||
28 | * | ||
29 | * v3 - 19.12.01 - Jean II | ||
30 | * o Make sure we don't go out of standard_ioctl[] in ioctl_standard_call | ||
31 | * o Add event dispatcher function | ||
32 | * o Add event description | ||
33 | * o Propagate events as rtnetlink IFLA_WIRELESS option | ||
34 | * o Generate event on selected SET requests | ||
35 | * | ||
36 | * v4 - 18.04.02 - Jean II | ||
37 | * o Fix stupid off by one in iw_ioctl_description : IW_ESSID_MAX_SIZE + 1 | ||
38 | * | ||
39 | * v5 - 21.06.02 - Jean II | ||
40 | * o Add IW_PRIV_TYPE_ADDR in priv_type_size (+cleanup) | ||
41 | * o Reshuffle IW_HEADER_TYPE_XXX to map IW_PRIV_TYPE_XXX changes | ||
42 | * o Add IWEVCUSTOM for driver specific event/scanning token | ||
43 | * o Turn on WE_STRICT_WRITE by default + kernel warning | ||
44 | * o Fix WE_STRICT_WRITE in ioctl_export_private() (32 => iw_num) | ||
45 | * o Fix off-by-one in test (extra_size <= IFNAMSIZ) | ||
46 | * | ||
47 | * v6 - 9.01.03 - Jean II | ||
48 | * o Add common spy support : iw_handler_set_spy(), wireless_spy_update() | ||
49 | * o Add enhanced spy support : iw_handler_set_thrspy() and event. | ||
50 | * o Add WIRELESS_EXT version display in /proc/net/wireless | ||
51 | * | ||
52 | * v6 - 18.06.04 - Jean II | ||
53 | * o Change get_spydata() method for added safety | ||
54 | * o Remove spy #ifdef, they are always on -> cleaner code | ||
55 | * o Allow any size GET request if user specifies length > max | ||
56 | * and if request has IW_DESCR_FLAG_NOMAX flag or is SIOCGIWPRIV | ||
57 | * o Start migrating get_wireless_stats to struct iw_handler_def | ||
58 | * o Add wmb() in iw_handler_set_spy() for non-coherent archs/cpus | ||
59 | * Based on patch from Pavel Roskin <proski@gnu.org> : | ||
60 | * o Fix kernel data leak to user space in private handler handling | ||
61 | * | ||
62 | * v7 - 18.3.05 - Jean II | ||
63 | * o Remove (struct iw_point *)->pointer from events and streams | ||
64 | * o Remove spy_offset from struct iw_handler_def | ||
65 | * o Start deprecating dev->get_wireless_stats, output a warning | ||
66 | * o If IW_QUAL_DBM is set, show dBm values in /proc/net/wireless | ||
67 | * o Don't lose INVALID/DBM flags when clearing UPDATED flags (iwstats) | ||
68 | * | ||
69 | * v8 - 17.02.06 - Jean II | ||
70 | * o RtNetlink requests support (SET/GET) | ||
71 | * | ||
72 | * v8b - 03.08.06 - Herbert Xu | ||
73 | * o Fix Wireless Event locking issues. | ||
74 | * | ||
75 | * v9 - 14.3.06 - Jean II | ||
76 | * o Change length in ESSID and NICK to strlen() instead of strlen()+1 | ||
77 | * o Make standard_ioctl_num and standard_event_num unsigned | ||
78 | * o Remove (struct net_device *)->get_wireless_stats() | ||
79 | * | ||
80 | * v10 - 16.3.07 - Jean II | ||
81 | * o Prevent leaking of kernel space in stream on 64 bits. | ||
82 | */ | ||
83 | |||
84 | /***************************** INCLUDES *****************************/ | ||
85 | |||
86 | #include <linux/module.h> | ||
87 | #include <linux/types.h> /* off_t */ | ||
88 | #include <linux/netdevice.h> /* struct ifreq, dev_get_by_name() */ | ||
89 | #include <linux/proc_fs.h> | ||
90 | #include <linux/rtnetlink.h> /* rtnetlink stuff */ | ||
91 | #include <linux/seq_file.h> | ||
92 | #include <linux/init.h> /* for __init */ | ||
93 | #include <linux/if_arp.h> /* ARPHRD_ETHER */ | ||
94 | #include <linux/etherdevice.h> /* compare_ether_addr */ | ||
95 | #include <linux/interrupt.h> | ||
96 | #include <net/net_namespace.h> | ||
97 | |||
98 | #include <linux/wireless.h> /* Pretty obvious */ | ||
99 | #include <net/iw_handler.h> /* New driver API */ | ||
100 | #include <net/netlink.h> | 17 | #include <net/netlink.h> |
101 | #include <net/wext.h> | 18 | #include <net/wext.h> |
19 | #include <net/net_namespace.h> | ||
20 | |||
21 | typedef int (*wext_ioctl_func)(struct net_device *, struct iwreq *, | ||
22 | unsigned int, struct iw_request_info *, | ||
23 | iw_handler); | ||
102 | 24 | ||
103 | #include <asm/uaccess.h> /* copy_to_user() */ | ||
104 | 25 | ||
105 | /************************* GLOBAL VARIABLES *************************/ | ||
106 | /* | ||
107 | * You should not use global variables, because of re-entrancy. | ||
108 | * On our case, it's only const, so it's OK... | ||
109 | */ | ||
110 | /* | 26 | /* |
111 | * Meta-data about all the standard Wireless Extension request we | 27 | * Meta-data about all the standard Wireless Extension request we |
112 | * know about. | 28 | * know about. |
@@ -390,18 +306,6 @@ static const struct iw_ioctl_description standard_event[] = { | |||
390 | }; | 306 | }; |
391 | static const unsigned standard_event_num = ARRAY_SIZE(standard_event); | 307 | static const unsigned standard_event_num = ARRAY_SIZE(standard_event); |
392 | 308 | ||
393 | /* Size (in bytes) of the various private data types */ | ||
394 | static const char iw_priv_type_size[] = { | ||
395 | 0, /* IW_PRIV_TYPE_NONE */ | ||
396 | 1, /* IW_PRIV_TYPE_BYTE */ | ||
397 | 1, /* IW_PRIV_TYPE_CHAR */ | ||
398 | 0, /* Not defined */ | ||
399 | sizeof(__u32), /* IW_PRIV_TYPE_INT */ | ||
400 | sizeof(struct iw_freq), /* IW_PRIV_TYPE_FLOAT */ | ||
401 | sizeof(struct sockaddr), /* IW_PRIV_TYPE_ADDR */ | ||
402 | 0, /* Not defined */ | ||
403 | }; | ||
404 | |||
405 | /* Size (in bytes) of various events */ | 309 | /* Size (in bytes) of various events */ |
406 | static const int event_type_size[] = { | 310 | static const int event_type_size[] = { |
407 | IW_EV_LCP_LEN, /* IW_HEADER_TYPE_NULL */ | 311 | IW_EV_LCP_LEN, /* IW_HEADER_TYPE_NULL */ |
@@ -433,323 +337,346 @@ static const int compat_event_type_size[] = { | |||
433 | }; | 337 | }; |
434 | #endif | 338 | #endif |
435 | 339 | ||
436 | /************************ COMMON SUBROUTINES ************************/ | ||
437 | /* | ||
438 | * Stuff that may be used in various place or doesn't fit in one | ||
439 | * of the section below. | ||
440 | */ | ||
441 | |||
442 | /* ---------------------------------------------------------------- */ | ||
443 | /* | ||
444 | * Return the driver handler associated with a specific Wireless Extension. | ||
445 | */ | ||
446 | static iw_handler get_handler(struct net_device *dev, unsigned int cmd) | ||
447 | { | ||
448 | /* Don't "optimise" the following variable, it will crash */ | ||
449 | unsigned int index; /* *MUST* be unsigned */ | ||
450 | 340 | ||
451 | /* Check if we have some wireless handlers defined */ | 341 | /* IW event code */ |
452 | if (dev->wireless_handlers == NULL) | ||
453 | return NULL; | ||
454 | |||
455 | /* Try as a standard command */ | ||
456 | index = cmd - SIOCIWFIRST; | ||
457 | if (index < dev->wireless_handlers->num_standard) | ||
458 | return dev->wireless_handlers->standard[index]; | ||
459 | |||
460 | /* Try as a private command */ | ||
461 | index = cmd - SIOCIWFIRSTPRIV; | ||
462 | if (index < dev->wireless_handlers->num_private) | ||
463 | return dev->wireless_handlers->private[index]; | ||
464 | 342 | ||
465 | /* Not found */ | 343 | static int __net_init wext_pernet_init(struct net *net) |
466 | return NULL; | ||
467 | } | ||
468 | |||
469 | /* ---------------------------------------------------------------- */ | ||
470 | /* | ||
471 | * Get statistics out of the driver | ||
472 | */ | ||
473 | struct iw_statistics *get_wireless_stats(struct net_device *dev) | ||
474 | { | 344 | { |
475 | /* New location */ | 345 | skb_queue_head_init(&net->wext_nlevents); |
476 | if ((dev->wireless_handlers != NULL) && | 346 | return 0; |
477 | (dev->wireless_handlers->get_wireless_stats != NULL)) | ||
478 | return dev->wireless_handlers->get_wireless_stats(dev); | ||
479 | |||
480 | /* Not found */ | ||
481 | return NULL; | ||
482 | } | 347 | } |
483 | 348 | ||
484 | /* ---------------------------------------------------------------- */ | 349 | static void __net_exit wext_pernet_exit(struct net *net) |
485 | /* | ||
486 | * Call the commit handler in the driver | ||
487 | * (if exist and if conditions are right) | ||
488 | * | ||
489 | * Note : our current commit strategy is currently pretty dumb, | ||
490 | * but we will be able to improve on that... | ||
491 | * The goal is to try to agreagate as many changes as possible | ||
492 | * before doing the commit. Drivers that will define a commit handler | ||
493 | * are usually those that need a reset after changing parameters, so | ||
494 | * we want to minimise the number of reset. | ||
495 | * A cool idea is to use a timer : at each "set" command, we re-set the | ||
496 | * timer, when the timer eventually fires, we call the driver. | ||
497 | * Hopefully, more on that later. | ||
498 | * | ||
499 | * Also, I'm waiting to see how many people will complain about the | ||
500 | * netif_running(dev) test. I'm open on that one... | ||
501 | * Hopefully, the driver will remember to do a commit in "open()" ;-) | ||
502 | */ | ||
503 | static int call_commit_handler(struct net_device *dev) | ||
504 | { | 350 | { |
505 | if ((netif_running(dev)) && | 351 | skb_queue_purge(&net->wext_nlevents); |
506 | (dev->wireless_handlers->standard[0] != NULL)) | ||
507 | /* Call the commit handler on the driver */ | ||
508 | return dev->wireless_handlers->standard[0](dev, NULL, | ||
509 | NULL, NULL); | ||
510 | else | ||
511 | return 0; /* Command completed successfully */ | ||
512 | } | 352 | } |
513 | 353 | ||
514 | /* ---------------------------------------------------------------- */ | 354 | static struct pernet_operations wext_pernet_ops = { |
515 | /* | 355 | .init = wext_pernet_init, |
516 | * Calculate size of private arguments | 356 | .exit = wext_pernet_exit, |
517 | */ | 357 | }; |
518 | static int get_priv_size(__u16 args) | ||
519 | { | ||
520 | int num = args & IW_PRIV_SIZE_MASK; | ||
521 | int type = (args & IW_PRIV_TYPE_MASK) >> 12; | ||
522 | 358 | ||
523 | return num * iw_priv_type_size[type]; | 359 | static int __init wireless_nlevent_init(void) |
360 | { | ||
361 | return register_pernet_subsys(&wext_pernet_ops); | ||
524 | } | 362 | } |
525 | 363 | ||
526 | /* ---------------------------------------------------------------- */ | 364 | subsys_initcall(wireless_nlevent_init); |
527 | /* | 365 | |
528 | * Re-calculate the size of private arguments | 366 | /* Process events generated by the wireless layer or the driver. */ |
529 | */ | 367 | static void wireless_nlevent_process(struct work_struct *work) |
530 | static int adjust_priv_size(__u16 args, struct iw_point *iwp) | ||
531 | { | 368 | { |
532 | int num = iwp->length; | 369 | struct sk_buff *skb; |
533 | int max = args & IW_PRIV_SIZE_MASK; | 370 | struct net *net; |
534 | int type = (args & IW_PRIV_TYPE_MASK) >> 12; | ||
535 | 371 | ||
536 | /* Make sure the driver doesn't goof up */ | 372 | rtnl_lock(); |
537 | if (max < num) | 373 | |
538 | num = max; | 374 | for_each_net(net) { |
375 | while ((skb = skb_dequeue(&net->wext_nlevents))) | ||
376 | rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, | ||
377 | GFP_KERNEL); | ||
378 | } | ||
539 | 379 | ||
540 | return num * iw_priv_type_size[type]; | 380 | rtnl_unlock(); |
541 | } | 381 | } |
542 | 382 | ||
543 | /* ---------------------------------------------------------------- */ | 383 | static DECLARE_WORK(wireless_nlevent_work, wireless_nlevent_process); |
544 | /* | 384 | |
545 | * Standard Wireless Handler : get wireless stats | 385 | static struct nlmsghdr *rtnetlink_ifinfo_prep(struct net_device *dev, |
546 | * Allow programatic access to /proc/net/wireless even if /proc | 386 | struct sk_buff *skb) |
547 | * doesn't exist... Also more efficient... | ||
548 | */ | ||
549 | static int iw_handler_get_iwstats(struct net_device * dev, | ||
550 | struct iw_request_info * info, | ||
551 | union iwreq_data * wrqu, | ||
552 | char * extra) | ||
553 | { | 387 | { |
554 | /* Get stats from the driver */ | 388 | struct ifinfomsg *r; |
555 | struct iw_statistics *stats; | 389 | struct nlmsghdr *nlh; |
556 | 390 | ||
557 | stats = get_wireless_stats(dev); | 391 | nlh = nlmsg_put(skb, 0, 0, RTM_NEWLINK, sizeof(*r), 0); |
558 | if (stats) { | 392 | if (!nlh) |
559 | /* Copy statistics to extra */ | 393 | return NULL; |
560 | memcpy(extra, stats, sizeof(struct iw_statistics)); | ||
561 | wrqu->data.length = sizeof(struct iw_statistics); | ||
562 | 394 | ||
563 | /* Check if we need to clear the updated flag */ | 395 | r = nlmsg_data(nlh); |
564 | if (wrqu->data.flags != 0) | 396 | r->ifi_family = AF_UNSPEC; |
565 | stats->qual.updated &= ~IW_QUAL_ALL_UPDATED; | 397 | r->__ifi_pad = 0; |
566 | return 0; | 398 | r->ifi_type = dev->type; |
567 | } else | 399 | r->ifi_index = dev->ifindex; |
568 | return -EOPNOTSUPP; | 400 | r->ifi_flags = dev_get_flags(dev); |
401 | r->ifi_change = 0; /* Wireless changes don't affect those flags */ | ||
402 | |||
403 | NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name); | ||
404 | |||
405 | return nlh; | ||
406 | nla_put_failure: | ||
407 | nlmsg_cancel(skb, nlh); | ||
408 | return NULL; | ||
569 | } | 409 | } |
570 | 410 | ||
571 | /* ---------------------------------------------------------------- */ | 411 | |
572 | /* | 412 | /* |
573 | * Standard Wireless Handler : get iwpriv definitions | 413 | * Main event dispatcher. Called from other parts and drivers. |
574 | * Export the driver private handler definition | 414 | * Send the event on the appropriate channels. |
575 | * They will be picked up by tools like iwpriv... | 415 | * May be called from interrupt context. |
576 | */ | 416 | */ |
577 | static int iw_handler_get_private(struct net_device * dev, | 417 | void wireless_send_event(struct net_device * dev, |
578 | struct iw_request_info * info, | 418 | unsigned int cmd, |
579 | union iwreq_data * wrqu, | 419 | union iwreq_data * wrqu, |
580 | char * extra) | 420 | const char * extra) |
581 | { | 421 | { |
582 | /* Check if the driver has something to export */ | 422 | const struct iw_ioctl_description * descr = NULL; |
583 | if ((dev->wireless_handlers->num_private_args == 0) || | 423 | int extra_len = 0; |
584 | (dev->wireless_handlers->private_args == NULL)) | 424 | struct iw_event *event; /* Mallocated whole event */ |
585 | return -EOPNOTSUPP; | 425 | int event_len; /* Its size */ |
426 | int hdr_len; /* Size of the event header */ | ||
427 | int wrqu_off = 0; /* Offset in wrqu */ | ||
428 | /* Don't "optimise" the following variable, it will crash */ | ||
429 | unsigned cmd_index; /* *MUST* be unsigned */ | ||
430 | struct sk_buff *skb; | ||
431 | struct nlmsghdr *nlh; | ||
432 | struct nlattr *nla; | ||
433 | #ifdef CONFIG_COMPAT | ||
434 | struct __compat_iw_event *compat_event; | ||
435 | struct compat_iw_point compat_wrqu; | ||
436 | struct sk_buff *compskb; | ||
437 | #endif | ||
586 | 438 | ||
587 | /* Check if there is enough buffer up there */ | 439 | /* |
588 | if (wrqu->data.length < dev->wireless_handlers->num_private_args) { | 440 | * Nothing in the kernel sends scan events with data, be safe. |
589 | /* User space can't know in advance how large the buffer | 441 | * This is necessary because we cannot fix up scan event data |
590 | * needs to be. Give it a hint, so that we can support | 442 | * for compat, due to being contained in 'extra', but normally |
591 | * any size buffer we want somewhat efficiently... */ | 443 | * applications are required to retrieve the scan data anyway |
592 | wrqu->data.length = dev->wireless_handlers->num_private_args; | 444 | * and no data is included in the event, this codifies that |
593 | return -E2BIG; | 445 | * practice. |
446 | */ | ||
447 | if (WARN_ON(cmd == SIOCGIWSCAN && extra)) | ||
448 | extra = NULL; | ||
449 | |||
450 | /* Get the description of the Event */ | ||
451 | if (cmd <= SIOCIWLAST) { | ||
452 | cmd_index = cmd - SIOCIWFIRST; | ||
453 | if (cmd_index < standard_ioctl_num) | ||
454 | descr = &(standard_ioctl[cmd_index]); | ||
455 | } else { | ||
456 | cmd_index = cmd - IWEVFIRST; | ||
457 | if (cmd_index < standard_event_num) | ||
458 | descr = &(standard_event[cmd_index]); | ||
459 | } | ||
460 | /* Don't accept unknown events */ | ||
461 | if (descr == NULL) { | ||
462 | /* Note : we don't return an error to the driver, because | ||
463 | * the driver would not know what to do about it. It can't | ||
464 | * return an error to the user, because the event is not | ||
465 | * initiated by a user request. | ||
466 | * The best the driver could do is to log an error message. | ||
467 | * We will do it ourselves instead... | ||
468 | */ | ||
469 | printk(KERN_ERR "%s (WE) : Invalid/Unknown Wireless Event (0x%04X)\n", | ||
470 | dev->name, cmd); | ||
471 | return; | ||
594 | } | 472 | } |
595 | 473 | ||
596 | /* Set the number of available ioctls. */ | 474 | /* Check extra parameters and set extra_len */ |
597 | wrqu->data.length = dev->wireless_handlers->num_private_args; | 475 | if (descr->header_type == IW_HEADER_TYPE_POINT) { |
476 | /* Check if number of token fits within bounds */ | ||
477 | if (wrqu->data.length > descr->max_tokens) { | ||
478 | printk(KERN_ERR "%s (WE) : Wireless Event too big (%d)\n", dev->name, wrqu->data.length); | ||
479 | return; | ||
480 | } | ||
481 | if (wrqu->data.length < descr->min_tokens) { | ||
482 | printk(KERN_ERR "%s (WE) : Wireless Event too small (%d)\n", dev->name, wrqu->data.length); | ||
483 | return; | ||
484 | } | ||
485 | /* Calculate extra_len - extra is NULL for restricted events */ | ||
486 | if (extra != NULL) | ||
487 | extra_len = wrqu->data.length * descr->token_size; | ||
488 | /* Always at an offset in wrqu */ | ||
489 | wrqu_off = IW_EV_POINT_OFF; | ||
490 | } | ||
598 | 491 | ||
599 | /* Copy structure to the user buffer. */ | 492 | /* Total length of the event */ |
600 | memcpy(extra, dev->wireless_handlers->private_args, | 493 | hdr_len = event_type_size[descr->header_type]; |
601 | sizeof(struct iw_priv_args) * wrqu->data.length); | 494 | event_len = hdr_len + extra_len; |
602 | 495 | ||
603 | return 0; | 496 | /* |
604 | } | 497 | * The problem for 64/32 bit. |
498 | * | ||
499 | * On 64-bit, a regular event is laid out as follows: | ||
500 | * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | | ||
501 | * | event.len | event.cmd | p a d d i n g | | ||
502 | * | wrqu data ... (with the correct size) | | ||
503 | * | ||
504 | * This padding exists because we manipulate event->u, | ||
505 | * and 'event' is not packed. | ||
506 | * | ||
507 | * An iw_point event is laid out like this instead: | ||
508 | * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | | ||
509 | * | event.len | event.cmd | p a d d i n g | | ||
510 | * | iwpnt.len | iwpnt.flg | p a d d i n g | | ||
511 | * | extra data ... | ||
512 | * | ||
513 | * The second padding exists because struct iw_point is extended, | ||
514 | * but this depends on the platform... | ||
515 | * | ||
516 | * On 32-bit, all the padding shouldn't be there. | ||
517 | */ | ||
605 | 518 | ||
519 | skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); | ||
520 | if (!skb) | ||
521 | return; | ||
606 | 522 | ||
607 | /******************** /proc/net/wireless SUPPORT ********************/ | 523 | /* Send via the RtNetlink event channel */ |
608 | /* | 524 | nlh = rtnetlink_ifinfo_prep(dev, skb); |
609 | * The /proc/net/wireless file is a human readable user-space interface | 525 | if (WARN_ON(!nlh)) { |
610 | * exporting various wireless specific statistics from the wireless devices. | 526 | kfree_skb(skb); |
611 | * This is the most popular part of the Wireless Extensions ;-) | 527 | return; |
612 | * | 528 | } |
613 | * This interface is a pure clone of /proc/net/dev (in net/core/dev.c). | ||
614 | * The content of the file is basically the content of "struct iw_statistics". | ||
615 | */ | ||
616 | 529 | ||
617 | #ifdef CONFIG_PROC_FS | 530 | /* Add the wireless events in the netlink packet */ |
531 | nla = nla_reserve(skb, IFLA_WIRELESS, event_len); | ||
532 | if (!nla) { | ||
533 | kfree_skb(skb); | ||
534 | return; | ||
535 | } | ||
536 | event = nla_data(nla); | ||
618 | 537 | ||
619 | /* ---------------------------------------------------------------- */ | 538 | /* Fill event - first clear to avoid data leaking */ |
620 | /* | 539 | memset(event, 0, hdr_len); |
621 | * Print one entry (line) of /proc/net/wireless | 540 | event->len = event_len; |
622 | */ | 541 | event->cmd = cmd; |
623 | static void wireless_seq_printf_stats(struct seq_file *seq, | 542 | memcpy(&event->u, ((char *) wrqu) + wrqu_off, hdr_len - IW_EV_LCP_LEN); |
624 | struct net_device *dev) | 543 | if (extra_len) |
625 | { | 544 | memcpy(((char *) event) + hdr_len, extra, extra_len); |
626 | /* Get stats from the driver */ | ||
627 | struct iw_statistics *stats = get_wireless_stats(dev); | ||
628 | static struct iw_statistics nullstats = {}; | ||
629 | 545 | ||
630 | /* show device if it's wireless regardless of current stats */ | 546 | nlmsg_end(skb, nlh); |
631 | if (!stats && dev->wireless_handlers) | 547 | #ifdef CONFIG_COMPAT |
632 | stats = &nullstats; | 548 | hdr_len = compat_event_type_size[descr->header_type]; |
549 | event_len = hdr_len + extra_len; | ||
633 | 550 | ||
634 | if (stats) { | 551 | compskb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); |
635 | seq_printf(seq, "%6s: %04x %3d%c %3d%c %3d%c %6d %6d %6d " | 552 | if (!compskb) { |
636 | "%6d %6d %6d\n", | 553 | kfree_skb(skb); |
637 | dev->name, stats->status, stats->qual.qual, | 554 | return; |
638 | stats->qual.updated & IW_QUAL_QUAL_UPDATED | ||
639 | ? '.' : ' ', | ||
640 | ((__s32) stats->qual.level) - | ||
641 | ((stats->qual.updated & IW_QUAL_DBM) ? 0x100 : 0), | ||
642 | stats->qual.updated & IW_QUAL_LEVEL_UPDATED | ||
643 | ? '.' : ' ', | ||
644 | ((__s32) stats->qual.noise) - | ||
645 | ((stats->qual.updated & IW_QUAL_DBM) ? 0x100 : 0), | ||
646 | stats->qual.updated & IW_QUAL_NOISE_UPDATED | ||
647 | ? '.' : ' ', | ||
648 | stats->discard.nwid, stats->discard.code, | ||
649 | stats->discard.fragment, stats->discard.retries, | ||
650 | stats->discard.misc, stats->miss.beacon); | ||
651 | |||
652 | if (stats != &nullstats) | ||
653 | stats->qual.updated &= ~IW_QUAL_ALL_UPDATED; | ||
654 | } | 555 | } |
655 | } | ||
656 | 556 | ||
657 | /* ---------------------------------------------------------------- */ | 557 | /* Send via the RtNetlink event channel */ |
658 | /* | 558 | nlh = rtnetlink_ifinfo_prep(dev, compskb); |
659 | * Print info for /proc/net/wireless (print all entries) | 559 | if (WARN_ON(!nlh)) { |
660 | */ | 560 | kfree_skb(skb); |
661 | static int wireless_dev_seq_show(struct seq_file *seq, void *v) | 561 | kfree_skb(compskb); |
662 | { | 562 | return; |
663 | might_sleep(); | 563 | } |
664 | 564 | ||
665 | if (v == SEQ_START_TOKEN) | 565 | /* Add the wireless events in the netlink packet */ |
666 | seq_printf(seq, "Inter-| sta-| Quality | Discarded " | 566 | nla = nla_reserve(compskb, IFLA_WIRELESS, event_len); |
667 | "packets | Missed | WE\n" | 567 | if (!nla) { |
668 | " face | tus | link level noise | nwid " | 568 | kfree_skb(skb); |
669 | "crypt frag retry misc | beacon | %d\n", | 569 | kfree_skb(compskb); |
670 | WIRELESS_EXT); | 570 | return; |
671 | else | 571 | } |
672 | wireless_seq_printf_stats(seq, v); | 572 | compat_event = nla_data(nla); |
673 | return 0; | 573 | |
574 | compat_event->len = event_len; | ||
575 | compat_event->cmd = cmd; | ||
576 | if (descr->header_type == IW_HEADER_TYPE_POINT) { | ||
577 | compat_wrqu.length = wrqu->data.length; | ||
578 | compat_wrqu.flags = wrqu->data.flags; | ||
579 | memcpy(&compat_event->pointer, | ||
580 | ((char *) &compat_wrqu) + IW_EV_COMPAT_POINT_OFF, | ||
581 | hdr_len - IW_EV_COMPAT_LCP_LEN); | ||
582 | if (extra_len) | ||
583 | memcpy(((char *) compat_event) + hdr_len, | ||
584 | extra, extra_len); | ||
585 | } else { | ||
586 | /* extra_len must be zero, so no if (extra) needed */ | ||
587 | memcpy(&compat_event->pointer, wrqu, | ||
588 | hdr_len - IW_EV_COMPAT_LCP_LEN); | ||
589 | } | ||
590 | |||
591 | nlmsg_end(compskb, nlh); | ||
592 | |||
593 | skb_shinfo(skb)->frag_list = compskb; | ||
594 | #endif | ||
595 | skb_queue_tail(&dev_net(dev)->wext_nlevents, skb); | ||
596 | schedule_work(&wireless_nlevent_work); | ||
674 | } | 597 | } |
598 | EXPORT_SYMBOL(wireless_send_event); | ||
599 | |||
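For context, the usual caller of wireless_send_event() exported just above is a driver reporting a state change to user space. The sketch below is illustrative only (the mydrv_* helper and the surrounding driver are hypothetical, not part of this patch); it shows the common pattern of reporting a new association as a SIOCGIWAP event.

        #include <linux/if_arp.h>
        #include <linux/if_ether.h>
        #include <linux/netdevice.h>
        #include <linux/string.h>
        #include <net/iw_handler.h>

        /* Hypothetical driver helper: tell user space which BSSID we associated to. */
        static void mydrv_report_assoc(struct net_device *dev, const u8 *bssid)
        {
                union iwreq_data wrqu;

                memset(&wrqu, 0, sizeof(wrqu));
                memcpy(wrqu.ap_addr.sa_data, bssid, ETH_ALEN);
                wrqu.ap_addr.sa_family = ARPHRD_ETHER;

                /* Queued on wext_nlevents and flushed by wireless_nlevent_work. */
                wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL);
        }

Passing extra == NULL is fine here because SIOCGIWAP is not an IW_HEADER_TYPE_POINT event, so no extra payload is expected.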
600 | |||
601 | |||
602 | /* IW handlers */ | ||
675 | 603 | ||
676 | static void *wireless_dev_seq_start(struct seq_file *seq, loff_t *pos) | 604 | struct iw_statistics *get_wireless_stats(struct net_device *dev) |
677 | { | 605 | { |
678 | struct net *net = seq_file_net(seq); | 606 | #ifdef CONFIG_WIRELESS_EXT |
679 | loff_t off; | 607 | if ((dev->wireless_handlers != NULL) && |
680 | struct net_device *dev; | 608 | (dev->wireless_handlers->get_wireless_stats != NULL)) |
609 | return dev->wireless_handlers->get_wireless_stats(dev); | ||
610 | #endif | ||
681 | 611 | ||
682 | rtnl_lock(); | 612 | #ifdef CONFIG_CFG80211_WEXT |
683 | if (!*pos) | 613 | if (dev->ieee80211_ptr &&
684 | return SEQ_START_TOKEN; | 614 | dev->ieee80211_ptr->wiphy && |
615 | dev->ieee80211_ptr->wiphy->wext && | ||
616 | dev->ieee80211_ptr->wiphy->wext->get_wireless_stats) | ||
617 | return dev->ieee80211_ptr->wiphy->wext->get_wireless_stats(dev); | ||
618 | #endif | ||
685 | 619 | ||
686 | off = 1; | 620 | /* not found */ |
687 | for_each_netdev(net, dev) | ||
688 | if (off++ == *pos) | ||
689 | return dev; | ||
690 | return NULL; | 621 | return NULL; |
691 | } | 622 | } |
692 | 623 | ||
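get_wireless_stats() only dereferences hooks that the driver (or cfg80211) registered. As a rough illustration of the WEXT side, a driver would provide the statistics callback through its struct iw_handler_def, along these lines; the mydrv_* names are invented and the values are placeholders.

        #include <linux/netdevice.h>
        #include <net/iw_handler.h>

        static struct iw_statistics mydrv_stats;    /* normally lives in netdev_priv() */

        static struct iw_statistics *mydrv_get_wireless_stats(struct net_device *dev)
        {
                mydrv_stats.qual.qual    = 60;
                mydrv_stats.qual.level   = 0x100 - 45;   /* -45 dBm once decoded */
                mydrv_stats.qual.noise   = 0x100 - 90;   /* -90 dBm */
                mydrv_stats.qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
                return &mydrv_stats;
        }

        static const struct iw_handler_def mydrv_handler_def = {
                .get_wireless_stats = mydrv_get_wireless_stats,
        };

        /* In the driver's setup path:  dev->wireless_handlers = &mydrv_handler_def; */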
693 | static void *wireless_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos) | 624 | static int iw_handler_get_iwstats(struct net_device * dev, |
625 | struct iw_request_info * info, | ||
626 | union iwreq_data * wrqu, | ||
627 | char * extra) | ||
694 | { | 628 | { |
695 | struct net *net = seq_file_net(seq); | 629 | /* Get stats from the driver */ |
630 | struct iw_statistics *stats; | ||
696 | 631 | ||
697 | ++*pos; | 632 | stats = get_wireless_stats(dev); |
633 | if (stats) { | ||
634 | /* Copy statistics to extra */ | ||
635 | memcpy(extra, stats, sizeof(struct iw_statistics)); | ||
636 | wrqu->data.length = sizeof(struct iw_statistics); | ||
698 | 637 | ||
699 | return v == SEQ_START_TOKEN ? | 638 | /* Check if we need to clear the updated flag */ |
700 | first_net_device(net) : next_net_device(v); | 639 | if (wrqu->data.flags != 0) |
640 | stats->qual.updated &= ~IW_QUAL_ALL_UPDATED; | ||
641 | return 0; | ||
642 | } else | ||
643 | return -EOPNOTSUPP; | ||
701 | } | 644 | } |
702 | 645 | ||
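From user space, iw_handler_get_iwstats() is reached through the SIOCGIWSTATS ioctl. A minimal sketch (the interface name "wlan0" is only an example):

        #include <stdio.h>
        #include <string.h>
        #include <unistd.h>
        #include <sys/ioctl.h>
        #include <sys/socket.h>
        #include <linux/wireless.h>

        int main(void)
        {
                struct iw_statistics stats;
                struct iwreq wrq;
                int fd = socket(AF_INET, SOCK_DGRAM, 0);

                memset(&wrq, 0, sizeof(wrq));
                strncpy(wrq.ifr_name, "wlan0", IFNAMSIZ);
                wrq.u.data.pointer = &stats;
                wrq.u.data.length  = sizeof(stats);
                wrq.u.data.flags   = 1;   /* non-zero: clear the IW_QUAL_*_UPDATED
                                             flags, as the handler above does */

                if (ioctl(fd, SIOCGIWSTATS, &wrq) == 0)
                        printf("status 0x%04x, quality %d\n",
                               stats.status, stats.qual.qual);
                close(fd);
                return 0;
        }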
703 | static void wireless_dev_seq_stop(struct seq_file *seq, void *v) | 646 | static iw_handler get_handler(struct net_device *dev, unsigned int cmd) |
704 | { | 647 | { |
705 | rtnl_unlock(); | 648 | /* Don't "optimise" the following variable, it will crash */ |
706 | } | 649 | unsigned int index; /* *MUST* be unsigned */ |
707 | 650 | const struct iw_handler_def *handlers = NULL; | |
708 | static const struct seq_operations wireless_seq_ops = { | ||
709 | .start = wireless_dev_seq_start, | ||
710 | .next = wireless_dev_seq_next, | ||
711 | .stop = wireless_dev_seq_stop, | ||
712 | .show = wireless_dev_seq_show, | ||
713 | }; | ||
714 | 651 | ||
715 | static int seq_open_wireless(struct inode *inode, struct file *file) | 652 | #ifdef CONFIG_CFG80211_WEXT |
716 | { | 653 | if (dev->ieee80211_ptr && dev->ieee80211_ptr->wiphy) |
717 | return seq_open_net(inode, file, &wireless_seq_ops, | 654 | handlers = dev->ieee80211_ptr->wiphy->wext; |
718 | sizeof(struct seq_net_private)); | 655 | #endif |
719 | } | 656 | #ifdef CONFIG_WIRELESS_EXT |
657 | if (dev->wireless_handlers) | ||
658 | handlers = dev->wireless_handlers; | ||
659 | #endif | ||
720 | 660 | ||
721 | static const struct file_operations wireless_seq_fops = { | 661 | if (!handlers) |
722 | .owner = THIS_MODULE, | 662 | return NULL; |
723 | .open = seq_open_wireless, | ||
724 | .read = seq_read, | ||
725 | .llseek = seq_lseek, | ||
726 | .release = seq_release_net, | ||
727 | }; | ||
728 | 663 | ||
729 | int wext_proc_init(struct net *net) | 664 | /* Try as a standard command */ |
730 | { | 665 | index = cmd - SIOCIWFIRST; |
731 | /* Create /proc/net/wireless entry */ | 666 | if (index < handlers->num_standard) |
732 | if (!proc_net_fops_create(net, "wireless", S_IRUGO, &wireless_seq_fops)) | 667 | return handlers->standard[index]; |
733 | return -ENOMEM; | ||
734 | 668 | ||
735 | return 0; | 669 | #ifdef CONFIG_WEXT_PRIV |
736 | } | 670 | /* Try as a private command */ |
671 | index = cmd - SIOCIWFIRSTPRIV; | ||
672 | if (index < handlers->num_private) | ||
673 | return handlers->private[index]; | ||
674 | #endif | ||
737 | 675 | ||
738 | void wext_proc_exit(struct net *net) | 676 | /* Not found */ |
739 | { | 677 | return NULL; |
740 | proc_net_remove(net, "wireless"); | ||
741 | } | 678 | } |
742 | #endif /* CONFIG_PROC_FS */ | ||
743 | 679 | ||
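The lookup in get_handler() assumes the driver laid out its handler table by command offset. A hypothetical registration (names invented, not from this patch) that matches that indexing:

        #include <linux/kernel.h>
        #include <linux/netdevice.h>
        #include <linux/string.h>
        #include <net/iw_handler.h>

        static int mydrv_get_name(struct net_device *dev, struct iw_request_info *info,
                                  union iwreq_data *wrqu, char *extra)
        {
                strcpy(wrqu->name, "IEEE 802.11bg");
                return 0;
        }

        /* Slot N serves command SIOCIWFIRST + N, exactly as get_handler() computes. */
        static const iw_handler mydrv_standard[] = {
                [SIOCGIWNAME - SIOCIWFIRST] = mydrv_get_name,
        };

        static const struct iw_handler_def mydrv_wext_def = {
                .standard     = mydrv_standard,
                .num_standard = ARRAY_SIZE(mydrv_standard),
        };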
744 | /************************** IOCTL SUPPORT **************************/ | ||
745 | /* | ||
746 | * The original user space API to configure all those Wireless Extensions | ||
747 | * is through IOCTLs. | ||
748 | * In there, we check if we need to call the new driver API (iw_handler) | ||
749 | * or just call the driver ioctl handler. | ||
750 | */ | ||
751 | |||
752 | /* ---------------------------------------------------------------- */ | ||
753 | static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd, | 680 | static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd, |
754 | const struct iw_ioctl_description *descr, | 681 | const struct iw_ioctl_description *descr, |
755 | iw_handler handler, struct net_device *dev, | 682 | iw_handler handler, struct net_device *dev, |
@@ -893,188 +820,39 @@ out: | |||
893 | } | 820 | } |
894 | 821 | ||
895 | /* | 822 | /* |
896 | * Wrapper to call a standard Wireless Extension handler. | 823 | * Call the commit handler in the driver |
897 | * We do various checks and also take care of moving data between | 824 | * (if it exists and if conditions are right) |
898 | * user space and kernel space. | 825 | * |
899 | */ | 826 | * Note : our commit strategy is currently pretty dumb, |
900 | static int ioctl_standard_call(struct net_device * dev, | 827 | * but we will be able to improve on that... |
901 | struct iwreq *iwr, | 828 | * The goal is to try to aggregate as many changes as possible |
902 | unsigned int cmd, | 829 | * before doing the commit. Drivers that will define a commit handler |
903 | struct iw_request_info *info, | 830 | * are usually those that need a reset after changing parameters, so |
904 | iw_handler handler) | 831 | * we want to minimise the number of resets. |
905 | { | 832 | * A cool idea is to use a timer : at each "set" command, we re-set the |
906 | const struct iw_ioctl_description * descr; | 833 | * timer, when the timer eventually fires, we call the driver. |
907 | int ret = -EINVAL; | 834 | * Hopefully, more on that later. |
908 | |||
909 | /* Get the description of the IOCTL */ | ||
910 | if ((cmd - SIOCIWFIRST) >= standard_ioctl_num) | ||
911 | return -EOPNOTSUPP; | ||
912 | descr = &(standard_ioctl[cmd - SIOCIWFIRST]); | ||
913 | |||
914 | /* Check if we have a pointer to user space data or not */ | ||
915 | if (descr->header_type != IW_HEADER_TYPE_POINT) { | ||
916 | |||
917 | /* No extra arguments. Trivial to handle */ | ||
918 | ret = handler(dev, info, &(iwr->u), NULL); | ||
919 | |||
920 | /* Generate an event to notify listeners of the change */ | ||
921 | if ((descr->flags & IW_DESCR_FLAG_EVENT) && | ||
922 | ((ret == 0) || (ret == -EIWCOMMIT))) | ||
923 | wireless_send_event(dev, cmd, &(iwr->u), NULL); | ||
924 | } else { | ||
925 | ret = ioctl_standard_iw_point(&iwr->u.data, cmd, descr, | ||
926 | handler, dev, info); | ||
927 | } | ||
928 | |||
929 | /* Call commit handler if needed and defined */ | ||
930 | if (ret == -EIWCOMMIT) | ||
931 | ret = call_commit_handler(dev); | ||
932 | |||
933 | /* Here, we will generate the appropriate event if needed */ | ||
934 | |||
935 | return ret; | ||
936 | } | ||
937 | |||
938 | /* ---------------------------------------------------------------- */ | ||
939 | /* | ||
940 | * Wrapper to call a private Wireless Extension handler. | ||
941 | * We do various checks and also take care of moving data between | ||
942 | * user space and kernel space. | ||
943 | * It's not as nice and slimline as the standard wrapper. The cause | ||
944 | * is struct iw_priv_args, which was not really designed for the | ||
945 | * job we are going here. | ||
946 | * | 835 | * |
947 | * IMPORTANT : This function prevent to set and get data on the same | 836 | * Also, I'm waiting to see how many people will complain about the |
948 | * IOCTL and enforce the SET/GET convention. Not doing it would be | 837 | * netif_running(dev) test. I'm open on that one... |
949 | * far too hairy... | 838 | * Hopefully, the driver will remember to do a commit in "open()" ;-) |
950 | * If you need to set and get data at the same time, please don't use | ||
951 | * a iw_handler but process it in your ioctl handler (i.e. use the | ||
952 | * old driver API). | ||
953 | */ | 839 | */ |
954 | static int get_priv_descr_and_size(struct net_device *dev, unsigned int cmd, | 840 | int call_commit_handler(struct net_device *dev) |
955 | const struct iw_priv_args **descrp) | ||
956 | { | ||
957 | const struct iw_priv_args *descr; | ||
958 | int i, extra_size; | ||
959 | |||
960 | descr = NULL; | ||
961 | for (i = 0; i < dev->wireless_handlers->num_private_args; i++) { | ||
962 | if (cmd == dev->wireless_handlers->private_args[i].cmd) { | ||
963 | descr = &dev->wireless_handlers->private_args[i]; | ||
964 | break; | ||
965 | } | ||
966 | } | ||
967 | |||
968 | extra_size = 0; | ||
969 | if (descr) { | ||
970 | if (IW_IS_SET(cmd)) { | ||
971 | int offset = 0; /* For sub-ioctls */ | ||
972 | /* Check for sub-ioctl handler */ | ||
973 | if (descr->name[0] == '\0') | ||
974 | /* Reserve one int for sub-ioctl index */ | ||
975 | offset = sizeof(__u32); | ||
976 | |||
977 | /* Size of set arguments */ | ||
978 | extra_size = get_priv_size(descr->set_args); | ||
979 | |||
980 | /* Does it fits in iwr ? */ | ||
981 | if ((descr->set_args & IW_PRIV_SIZE_FIXED) && | ||
982 | ((extra_size + offset) <= IFNAMSIZ)) | ||
983 | extra_size = 0; | ||
984 | } else { | ||
985 | /* Size of get arguments */ | ||
986 | extra_size = get_priv_size(descr->get_args); | ||
987 | |||
988 | /* Does it fits in iwr ? */ | ||
989 | if ((descr->get_args & IW_PRIV_SIZE_FIXED) && | ||
990 | (extra_size <= IFNAMSIZ)) | ||
991 | extra_size = 0; | ||
992 | } | ||
993 | } | ||
994 | *descrp = descr; | ||
995 | return extra_size; | ||
996 | } | ||
997 | |||
998 | static int ioctl_private_iw_point(struct iw_point *iwp, unsigned int cmd, | ||
999 | const struct iw_priv_args *descr, | ||
1000 | iw_handler handler, struct net_device *dev, | ||
1001 | struct iw_request_info *info, int extra_size) | ||
1002 | { | ||
1003 | char *extra; | ||
1004 | int err; | ||
1005 | |||
1006 | /* Check what user space is giving us */ | ||
1007 | if (IW_IS_SET(cmd)) { | ||
1008 | if (!iwp->pointer && iwp->length != 0) | ||
1009 | return -EFAULT; | ||
1010 | |||
1011 | if (iwp->length > (descr->set_args & IW_PRIV_SIZE_MASK)) | ||
1012 | return -E2BIG; | ||
1013 | } else if (!iwp->pointer) | ||
1014 | return -EFAULT; | ||
1015 | |||
1016 | extra = kmalloc(extra_size, GFP_KERNEL); | ||
1017 | if (!extra) | ||
1018 | return -ENOMEM; | ||
1019 | |||
1020 | /* If it is a SET, get all the extra data in here */ | ||
1021 | if (IW_IS_SET(cmd) && (iwp->length != 0)) { | ||
1022 | if (copy_from_user(extra, iwp->pointer, extra_size)) { | ||
1023 | err = -EFAULT; | ||
1024 | goto out; | ||
1025 | } | ||
1026 | } | ||
1027 | |||
1028 | /* Call the handler */ | ||
1029 | err = handler(dev, info, (union iwreq_data *) iwp, extra); | ||
1030 | |||
1031 | /* If we have something to return to the user */ | ||
1032 | if (!err && IW_IS_GET(cmd)) { | ||
1033 | /* Adjust for the actual length if it's variable, | ||
1034 | * avoid leaking kernel bits outside. | ||
1035 | */ | ||
1036 | if (!(descr->get_args & IW_PRIV_SIZE_FIXED)) | ||
1037 | extra_size = adjust_priv_size(descr->get_args, iwp); | ||
1038 | |||
1039 | if (copy_to_user(iwp->pointer, extra, extra_size)) | ||
1040 | err = -EFAULT; | ||
1041 | } | ||
1042 | |||
1043 | out: | ||
1044 | kfree(extra); | ||
1045 | return err; | ||
1046 | } | ||
1047 | |||
1048 | static int ioctl_private_call(struct net_device *dev, struct iwreq *iwr, | ||
1049 | unsigned int cmd, struct iw_request_info *info, | ||
1050 | iw_handler handler) | ||
1051 | { | 841 | { |
1052 | int extra_size = 0, ret = -EINVAL; | 842 | #ifdef CONFIG_WIRELESS_EXT |
1053 | const struct iw_priv_args *descr; | 843 | if ((netif_running(dev)) && |
1054 | 844 | (dev->wireless_handlers->standard[0] != NULL)) | |
1055 | extra_size = get_priv_descr_and_size(dev, cmd, &descr); | 845 | /* Call the commit handler on the driver */ |
1056 | 846 | return dev->wireless_handlers->standard[0](dev, NULL, | |
1057 | /* Check if we have a pointer to user space data or not. */ | 847 | NULL, NULL); |
1058 | if (extra_size == 0) { | 848 | else |
1059 | /* No extra arguments. Trivial to handle */ | 849 | return 0; /* Command completed successfully */ |
1060 | ret = handler(dev, info, &(iwr->u), (char *) &(iwr->u)); | 850 | #else |
1061 | } else { | 851 | /* cfg80211 has no commit */ |
1062 | ret = ioctl_private_iw_point(&iwr->u.data, cmd, descr, | 852 | return 0; |
1063 | handler, dev, info, extra_size); | 853 | #endif |
1064 | } | ||
1065 | |||
1066 | /* Call commit handler if needed and defined */ | ||
1067 | if (ret == -EIWCOMMIT) | ||
1068 | ret = call_commit_handler(dev); | ||
1069 | |||
1070 | return ret; | ||
1071 | } | 854 | } |
1072 | 855 | ||
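call_commit_handler() simply invokes standard[0], i.e. the driver's SIOCSIWCOMMIT handler. A hypothetical driver (names invented) relying on this would look roughly like the sketch below; the key point is that set-handlers return -EIWCOMMIT to request the deferred commit.

        #include <linux/netdevice.h>
        #include <net/iw_handler.h>

        /* Reprogram the hardware with every parameter cached so far. */
        static int mydrv_commit(struct net_device *dev, struct iw_request_info *info,
                                union iwreq_data *wrqu, char *extra)
        {
                /* ... write cached config to the device ... */
                return 0;
        }

        static int mydrv_set_freq(struct net_device *dev, struct iw_request_info *info,
                                  union iwreq_data *wrqu, char *extra)
        {
                /* ... cache wrqu->freq in the driver's private data ... */
                return -EIWCOMMIT;      /* ask the wext core to call mydrv_commit() */
        }

        static const iw_handler mydrv_std[] = {
                [SIOCSIWCOMMIT - SIOCIWFIRST] = mydrv_commit,   /* slot 0 */
                [SIOCSIWFREQ   - SIOCIWFIRST] = mydrv_set_freq,
        };

The commit only fires while netif_running(dev), which is why the comment above suggests drivers also commit from their open() routine.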
1073 | /* ---------------------------------------------------------------- */ | ||
1074 | typedef int (*wext_ioctl_func)(struct net_device *, struct iwreq *, | ||
1075 | unsigned int, struct iw_request_info *, | ||
1076 | iw_handler); | ||
1077 | |||
1078 | /* | 856 | /* |
1079 | * Main IOCTl dispatcher. | 857 | * Main IOCTl dispatcher. |
1080 | * Check the type of IOCTL and call the appropriate wrapper... | 858 | * Check the type of IOCTL and call the appropriate wrapper... |
@@ -1103,9 +881,11 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr, | |||
1103 | return standard(dev, iwr, cmd, info, | 881 | return standard(dev, iwr, cmd, info, |
1104 | &iw_handler_get_iwstats); | 882 | &iw_handler_get_iwstats); |
1105 | 883 | ||
884 | #ifdef CONFIG_WEXT_PRIV | ||
1106 | if (cmd == SIOCGIWPRIV && dev->wireless_handlers) | 885 | if (cmd == SIOCGIWPRIV && dev->wireless_handlers) |
1107 | return standard(dev, iwr, cmd, info, | 886 | return standard(dev, iwr, cmd, info, |
1108 | &iw_handler_get_private); | 887 | iw_handler_get_private); |
888 | #endif | ||
1109 | 889 | ||
1110 | /* Basic check */ | 890 | /* Basic check */ |
1111 | if (!netif_device_present(dev)) | 891 | if (!netif_device_present(dev)) |
@@ -1117,7 +897,7 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr, | |||
1117 | /* Standard and private are not the same */ | 897 | /* Standard and private are not the same */ |
1118 | if (cmd < SIOCIWFIRSTPRIV) | 898 | if (cmd < SIOCIWFIRSTPRIV) |
1119 | return standard(dev, iwr, cmd, info, handler); | 899 | return standard(dev, iwr, cmd, info, handler); |
1120 | else | 900 | else if (private) |
1121 | return private(dev, iwr, cmd, info, handler); | 901 | return private(dev, iwr, cmd, info, handler); |
1122 | } | 902 | } |
1123 | /* Old driver API : call driver ioctl handler */ | 903 | /* Old driver API : call driver ioctl handler */ |
@@ -1157,6 +937,50 @@ static int wext_ioctl_dispatch(struct net *net, struct ifreq *ifr, | |||
1157 | return ret; | 937 | return ret; |
1158 | } | 938 | } |
1159 | 939 | ||
940 | /* | ||
941 | * Wrapper to call a standard Wireless Extension handler. | ||
942 | * We do various checks and also take care of moving data between | ||
943 | * user space and kernel space. | ||
944 | */ | ||
945 | static int ioctl_standard_call(struct net_device * dev, | ||
946 | struct iwreq *iwr, | ||
947 | unsigned int cmd, | ||
948 | struct iw_request_info *info, | ||
949 | iw_handler handler) | ||
950 | { | ||
951 | const struct iw_ioctl_description * descr; | ||
952 | int ret = -EINVAL; | ||
953 | |||
954 | /* Get the description of the IOCTL */ | ||
955 | if ((cmd - SIOCIWFIRST) >= standard_ioctl_num) | ||
956 | return -EOPNOTSUPP; | ||
957 | descr = &(standard_ioctl[cmd - SIOCIWFIRST]); | ||
958 | |||
959 | /* Check if we have a pointer to user space data or not */ | ||
960 | if (descr->header_type != IW_HEADER_TYPE_POINT) { | ||
961 | |||
962 | /* No extra arguments. Trivial to handle */ | ||
963 | ret = handler(dev, info, &(iwr->u), NULL); | ||
964 | |||
965 | /* Generate an event to notify listeners of the change */ | ||
966 | if ((descr->flags & IW_DESCR_FLAG_EVENT) && | ||
967 | ((ret == 0) || (ret == -EIWCOMMIT))) | ||
968 | wireless_send_event(dev, cmd, &(iwr->u), NULL); | ||
969 | } else { | ||
970 | ret = ioctl_standard_iw_point(&iwr->u.data, cmd, descr, | ||
971 | handler, dev, info); | ||
972 | } | ||
973 | |||
974 | /* Call commit handler if needed and defined */ | ||
975 | if (ret == -EIWCOMMIT) | ||
976 | ret = call_commit_handler(dev); | ||
977 | |||
978 | /* Here, we will generate the appropriate event if needed */ | ||
979 | |||
980 | return ret; | ||
981 | } | ||
982 | |||
983 | |||
1160 | int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd, | 984 | int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd, |
1161 | void __user *arg) | 985 | void __user *arg) |
1162 | { | 986 | { |
@@ -1205,43 +1029,6 @@ static int compat_standard_call(struct net_device *dev, | |||
1205 | return err; | 1029 | return err; |
1206 | } | 1030 | } |
1207 | 1031 | ||
1208 | static int compat_private_call(struct net_device *dev, struct iwreq *iwr, | ||
1209 | unsigned int cmd, struct iw_request_info *info, | ||
1210 | iw_handler handler) | ||
1211 | { | ||
1212 | const struct iw_priv_args *descr; | ||
1213 | int ret, extra_size; | ||
1214 | |||
1215 | extra_size = get_priv_descr_and_size(dev, cmd, &descr); | ||
1216 | |||
1217 | /* Check if we have a pointer to user space data or not. */ | ||
1218 | if (extra_size == 0) { | ||
1219 | /* No extra arguments. Trivial to handle */ | ||
1220 | ret = handler(dev, info, &(iwr->u), (char *) &(iwr->u)); | ||
1221 | } else { | ||
1222 | struct compat_iw_point *iwp_compat; | ||
1223 | struct iw_point iwp; | ||
1224 | |||
1225 | iwp_compat = (struct compat_iw_point *) &iwr->u.data; | ||
1226 | iwp.pointer = compat_ptr(iwp_compat->pointer); | ||
1227 | iwp.length = iwp_compat->length; | ||
1228 | iwp.flags = iwp_compat->flags; | ||
1229 | |||
1230 | ret = ioctl_private_iw_point(&iwp, cmd, descr, | ||
1231 | handler, dev, info, extra_size); | ||
1232 | |||
1233 | iwp_compat->pointer = ptr_to_compat(iwp.pointer); | ||
1234 | iwp_compat->length = iwp.length; | ||
1235 | iwp_compat->flags = iwp.flags; | ||
1236 | } | ||
1237 | |||
1238 | /* Call commit handler if needed and defined */ | ||
1239 | if (ret == -EIWCOMMIT) | ||
1240 | ret = call_commit_handler(dev); | ||
1241 | |||
1242 | return ret; | ||
1243 | } | ||
1244 | |||
1245 | int compat_wext_handle_ioctl(struct net *net, unsigned int cmd, | 1032 | int compat_wext_handle_ioctl(struct net *net, unsigned int cmd, |
1246 | unsigned long arg) | 1033 | unsigned long arg) |
1247 | { | 1034 | { |
@@ -1274,502 +1061,3 @@ int compat_wext_handle_ioctl(struct net *net, unsigned int cmd, | |||
1274 | return ret; | 1061 | return ret; |
1275 | } | 1062 | } |
1276 | #endif | 1063 | #endif |
1277 | |||
1278 | static int __net_init wext_pernet_init(struct net *net) | ||
1279 | { | ||
1280 | skb_queue_head_init(&net->wext_nlevents); | ||
1281 | return 0; | ||
1282 | } | ||
1283 | |||
1284 | static void __net_exit wext_pernet_exit(struct net *net) | ||
1285 | { | ||
1286 | skb_queue_purge(&net->wext_nlevents); | ||
1287 | } | ||
1288 | |||
1289 | static struct pernet_operations wext_pernet_ops = { | ||
1290 | .init = wext_pernet_init, | ||
1291 | .exit = wext_pernet_exit, | ||
1292 | }; | ||
1293 | |||
1294 | static int __init wireless_nlevent_init(void) | ||
1295 | { | ||
1296 | return register_pernet_subsys(&wext_pernet_ops); | ||
1297 | } | ||
1298 | |||
1299 | subsys_initcall(wireless_nlevent_init); | ||
1300 | |||
1301 | /* Process events generated by the wireless layer or the driver. */ | ||
1302 | static void wireless_nlevent_process(struct work_struct *work) | ||
1303 | { | ||
1304 | struct sk_buff *skb; | ||
1305 | struct net *net; | ||
1306 | |||
1307 | rtnl_lock(); | ||
1308 | |||
1309 | for_each_net(net) { | ||
1310 | while ((skb = skb_dequeue(&net->wext_nlevents))) | ||
1311 | rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, | ||
1312 | GFP_KERNEL); | ||
1313 | } | ||
1314 | |||
1315 | rtnl_unlock(); | ||
1316 | } | ||
1317 | |||
1318 | static DECLARE_WORK(wireless_nlevent_work, wireless_nlevent_process); | ||
1319 | |||
1320 | static struct nlmsghdr *rtnetlink_ifinfo_prep(struct net_device *dev, | ||
1321 | struct sk_buff *skb) | ||
1322 | { | ||
1323 | struct ifinfomsg *r; | ||
1324 | struct nlmsghdr *nlh; | ||
1325 | |||
1326 | nlh = nlmsg_put(skb, 0, 0, RTM_NEWLINK, sizeof(*r), 0); | ||
1327 | if (!nlh) | ||
1328 | return NULL; | ||
1329 | |||
1330 | r = nlmsg_data(nlh); | ||
1331 | r->ifi_family = AF_UNSPEC; | ||
1332 | r->__ifi_pad = 0; | ||
1333 | r->ifi_type = dev->type; | ||
1334 | r->ifi_index = dev->ifindex; | ||
1335 | r->ifi_flags = dev_get_flags(dev); | ||
1336 | r->ifi_change = 0; /* Wireless changes don't affect those flags */ | ||
1337 | |||
1338 | NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name); | ||
1339 | |||
1340 | return nlh; | ||
1341 | nla_put_failure: | ||
1342 | nlmsg_cancel(skb, nlh); | ||
1343 | return NULL; | ||
1344 | } | ||
1345 | |||
1346 | |||
1347 | /* | ||
1348 | * Main event dispatcher. Called from other parts and drivers. | ||
1349 | * Send the event on the appropriate channels. | ||
1350 | * May be called from interrupt context. | ||
1351 | */ | ||
1352 | void wireless_send_event(struct net_device * dev, | ||
1353 | unsigned int cmd, | ||
1354 | union iwreq_data * wrqu, | ||
1355 | const char * extra) | ||
1356 | { | ||
1357 | const struct iw_ioctl_description * descr = NULL; | ||
1358 | int extra_len = 0; | ||
1359 | struct iw_event *event; /* Mallocated whole event */ | ||
1360 | int event_len; /* Its size */ | ||
1361 | int hdr_len; /* Size of the event header */ | ||
1362 | int wrqu_off = 0; /* Offset in wrqu */ | ||
1363 | /* Don't "optimise" the following variable, it will crash */ | ||
1364 | unsigned cmd_index; /* *MUST* be unsigned */ | ||
1365 | struct sk_buff *skb; | ||
1366 | struct nlmsghdr *nlh; | ||
1367 | struct nlattr *nla; | ||
1368 | #ifdef CONFIG_COMPAT | ||
1369 | struct __compat_iw_event *compat_event; | ||
1370 | struct compat_iw_point compat_wrqu; | ||
1371 | struct sk_buff *compskb; | ||
1372 | #endif | ||
1373 | |||
1374 | /* | ||
1375 | * Nothing in the kernel sends scan events with data, be safe. | ||
1376 | * This is necessary because we cannot fix up scan event data | ||
1377 | * for compat, due to being contained in 'extra', but normally | ||
1378 | * applications are required to retrieve the scan data anyway | ||
1379 | * and no data is included in the event, this codifies that | ||
1380 | * practice. | ||
1381 | */ | ||
1382 | if (WARN_ON(cmd == SIOCGIWSCAN && extra)) | ||
1383 | extra = NULL; | ||
1384 | |||
1385 | /* Get the description of the Event */ | ||
1386 | if (cmd <= SIOCIWLAST) { | ||
1387 | cmd_index = cmd - SIOCIWFIRST; | ||
1388 | if (cmd_index < standard_ioctl_num) | ||
1389 | descr = &(standard_ioctl[cmd_index]); | ||
1390 | } else { | ||
1391 | cmd_index = cmd - IWEVFIRST; | ||
1392 | if (cmd_index < standard_event_num) | ||
1393 | descr = &(standard_event[cmd_index]); | ||
1394 | } | ||
1395 | /* Don't accept unknown events */ | ||
1396 | if (descr == NULL) { | ||
1397 | /* Note : we don't return an error to the driver, because | ||
1398 | * the driver would not know what to do about it. It can't | ||
1399 | * return an error to the user, because the event is not | ||
1400 | * initiated by a user request. | ||
1401 | * The best the driver could do is to log an error message. | ||
1402 | * We will do it ourselves instead... | ||
1403 | */ | ||
1404 | printk(KERN_ERR "%s (WE) : Invalid/Unknown Wireless Event (0x%04X)\n", | ||
1405 | dev->name, cmd); | ||
1406 | return; | ||
1407 | } | ||
1408 | |||
1409 | /* Check extra parameters and set extra_len */ | ||
1410 | if (descr->header_type == IW_HEADER_TYPE_POINT) { | ||
1411 | /* Check if number of token fits within bounds */ | ||
1412 | if (wrqu->data.length > descr->max_tokens) { | ||
1413 | printk(KERN_ERR "%s (WE) : Wireless Event too big (%d)\n", dev->name, wrqu->data.length); | ||
1414 | return; | ||
1415 | } | ||
1416 | if (wrqu->data.length < descr->min_tokens) { | ||
1417 | printk(KERN_ERR "%s (WE) : Wireless Event too small (%d)\n", dev->name, wrqu->data.length); | ||
1418 | return; | ||
1419 | } | ||
1420 | /* Calculate extra_len - extra is NULL for restricted events */ | ||
1421 | if (extra != NULL) | ||
1422 | extra_len = wrqu->data.length * descr->token_size; | ||
1423 | /* Always at an offset in wrqu */ | ||
1424 | wrqu_off = IW_EV_POINT_OFF; | ||
1425 | } | ||
1426 | |||
1427 | /* Total length of the event */ | ||
1428 | hdr_len = event_type_size[descr->header_type]; | ||
1429 | event_len = hdr_len + extra_len; | ||
1430 | |||
1431 | /* | ||
1432 | * The problem for 64/32 bit. | ||
1433 | * | ||
1434 | * On 64-bit, a regular event is laid out as follows: | ||
1435 | * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | | ||
1436 | * | event.len | event.cmd | p a d d i n g | | ||
1437 | * | wrqu data ... (with the correct size) | | ||
1438 | * | ||
1439 | * This padding exists because we manipulate event->u, | ||
1440 | * and 'event' is not packed. | ||
1441 | * | ||
1442 | * An iw_point event is laid out like this instead: | ||
1443 | * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | | ||
1444 | * | event.len | event.cmd | p a d d i n g | | ||
1445 | * | iwpnt.len | iwpnt.flg | p a d d i n g | | ||
1446 | * | extra data ... | ||
1447 | * | ||
1448 | * The second padding exists because struct iw_point is extended, | ||
1449 | * but this depends on the platform... | ||
1450 | * | ||
1451 | * On 32-bit, all the padding shouldn't be there. | ||
1452 | */ | ||
1453 | |||
1454 | skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); | ||
1455 | if (!skb) | ||
1456 | return; | ||
1457 | |||
1458 | /* Send via the RtNetlink event channel */ | ||
1459 | nlh = rtnetlink_ifinfo_prep(dev, skb); | ||
1460 | if (WARN_ON(!nlh)) { | ||
1461 | kfree_skb(skb); | ||
1462 | return; | ||
1463 | } | ||
1464 | |||
1465 | /* Add the wireless events in the netlink packet */ | ||
1466 | nla = nla_reserve(skb, IFLA_WIRELESS, event_len); | ||
1467 | if (!nla) { | ||
1468 | kfree_skb(skb); | ||
1469 | return; | ||
1470 | } | ||
1471 | event = nla_data(nla); | ||
1472 | |||
1473 | /* Fill event - first clear to avoid data leaking */ | ||
1474 | memset(event, 0, hdr_len); | ||
1475 | event->len = event_len; | ||
1476 | event->cmd = cmd; | ||
1477 | memcpy(&event->u, ((char *) wrqu) + wrqu_off, hdr_len - IW_EV_LCP_LEN); | ||
1478 | if (extra_len) | ||
1479 | memcpy(((char *) event) + hdr_len, extra, extra_len); | ||
1480 | |||
1481 | nlmsg_end(skb, nlh); | ||
1482 | #ifdef CONFIG_COMPAT | ||
1483 | hdr_len = compat_event_type_size[descr->header_type]; | ||
1484 | event_len = hdr_len + extra_len; | ||
1485 | |||
1486 | compskb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); | ||
1487 | if (!compskb) { | ||
1488 | kfree_skb(skb); | ||
1489 | return; | ||
1490 | } | ||
1491 | |||
1492 | /* Send via the RtNetlink event channel */ | ||
1493 | nlh = rtnetlink_ifinfo_prep(dev, compskb); | ||
1494 | if (WARN_ON(!nlh)) { | ||
1495 | kfree_skb(skb); | ||
1496 | kfree_skb(compskb); | ||
1497 | return; | ||
1498 | } | ||
1499 | |||
1500 | /* Add the wireless events in the netlink packet */ | ||
1501 | nla = nla_reserve(compskb, IFLA_WIRELESS, event_len); | ||
1502 | if (!nla) { | ||
1503 | kfree_skb(skb); | ||
1504 | kfree_skb(compskb); | ||
1505 | return; | ||
1506 | } | ||
1507 | compat_event = nla_data(nla); | ||
1508 | |||
1509 | compat_event->len = event_len; | ||
1510 | compat_event->cmd = cmd; | ||
1511 | if (descr->header_type == IW_HEADER_TYPE_POINT) { | ||
1512 | compat_wrqu.length = wrqu->data.length; | ||
1513 | compat_wrqu.flags = wrqu->data.flags; | ||
1514 | memcpy(&compat_event->pointer, | ||
1515 | ((char *) &compat_wrqu) + IW_EV_COMPAT_POINT_OFF, | ||
1516 | hdr_len - IW_EV_COMPAT_LCP_LEN); | ||
1517 | if (extra_len) | ||
1518 | memcpy(((char *) compat_event) + hdr_len, | ||
1519 | extra, extra_len); | ||
1520 | } else { | ||
1521 | /* extra_len must be zero, so no if (extra) needed */ | ||
1522 | memcpy(&compat_event->pointer, wrqu, | ||
1523 | hdr_len - IW_EV_COMPAT_LCP_LEN); | ||
1524 | } | ||
1525 | |||
1526 | nlmsg_end(compskb, nlh); | ||
1527 | |||
1528 | skb_shinfo(skb)->frag_list = compskb; | ||
1529 | #endif | ||
1530 | skb_queue_tail(&dev_net(dev)->wext_nlevents, skb); | ||
1531 | schedule_work(&wireless_nlevent_work); | ||
1532 | } | ||
1533 | EXPORT_SYMBOL(wireless_send_event); | ||
1534 | |||
1535 | /********************** ENHANCED IWSPY SUPPORT **********************/ | ||
1536 | /* | ||
1537 | * In the old days, the driver was handling spy support all by itself. | ||
1538 | * Now, the driver can delegate this task to Wireless Extensions. | ||
1539 | * It needs to use those standard spy iw_handler in struct iw_handler_def, | ||
1540 | * push data to us via wireless_spy_update() and include struct iw_spy_data | ||
1541 | * in its private part (and export it in net_device->wireless_data->spy_data). | ||
1542 | * One of the main advantage of centralising spy support here is that | ||
1543 | * it becomes much easier to improve and extend it without having to touch | ||
1544 | * the drivers. One example is the addition of the Spy-Threshold events. | ||
1545 | */ | ||
1546 | |||
1547 | /* ---------------------------------------------------------------- */ | ||
1548 | /* | ||
1549 | * Return the pointer to the spy data in the driver. | ||
1550 | * Because this is called on the Rx path via wireless_spy_update(), | ||
1551 | * we want it to be efficient... | ||
1552 | */ | ||
1553 | static inline struct iw_spy_data *get_spydata(struct net_device *dev) | ||
1554 | { | ||
1555 | /* This is the new way */ | ||
1556 | if (dev->wireless_data) | ||
1557 | return dev->wireless_data->spy_data; | ||
1558 | return NULL; | ||
1559 | } | ||
1560 | |||
1561 | /*------------------------------------------------------------------*/ | ||
1562 | /* | ||
1563 | * Standard Wireless Handler : set Spy List | ||
1564 | */ | ||
1565 | int iw_handler_set_spy(struct net_device * dev, | ||
1566 | struct iw_request_info * info, | ||
1567 | union iwreq_data * wrqu, | ||
1568 | char * extra) | ||
1569 | { | ||
1570 | struct iw_spy_data * spydata = get_spydata(dev); | ||
1571 | struct sockaddr * address = (struct sockaddr *) extra; | ||
1572 | |||
1573 | /* Make sure driver is not buggy or using the old API */ | ||
1574 | if (!spydata) | ||
1575 | return -EOPNOTSUPP; | ||
1576 | |||
1577 | /* Disable spy collection while we copy the addresses. | ||
1578 | * While we copy addresses, any call to wireless_spy_update() | ||
1579 | * will NOP. This is OK, as anyway the addresses are changing. */ | ||
1580 | spydata->spy_number = 0; | ||
1581 | |||
1582 | /* We want to operate without locking, because wireless_spy_update() | ||
1583 | * most likely will happen in the interrupt handler, and therefore | ||
1584 | * have its own locking constraints and needs performance. | ||
1585 | * The rtnl_lock() make sure we don't race with the other iw_handlers. | ||
1586 | * This make sure wireless_spy_update() "see" that the spy list | ||
1587 | * is temporarily disabled. */ | ||
1588 | smp_wmb(); | ||
1589 | |||
1590 | /* Are there are addresses to copy? */ | ||
1591 | if (wrqu->data.length > 0) { | ||
1592 | int i; | ||
1593 | |||
1594 | /* Copy addresses */ | ||
1595 | for (i = 0; i < wrqu->data.length; i++) | ||
1596 | memcpy(spydata->spy_address[i], address[i].sa_data, | ||
1597 | ETH_ALEN); | ||
1598 | /* Reset stats */ | ||
1599 | memset(spydata->spy_stat, 0, | ||
1600 | sizeof(struct iw_quality) * IW_MAX_SPY); | ||
1601 | } | ||
1602 | |||
1603 | /* Make sure above is updated before re-enabling */ | ||
1604 | smp_wmb(); | ||
1605 | |||
1606 | /* Enable addresses */ | ||
1607 | spydata->spy_number = wrqu->data.length; | ||
1608 | |||
1609 | return 0; | ||
1610 | } | ||
1611 | EXPORT_SYMBOL(iw_handler_set_spy); | ||
1612 | |||
1613 | /*------------------------------------------------------------------*/ | ||
1614 | /* | ||
1615 | * Standard Wireless Handler : get Spy List | ||
1616 | */ | ||
1617 | int iw_handler_get_spy(struct net_device * dev, | ||
1618 | struct iw_request_info * info, | ||
1619 | union iwreq_data * wrqu, | ||
1620 | char * extra) | ||
1621 | { | ||
1622 | struct iw_spy_data * spydata = get_spydata(dev); | ||
1623 | struct sockaddr * address = (struct sockaddr *) extra; | ||
1624 | int i; | ||
1625 | |||
1626 | /* Make sure driver is not buggy or using the old API */ | ||
1627 | if (!spydata) | ||
1628 | return -EOPNOTSUPP; | ||
1629 | |||
1630 | wrqu->data.length = spydata->spy_number; | ||
1631 | |||
1632 | /* Copy addresses. */ | ||
1633 | for (i = 0; i < spydata->spy_number; i++) { | ||
1634 | memcpy(address[i].sa_data, spydata->spy_address[i], ETH_ALEN); | ||
1635 | address[i].sa_family = AF_UNIX; | ||
1636 | } | ||
1637 | /* Copy stats to the user buffer (just after). */ | ||
1638 | if (spydata->spy_number > 0) | ||
1639 | memcpy(extra + (sizeof(struct sockaddr) *spydata->spy_number), | ||
1640 | spydata->spy_stat, | ||
1641 | sizeof(struct iw_quality) * spydata->spy_number); | ||
1642 | /* Reset updated flags. */ | ||
1643 | for (i = 0; i < spydata->spy_number; i++) | ||
1644 | spydata->spy_stat[i].updated &= ~IW_QUAL_ALL_UPDATED; | ||
1645 | return 0; | ||
1646 | } | ||
1647 | EXPORT_SYMBOL(iw_handler_get_spy); | ||
1648 | |||
1649 | /*------------------------------------------------------------------*/ | ||
1650 | /* | ||
1651 | * Standard Wireless Handler : set spy threshold | ||
1652 | */ | ||
1653 | int iw_handler_set_thrspy(struct net_device * dev, | ||
1654 | struct iw_request_info *info, | ||
1655 | union iwreq_data * wrqu, | ||
1656 | char * extra) | ||
1657 | { | ||
1658 | struct iw_spy_data * spydata = get_spydata(dev); | ||
1659 | struct iw_thrspy * threshold = (struct iw_thrspy *) extra; | ||
1660 | |||
1661 | /* Make sure driver is not buggy or using the old API */ | ||
1662 | if (!spydata) | ||
1663 | return -EOPNOTSUPP; | ||
1664 | |||
1665 | /* Just do it */ | ||
1666 | memcpy(&(spydata->spy_thr_low), &(threshold->low), | ||
1667 | 2 * sizeof(struct iw_quality)); | ||
1668 | |||
1669 | /* Clear flag */ | ||
1670 | memset(spydata->spy_thr_under, '\0', sizeof(spydata->spy_thr_under)); | ||
1671 | |||
1672 | return 0; | ||
1673 | } | ||
1674 | EXPORT_SYMBOL(iw_handler_set_thrspy); | ||
1675 | |||
1676 | /*------------------------------------------------------------------*/ | ||
1677 | /* | ||
1678 | * Standard Wireless Handler : get spy threshold | ||
1679 | */ | ||
1680 | int iw_handler_get_thrspy(struct net_device * dev, | ||
1681 | struct iw_request_info *info, | ||
1682 | union iwreq_data * wrqu, | ||
1683 | char * extra) | ||
1684 | { | ||
1685 | struct iw_spy_data * spydata = get_spydata(dev); | ||
1686 | struct iw_thrspy * threshold = (struct iw_thrspy *) extra; | ||
1687 | |||
1688 | /* Make sure driver is not buggy or using the old API */ | ||
1689 | if (!spydata) | ||
1690 | return -EOPNOTSUPP; | ||
1691 | |||
1692 | /* Just do it */ | ||
1693 | memcpy(&(threshold->low), &(spydata->spy_thr_low), | ||
1694 | 2 * sizeof(struct iw_quality)); | ||
1695 | |||
1696 | return 0; | ||
1697 | } | ||
1698 | EXPORT_SYMBOL(iw_handler_get_thrspy); | ||
1699 | |||
1700 | /*------------------------------------------------------------------*/ | ||
1701 | /* | ||
1702 | * Prepare and send a Spy Threshold event | ||
1703 | */ | ||
1704 | static void iw_send_thrspy_event(struct net_device * dev, | ||
1705 | struct iw_spy_data * spydata, | ||
1706 | unsigned char * address, | ||
1707 | struct iw_quality * wstats) | ||
1708 | { | ||
1709 | union iwreq_data wrqu; | ||
1710 | struct iw_thrspy threshold; | ||
1711 | |||
1712 | /* Init */ | ||
1713 | wrqu.data.length = 1; | ||
1714 | wrqu.data.flags = 0; | ||
1715 | /* Copy address */ | ||
1716 | memcpy(threshold.addr.sa_data, address, ETH_ALEN); | ||
1717 | threshold.addr.sa_family = ARPHRD_ETHER; | ||
1718 | /* Copy stats */ | ||
1719 | memcpy(&(threshold.qual), wstats, sizeof(struct iw_quality)); | ||
1720 | /* Copy also thresholds */ | ||
1721 | memcpy(&(threshold.low), &(spydata->spy_thr_low), | ||
1722 | 2 * sizeof(struct iw_quality)); | ||
1723 | |||
1724 | /* Send event to user space */ | ||
1725 | wireless_send_event(dev, SIOCGIWTHRSPY, &wrqu, (char *) &threshold); | ||
1726 | } | ||
1727 | |||
1728 | /* ---------------------------------------------------------------- */ | ||
1729 | /* | ||
1730 | * Call for the driver to update the spy data. | ||
1731 | * For now, the spy data is a simple array. As the size of the array is | ||
1732 | * small, this is good enough. If we wanted to support larger number of | ||
1733 | * spy addresses, we should use something more efficient... | ||
1734 | */ | ||
1735 | void wireless_spy_update(struct net_device * dev, | ||
1736 | unsigned char * address, | ||
1737 | struct iw_quality * wstats) | ||
1738 | { | ||
1739 | struct iw_spy_data * spydata = get_spydata(dev); | ||
1740 | int i; | ||
1741 | int match = -1; | ||
1742 | |||
1743 | /* Make sure driver is not buggy or using the old API */ | ||
1744 | if (!spydata) | ||
1745 | return; | ||
1746 | |||
1747 | /* Update all records that match */ | ||
1748 | for (i = 0; i < spydata->spy_number; i++) | ||
1749 | if (!compare_ether_addr(address, spydata->spy_address[i])) { | ||
1750 | memcpy(&(spydata->spy_stat[i]), wstats, | ||
1751 | sizeof(struct iw_quality)); | ||
1752 | match = i; | ||
1753 | } | ||
1754 | |||
1755 | /* Generate an event if we cross the spy threshold. | ||
1756 | * To avoid event storms, we have a simple hysteresis : we generate | ||
1757 | * event only when we go under the low threshold or above the | ||
1758 | * high threshold. */ | ||
1759 | if (match >= 0) { | ||
1760 | if (spydata->spy_thr_under[match]) { | ||
1761 | if (wstats->level > spydata->spy_thr_high.level) { | ||
1762 | spydata->spy_thr_under[match] = 0; | ||
1763 | iw_send_thrspy_event(dev, spydata, | ||
1764 | address, wstats); | ||
1765 | } | ||
1766 | } else { | ||
1767 | if (wstats->level < spydata->spy_thr_low.level) { | ||
1768 | spydata->spy_thr_under[match] = 1; | ||
1769 | iw_send_thrspy_event(dev, spydata, | ||
1770 | address, wstats); | ||
1771 | } | ||
1772 | } | ||
1773 | } | ||
1774 | } | ||
1775 | EXPORT_SYMBOL(wireless_spy_update); | ||
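On the driver side, the whole spy machinery above is driven by a single call from the receive path. A rough sketch (hypothetical mydrv_* names; the encoding of level/noise is driver specific), which is a no-op unless dev->wireless_data->spy_data has been set up:

        #include <linux/netdevice.h>
        #include <linux/string.h>
        #include <net/iw_handler.h>

        static void mydrv_rx_quality(struct net_device *dev, u8 *src_mac,
                                     u8 level, u8 noise)
        {
                struct iw_quality wstats;

                memset(&wstats, 0, sizeof(wstats));
                wstats.level   = level;
                wstats.noise   = noise;
                wstats.updated = IW_QUAL_LEVEL_UPDATED | IW_QUAL_NOISE_UPDATED |
                                 IW_QUAL_DBM;

                /* Updates matching spy entries and handles the threshold hysteresis. */
                wireless_spy_update(dev, src_mac, &wstats);
        }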
diff --git a/net/wireless/wext-priv.c b/net/wireless/wext-priv.c new file mode 100644 index 000000000000..a3c2277de9e5 --- /dev/null +++ b/net/wireless/wext-priv.c | |||
@@ -0,0 +1,248 @@ | |||
1 | /* | ||
2 | * This file implements the Wireless Extensions priv API. | ||
3 | * | ||
4 | * Authors : Jean Tourrilhes - HPL - <jt@hpl.hp.com> | ||
5 | * Copyright (c) 1997-2007 Jean Tourrilhes, All Rights Reserved. | ||
6 | * Copyright 2009 Johannes Berg <johannes@sipsolutions.net> | ||
7 | * | ||
8 | * (As all part of the Linux kernel, this file is GPL) | ||
9 | */ | ||
10 | #include <linux/wireless.h> | ||
11 | #include <linux/netdevice.h> | ||
12 | #include <net/iw_handler.h> | ||
13 | #include <net/wext.h> | ||
14 | |||
15 | int iw_handler_get_private(struct net_device * dev, | ||
16 | struct iw_request_info * info, | ||
17 | union iwreq_data * wrqu, | ||
18 | char * extra) | ||
19 | { | ||
20 | /* Check if the driver has something to export */ | ||
21 | if ((dev->wireless_handlers->num_private_args == 0) || | ||
22 | (dev->wireless_handlers->private_args == NULL)) | ||
23 | return -EOPNOTSUPP; | ||
24 | |||
25 | /* Check if there is enough buffer up there */ | ||
26 | if (wrqu->data.length < dev->wireless_handlers->num_private_args) { | ||
27 | /* User space can't know in advance how large the buffer | ||
28 | * needs to be. Give it a hint, so that we can support | ||
29 | * any size buffer we want somewhat efficiently... */ | ||
30 | wrqu->data.length = dev->wireless_handlers->num_private_args; | ||
31 | return -E2BIG; | ||
32 | } | ||
33 | |||
34 | /* Set the number of available ioctls. */ | ||
35 | wrqu->data.length = dev->wireless_handlers->num_private_args; | ||
36 | |||
37 | /* Copy structure to the user buffer. */ | ||
38 | memcpy(extra, dev->wireless_handlers->private_args, | ||
39 | sizeof(struct iw_priv_args) * wrqu->data.length); | ||
40 | |||
41 | return 0; | ||
42 | } | ||
43 | |||
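User space reaches iw_handler_get_private() through SIOCGIWPRIV; this is how iwpriv discovers a driver's private commands. A minimal sketch (interface name and buffer size are arbitrary):

        #include <stdio.h>
        #include <string.h>
        #include <unistd.h>
        #include <sys/ioctl.h>
        #include <sys/socket.h>
        #include <linux/wireless.h>

        int main(void)
        {
                struct iw_priv_args priv[64];   /* on -E2BIG, wrq.u.data.length tells
                                                   us how big the table really is */
                struct iwreq wrq;
                int fd = socket(AF_INET, SOCK_DGRAM, 0), i;

                memset(&wrq, 0, sizeof(wrq));
                strncpy(wrq.ifr_name, "wlan0", IFNAMSIZ);
                wrq.u.data.pointer = priv;
                wrq.u.data.length  = 64;
                wrq.u.data.flags   = 0;

                if (ioctl(fd, SIOCGIWPRIV, &wrq) == 0)
                        for (i = 0; i < wrq.u.data.length; i++)
                                printf("0x%04x  %s\n", priv[i].cmd, priv[i].name);
                close(fd);
                return 0;
        }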
44 | /* Size (in bytes) of the various private data types */ | ||
45 | static const char iw_priv_type_size[] = { | ||
46 | 0, /* IW_PRIV_TYPE_NONE */ | ||
47 | 1, /* IW_PRIV_TYPE_BYTE */ | ||
48 | 1, /* IW_PRIV_TYPE_CHAR */ | ||
49 | 0, /* Not defined */ | ||
50 | sizeof(__u32), /* IW_PRIV_TYPE_INT */ | ||
51 | sizeof(struct iw_freq), /* IW_PRIV_TYPE_FLOAT */ | ||
52 | sizeof(struct sockaddr), /* IW_PRIV_TYPE_ADDR */ | ||
53 | 0, /* Not defined */ | ||
54 | }; | ||
55 | |||
56 | static int get_priv_size(__u16 args) | ||
57 | { | ||
58 | int num = args & IW_PRIV_SIZE_MASK; | ||
59 | int type = (args & IW_PRIV_TYPE_MASK) >> 12; | ||
60 | |||
61 | return num * iw_priv_type_size[type]; | ||
62 | } | ||
63 | |||
64 | static int adjust_priv_size(__u16 args, struct iw_point *iwp) | ||
65 | { | ||
66 | int num = iwp->length; | ||
67 | int max = args & IW_PRIV_SIZE_MASK; | ||
68 | int type = (args & IW_PRIV_TYPE_MASK) >> 12; | ||
69 | |||
70 | /* Make sure the driver doesn't goof up */ | ||
71 | if (max < num) | ||
72 | num = max; | ||
73 | |||
74 | return num * iw_priv_type_size[type]; | ||
75 | } | ||
76 | |||
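As a worked example of this size computation (hypothetical driver declaration, not from this patch): a fixed-size SET command taking two integers is described and sized as follows.

        /* Declared by a driver in its iw_handler_def .private_args table: */
        static const struct iw_priv_args mydrv_priv_args[] = {
                { SIOCIWFIRSTPRIV + 0,                          /* even => SET */
                  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2,    /* set: 2 x __u32 */
                  0,                                            /* get: nothing   */
                  "set_txparams" },
        };

        /*
         * get_priv_size(IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2)
         *      = 2 * iw_priv_type_size[IW_PRIV_TYPE_INT >> 12]
         *      = 2 * sizeof(__u32) = 8 bytes,
         * which fits inside IFNAMSIZ, so get_priv_descr_and_size() below
         * returns 0 and the arguments travel inline in iwr->u rather than
         * through a user-space pointer.
         */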
77 | /* | ||
78 | * Wrapper to call a private Wireless Extension handler. | ||
79 | * We do various checks and also take care of moving data between | ||
80 | * user space and kernel space. | ||
81 | * It's not as nice and slimline as the standard wrapper. The cause | ||
82 | * is struct iw_priv_args, which was not really designed for the | ||
83 | * job we are doing here. | ||
84 | * | ||
85 | * IMPORTANT : This function prevents setting and getting data in the same | ||
86 | * IOCTL and enforces the SET/GET convention. Not doing so would be | ||
87 | * far too hairy... | ||
88 | * If you need to set and get data at the same time, please don't use | ||
89 | * an iw_handler but process it in your ioctl handler (i.e. use the | ||
90 | * old driver API). | ||
91 | */ | ||
92 | static int get_priv_descr_and_size(struct net_device *dev, unsigned int cmd, | ||
93 | const struct iw_priv_args **descrp) | ||
94 | { | ||
95 | const struct iw_priv_args *descr; | ||
96 | int i, extra_size; | ||
97 | |||
98 | descr = NULL; | ||
99 | for (i = 0; i < dev->wireless_handlers->num_private_args; i++) { | ||
100 | if (cmd == dev->wireless_handlers->private_args[i].cmd) { | ||
101 | descr = &dev->wireless_handlers->private_args[i]; | ||
102 | break; | ||
103 | } | ||
104 | } | ||
105 | |||
106 | extra_size = 0; | ||
107 | if (descr) { | ||
108 | if (IW_IS_SET(cmd)) { | ||
109 | int offset = 0; /* For sub-ioctls */ | ||
110 | /* Check for sub-ioctl handler */ | ||
111 | if (descr->name[0] == '\0') | ||
112 | /* Reserve one int for sub-ioctl index */ | ||
113 | offset = sizeof(__u32); | ||
114 | |||
115 | /* Size of set arguments */ | ||
116 | extra_size = get_priv_size(descr->set_args); | ||
117 | |||
118 | /* Does it fit in iwr ? */ | ||
119 | if ((descr->set_args & IW_PRIV_SIZE_FIXED) && | ||
120 | ((extra_size + offset) <= IFNAMSIZ)) | ||
121 | extra_size = 0; | ||
122 | } else { | ||
123 | /* Size of get arguments */ | ||
124 | extra_size = get_priv_size(descr->get_args); | ||
125 | |||
126 | /* Does it fit in iwr ? */ | ||
127 | if ((descr->get_args & IW_PRIV_SIZE_FIXED) && | ||
128 | (extra_size <= IFNAMSIZ)) | ||
129 | extra_size = 0; | ||
130 | } | ||
131 | } | ||
132 | *descrp = descr; | ||
133 | return extra_size; | ||
134 | } | ||
135 | |||
136 | static int ioctl_private_iw_point(struct iw_point *iwp, unsigned int cmd, | ||
137 | const struct iw_priv_args *descr, | ||
138 | iw_handler handler, struct net_device *dev, | ||
139 | struct iw_request_info *info, int extra_size) | ||
140 | { | ||
141 | char *extra; | ||
142 | int err; | ||
143 | |||
144 | /* Check what user space is giving us */ | ||
145 | if (IW_IS_SET(cmd)) { | ||
146 | if (!iwp->pointer && iwp->length != 0) | ||
147 | return -EFAULT; | ||
148 | |||
149 | if (iwp->length > (descr->set_args & IW_PRIV_SIZE_MASK)) | ||
150 | return -E2BIG; | ||
151 | } else if (!iwp->pointer) | ||
152 | return -EFAULT; | ||
153 | |||
154 | extra = kmalloc(extra_size, GFP_KERNEL); | ||
155 | if (!extra) | ||
156 | return -ENOMEM; | ||
157 | |||
158 | /* If it is a SET, get all the extra data in here */ | ||
159 | if (IW_IS_SET(cmd) && (iwp->length != 0)) { | ||
160 | if (copy_from_user(extra, iwp->pointer, extra_size)) { | ||
161 | err = -EFAULT; | ||
162 | goto out; | ||
163 | } | ||
164 | } | ||
165 | |||
166 | /* Call the handler */ | ||
167 | err = handler(dev, info, (union iwreq_data *) iwp, extra); | ||
168 | |||
169 | /* If we have something to return to the user */ | ||
170 | if (!err && IW_IS_GET(cmd)) { | ||
171 | /* Adjust for the actual length if it's variable, | ||
172 | * avoid leaking kernel bits outside. | ||
173 | */ | ||
174 | if (!(descr->get_args & IW_PRIV_SIZE_FIXED)) | ||
175 | extra_size = adjust_priv_size(descr->get_args, iwp); | ||
176 | |||
177 | if (copy_to_user(iwp->pointer, extra, extra_size)) | ||
178 | err = -EFAULT; | ||
179 | } | ||
180 | |||
181 | out: | ||
182 | kfree(extra); | ||
183 | return err; | ||
184 | } | ||
185 | |||
186 | int ioctl_private_call(struct net_device *dev, struct iwreq *iwr, | ||
187 | unsigned int cmd, struct iw_request_info *info, | ||
188 | iw_handler handler) | ||
189 | { | ||
190 | int extra_size = 0, ret = -EINVAL; | ||
191 | const struct iw_priv_args *descr; | ||
192 | |||
193 | extra_size = get_priv_descr_and_size(dev, cmd, &descr); | ||
194 | |||
195 | /* Check if we have a pointer to user space data or not. */ | ||
196 | if (extra_size == 0) { | ||
197 | /* No extra arguments. Trivial to handle */ | ||
198 | ret = handler(dev, info, &(iwr->u), (char *) &(iwr->u)); | ||
199 | } else { | ||
200 | ret = ioctl_private_iw_point(&iwr->u.data, cmd, descr, | ||
201 | handler, dev, info, extra_size); | ||
202 | } | ||
203 | |||
204 | /* Call commit handler if needed and defined */ | ||
205 | if (ret == -EIWCOMMIT) | ||
206 | ret = call_commit_handler(dev); | ||
207 | |||
208 | return ret; | ||
209 | } | ||
210 | |||
211 | #ifdef CONFIG_COMPAT | ||
212 | int compat_private_call(struct net_device *dev, struct iwreq *iwr, | ||
213 | unsigned int cmd, struct iw_request_info *info, | ||
214 | iw_handler handler) | ||
215 | { | ||
216 | const struct iw_priv_args *descr; | ||
217 | int ret, extra_size; | ||
218 | |||
219 | extra_size = get_priv_descr_and_size(dev, cmd, &descr); | ||
220 | |||
221 | /* Check if we have a pointer to user space data or not. */ | ||
222 | if (extra_size == 0) { | ||
223 | /* No extra arguments. Trivial to handle */ | ||
224 | ret = handler(dev, info, &(iwr->u), (char *) &(iwr->u)); | ||
225 | } else { | ||
226 | struct compat_iw_point *iwp_compat; | ||
227 | struct iw_point iwp; | ||
228 | |||
229 | iwp_compat = (struct compat_iw_point *) &iwr->u.data; | ||
230 | iwp.pointer = compat_ptr(iwp_compat->pointer); | ||
231 | iwp.length = iwp_compat->length; | ||
232 | iwp.flags = iwp_compat->flags; | ||
233 | |||
234 | ret = ioctl_private_iw_point(&iwp, cmd, descr, | ||
235 | handler, dev, info, extra_size); | ||
236 | |||
237 | iwp_compat->pointer = ptr_to_compat(iwp.pointer); | ||
238 | iwp_compat->length = iwp.length; | ||
239 | iwp_compat->flags = iwp.flags; | ||
240 | } | ||
241 | |||
242 | /* Call commit handler if needed and defined */ | ||
243 | if (ret == -EIWCOMMIT) | ||
244 | ret = call_commit_handler(dev); | ||
245 | |||
246 | return ret; | ||
247 | } | ||
248 | #endif | ||
diff --git a/net/wireless/wext-proc.c b/net/wireless/wext-proc.c new file mode 100644 index 000000000000..273a7f77c834 --- /dev/null +++ b/net/wireless/wext-proc.c | |||
@@ -0,0 +1,155 @@ | |||
1 | /* | ||
2 | * This file implements the Wireless Extensions proc API. | ||
3 | * | ||
4 | * Authors : Jean Tourrilhes - HPL - <jt@hpl.hp.com> | ||
5 | * Copyright (c) 1997-2007 Jean Tourrilhes, All Rights Reserved. | ||
6 | * | ||
7 | * (As all part of the Linux kernel, this file is GPL) | ||
8 | */ | ||
9 | |||
10 | /* | ||
11 | * The /proc/net/wireless file is a human readable user-space interface | ||
12 | * exporting various wireless specific statistics from the wireless devices. | ||
13 | * This is the most popular part of the Wireless Extensions ;-) | ||
14 | * | ||
15 | * This interface is a pure clone of /proc/net/dev (in net/core/dev.c). | ||
16 | * The content of the file is basically the content of "struct iw_statistics". | ||
17 | */ | ||
18 | |||
19 | #include <linux/module.h> | ||
20 | #include <linux/proc_fs.h> | ||
21 | #include <linux/seq_file.h> | ||
22 | #include <linux/wireless.h> | ||
23 | #include <linux/netdevice.h> | ||
24 | #include <linux/rtnetlink.h> | ||
25 | #include <net/iw_handler.h> | ||
26 | #include <net/wext.h> | ||
27 | |||
28 | |||
29 | static void wireless_seq_printf_stats(struct seq_file *seq, | ||
30 | struct net_device *dev) | ||
31 | { | ||
32 | /* Get stats from the driver */ | ||
33 | struct iw_statistics *stats = get_wireless_stats(dev); | ||
34 | static struct iw_statistics nullstats = {}; | ||
35 | |||
36 | /* show device if it's wireless regardless of current stats */ | ||
37 | if (!stats) { | ||
38 | #ifdef CONFIG_WIRELESS_EXT | ||
39 | if (dev->wireless_handlers) | ||
40 | stats = &nullstats; | ||
41 | #endif | ||
42 | #ifdef CONFIG_CFG80211 | ||
43 | if (dev->ieee80211_ptr) | ||
44 | stats = &nullstats; | ||
45 | #endif | ||
46 | } | ||
47 | |||
48 | if (stats) { | ||
49 | seq_printf(seq, "%6s: %04x %3d%c %3d%c %3d%c %6d %6d %6d " | ||
50 | "%6d %6d %6d\n", | ||
51 | dev->name, stats->status, stats->qual.qual, | ||
52 | stats->qual.updated & IW_QUAL_QUAL_UPDATED | ||
53 | ? '.' : ' ', | ||
54 | ((__s32) stats->qual.level) - | ||
55 | ((stats->qual.updated & IW_QUAL_DBM) ? 0x100 : 0), | ||
56 | stats->qual.updated & IW_QUAL_LEVEL_UPDATED | ||
57 | ? '.' : ' ', | ||
58 | ((__s32) stats->qual.noise) - | ||
59 | ((stats->qual.updated & IW_QUAL_DBM) ? 0x100 : 0), | ||
60 | stats->qual.updated & IW_QUAL_NOISE_UPDATED | ||
61 | ? '.' : ' ', | ||
62 | stats->discard.nwid, stats->discard.code, | ||
63 | stats->discard.fragment, stats->discard.retries, | ||
64 | stats->discard.misc, stats->miss.beacon); | ||
65 | |||
66 | if (stats != &nullstats) | ||
67 | stats->qual.updated &= ~IW_QUAL_ALL_UPDATED; | ||
68 | } | ||
69 | } | ||
70 | |||
71 | /* ---------------------------------------------------------------- */ | ||
72 | /* | ||
73 | * Print info for /proc/net/wireless (print all entries) | ||
74 | */ | ||
75 | static int wireless_dev_seq_show(struct seq_file *seq, void *v) | ||
76 | { | ||
77 | might_sleep(); | ||
78 | |||
79 | if (v == SEQ_START_TOKEN) | ||
80 | seq_printf(seq, "Inter-| sta-| Quality | Discarded " | ||
81 | "packets | Missed | WE\n" | ||
82 | " face | tus | link level noise | nwid " | ||
83 | "crypt frag retry misc | beacon | %d\n", | ||
84 | WIRELESS_EXT); | ||
85 | else | ||
86 | wireless_seq_printf_stats(seq, v); | ||
87 | return 0; | ||
88 | } | ||
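For reference, the header printed above plus one wireless_seq_printf_stats() line yields output along these lines (spacing approximate; WE version and values illustrative):

        Inter-| sta-|   Quality        |   Discarded packets               | Missed | WE
         face | tus | link level noise |  nwid  crypt   frag  retry   misc | beacon | 22
         wlan0: 0000   54.  -61.  -92.       0      0      0      0      0        0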
89 | |||
90 | static void *wireless_dev_seq_start(struct seq_file *seq, loff_t *pos) | ||
91 | { | ||
92 | struct net *net = seq_file_net(seq); | ||
93 | loff_t off; | ||
94 | struct net_device *dev; | ||
95 | |||
96 | rtnl_lock(); | ||
97 | if (!*pos) | ||
98 | return SEQ_START_TOKEN; | ||
99 | |||
100 | off = 1; | ||
101 | for_each_netdev(net, dev) | ||
102 | if (off++ == *pos) | ||
103 | return dev; | ||
104 | return NULL; | ||
105 | } | ||
106 | |||
107 | static void *wireless_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos) | ||
108 | { | ||
109 | struct net *net = seq_file_net(seq); | ||
110 | |||
111 | ++*pos; | ||
112 | |||
113 | return v == SEQ_START_TOKEN ? | ||
114 | first_net_device(net) : next_net_device(v); | ||
115 | } | ||
116 | |||
117 | static void wireless_dev_seq_stop(struct seq_file *seq, void *v) | ||
118 | { | ||
119 | rtnl_unlock(); | ||
120 | } | ||
121 | |||
122 | static const struct seq_operations wireless_seq_ops = { | ||
123 | .start = wireless_dev_seq_start, | ||
124 | .next = wireless_dev_seq_next, | ||
125 | .stop = wireless_dev_seq_stop, | ||
126 | .show = wireless_dev_seq_show, | ||
127 | }; | ||
128 | |||
129 | static int seq_open_wireless(struct inode *inode, struct file *file) | ||
130 | { | ||
131 | return seq_open_net(inode, file, &wireless_seq_ops, | ||
132 | sizeof(struct seq_net_private)); | ||
133 | } | ||
134 | |||
135 | static const struct file_operations wireless_seq_fops = { | ||
136 | .owner = THIS_MODULE, | ||
137 | .open = seq_open_wireless, | ||
138 | .read = seq_read, | ||
139 | .llseek = seq_lseek, | ||
140 | .release = seq_release_net, | ||
141 | }; | ||
142 | |||
143 | int wext_proc_init(struct net *net) | ||
144 | { | ||
145 | /* Create /proc/net/wireless entry */ | ||
146 | if (!proc_net_fops_create(net, "wireless", S_IRUGO, &wireless_seq_fops)) | ||
147 | return -ENOMEM; | ||
148 | |||
149 | return 0; | ||
150 | } | ||
151 | |||
152 | void wext_proc_exit(struct net *net) | ||
153 | { | ||
154 | proc_net_remove(net, "wireless"); | ||
155 | } | ||
diff --git a/net/wireless/wext-spy.c b/net/wireless/wext-spy.c new file mode 100644 index 000000000000..6dcfe65a2d1a --- /dev/null +++ b/net/wireless/wext-spy.c | |||
@@ -0,0 +1,231 @@ | |||
1 | /* | ||
2 | * This file implements the Wireless Extensions spy API. | ||
3 | * | ||
4 | * Authors : Jean Tourrilhes - HPL - <jt@hpl.hp.com> | ||
5 | * Copyright (c) 1997-2007 Jean Tourrilhes, All Rights Reserved. | ||
6 | * | ||
7 | * (As all part of the Linux kernel, this file is GPL) | ||
8 | */ | ||
9 | |||
10 | #include <linux/wireless.h> | ||
11 | #include <linux/netdevice.h> | ||
12 | #include <linux/etherdevice.h> | ||
13 | #include <net/iw_handler.h> | ||
14 | #include <net/arp.h> | ||
15 | #include <net/wext.h> | ||
16 | |||
17 | static inline struct iw_spy_data *get_spydata(struct net_device *dev) | ||
18 | { | ||
19 | /* This is the new way */ | ||
20 | if (dev->wireless_data) | ||
21 | return dev->wireless_data->spy_data; | ||
22 | return NULL; | ||
23 | } | ||
24 | |||
25 | int iw_handler_set_spy(struct net_device * dev, | ||
26 | struct iw_request_info * info, | ||
27 | union iwreq_data * wrqu, | ||
28 | char * extra) | ||
29 | { | ||
30 | struct iw_spy_data * spydata = get_spydata(dev); | ||
31 | struct sockaddr * address = (struct sockaddr *) extra; | ||
32 | |||
33 | /* Make sure driver is not buggy or using the old API */ | ||
34 | if (!spydata) | ||
35 | return -EOPNOTSUPP; | ||
36 | |||
37 | /* Disable spy collection while we copy the addresses. | ||
38 | * While we copy addresses, any call to wireless_spy_update() | ||
39 | * will NOP. This is OK, as anyway the addresses are changing. */ | ||
40 | spydata->spy_number = 0; | ||
41 | |||
42 | /* We want to operate without locking, because wireless_spy_update() | ||
43 | * most likely will happen in the interrupt handler, and therefore | ||
44 | * have its own locking constraints and needs performance. | ||
45 | * The rtnl_lock() makes sure we don't race with the other iw_handlers. | ||
46 | * This makes sure wireless_spy_update() "sees" that the spy list | ||
47 | * is temporarily disabled. */ | ||
48 | smp_wmb(); | ||
49 | |||
50 | /* Are there addresses to copy? */ | ||
51 | if (wrqu->data.length > 0) { | ||
52 | int i; | ||
53 | |||
54 | /* Copy addresses */ | ||
55 | for (i = 0; i < wrqu->data.length; i++) | ||
56 | memcpy(spydata->spy_address[i], address[i].sa_data, | ||
57 | ETH_ALEN); | ||
58 | /* Reset stats */ | ||
59 | memset(spydata->spy_stat, 0, | ||
60 | sizeof(struct iw_quality) * IW_MAX_SPY); | ||
61 | } | ||
62 | |||
63 | /* Make sure above is updated before re-enabling */ | ||
64 | smp_wmb(); | ||
65 | |||
66 | /* Enable addresses */ | ||
67 | spydata->spy_number = wrqu->data.length; | ||
68 | |||
69 | return 0; | ||
70 | } | ||
71 | EXPORT_SYMBOL(iw_handler_set_spy); | ||
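The matching user-space side (what iwspy does) hands iw_handler_set_spy() an array of struct sockaddr through SIOCSIWSPY. A minimal hedged sketch, with an arbitrary interface name and MAC address:

        #include <string.h>
        #include <unistd.h>
        #include <sys/ioctl.h>
        #include <sys/socket.h>
        #include <net/if_arp.h>
        #include <linux/wireless.h>

        int main(void)
        {
                struct sockaddr addr[1];
                struct iwreq wrq;
                int fd = socket(AF_INET, SOCK_DGRAM, 0);

                memset(addr, 0, sizeof(addr));
                addr[0].sa_family = ARPHRD_ETHER;
                memcpy(addr[0].sa_data, "\x00\x11\x22\x33\x44\x55", 6);

                memset(&wrq, 0, sizeof(wrq));
                strncpy(wrq.ifr_name, "wlan0", IFNAMSIZ);
                wrq.u.data.pointer = addr;
                wrq.u.data.length  = 1;        /* one address to watch */
                wrq.u.data.flags   = 0;

                ioctl(fd, SIOCSIWSPY, &wrq);   /* stats read back later via SIOCGIWSPY */
                close(fd);
                return 0;
        }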
72 | |||
73 | int iw_handler_get_spy(struct net_device * dev, | ||
74 | struct iw_request_info * info, | ||
75 | union iwreq_data * wrqu, | ||
76 | char * extra) | ||
77 | { | ||
78 | struct iw_spy_data * spydata = get_spydata(dev); | ||
79 | struct sockaddr * address = (struct sockaddr *) extra; | ||
80 | int i; | ||
81 | |||
82 | /* Make sure driver is not buggy or using the old API */ | ||
83 | if (!spydata) | ||
84 | return -EOPNOTSUPP; | ||
85 | |||
86 | wrqu->data.length = spydata->spy_number; | ||
87 | |||
88 | /* Copy addresses. */ | ||
89 | for (i = 0; i < spydata->spy_number; i++) { | ||
90 | memcpy(address[i].sa_data, spydata->spy_address[i], ETH_ALEN); | ||
91 | address[i].sa_family = AF_UNIX; | ||
92 | } | ||
93 | /* Copy stats to the user buffer (just after). */ | ||
94 | if (spydata->spy_number > 0) | ||
95 | memcpy(extra + (sizeof(struct sockaddr) *spydata->spy_number), | ||
96 | spydata->spy_stat, | ||
97 | sizeof(struct iw_quality) * spydata->spy_number); | ||
98 | /* Reset updated flags. */ | ||
99 | for (i = 0; i < spydata->spy_number; i++) | ||
100 | spydata->spy_stat[i].updated &= ~IW_QUAL_ALL_UPDATED; | ||
101 | return 0; | ||
102 | } | ||
103 | EXPORT_SYMBOL(iw_handler_get_spy); | ||
104 | |||
105 | /*------------------------------------------------------------------*/ | ||
106 | /* | ||
107 | * Standard Wireless Handler : set spy threshold | ||
108 | */ | ||
109 | int iw_handler_set_thrspy(struct net_device * dev, | ||
110 | struct iw_request_info *info, | ||
111 | union iwreq_data * wrqu, | ||
112 | char * extra) | ||
113 | { | ||
114 | struct iw_spy_data * spydata = get_spydata(dev); | ||
115 | struct iw_thrspy * threshold = (struct iw_thrspy *) extra; | ||
116 | |||
117 | /* Make sure driver is not buggy or using the old API */ | ||
118 | if (!spydata) | ||
119 | return -EOPNOTSUPP; | ||
120 | |||
121 | /* Just do it */ | ||
122 | memcpy(&(spydata->spy_thr_low), &(threshold->low), | ||
123 | 2 * sizeof(struct iw_quality)); | ||
124 | |||
125 | /* Clear flag */ | ||
126 | memset(spydata->spy_thr_under, '\0', sizeof(spydata->spy_thr_under)); | ||
127 | |||
128 | return 0; | ||
129 | } | ||
130 | EXPORT_SYMBOL(iw_handler_set_thrspy); | ||
131 | |||
132 | /*------------------------------------------------------------------*/ | ||
133 | /* | ||
134 | * Standard Wireless Handler: get spy threshold | ||
135 | */ | ||
136 | int iw_handler_get_thrspy(struct net_device * dev, | ||
137 | struct iw_request_info *info, | ||
138 | union iwreq_data * wrqu, | ||
139 | char * extra) | ||
140 | { | ||
141 | struct iw_spy_data * spydata = get_spydata(dev); | ||
142 | struct iw_thrspy * threshold = (struct iw_thrspy *) extra; | ||
143 | |||
144 | /* Make sure driver is not buggy or using the old API */ | ||
145 | if (!spydata) | ||
146 | return -EOPNOTSUPP; | ||
147 | |||
148 | /* Just do it */ | ||
149 | memcpy(&(threshold->low), &(spydata->spy_thr_low), | ||
150 | 2 * sizeof(struct iw_quality)); | ||
151 | |||
152 | return 0; | ||
153 | } | ||
154 | EXPORT_SYMBOL(iw_handler_get_thrspy); | ||
155 | |||
156 | /*------------------------------------------------------------------*/ | ||
157 | /* | ||
158 | * Prepare and send a Spy Threshold event | ||
159 | */ | ||
160 | static void iw_send_thrspy_event(struct net_device * dev, | ||
161 | struct iw_spy_data * spydata, | ||
162 | unsigned char * address, | ||
163 | struct iw_quality * wstats) | ||
164 | { | ||
165 | union iwreq_data wrqu; | ||
166 | struct iw_thrspy threshold; | ||
167 | |||
168 | /* Init */ | ||
169 | wrqu.data.length = 1; | ||
170 | wrqu.data.flags = 0; | ||
171 | /* Copy address */ | ||
172 | memcpy(threshold.addr.sa_data, address, ETH_ALEN); | ||
173 | threshold.addr.sa_family = ARPHRD_ETHER; | ||
174 | /* Copy stats */ | ||
175 | memcpy(&(threshold.qual), wstats, sizeof(struct iw_quality)); | ||
176 | /* Copy also thresholds */ | ||
177 | memcpy(&(threshold.low), &(spydata->spy_thr_low), | ||
178 | 2 * sizeof(struct iw_quality)); | ||
179 | |||
180 | /* Send event to user space */ | ||
181 | wireless_send_event(dev, SIOCGIWTHRSPY, &wrqu, (char *) &threshold); | ||
182 | } | ||
183 | |||
184 | /* ---------------------------------------------------------------- */ | ||
185 | /* | ||
186 | * Called by the driver to update the spy data. | ||
187 | * For now, the spy data is a simple array. As the size of the array is | ||
188 | * small, this is good enough. If we wanted to support a larger number of | ||
189 | * spy addresses, we would need something more efficient... | ||
190 | */ | ||
191 | void wireless_spy_update(struct net_device * dev, | ||
192 | unsigned char * address, | ||
193 | struct iw_quality * wstats) | ||
194 | { | ||
195 | struct iw_spy_data * spydata = get_spydata(dev); | ||
196 | int i; | ||
197 | int match = -1; | ||
198 | |||
199 | /* Make sure driver is not buggy or using the old API */ | ||
200 | if (!spydata) | ||
201 | return; | ||
202 | |||
203 | /* Update all records that match */ | ||
204 | for (i = 0; i < spydata->spy_number; i++) | ||
205 | if (!compare_ether_addr(address, spydata->spy_address[i])) { | ||
206 | memcpy(&(spydata->spy_stat[i]), wstats, | ||
207 | sizeof(struct iw_quality)); | ||
208 | match = i; | ||
209 | } | ||
210 | |||
211 | /* Generate an event if we cross the spy threshold. | ||
212 | * To avoid event storms, we have a simple hysteresis: we generate an | ||
213 | * event only when we go below the low threshold or above the | ||
214 | * high threshold. */ | ||
215 | if (match >= 0) { | ||
216 | if (spydata->spy_thr_under[match]) { | ||
217 | if (wstats->level > spydata->spy_thr_high.level) { | ||
218 | spydata->spy_thr_under[match] = 0; | ||
219 | iw_send_thrspy_event(dev, spydata, | ||
220 | address, wstats); | ||
221 | } | ||
222 | } else { | ||
223 | if (wstats->level < spydata->spy_thr_low.level) { | ||
224 | spydata->spy_thr_under[match] = 1; | ||
225 | iw_send_thrspy_event(dev, spydata, | ||
226 | address, wstats); | ||
227 | } | ||
228 | } | ||
229 | } | ||
230 | } | ||
231 | EXPORT_SYMBOL(wireless_spy_update); | ||
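The comment above positions wireless_spy_update() as the hook a wireless driver calls from its receive path whenever it has fresh per-transmitter signal statistics. A hedged sketch of such a call site follows; the mydrv_* names, the rx descriptor layout and the dBm fields are invented for illustration, and the driver is assumed to have pointed dev->wireless_data->spy_data at its struct iw_spy_data, as get_spydata() at the top of this file expects.

#include <linux/types.h>
#include <linux/string.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/wireless.h>
#include <net/iw_handler.h>

/* Hypothetical per-frame receive descriptor -- layout invented for this sketch. */
struct mydrv_rx_desc {
	u8 src_addr[ETH_ALEN];	/* transmitter of the received frame */
	s8 rssi;		/* signal level reported by the hardware, dBm */
	s8 noise;		/* noise floor, dBm */
};

static void mydrv_rx_spy(struct net_device *dev,
			 const struct mydrv_rx_desc *desc)
{
	struct iw_quality wstats;

	memset(&wstats, 0, sizeof(wstats));
	wstats.level   = desc->rssi;
	wstats.noise   = desc->noise;
	wstats.updated = IW_QUAL_LEVEL_UPDATED | IW_QUAL_NOISE_UPDATED |
			 IW_QUAL_QUAL_INVALID;

	/* Record the stats against any matching spy entry; this may emit a
	 * SIOCGIWTHRSPY event if the hysteresis thresholds are crossed. */
	wireless_spy_update(dev, (unsigned char *)desc->src_addr, &wstats);
}

In a real driver this would typically be called once per received frame, before the skb is handed to the networking stack.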
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c index 7fa9c7ad3d3b..e19d811788a5 100644 --- a/net/x25/af_x25.c +++ b/net/x25/af_x25.c | |||
@@ -1363,7 +1363,7 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) | |||
1363 | facilities.throughput > 0xDD) | 1363 | facilities.throughput > 0xDD) |
1364 | break; | 1364 | break; |
1365 | if (facilities.reverse && | 1365 | if (facilities.reverse && |
1366 | (facilities.reverse | 0x81)!= 0x81) | 1366 | (facilities.reverse & 0x81) != 0x81) |
1367 | break; | 1367 | break; |
1368 | x25->facilities = facilities; | 1368 | x25->facilities = facilities; |
1369 | rc = 0; | 1369 | rc = 0; |
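The operator change in this hunk is subtle: with '|', the expression (facilities.reverse | 0x81) can only differ from 0x81 when a bit outside 0x81 is set, so the old test rejected stray bits but accepted, for example, 0x01 or 0x80 alone; with '&', the test rejects any non-zero value that does not have both the 0x80 and 0x01 bits set. The tiny stand-alone program below, written only to illustrate the operator change (it carries no X.25 facility semantics), prints the byte values on which the two checks disagree.

#include <stdio.h>

int main(void)
{
	unsigned int v;

	/* Compare the old and the new validity test for every non-zero byte. */
	for (v = 1; v < 256; v++) {
		int old_reject = (v | 0x81) != 0x81; /* a bit outside 0x81 is set      */
		int new_reject = (v & 0x81) != 0x81; /* 0x80 and 0x01 are not both set */

		if (old_reject != new_reject)
			printf("0x%02x: old=%d new=%d\n", v, old_reject, new_reject);
	}
	return 0;
}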
@@ -1476,7 +1476,7 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) | |||
1476 | return rc; | 1476 | return rc; |
1477 | } | 1477 | } |
1478 | 1478 | ||
1479 | static struct net_proto_family x25_family_ops = { | 1479 | static const struct net_proto_family x25_family_ops = { |
1480 | .family = AF_X25, | 1480 | .family = AF_X25, |
1481 | .create = x25_create, | 1481 | .create = x25_create, |
1482 | .owner = THIS_MODULE, | 1482 | .owner = THIS_MODULE, |
diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c index 7d7c3abf38b5..96d922783547 100644 --- a/net/x25/x25_in.c +++ b/net/x25/x25_in.c | |||
@@ -114,7 +114,7 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp | |||
114 | /* | 114 | /* |
115 | * Copy any Call User Data. | 115 | * Copy any Call User Data. |
116 | */ | 116 | */ |
117 | if (skb->len >= 0) { | 117 | if (skb->len > 0) { |
118 | skb_copy_from_linear_data(skb, | 118 | skb_copy_from_linear_data(skb, |
119 | x25->calluserdata.cuddata, | 119 | x25->calluserdata.cuddata, |
120 | skb->len); | 120 | skb->len); |
diff --git a/net/x25/x25_route.c b/net/x25/x25_route.c index 2c999ccf504a..66961ea28c91 100644 --- a/net/x25/x25_route.c +++ b/net/x25/x25_route.c | |||
@@ -190,7 +190,7 @@ int x25_route_ioctl(unsigned int cmd, void __user *arg) | |||
190 | goto out; | 190 | goto out; |
191 | 191 | ||
192 | rc = -EINVAL; | 192 | rc = -EINVAL; |
193 | if (rt.sigdigits < 0 || rt.sigdigits > 15) | 193 | if (rt.sigdigits > 15) |
194 | goto out; | 194 | goto out; |
195 | 195 | ||
196 | dev = x25_dev_get(rt.device); | 196 | dev = x25_dev_get(rt.device); |
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c index faf54c6bf96b..b39341072aa6 100644 --- a/net/xfrm/xfrm_algo.c +++ b/net/xfrm/xfrm_algo.c | |||
@@ -689,84 +689,6 @@ int xfrm_count_enc_supported(void) | |||
689 | } | 689 | } |
690 | EXPORT_SYMBOL_GPL(xfrm_count_enc_supported); | 690 | EXPORT_SYMBOL_GPL(xfrm_count_enc_supported); |
691 | 691 | ||
692 | /* Move to common area: it is shared with AH. */ | ||
693 | |||
694 | int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc, | ||
695 | int offset, int len, icv_update_fn_t icv_update) | ||
696 | { | ||
697 | int start = skb_headlen(skb); | ||
698 | int i, copy = start - offset; | ||
699 | struct sk_buff *frag_iter; | ||
700 | struct scatterlist sg; | ||
701 | int err; | ||
702 | |||
703 | /* Checksum header. */ | ||
704 | if (copy > 0) { | ||
705 | if (copy > len) | ||
706 | copy = len; | ||
707 | |||
708 | sg_init_one(&sg, skb->data + offset, copy); | ||
709 | |||
710 | err = icv_update(desc, &sg, copy); | ||
711 | if (unlikely(err)) | ||
712 | return err; | ||
713 | |||
714 | if ((len -= copy) == 0) | ||
715 | return 0; | ||
716 | offset += copy; | ||
717 | } | ||
718 | |||
719 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | ||
720 | int end; | ||
721 | |||
722 | WARN_ON(start > offset + len); | ||
723 | |||
724 | end = start + skb_shinfo(skb)->frags[i].size; | ||
725 | if ((copy = end - offset) > 0) { | ||
726 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | ||
727 | |||
728 | if (copy > len) | ||
729 | copy = len; | ||
730 | |||
731 | sg_init_table(&sg, 1); | ||
732 | sg_set_page(&sg, frag->page, copy, | ||
733 | frag->page_offset + offset-start); | ||
734 | |||
735 | err = icv_update(desc, &sg, copy); | ||
736 | if (unlikely(err)) | ||
737 | return err; | ||
738 | |||
739 | if (!(len -= copy)) | ||
740 | return 0; | ||
741 | offset += copy; | ||
742 | } | ||
743 | start = end; | ||
744 | } | ||
745 | |||
746 | skb_walk_frags(skb, frag_iter) { | ||
747 | int end; | ||
748 | |||
749 | WARN_ON(start > offset + len); | ||
750 | |||
751 | end = start + frag_iter->len; | ||
752 | if ((copy = end - offset) > 0) { | ||
753 | if (copy > len) | ||
754 | copy = len; | ||
755 | err = skb_icv_walk(frag_iter, desc, offset-start, | ||
756 | copy, icv_update); | ||
757 | if (unlikely(err)) | ||
758 | return err; | ||
759 | if ((len -= copy) == 0) | ||
760 | return 0; | ||
761 | offset += copy; | ||
762 | } | ||
763 | start = end; | ||
764 | } | ||
765 | BUG_ON(len); | ||
766 | return 0; | ||
767 | } | ||
768 | EXPORT_SYMBOL_GPL(skb_icv_walk); | ||
769 | |||
770 | #if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE) | 692 | #if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE) |
771 | 693 | ||
772 | void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len) | 694 | void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len) |