Diffstat (limited to 'net')
 206 files changed, 11952 insertions(+), 8702 deletions(-)
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 8fbcefe10c9f..1e5c9904571d 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -171,7 +171,7 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev, | |||
171 | skb->dev = __find_vlan_dev(dev, vid); | 171 | skb->dev = __find_vlan_dev(dev, vid); |
172 | if (!skb->dev) { | 172 | if (!skb->dev) { |
173 | pr_debug("%s: ERROR: No net_device for VID: %u on dev: %s\n", | 173 | pr_debug("%s: ERROR: No net_device for VID: %u on dev: %s\n", |
174 | __FUNCTION__, (unsigned int)vid, dev->name); | 174 | __func__, (unsigned int)vid, dev->name); |
175 | goto err_unlock; | 175 | goto err_unlock; |
176 | } | 176 | } |
177 | 177 | ||
@@ -187,7 +187,7 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev, | |||
187 | ntohs(vhdr->h_vlan_TCI)); | 187 | ntohs(vhdr->h_vlan_TCI)); |
188 | 188 | ||
189 | pr_debug("%s: priority: %u for TCI: %hu\n", | 189 | pr_debug("%s: priority: %u for TCI: %hu\n", |
190 | __FUNCTION__, skb->priority, ntohs(vhdr->h_vlan_TCI)); | 190 | __func__, skb->priority, ntohs(vhdr->h_vlan_TCI)); |
191 | 191 | ||
192 | switch (skb->pkt_type) { | 192 | switch (skb->pkt_type) { |
193 | case PACKET_BROADCAST: /* Yeah, stats collect these together.. */ | 193 | case PACKET_BROADCAST: /* Yeah, stats collect these together.. */ |
@@ -268,7 +268,7 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev, | |||
268 | struct net_device *vdev = dev; | 268 | struct net_device *vdev = dev; |
269 | 269 | ||
270 | pr_debug("%s: skb: %p type: %hx len: %u vlan_id: %hx, daddr: %p\n", | 270 | pr_debug("%s: skb: %p type: %hx len: %u vlan_id: %hx, daddr: %p\n", |
271 | __FUNCTION__, skb, type, len, vlan_dev_info(dev)->vlan_id, | 271 | __func__, skb, type, len, vlan_dev_info(dev)->vlan_id, |
272 | daddr); | 272 | daddr); |
273 | 273 | ||
274 | /* build vlan header only if re_order_header flag is NOT set. This | 274 | /* build vlan header only if re_order_header flag is NOT set. This |
@@ -340,7 +340,7 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev, | |||
340 | return -ENOMEM; | 340 | return -ENOMEM; |
341 | } | 341 | } |
342 | vlan_dev_info(vdev)->cnt_inc_headroom_on_tx++; | 342 | vlan_dev_info(vdev)->cnt_inc_headroom_on_tx++; |
343 | pr_debug("%s: %s: had to grow skb\n", __FUNCTION__, vdev->name); | 343 | pr_debug("%s: %s: had to grow skb\n", __func__, vdev->name); |
344 | } | 344 | } |
345 | 345 | ||
346 | if (build_vlan_header) { | 346 | if (build_vlan_header) { |
@@ -382,7 +382,7 @@ static int vlan_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
382 | vlan_dev_info(dev)->cnt_encap_on_xmit++; | 382 | vlan_dev_info(dev)->cnt_encap_on_xmit++; |
383 | 383 | ||
384 | pr_debug("%s: proto to encap: 0x%hx\n", | 384 | pr_debug("%s: proto to encap: 0x%hx\n", |
385 | __FUNCTION__, ntohs(veth->h_vlan_proto)); | 385 | __func__, ntohs(veth->h_vlan_proto)); |
386 | /* Construct the second two bytes. This field looks something | 386 | /* Construct the second two bytes. This field looks something |
387 | * like: | 387 | * like: |
388 | * usr_priority: 3 bits (high bits) | 388 | * usr_priority: 3 bits (high bits) |
@@ -403,7 +403,7 @@ static int vlan_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
403 | } | 403 | } |
404 | 404 | ||
405 | pr_debug("%s: about to send skb: %p to dev: %s\n", | 405 | pr_debug("%s: about to send skb: %p to dev: %s\n", |
406 | __FUNCTION__, skb, skb->dev->name); | 406 | __func__, skb, skb->dev->name); |
407 | pr_debug(" " MAC_FMT " " MAC_FMT " %4hx %4hx %4hx\n", | 407 | pr_debug(" " MAC_FMT " " MAC_FMT " %4hx %4hx %4hx\n", |
408 | veth->h_dest[0], veth->h_dest[1], veth->h_dest[2], | 408 | veth->h_dest[0], veth->h_dest[1], veth->h_dest[2], |
409 | veth->h_dest[3], veth->h_dest[4], veth->h_dest[5], | 409 | veth->h_dest[3], veth->h_dest[4], veth->h_dest[5], |
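The vlan_dev.c and vlanproc.c hunks (and the matching ones further down in net/9p, net/bridge, net/core and net/dccp) are a mechanical switch from the GCC-specific __FUNCTION__ to the C99-standard __func__ predefined identifier. A minimal userspace sketch, not taken from the patch, showing that the substitution is a drop-in replacement:

/* Sketch (not from the patch): __func__ is the C99 predefined identifier for
 * the enclosing function's name; __FUNCTION__ is the older GCC spelling of
 * the same thing, so only the spelling changes.
 */
#include <stdio.h>

static void report(void)
{
	printf("%s: hello\n", __func__);	/* prints "report: hello" */
}

int main(void)
{
	report();
	return 0;
}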
diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c
index 146cfb0e9882..3b8657a0b837 100644
--- a/net/8021q/vlanproc.c
+++ b/net/8021q/vlanproc.c
@@ -168,7 +168,7 @@ int __init vlan_proc_init(void) | |||
168 | return 0; | 168 | return 0; |
169 | 169 | ||
170 | err: | 170 | err: |
171 | pr_err("%s: can't create entry in proc filesystem!\n", __FUNCTION__); | 171 | pr_err("%s: can't create entry in proc filesystem!\n", __func__); |
172 | vlan_proc_cleanup(); | 172 | vlan_proc_cleanup(); |
173 | return -ENOBUFS; | 173 | return -ENOBUFS; |
174 | } | 174 | } |
diff --git a/net/9p/error.c b/net/9p/error.c
index ab2458b6c903..64104b9cb422 100644
--- a/net/9p/error.c
+++ b/net/9p/error.c
@@ -230,7 +230,7 @@ int p9_errstr2errno(char *errstr, int len) | |||
230 | if (errno == 0) { | 230 | if (errno == 0) { |
231 | /* TODO: if error isn't found, add it dynamically */ | 231 | /* TODO: if error isn't found, add it dynamically */ |
232 | errstr[len] = 0; | 232 | errstr[len] = 0; |
233 | printk(KERN_ERR "%s: errstr :%s: not found\n", __FUNCTION__, | 233 | printk(KERN_ERR "%s: errstr :%s: not found\n", __func__, |
234 | errstr); | 234 | errstr); |
235 | errno = 1; | 235 | errno = 1; |
236 | } | 236 | } |
diff --git a/net/Kconfig b/net/Kconfig
index 6627c6ae5db6..acbf7c60e89b 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -45,7 +45,7 @@ config INET | |||
45 | ---help--- | 45 | ---help--- |
46 | These are the protocols used on the Internet and on most local | 46 | These are the protocols used on the Internet and on most local |
47 | Ethernets. It is highly recommended to say Y here (this will enlarge | 47 | Ethernets. It is highly recommended to say Y here (this will enlarge |
48 | your kernel by about 144 KB), since some programs (e.g. the X window | 48 | your kernel by about 400 KB), since some programs (e.g. the X window |
49 | system) use TCP/IP even if your machine is not connected to any | 49 | system) use TCP/IP even if your machine is not connected to any |
50 | other computer. You will get the so-called loopback device which | 50 | other computer. You will get the so-called loopback device which |
51 | allows you to ping yourself (great fun, that!). | 51 | allows you to ping yourself (great fun, that!). |
diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c
index 18058bbc7962..61166f66479f 100644
--- a/net/appletalk/aarp.c
+++ b/net/appletalk/aarp.c
@@ -1033,25 +1033,8 @@ static const struct seq_operations aarp_seq_ops = { | |||
1033 | 1033 | ||
1034 | static int aarp_seq_open(struct inode *inode, struct file *file) | 1034 | static int aarp_seq_open(struct inode *inode, struct file *file) |
1035 | { | 1035 | { |
1036 | struct seq_file *seq; | 1036 | return seq_open_private(file, &aarp_seq_ops, |
1037 | int rc = -ENOMEM; | 1037 | sizeof(struct aarp_iter_state)); |
1038 | struct aarp_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL); | ||
1039 | |||
1040 | if (!s) | ||
1041 | goto out; | ||
1042 | |||
1043 | rc = seq_open(file, &aarp_seq_ops); | ||
1044 | if (rc) | ||
1045 | goto out_kfree; | ||
1046 | |||
1047 | seq = file->private_data; | ||
1048 | seq->private = s; | ||
1049 | memset(s, 0, sizeof(*s)); | ||
1050 | out: | ||
1051 | return rc; | ||
1052 | out_kfree: | ||
1053 | kfree(s); | ||
1054 | goto out; | ||
1055 | } | 1038 | } |
1056 | 1039 | ||
1057 | const struct file_operations atalk_seq_arp_fops = { | 1040 | const struct file_operations atalk_seq_arp_fops = { |
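The aarp_seq_open() conversion above (and the matching ones in net/atm/lec.c and net/decnet/af_decnet.c below) replaces the open-coded kmalloc + seq_open + private-pointer wiring with the seq_open_private() helper, which allocates a zeroed private area of the requested size and attaches it to the seq_file; seq_release_private() frees it again on close. A sketch of the resulting pattern, with hypothetical my_* names standing in for the per-subsystem ones:

/* Sketch with hypothetical my_* names; assumes a my_seq_ops defined elsewhere. */
#include <linux/fs.h>
#include <linux/seq_file.h>

struct my_iter_state {
	int bucket;			/* zeroed by seq_open_private() */
};

extern const struct seq_operations my_seq_ops;

static int my_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_private(file, &my_seq_ops,
				sizeof(struct my_iter_state));
}

static const struct file_operations my_seq_fops = {
	.open		= my_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,	/* frees the private state */
};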
diff --git a/net/atm/clip.c b/net/atm/clip.c
index d30167c0b48e..d45971bd286c 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -648,10 +648,6 @@ static int clip_inet_event(struct notifier_block *this, unsigned long event, | |||
648 | struct in_device *in_dev; | 648 | struct in_device *in_dev; |
649 | 649 | ||
650 | in_dev = ((struct in_ifaddr *)ifa)->ifa_dev; | 650 | in_dev = ((struct in_ifaddr *)ifa)->ifa_dev; |
651 | if (!in_dev || !in_dev->dev) { | ||
652 | printk(KERN_WARNING "clip_inet_event: no device\n"); | ||
653 | return NOTIFY_DONE; | ||
654 | } | ||
655 | /* | 651 | /* |
656 | * Transitions are of the down-change-up type, so it's sufficient to | 652 | * Transitions are of the down-change-up type, so it's sufficient to |
657 | * handle the change on up. | 653 | * handle the change on up. |
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 0e450d12f035..e2d800d818e3 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -1169,32 +1169,7 @@ static const struct seq_operations lec_seq_ops = { | |||
1169 | 1169 | ||
1170 | static int lec_seq_open(struct inode *inode, struct file *file) | 1170 | static int lec_seq_open(struct inode *inode, struct file *file) |
1171 | { | 1171 | { |
1172 | struct lec_state *state; | 1172 | return seq_open_private(file, &lec_seq_ops, sizeof(struct lec_state)); |
1173 | struct seq_file *seq; | ||
1174 | int rc = -EAGAIN; | ||
1175 | |||
1176 | state = kmalloc(sizeof(*state), GFP_KERNEL); | ||
1177 | if (!state) { | ||
1178 | rc = -ENOMEM; | ||
1179 | goto out; | ||
1180 | } | ||
1181 | |||
1182 | rc = seq_open(file, &lec_seq_ops); | ||
1183 | if (rc) | ||
1184 | goto out_kfree; | ||
1185 | seq = file->private_data; | ||
1186 | seq->private = state; | ||
1187 | out: | ||
1188 | return rc; | ||
1189 | |||
1190 | out_kfree: | ||
1191 | kfree(state); | ||
1192 | goto out; | ||
1193 | } | ||
1194 | |||
1195 | static int lec_seq_release(struct inode *inode, struct file *file) | ||
1196 | { | ||
1197 | return seq_release_private(inode, file); | ||
1198 | } | 1173 | } |
1199 | 1174 | ||
1200 | static const struct file_operations lec_seq_fops = { | 1175 | static const struct file_operations lec_seq_fops = { |
@@ -1202,7 +1177,7 @@ static const struct file_operations lec_seq_fops = { | |||
1202 | .open = lec_seq_open, | 1177 | .open = lec_seq_open, |
1203 | .read = seq_read, | 1178 | .read = seq_read, |
1204 | .llseek = seq_lseek, | 1179 | .llseek = seq_lseek, |
1205 | .release = lec_seq_release, | 1180 | .release = seq_release_private, |
1206 | }; | 1181 | }; |
1207 | #endif | 1182 | #endif |
1208 | 1183 | ||
diff --git a/net/atm/proc.c b/net/atm/proc.c
index e9693aed7ef8..b995b66b5585 100644
--- a/net/atm/proc.c
+++ b/net/atm/proc.c
@@ -114,31 +114,13 @@ static int __vcc_seq_open(struct inode *inode, struct file *file, | |||
114 | int family, const struct seq_operations *ops) | 114 | int family, const struct seq_operations *ops) |
115 | { | 115 | { |
116 | struct vcc_state *state; | 116 | struct vcc_state *state; |
117 | struct seq_file *seq; | ||
118 | int rc = -ENOMEM; | ||
119 | 117 | ||
120 | state = kmalloc(sizeof(*state), GFP_KERNEL); | 118 | state = __seq_open_private(file, ops, sizeof(*state)); |
121 | if (!state) | 119 | if (state == NULL) |
122 | goto out; | 120 | return -ENOMEM; |
123 | |||
124 | rc = seq_open(file, ops); | ||
125 | if (rc) | ||
126 | goto out_kfree; | ||
127 | 121 | ||
128 | state->family = family; | 122 | state->family = family; |
129 | 123 | return 0; | |
130 | seq = file->private_data; | ||
131 | seq->private = state; | ||
132 | out: | ||
133 | return rc; | ||
134 | out_kfree: | ||
135 | kfree(state); | ||
136 | goto out; | ||
137 | } | ||
138 | |||
139 | static int vcc_seq_release(struct inode *inode, struct file *file) | ||
140 | { | ||
141 | return seq_release_private(inode, file); | ||
142 | } | 124 | } |
143 | 125 | ||
144 | static void *vcc_seq_start(struct seq_file *seq, loff_t *pos) | 126 | static void *vcc_seq_start(struct seq_file *seq, loff_t *pos) |
@@ -314,7 +296,7 @@ static const struct file_operations pvc_seq_fops = { | |||
314 | .open = pvc_seq_open, | 296 | .open = pvc_seq_open, |
315 | .read = seq_read, | 297 | .read = seq_read, |
316 | .llseek = seq_lseek, | 298 | .llseek = seq_lseek, |
317 | .release = vcc_seq_release, | 299 | .release = seq_release_private, |
318 | }; | 300 | }; |
319 | 301 | ||
320 | static int vcc_seq_show(struct seq_file *seq, void *v) | 302 | static int vcc_seq_show(struct seq_file *seq, void *v) |
@@ -348,7 +330,7 @@ static const struct file_operations vcc_seq_fops = { | |||
348 | .open = vcc_seq_open, | 330 | .open = vcc_seq_open, |
349 | .read = seq_read, | 331 | .read = seq_read, |
350 | .llseek = seq_lseek, | 332 | .llseek = seq_lseek, |
351 | .release = vcc_seq_release, | 333 | .release = seq_release_private, |
352 | }; | 334 | }; |
353 | 335 | ||
354 | static int svc_seq_show(struct seq_file *seq, void *v) | 336 | static int svc_seq_show(struct seq_file *seq, void *v) |
@@ -383,7 +365,7 @@ static const struct file_operations svc_seq_fops = { | |||
383 | .open = svc_seq_open, | 365 | .open = svc_seq_open, |
384 | .read = seq_read, | 366 | .read = seq_read, |
385 | .llseek = seq_lseek, | 367 | .llseek = seq_lseek, |
386 | .release = vcc_seq_release, | 368 | .release = seq_release_private, |
387 | }; | 369 | }; |
388 | 370 | ||
389 | static ssize_t proc_dev_atm_read(struct file *file, char __user *buf, | 371 | static ssize_t proc_dev_atm_read(struct file *file, char __user *buf, |
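The net/atm/proc.c hunk uses the lower-level __seq_open_private() variant because the caller still needs to initialise a field (state->family) after allocation: unlike seq_open_private(), it returns the freshly allocated, zeroed private area rather than only an error code. A short sketch of that variant, again with hypothetical my_* names:

/* Sketch, hypothetical names; mirrors the __vcc_seq_open() shape above. */
#include <linux/fs.h>
#include <linux/seq_file.h>

struct my_state {
	int family;
};

extern const struct seq_operations my_ops;

static int my_open(struct inode *inode, struct file *file, int family)
{
	struct my_state *state;

	state = __seq_open_private(file, &my_ops, sizeof(*state));
	if (state == NULL)
		return -ENOMEM;

	state->family = family;		/* extra init on top of the zeroing */
	return 0;
}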
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 1c0efd8ad9f3..0278a069c6f1 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -223,8 +223,8 @@ static int br_nf_pre_routing_finish_ipv6(struct sk_buff *skb) | |||
223 | } | 223 | } |
224 | nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING; | 224 | nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING; |
225 | 225 | ||
226 | skb->dst = (struct dst_entry *)&__fake_rtable; | 226 | skb->rtable = &__fake_rtable; |
227 | dst_hold(skb->dst); | 227 | dst_hold(&__fake_rtable.u.dst); |
228 | 228 | ||
229 | skb->dev = nf_bridge->physindev; | 229 | skb->dev = nf_bridge->physindev; |
230 | nf_bridge_push_encap_header(skb); | 230 | nf_bridge_push_encap_header(skb); |
@@ -388,8 +388,8 @@ bridged_dnat: | |||
388 | skb->pkt_type = PACKET_HOST; | 388 | skb->pkt_type = PACKET_HOST; |
389 | } | 389 | } |
390 | } else { | 390 | } else { |
391 | skb->dst = (struct dst_entry *)&__fake_rtable; | 391 | skb->rtable = &__fake_rtable; |
392 | dst_hold(skb->dst); | 392 | dst_hold(&__fake_rtable.u.dst); |
393 | } | 393 | } |
394 | 394 | ||
395 | skb->dev = nf_bridge->physindev; | 395 | skb->dev = nf_bridge->physindev; |
@@ -608,9 +608,9 @@ static unsigned int br_nf_local_in(unsigned int hook, struct sk_buff *skb, | |||
608 | const struct net_device *out, | 608 | const struct net_device *out, |
609 | int (*okfn)(struct sk_buff *)) | 609 | int (*okfn)(struct sk_buff *)) |
610 | { | 610 | { |
611 | if (skb->dst == (struct dst_entry *)&__fake_rtable) { | 611 | if (skb->rtable == &__fake_rtable) { |
612 | dst_release(skb->dst); | 612 | dst_release(&__fake_rtable.u.dst); |
613 | skb->dst = NULL; | 613 | skb->rtable = NULL; |
614 | } | 614 | } |
615 | 615 | ||
616 | return NF_ACCEPT; | 616 | return NF_ACCEPT; |
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index 9cf0538d1717..27d6a511c8c1 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -415,21 +415,21 @@ int br_sysfs_addbr(struct net_device *dev) | |||
415 | err = sysfs_create_group(brobj, &bridge_group); | 415 | err = sysfs_create_group(brobj, &bridge_group); |
416 | if (err) { | 416 | if (err) { |
417 | pr_info("%s: can't create group %s/%s\n", | 417 | pr_info("%s: can't create group %s/%s\n", |
418 | __FUNCTION__, dev->name, bridge_group.name); | 418 | __func__, dev->name, bridge_group.name); |
419 | goto out1; | 419 | goto out1; |
420 | } | 420 | } |
421 | 421 | ||
422 | err = sysfs_create_bin_file(brobj, &bridge_forward); | 422 | err = sysfs_create_bin_file(brobj, &bridge_forward); |
423 | if (err) { | 423 | if (err) { |
424 | pr_info("%s: can't create attribute file %s/%s\n", | 424 | pr_info("%s: can't create attribute file %s/%s\n", |
425 | __FUNCTION__, dev->name, bridge_forward.attr.name); | 425 | __func__, dev->name, bridge_forward.attr.name); |
426 | goto out2; | 426 | goto out2; |
427 | } | 427 | } |
428 | 428 | ||
429 | br->ifobj = kobject_create_and_add(SYSFS_BRIDGE_PORT_SUBDIR, brobj); | 429 | br->ifobj = kobject_create_and_add(SYSFS_BRIDGE_PORT_SUBDIR, brobj); |
430 | if (!br->ifobj) { | 430 | if (!br->ifobj) { |
431 | pr_info("%s: can't add kobject (directory) %s/%s\n", | 431 | pr_info("%s: can't add kobject (directory) %s/%s\n", |
432 | __FUNCTION__, dev->name, SYSFS_BRIDGE_PORT_SUBDIR); | 432 | __func__, dev->name, SYSFS_BRIDGE_PORT_SUBDIR); |
433 | goto out3; | 433 | goto out3; |
434 | } | 434 | } |
435 | return 0; | 435 | return 0; |
diff --git a/net/core/dev_mcast.c b/net/core/dev_mcast.c
index cec582563e0d..f8a3455f4493 100644
--- a/net/core/dev_mcast.c
+++ b/net/core/dev_mcast.c
@@ -156,39 +156,14 @@ void dev_mc_unsync(struct net_device *to, struct net_device *from) | |||
156 | EXPORT_SYMBOL(dev_mc_unsync); | 156 | EXPORT_SYMBOL(dev_mc_unsync); |
157 | 157 | ||
158 | #ifdef CONFIG_PROC_FS | 158 | #ifdef CONFIG_PROC_FS |
159 | static void *dev_mc_seq_start(struct seq_file *seq, loff_t *pos) | ||
160 | __acquires(dev_base_lock) | ||
161 | { | ||
162 | struct net *net = seq_file_net(seq); | ||
163 | struct net_device *dev; | ||
164 | loff_t off = 0; | ||
165 | |||
166 | read_lock(&dev_base_lock); | ||
167 | for_each_netdev(net, dev) { | ||
168 | if (off++ == *pos) | ||
169 | return dev; | ||
170 | } | ||
171 | return NULL; | ||
172 | } | ||
173 | |||
174 | static void *dev_mc_seq_next(struct seq_file *seq, void *v, loff_t *pos) | ||
175 | { | ||
176 | ++*pos; | ||
177 | return next_net_device((struct net_device *)v); | ||
178 | } | ||
179 | |||
180 | static void dev_mc_seq_stop(struct seq_file *seq, void *v) | ||
181 | __releases(dev_base_lock) | ||
182 | { | ||
183 | read_unlock(&dev_base_lock); | ||
184 | } | ||
185 | |||
186 | |||
187 | static int dev_mc_seq_show(struct seq_file *seq, void *v) | 159 | static int dev_mc_seq_show(struct seq_file *seq, void *v) |
188 | { | 160 | { |
189 | struct dev_addr_list *m; | 161 | struct dev_addr_list *m; |
190 | struct net_device *dev = v; | 162 | struct net_device *dev = v; |
191 | 163 | ||
164 | if (v == SEQ_START_TOKEN) | ||
165 | return 0; | ||
166 | |||
192 | netif_tx_lock_bh(dev); | 167 | netif_tx_lock_bh(dev); |
193 | for (m = dev->mc_list; m; m = m->next) { | 168 | for (m = dev->mc_list; m; m = m->next) { |
194 | int i; | 169 | int i; |
@@ -206,9 +181,9 @@ static int dev_mc_seq_show(struct seq_file *seq, void *v) | |||
206 | } | 181 | } |
207 | 182 | ||
208 | static const struct seq_operations dev_mc_seq_ops = { | 183 | static const struct seq_operations dev_mc_seq_ops = { |
209 | .start = dev_mc_seq_start, | 184 | .start = dev_seq_start, |
210 | .next = dev_mc_seq_next, | 185 | .next = dev_seq_next, |
211 | .stop = dev_mc_seq_stop, | 186 | .stop = dev_seq_stop, |
212 | .show = dev_mc_seq_show, | 187 | .show = dev_mc_seq_show, |
213 | }; | 188 | }; |
214 | 189 | ||
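The dev_mcast.c change drops the file's private copy of the start/next/stop iterators in favour of the shared dev_seq_start/dev_seq_next/dev_seq_stop used for /proc/net/dev in net/core/dev.c. Those shared iterators hand ->show() a SEQ_START_TOKEN for the header row, which is why dev_mc_seq_show() grows the early return above. A generic sketch of that convention (my_seq_show is a hypothetical name, not from the patch):

/* Sketch: any ->show() reusing an iterator that yields SEQ_START_TOKEN must
 * recognise the token before treating v as a real object.
 */
#include <linux/seq_file.h>

static int my_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "Iface  Refcnt  Address\n");	/* header row */
		return 0;
	}

	/* ... here v points at a real entry; format one line for it ... */
	return 0;
}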
diff --git a/net/core/dst.c b/net/core/dst.c
index 7deef483c79f..3a01a819ba47 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -295,9 +295,6 @@ static int dst_dev_event(struct notifier_block *this, unsigned long event, void | |||
295 | struct net_device *dev = ptr; | 295 | struct net_device *dev = ptr; |
296 | struct dst_entry *dst, *last = NULL; | 296 | struct dst_entry *dst, *last = NULL; |
297 | 297 | ||
298 | if (dev->nd_net != &init_net) | ||
299 | return NOTIFY_DONE; | ||
300 | |||
301 | switch (event) { | 298 | switch (event) { |
302 | case NETDEV_UNREGISTER: | 299 | case NETDEV_UNREGISTER: |
303 | case NETDEV_DOWN: | 300 | case NETDEV_DOWN: |
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index d9a02b2cc289..23c0a10c0c37 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1284,9 +1284,7 @@ static inline struct neigh_parms *lookup_neigh_params(struct neigh_table *tbl, | |||
1284 | struct neigh_parms *p; | 1284 | struct neigh_parms *p; |
1285 | 1285 | ||
1286 | for (p = &tbl->parms; p; p = p->next) { | 1286 | for (p = &tbl->parms; p; p = p->next) { |
1287 | if (p->net != net) | 1287 | if ((p->dev && p->dev->ifindex == ifindex && p->net == net) || |
1288 | continue; | ||
1289 | if ((p->dev && p->dev->ifindex == ifindex) || | ||
1290 | (!p->dev && !ifindex)) | 1288 | (!p->dev && !ifindex)) |
1291 | return p; | 1289 | return p; |
1292 | } | 1290 | } |
@@ -2741,7 +2739,8 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p, | |||
2741 | neigh_path[NEIGH_CTL_PATH_PROTO].procname = p_name; | 2739 | neigh_path[NEIGH_CTL_PATH_PROTO].procname = p_name; |
2742 | neigh_path[NEIGH_CTL_PATH_PROTO].ctl_name = p_id; | 2740 | neigh_path[NEIGH_CTL_PATH_PROTO].ctl_name = p_id; |
2743 | 2741 | ||
2744 | t->sysctl_header = register_sysctl_paths(neigh_path, t->neigh_vars); | 2742 | t->sysctl_header = |
2743 | register_net_sysctl_table(p->net, neigh_path, t->neigh_vars); | ||
2745 | if (!t->sysctl_header) | 2744 | if (!t->sysctl_header) |
2746 | goto free_procname; | 2745 | goto free_procname; |
2747 | 2746 | ||
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 4b7e756181c9..d0c8bf585f06 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -388,9 +388,7 @@ static void arp_reply(struct sk_buff *skb) | |||
388 | if (skb->dev->flags & IFF_NOARP) | 388 | if (skb->dev->flags & IFF_NOARP) |
389 | return; | 389 | return; |
390 | 390 | ||
391 | if (!pskb_may_pull(skb, (sizeof(struct arphdr) + | 391 | if (!pskb_may_pull(skb, arp_hdr_len(skb->dev))) |
392 | (2 * skb->dev->addr_len) + | ||
393 | (2 * sizeof(u32))))) | ||
394 | return; | 392 | return; |
395 | 393 | ||
396 | skb_reset_network_header(skb); | 394 | skb_reset_network_header(skb); |
@@ -418,7 +416,7 @@ static void arp_reply(struct sk_buff *skb) | |||
418 | ipv4_is_loopback(tip) || ipv4_is_multicast(tip)) | 416 | ipv4_is_loopback(tip) || ipv4_is_multicast(tip)) |
419 | return; | 417 | return; |
420 | 418 | ||
421 | size = sizeof(struct arphdr) + 2 * (skb->dev->addr_len + 4); | 419 | size = arp_hdr_len(skb->dev); |
422 | send_skb = find_skb(np, size + LL_RESERVED_SPACE(np->dev), | 420 | send_skb = find_skb(np, size + LL_RESERVED_SPACE(np->dev), |
423 | LL_RESERVED_SPACE(np->dev)); | 421 | LL_RESERVED_SPACE(np->dev)); |
424 | 422 | ||
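The two netpoll hunks replace open-coded ARP length arithmetic with the arp_hdr_len() helper. Both removed expressions, 2 * addr_len + 2 * sizeof(u32) and 2 * (addr_len + 4), spell out the same quantity for IPv4: the fixed ARP header plus two hardware addresses plus two protocol addresses. A sketch of the equivalent computation, assuming the 2.6.25-era definition of the helper:

/* Sketch of what the helper computes; my_arp_hdr_len is a hypothetical
 * stand-in, not the kernel's definition.
 */
#include <linux/if_arp.h>
#include <linux/netdevice.h>

static unsigned int my_arp_hdr_len(const struct net_device *dev)
{
	/* ARP header, plus two device addresses, plus two IPv4 addresses. */
	return sizeof(struct arphdr) + 2 * (dev->addr_len + sizeof(u32));
}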
diff --git a/net/core/sock.c b/net/core/sock.c
index 09cb3a74de7f..bb5236aee643 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -981,12 +981,31 @@ void sk_free(struct sock *sk) | |||
981 | 981 | ||
982 | if (atomic_read(&sk->sk_omem_alloc)) | 982 | if (atomic_read(&sk->sk_omem_alloc)) |
983 | printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n", | 983 | printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n", |
984 | __FUNCTION__, atomic_read(&sk->sk_omem_alloc)); | 984 | __func__, atomic_read(&sk->sk_omem_alloc)); |
985 | 985 | ||
986 | put_net(sk->sk_net); | 986 | put_net(sk->sk_net); |
987 | sk_prot_free(sk->sk_prot_creator, sk); | 987 | sk_prot_free(sk->sk_prot_creator, sk); |
988 | } | 988 | } |
989 | 989 | ||
990 | /* | ||
991 | * Last sock_put should drop the reference to sk->sk_net. It has already | ||
992 | * been dropped in sk_change_net. Taking a reference to the stopping namespace | ||
993 | * is not an option. | ||
994 | * Take a reference to a socket to remove it from the hash _alive_ and after that | ||
995 | * destroy it in the context of init_net. | ||
996 | */ | ||
997 | void sk_release_kernel(struct sock *sk) | ||
998 | { | ||
999 | if (sk == NULL || sk->sk_socket == NULL) | ||
1000 | return; | ||
1001 | |||
1002 | sock_hold(sk); | ||
1003 | sock_release(sk->sk_socket); | ||
1004 | sk->sk_net = get_net(&init_net); | ||
1005 | sock_put(sk); | ||
1006 | } | ||
1007 | EXPORT_SYMBOL(sk_release_kernel); | ||
1008 | |||
990 | struct sock *sk_clone(const struct sock *sk, const gfp_t priority) | 1009 | struct sock *sk_clone(const struct sock *sk, const gfp_t priority) |
991 | { | 1010 | { |
992 | struct sock *newsk; | 1011 | struct sock *newsk; |
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index 287a62bc2e0f..e1b7c9c6a623 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -23,9 +23,9 @@ | |||
23 | * DCCP - specific warning and debugging macros. | 23 | * DCCP - specific warning and debugging macros. |
24 | */ | 24 | */ |
25 | #define DCCP_WARN(fmt, a...) LIMIT_NETDEBUG(KERN_WARNING "%s: " fmt, \ | 25 | #define DCCP_WARN(fmt, a...) LIMIT_NETDEBUG(KERN_WARNING "%s: " fmt, \ |
26 | __FUNCTION__, ##a) | 26 | __func__, ##a) |
27 | #define DCCP_CRIT(fmt, a...) printk(KERN_CRIT fmt " at %s:%d/%s()\n", ##a, \ | 27 | #define DCCP_CRIT(fmt, a...) printk(KERN_CRIT fmt " at %s:%d/%s()\n", ##a, \ |
28 | __FILE__, __LINE__, __FUNCTION__) | 28 | __FILE__, __LINE__, __func__) |
29 | #define DCCP_BUG(a...) do { DCCP_CRIT("BUG: " a); dump_stack(); } while(0) | 29 | #define DCCP_BUG(a...) do { DCCP_CRIT("BUG: " a); dump_stack(); } while(0) |
30 | #define DCCP_BUG_ON(cond) do { if (unlikely((cond) != 0)) \ | 30 | #define DCCP_BUG_ON(cond) do { if (unlikely((cond) != 0)) \ |
31 | DCCP_BUG("\"%s\" holds (exception!)", \ | 31 | DCCP_BUG("\"%s\" holds (exception!)", \ |
@@ -36,7 +36,7 @@ | |||
36 | printk(fmt, ##args); \ | 36 | printk(fmt, ##args); \ |
37 | } while(0) | 37 | } while(0) |
38 | #define DCCP_PR_DEBUG(enable, fmt, a...) DCCP_PRINTK(enable, KERN_DEBUG \ | 38 | #define DCCP_PR_DEBUG(enable, fmt, a...) DCCP_PRINTK(enable, KERN_DEBUG \ |
39 | "%s: " fmt, __FUNCTION__, ##a) | 39 | "%s: " fmt, __func__, ##a) |
40 | 40 | ||
41 | #ifdef CONFIG_IP_DCCP_DEBUG | 41 | #ifdef CONFIG_IP_DCCP_DEBUG |
42 | extern int dccp_debug; | 42 | extern int dccp_debug; |
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 474075adbde4..17ad69e90e48 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -450,7 +450,7 @@ static struct dst_entry* dccp_v4_route_skb(struct sock *sk, | |||
450 | struct sk_buff *skb) | 450 | struct sk_buff *skb) |
451 | { | 451 | { |
452 | struct rtable *rt; | 452 | struct rtable *rt; |
453 | struct flowi fl = { .oif = ((struct rtable *)skb->dst)->rt_iif, | 453 | struct flowi fl = { .oif = skb->rtable->rt_iif, |
454 | .nl_u = { .ip4_u = | 454 | .nl_u = { .ip4_u = |
455 | { .daddr = ip_hdr(skb)->saddr, | 455 | { .daddr = ip_hdr(skb)->saddr, |
456 | .saddr = ip_hdr(skb)->daddr, | 456 | .saddr = ip_hdr(skb)->daddr, |
@@ -471,15 +471,14 @@ static struct dst_entry* dccp_v4_route_skb(struct sock *sk, | |||
471 | return &rt->u.dst; | 471 | return &rt->u.dst; |
472 | } | 472 | } |
473 | 473 | ||
474 | static int dccp_v4_send_response(struct sock *sk, struct request_sock *req, | 474 | static int dccp_v4_send_response(struct sock *sk, struct request_sock *req) |
475 | struct dst_entry *dst) | ||
476 | { | 475 | { |
477 | int err = -1; | 476 | int err = -1; |
478 | struct sk_buff *skb; | 477 | struct sk_buff *skb; |
478 | struct dst_entry *dst; | ||
479 | 479 | ||
480 | /* First, grab a route. */ | 480 | dst = inet_csk_route_req(sk, req); |
481 | 481 | if (dst == NULL) | |
482 | if (dst == NULL && (dst = inet_csk_route_req(sk, req)) == NULL) | ||
483 | goto out; | 482 | goto out; |
484 | 483 | ||
485 | skb = dccp_make_response(sk, dst, req); | 484 | skb = dccp_make_response(sk, dst, req); |
@@ -512,7 +511,7 @@ static void dccp_v4_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb) | |||
512 | if (dccp_hdr(rxskb)->dccph_type == DCCP_PKT_RESET) | 511 | if (dccp_hdr(rxskb)->dccph_type == DCCP_PKT_RESET) |
513 | return; | 512 | return; |
514 | 513 | ||
515 | if (((struct rtable *)rxskb->dst)->rt_type != RTN_LOCAL) | 514 | if (rxskb->rtable->rt_type != RTN_LOCAL) |
516 | return; | 515 | return; |
517 | 516 | ||
518 | dst = dccp_v4_route_skb(dccp_v4_ctl_socket->sk, rxskb); | 517 | dst = dccp_v4_route_skb(dccp_v4_ctl_socket->sk, rxskb); |
@@ -564,8 +563,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb) | |||
564 | struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb); | 563 | struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb); |
565 | 564 | ||
566 | /* Never answer to DCCP_PKT_REQUESTs send to broadcast or multicast */ | 565 | /* Never answer to DCCP_PKT_REQUESTs send to broadcast or multicast */ |
567 | if (((struct rtable *)skb->dst)->rt_flags & | 566 | if (skb->rtable->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) |
568 | (RTCF_BROADCAST | RTCF_MULTICAST)) | ||
569 | return 0; /* discard, don't send a reset here */ | 567 | return 0; /* discard, don't send a reset here */ |
570 | 568 | ||
571 | if (dccp_bad_service_code(sk, service)) { | 569 | if (dccp_bad_service_code(sk, service)) { |
@@ -620,7 +618,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb) | |||
620 | dreq->dreq_iss = dccp_v4_init_sequence(skb); | 618 | dreq->dreq_iss = dccp_v4_init_sequence(skb); |
621 | dreq->dreq_service = service; | 619 | dreq->dreq_service = service; |
622 | 620 | ||
623 | if (dccp_v4_send_response(sk, req, NULL)) | 621 | if (dccp_v4_send_response(sk, req)) |
624 | goto drop_and_free; | 622 | goto drop_and_free; |
625 | 623 | ||
626 | inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT); | 624 | inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT); |
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 490333d47c7b..1a5e50b90677 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -224,8 +224,7 @@ out: | |||
224 | } | 224 | } |
225 | 225 | ||
226 | 226 | ||
227 | static int dccp_v6_send_response(struct sock *sk, struct request_sock *req, | 227 | static int dccp_v6_send_response(struct sock *sk, struct request_sock *req) |
228 | struct dst_entry *dst) | ||
229 | { | 228 | { |
230 | struct inet6_request_sock *ireq6 = inet6_rsk(req); | 229 | struct inet6_request_sock *ireq6 = inet6_rsk(req); |
231 | struct ipv6_pinfo *np = inet6_sk(sk); | 230 | struct ipv6_pinfo *np = inet6_sk(sk); |
@@ -234,6 +233,7 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req, | |||
234 | struct in6_addr *final_p = NULL, final; | 233 | struct in6_addr *final_p = NULL, final; |
235 | struct flowi fl; | 234 | struct flowi fl; |
236 | int err = -1; | 235 | int err = -1; |
236 | struct dst_entry *dst; | ||
237 | 237 | ||
238 | memset(&fl, 0, sizeof(fl)); | 238 | memset(&fl, 0, sizeof(fl)); |
239 | fl.proto = IPPROTO_DCCP; | 239 | fl.proto = IPPROTO_DCCP; |
@@ -245,28 +245,26 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req, | |||
245 | fl.fl_ip_sport = inet_sk(sk)->sport; | 245 | fl.fl_ip_sport = inet_sk(sk)->sport; |
246 | security_req_classify_flow(req, &fl); | 246 | security_req_classify_flow(req, &fl); |
247 | 247 | ||
248 | if (dst == NULL) { | 248 | opt = np->opt; |
249 | opt = np->opt; | ||
250 | 249 | ||
251 | if (opt != NULL && opt->srcrt != NULL) { | 250 | if (opt != NULL && opt->srcrt != NULL) { |
252 | const struct rt0_hdr *rt0 = (struct rt0_hdr *)opt->srcrt; | 251 | const struct rt0_hdr *rt0 = (struct rt0_hdr *)opt->srcrt; |
253 | 252 | ||
254 | ipv6_addr_copy(&final, &fl.fl6_dst); | 253 | ipv6_addr_copy(&final, &fl.fl6_dst); |
255 | ipv6_addr_copy(&fl.fl6_dst, rt0->addr); | 254 | ipv6_addr_copy(&fl.fl6_dst, rt0->addr); |
256 | final_p = &final; | 255 | final_p = &final; |
257 | } | 256 | } |
258 | 257 | ||
259 | err = ip6_dst_lookup(sk, &dst, &fl); | 258 | err = ip6_dst_lookup(sk, &dst, &fl); |
260 | if (err) | 259 | if (err) |
261 | goto done; | 260 | goto done; |
262 | 261 | ||
263 | if (final_p) | 262 | if (final_p) |
264 | ipv6_addr_copy(&fl.fl6_dst, final_p); | 263 | ipv6_addr_copy(&fl.fl6_dst, final_p); |
265 | 264 | ||
266 | err = xfrm_lookup(&dst, &fl, sk, 0); | 265 | err = xfrm_lookup(&dst, &fl, sk, 0); |
267 | if (err < 0) | 266 | if (err < 0) |
268 | goto done; | 267 | goto done; |
269 | } | ||
270 | 268 | ||
271 | skb = dccp_make_response(sk, dst, req); | 269 | skb = dccp_make_response(sk, dst, req); |
272 | if (skb != NULL) { | 270 | if (skb != NULL) { |
@@ -448,7 +446,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb) | |||
448 | dreq->dreq_iss = dccp_v6_init_sequence(skb); | 446 | dreq->dreq_iss = dccp_v6_init_sequence(skb); |
449 | dreq->dreq_service = service; | 447 | dreq->dreq_service = service; |
450 | 448 | ||
451 | if (dccp_v6_send_response(sk, req, NULL)) | 449 | if (dccp_v6_send_response(sk, req)) |
452 | goto drop_and_free; | 450 | goto drop_and_free; |
453 | 451 | ||
454 | inet6_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT); | 452 | inet6_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT); |
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index 027d1814e1ab..33ad48321b08 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -216,7 +216,7 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb, | |||
216 | * counter (backoff, monitored by dccp_response_timer). | 216 | * counter (backoff, monitored by dccp_response_timer). |
217 | */ | 217 | */ |
218 | req->retrans++; | 218 | req->retrans++; |
219 | req->rsk_ops->rtx_syn_ack(sk, req, NULL); | 219 | req->rsk_ops->rtx_syn_ack(sk, req); |
220 | } | 220 | } |
221 | /* Network Duplicate, discard packet */ | 221 | /* Network Duplicate, discard packet */ |
222 | return NULL; | 222 | return NULL; |
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index acd48ee522d6..23fd95a7ad15 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -2320,25 +2320,8 @@ static const struct seq_operations dn_socket_seq_ops = { | |||
2320 | 2320 | ||
2321 | static int dn_socket_seq_open(struct inode *inode, struct file *file) | 2321 | static int dn_socket_seq_open(struct inode *inode, struct file *file) |
2322 | { | 2322 | { |
2323 | struct seq_file *seq; | 2323 | return seq_open_private(file, &dn_socket_seq_ops, |
2324 | int rc = -ENOMEM; | 2324 | sizeof(struct dn_iter_state)); |
2325 | struct dn_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL); | ||
2326 | |||
2327 | if (!s) | ||
2328 | goto out; | ||
2329 | |||
2330 | rc = seq_open(file, &dn_socket_seq_ops); | ||
2331 | if (rc) | ||
2332 | goto out_kfree; | ||
2333 | |||
2334 | seq = file->private_data; | ||
2335 | seq->private = s; | ||
2336 | memset(s, 0, sizeof(*s)); | ||
2337 | out: | ||
2338 | return rc; | ||
2339 | out_kfree: | ||
2340 | kfree(s); | ||
2341 | goto out; | ||
2342 | } | 2325 | } |
2343 | 2326 | ||
2344 | static const struct file_operations dn_socket_seq_fops = { | 2327 | static const struct file_operations dn_socket_seq_fops = { |
diff --git a/net/ieee80211/Kconfig b/net/ieee80211/Kconfig
index bd501046c9c0..94ed7d3cd9da 100644
--- a/net/ieee80211/Kconfig
+++ b/net/ieee80211/Kconfig
@@ -71,4 +71,3 @@ config IEEE80211_CRYPT_TKIP | |||
71 | This can be compiled as a module and it will be called | 71 | This can be compiled as a module and it will be called |
72 | "ieee80211_crypt_tkip". | 72 | "ieee80211_crypt_tkip". |
73 | 73 | ||
74 | source "net/ieee80211/softmac/Kconfig" | ||
diff --git a/net/ieee80211/Makefile b/net/ieee80211/Makefile
index 796a7c76ee48..f988417121da 100644
--- a/net/ieee80211/Makefile
+++ b/net/ieee80211/Makefile
@@ -10,4 +10,3 @@ ieee80211-objs := \ | |||
10 | ieee80211_wx.o \ | 10 | ieee80211_wx.o \ |
11 | ieee80211_geo.o | 11 | ieee80211_geo.o |
12 | 12 | ||
13 | obj-$(CONFIG_IEEE80211_SOFTMAC) += softmac/ | ||
diff --git a/net/ieee80211/softmac/Kconfig b/net/ieee80211/softmac/Kconfig
deleted file mode 100644
index 2811651cb134..000000000000
--- a/net/ieee80211/softmac/Kconfig
+++ /dev/null
@@ -1,12 +0,0 @@ | |||
1 | config IEEE80211_SOFTMAC | ||
2 | tristate "Software MAC add-on to the IEEE 802.11 networking stack" | ||
3 | depends on IEEE80211 && EXPERIMENTAL | ||
4 | select WIRELESS_EXT | ||
5 | select IEEE80211_CRYPT_WEP | ||
6 | ---help--- | ||
7 | This option enables the hardware independent software MAC addon | ||
8 | for the IEEE 802.11 networking stack. | ||
9 | |||
10 | config IEEE80211_SOFTMAC_DEBUG | ||
11 | bool "Enable full debugging output" | ||
12 | depends on IEEE80211_SOFTMAC | ||
diff --git a/net/ieee80211/softmac/Makefile b/net/ieee80211/softmac/Makefile
deleted file mode 100644
index bfcb391bb2c7..000000000000
--- a/net/ieee80211/softmac/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@ | |||
1 | obj-$(CONFIG_IEEE80211_SOFTMAC) += ieee80211softmac.o | ||
2 | ieee80211softmac-objs := \ | ||
3 | ieee80211softmac_io.o \ | ||
4 | ieee80211softmac_auth.o \ | ||
5 | ieee80211softmac_module.o \ | ||
6 | ieee80211softmac_scan.o \ | ||
7 | ieee80211softmac_wx.o \ | ||
8 | ieee80211softmac_assoc.o \ | ||
9 | ieee80211softmac_event.o | ||
diff --git a/net/ieee80211/softmac/ieee80211softmac_assoc.c b/net/ieee80211/softmac/ieee80211softmac_assoc.c
deleted file mode 100644
index c4d122ddd72c..000000000000
--- a/net/ieee80211/softmac/ieee80211softmac_assoc.c
+++ /dev/null
@@ -1,489 +0,0 @@ | |||
1 | /* | ||
2 | * This file contains the softmac's association logic. | ||
3 | * | ||
4 | * Copyright (c) 2005, 2006 Johannes Berg <johannes@sipsolutions.net> | ||
5 | * Joseph Jezak <josejx@gentoo.org> | ||
6 | * Larry Finger <Larry.Finger@lwfinger.net> | ||
7 | * Danny van Dyk <kugelfang@gentoo.org> | ||
8 | * Michael Buesch <mbuesch@freenet.de> | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify it | ||
11 | * under the terms of version 2 of the GNU General Public License as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
15 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
17 | * more details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License | ||
20 | * along with this program; if not, write to the Free Software | ||
21 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
22 | * | ||
23 | * The full GNU General Public License is included in this distribution in the | ||
24 | * file called COPYING. | ||
25 | */ | ||
26 | |||
27 | #include "ieee80211softmac_priv.h" | ||
28 | |||
29 | /* | ||
30 | * Overview | ||
31 | * | ||
32 | * Before you can associate, you have to authenticate. | ||
33 | * | ||
34 | */ | ||
35 | |||
36 | /* Sends out an association request to the desired AP */ | ||
37 | static void | ||
38 | ieee80211softmac_assoc(struct ieee80211softmac_device *mac, struct ieee80211softmac_network *net) | ||
39 | { | ||
40 | unsigned long flags; | ||
41 | |||
42 | /* Switch to correct channel for this network */ | ||
43 | mac->set_channel(mac->dev, net->channel); | ||
44 | |||
45 | /* Send association request */ | ||
46 | ieee80211softmac_send_mgt_frame(mac, net, IEEE80211_STYPE_ASSOC_REQ, 0); | ||
47 | |||
48 | dprintk(KERN_INFO PFX "sent association request!\n"); | ||
49 | |||
50 | spin_lock_irqsave(&mac->lock, flags); | ||
51 | mac->associnfo.associated = 0; /* just to make sure */ | ||
52 | |||
53 | /* Set a timer for timeout */ | ||
54 | /* FIXME: make timeout configurable */ | ||
55 | if (likely(mac->running)) | ||
56 | queue_delayed_work(mac->wq, &mac->associnfo.timeout, 5 * HZ); | ||
57 | spin_unlock_irqrestore(&mac->lock, flags); | ||
58 | } | ||
59 | |||
60 | void | ||
61 | ieee80211softmac_assoc_timeout(struct work_struct *work) | ||
62 | { | ||
63 | struct ieee80211softmac_device *mac = | ||
64 | container_of(work, struct ieee80211softmac_device, | ||
65 | associnfo.timeout.work); | ||
66 | struct ieee80211softmac_network *n; | ||
67 | |||
68 | mutex_lock(&mac->associnfo.mutex); | ||
69 | /* we might race against ieee80211softmac_handle_assoc_response, | ||
70 | * so make sure only one of us does something */ | ||
71 | if (!mac->associnfo.associating) | ||
72 | goto out; | ||
73 | mac->associnfo.associating = 0; | ||
74 | mac->associnfo.bssvalid = 0; | ||
75 | mac->associnfo.associated = 0; | ||
76 | |||
77 | n = ieee80211softmac_get_network_by_bssid_locked(mac, mac->associnfo.bssid); | ||
78 | |||
79 | dprintk(KERN_INFO PFX "assoc request timed out!\n"); | ||
80 | ieee80211softmac_call_events(mac, IEEE80211SOFTMAC_EVENT_ASSOCIATE_TIMEOUT, n); | ||
81 | out: | ||
82 | mutex_unlock(&mac->associnfo.mutex); | ||
83 | } | ||
84 | |||
85 | void | ||
86 | ieee80211softmac_disassoc(struct ieee80211softmac_device *mac) | ||
87 | { | ||
88 | unsigned long flags; | ||
89 | |||
90 | spin_lock_irqsave(&mac->lock, flags); | ||
91 | if (mac->associnfo.associating) | ||
92 | cancel_delayed_work(&mac->associnfo.timeout); | ||
93 | |||
94 | netif_carrier_off(mac->dev); | ||
95 | |||
96 | mac->associnfo.associated = 0; | ||
97 | mac->associnfo.bssvalid = 0; | ||
98 | mac->associnfo.associating = 0; | ||
99 | ieee80211softmac_init_bss(mac); | ||
100 | ieee80211softmac_call_events_locked(mac, IEEE80211SOFTMAC_EVENT_DISASSOCIATED, NULL); | ||
101 | spin_unlock_irqrestore(&mac->lock, flags); | ||
102 | } | ||
103 | |||
104 | /* Sends out a disassociation request to the desired AP */ | ||
105 | void | ||
106 | ieee80211softmac_send_disassoc_req(struct ieee80211softmac_device *mac, u16 reason) | ||
107 | { | ||
108 | struct ieee80211softmac_network *found; | ||
109 | |||
110 | if (mac->associnfo.bssvalid && mac->associnfo.associated) { | ||
111 | found = ieee80211softmac_get_network_by_bssid(mac, mac->associnfo.bssid); | ||
112 | if (found) | ||
113 | ieee80211softmac_send_mgt_frame(mac, found, IEEE80211_STYPE_DISASSOC, reason); | ||
114 | } | ||
115 | |||
116 | ieee80211softmac_disassoc(mac); | ||
117 | } | ||
118 | |||
119 | static inline int | ||
120 | we_support_all_basic_rates(struct ieee80211softmac_device *mac, u8 *from, u8 from_len) | ||
121 | { | ||
122 | int idx; | ||
123 | u8 rate; | ||
124 | |||
125 | for (idx = 0; idx < (from_len); idx++) { | ||
126 | rate = (from)[idx]; | ||
127 | if (!(rate & IEEE80211_BASIC_RATE_MASK)) | ||
128 | continue; | ||
129 | rate &= ~IEEE80211_BASIC_RATE_MASK; | ||
130 | if (!ieee80211softmac_ratesinfo_rate_supported(&mac->ratesinfo, rate)) | ||
131 | return 0; | ||
132 | } | ||
133 | return 1; | ||
134 | } | ||
135 | |||
136 | static int | ||
137 | network_matches_request(struct ieee80211softmac_device *mac, struct ieee80211_network *net) | ||
138 | { | ||
139 | /* we cannot associate to networks whose name we don't know */ | ||
140 | if (ieee80211_is_empty_essid(net->ssid, net->ssid_len)) | ||
141 | return 0; | ||
142 | /* do not associate to a network whose BSSBasicRateSet we cannot support */ | ||
143 | if (!we_support_all_basic_rates(mac, net->rates, net->rates_len)) | ||
144 | return 0; | ||
145 | /* do we really need to check the ex rates? */ | ||
146 | if (!we_support_all_basic_rates(mac, net->rates_ex, net->rates_ex_len)) | ||
147 | return 0; | ||
148 | |||
149 | /* assume that users know what they're doing ... | ||
150 | * (note we don't let them select a net we're incompatible with) */ | ||
151 | if (mac->associnfo.bssfixed) { | ||
152 | return !memcmp(mac->associnfo.bssid, net->bssid, ETH_ALEN); | ||
153 | } | ||
154 | |||
155 | /* if 'ANY' network requested, take any that doesn't have privacy enabled */ | ||
156 | if (mac->associnfo.req_essid.len == 0 | ||
157 | && !(net->capability & WLAN_CAPABILITY_PRIVACY)) | ||
158 | return 1; | ||
159 | if (net->ssid_len != mac->associnfo.req_essid.len) | ||
160 | return 0; | ||
161 | if (!memcmp(net->ssid, mac->associnfo.req_essid.data, mac->associnfo.req_essid.len)) | ||
162 | return 1; | ||
163 | return 0; | ||
164 | } | ||
165 | |||
166 | static void | ||
167 | ieee80211softmac_assoc_notify_scan(struct net_device *dev, int event_type, void *context) | ||
168 | { | ||
169 | struct ieee80211softmac_device *mac = ieee80211_priv(dev); | ||
170 | ieee80211softmac_assoc_work(&mac->associnfo.work.work); | ||
171 | } | ||
172 | |||
173 | static void | ||
174 | ieee80211softmac_assoc_notify_auth(struct net_device *dev, int event_type, void *context) | ||
175 | { | ||
176 | struct ieee80211softmac_device *mac = ieee80211_priv(dev); | ||
177 | |||
178 | switch (event_type) { | ||
179 | case IEEE80211SOFTMAC_EVENT_AUTHENTICATED: | ||
180 | ieee80211softmac_assoc_work(&mac->associnfo.work.work); | ||
181 | break; | ||
182 | case IEEE80211SOFTMAC_EVENT_AUTH_FAILED: | ||
183 | case IEEE80211SOFTMAC_EVENT_AUTH_TIMEOUT: | ||
184 | ieee80211softmac_disassoc(mac); | ||
185 | break; | ||
186 | } | ||
187 | } | ||
188 | |||
189 | /* This function is called to handle userspace requests (asynchronously) */ | ||
190 | void | ||
191 | ieee80211softmac_assoc_work(struct work_struct *work) | ||
192 | { | ||
193 | struct ieee80211softmac_device *mac = | ||
194 | container_of(work, struct ieee80211softmac_device, | ||
195 | associnfo.work.work); | ||
196 | struct ieee80211softmac_network *found = NULL; | ||
197 | struct ieee80211_network *net = NULL, *best = NULL; | ||
198 | int bssvalid; | ||
199 | unsigned long flags; | ||
200 | |||
201 | mutex_lock(&mac->associnfo.mutex); | ||
202 | |||
203 | if (!mac->associnfo.associating) | ||
204 | goto out; | ||
205 | |||
206 | /* ieee80211_disassoc might clear this */ | ||
207 | bssvalid = mac->associnfo.bssvalid; | ||
208 | |||
209 | /* meh */ | ||
210 | if (mac->associnfo.associated) | ||
211 | ieee80211softmac_send_disassoc_req(mac, WLAN_REASON_DISASSOC_STA_HAS_LEFT); | ||
212 | |||
213 | /* try to find the requested network in our list, if we found one already */ | ||
214 | if (bssvalid || mac->associnfo.bssfixed) | ||
215 | found = ieee80211softmac_get_network_by_bssid(mac, mac->associnfo.bssid); | ||
216 | |||
217 | /* Search the ieee80211 networks for this network if we didn't find it by bssid, | ||
218 | * but only if we've scanned at least once (to get a better list of networks to | ||
219 | * select from). If we have not scanned before, the !found logic below will be | ||
220 | * invoked and will scan. */ | ||
221 | if (!found && (mac->associnfo.scan_retry < IEEE80211SOFTMAC_ASSOC_SCAN_RETRY_LIMIT)) | ||
222 | { | ||
223 | s8 rssi = -128; /* if I don't initialise, gcc emits an invalid warning | ||
224 | because it cannot follow the best pointer logic. */ | ||
225 | spin_lock_irqsave(&mac->ieee->lock, flags); | ||
226 | list_for_each_entry(net, &mac->ieee->network_list, list) { | ||
227 | /* we're supposed to find the network with | ||
228 | * the best signal here, as we're asked to join | ||
229 | * any network with a specific ESSID, and many | ||
230 | * different ones could have that. | ||
231 | * | ||
232 | * I'll for now just go with the reported rssi. | ||
233 | * | ||
234 | * We also should take into account the rateset | ||
235 | * here to find the best BSSID to try. | ||
236 | */ | ||
237 | if (network_matches_request(mac, net)) { | ||
238 | if (!best) { | ||
239 | best = net; | ||
240 | rssi = best->stats.rssi; | ||
241 | continue; | ||
242 | } | ||
243 | /* we already had a matching network, so | ||
244 | * compare their properties to get the | ||
245 | * better of the two ... (see above) | ||
246 | */ | ||
247 | if (rssi < net->stats.rssi) { | ||
248 | best = net; | ||
249 | rssi = best->stats.rssi; | ||
250 | } | ||
251 | } | ||
252 | } | ||
253 | /* if we unlock here, we might get interrupted and the `best' | ||
254 | * pointer could go stale */ | ||
255 | if (best) { | ||
256 | found = ieee80211softmac_create_network(mac, best); | ||
257 | /* if found is still NULL, then we got -ENOMEM somewhere */ | ||
258 | if (found) | ||
259 | ieee80211softmac_add_network(mac, found); | ||
260 | } | ||
261 | spin_unlock_irqrestore(&mac->ieee->lock, flags); | ||
262 | } | ||
263 | |||
264 | if (!found) { | ||
265 | if (mac->associnfo.scan_retry > 0) { | ||
266 | mac->associnfo.scan_retry--; | ||
267 | |||
268 | /* We know of no such network. Let's scan. | ||
269 | * NB: this also happens if we had no memory to copy the network info... | ||
270 | * Maybe we can hope to have more memory after scanning finishes ;) | ||
271 | */ | ||
272 | dprintk(KERN_INFO PFX "Associate: Scanning for networks first.\n"); | ||
273 | ieee80211softmac_notify(mac->dev, IEEE80211SOFTMAC_EVENT_SCAN_FINISHED, ieee80211softmac_assoc_notify_scan, NULL); | ||
274 | if (ieee80211softmac_start_scan(mac)) { | ||
275 | dprintk(KERN_INFO PFX "Associate: failed to initiate scan. Is device up?\n"); | ||
276 | } | ||
277 | goto out; | ||
278 | } else { | ||
279 | mac->associnfo.associating = 0; | ||
280 | mac->associnfo.associated = 0; | ||
281 | |||
282 | dprintk(KERN_INFO PFX "Unable to find matching network after scan!\n"); | ||
283 | /* reset the retry counter for the next user request since we | ||
284 | * break out and don't reschedule ourselves after this point. */ | ||
285 | mac->associnfo.scan_retry = IEEE80211SOFTMAC_ASSOC_SCAN_RETRY_LIMIT; | ||
286 | ieee80211softmac_call_events(mac, IEEE80211SOFTMAC_EVENT_ASSOCIATE_NET_NOT_FOUND, NULL); | ||
287 | goto out; | ||
288 | } | ||
289 | } | ||
290 | |||
291 | /* reset the retry counter for the next user request since we | ||
292 | * now found a net and will try to associate to it, but not | ||
293 | * schedule this function again. */ | ||
294 | mac->associnfo.scan_retry = IEEE80211SOFTMAC_ASSOC_SCAN_RETRY_LIMIT; | ||
295 | mac->associnfo.bssvalid = 1; | ||
296 | memcpy(mac->associnfo.bssid, found->bssid, ETH_ALEN); | ||
297 | /* copy the ESSID for displaying it */ | ||
298 | mac->associnfo.associate_essid.len = found->essid.len; | ||
299 | memcpy(mac->associnfo.associate_essid.data, found->essid.data, IW_ESSID_MAX_SIZE + 1); | ||
300 | |||
301 | /* we found a network! authenticate (if necessary) and associate to it. */ | ||
302 | if (found->authenticating) { | ||
303 | dprintk(KERN_INFO PFX "Already requested authentication, waiting...\n"); | ||
304 | if(!mac->associnfo.assoc_wait) { | ||
305 | mac->associnfo.assoc_wait = 1; | ||
306 | ieee80211softmac_notify_internal(mac, IEEE80211SOFTMAC_EVENT_ANY, found, ieee80211softmac_assoc_notify_auth, NULL, GFP_KERNEL); | ||
307 | } | ||
308 | goto out; | ||
309 | } | ||
310 | if (!found->authenticated && !found->authenticating) { | ||
311 | /* This relies on the fact that _auth_req only queues the work, | ||
312 | * otherwise adding the notification would be racy. */ | ||
313 | if (!ieee80211softmac_auth_req(mac, found)) { | ||
314 | if(!mac->associnfo.assoc_wait) { | ||
315 | dprintk(KERN_INFO PFX "Cannot associate without being authenticated, requested authentication\n"); | ||
316 | mac->associnfo.assoc_wait = 1; | ||
317 | ieee80211softmac_notify_internal(mac, IEEE80211SOFTMAC_EVENT_ANY, found, ieee80211softmac_assoc_notify_auth, NULL, GFP_KERNEL); | ||
318 | } | ||
319 | } else { | ||
320 | printkl(KERN_WARNING PFX "Not authenticated, but requesting authentication failed. Giving up to associate\n"); | ||
321 | mac->associnfo.assoc_wait = 0; | ||
322 | ieee80211softmac_call_events(mac, IEEE80211SOFTMAC_EVENT_ASSOCIATE_FAILED, found); | ||
323 | } | ||
324 | goto out; | ||
325 | } | ||
326 | /* finally! now we can start associating */ | ||
327 | mac->associnfo.assoc_wait = 0; | ||
328 | ieee80211softmac_assoc(mac, found); | ||
329 | |||
330 | out: | ||
331 | mutex_unlock(&mac->associnfo.mutex); | ||
332 | } | ||
333 | |||
334 | /* call this to do whatever is necessary when we're associated */ | ||
335 | static void | ||
336 | ieee80211softmac_associated(struct ieee80211softmac_device *mac, | ||
337 | struct ieee80211_assoc_response * resp, | ||
338 | struct ieee80211softmac_network *net) | ||
339 | { | ||
340 | u16 cap = le16_to_cpu(resp->capability); | ||
341 | u8 erp_value = net->erp_value; | ||
342 | |||
343 | mac->associnfo.associating = 0; | ||
344 | mac->bssinfo.supported_rates = net->supported_rates; | ||
345 | ieee80211softmac_recalc_txrates(mac); | ||
346 | |||
347 | mac->associnfo.associated = 1; | ||
348 | |||
349 | mac->associnfo.short_preamble_available = | ||
350 | (cap & WLAN_CAPABILITY_SHORT_PREAMBLE) != 0; | ||
351 | ieee80211softmac_process_erp(mac, erp_value); | ||
352 | |||
353 | if (mac->set_bssid_filter) | ||
354 | mac->set_bssid_filter(mac->dev, net->bssid); | ||
355 | memcpy(mac->ieee->bssid, net->bssid, ETH_ALEN); | ||
356 | netif_carrier_on(mac->dev); | ||
357 | |||
358 | mac->association_id = le16_to_cpup(&resp->aid); | ||
359 | } | ||
360 | |||
361 | /* received frame handling functions */ | ||
362 | int | ||
363 | ieee80211softmac_handle_assoc_response(struct net_device * dev, | ||
364 | struct ieee80211_assoc_response * resp, | ||
365 | struct ieee80211_network * _ieee80211_network) | ||
366 | { | ||
367 | /* NOTE: the network parameter has to be mostly ignored by | ||
368 | * this code because it is the ieee80211's pointer | ||
369 | * to the struct, not ours (we made a copy) | ||
370 | */ | ||
371 | struct ieee80211softmac_device *mac = ieee80211_priv(dev); | ||
372 | u16 status = le16_to_cpup(&resp->status); | ||
373 | struct ieee80211softmac_network *network = NULL; | ||
374 | unsigned long flags; | ||
375 | DECLARE_MAC_BUF(mac2); | ||
376 | |||
377 | if (unlikely(!mac->running)) | ||
378 | return -ENODEV; | ||
379 | |||
380 | spin_lock_irqsave(&mac->lock, flags); | ||
381 | |||
382 | if (!mac->associnfo.associating) { | ||
383 | /* we race against the timeout function, so make sure | ||
384 | * only one of us can do work */ | ||
385 | spin_unlock_irqrestore(&mac->lock, flags); | ||
386 | return 0; | ||
387 | } | ||
388 | network = ieee80211softmac_get_network_by_bssid_locked(mac, resp->header.addr3); | ||
389 | |||
390 | /* someone sending us things without us knowing him? Ignore. */ | ||
391 | if (!network) { | ||
392 | dprintk(KERN_INFO PFX "Received unrequested assocation response from %s\n", | ||
393 | print_mac(mac2, resp->header.addr3)); | ||
394 | spin_unlock_irqrestore(&mac->lock, flags); | ||
395 | return 0; | ||
396 | } | ||
397 | |||
398 | /* now that we know it was for us, we can cancel the timeout */ | ||
399 | cancel_delayed_work(&mac->associnfo.timeout); | ||
400 | |||
401 | /* if the association response included an ERP IE, update our saved | ||
402 | * copy */ | ||
403 | if (_ieee80211_network->flags & NETWORK_HAS_ERP_VALUE) | ||
404 | network->erp_value = _ieee80211_network->erp_value; | ||
405 | |||
406 | switch (status) { | ||
407 | case 0: | ||
408 | dprintk(KERN_INFO PFX "associated!\n"); | ||
409 | ieee80211softmac_associated(mac, resp, network); | ||
410 | ieee80211softmac_call_events_locked(mac, IEEE80211SOFTMAC_EVENT_ASSOCIATED, network); | ||
411 | break; | ||
412 | case WLAN_REASON_STA_REQ_ASSOC_WITHOUT_AUTH: | ||
413 | if (!network->auth_desynced_once) { | ||
414 | /* there seem to be a few rare cases where our view of | ||
415 | * the world is obscured, or buggy APs that don't DEAUTH | ||
416 | * us properly. So we handle that, but allow it only once. | ||
417 | */ | ||
418 | printkl(KERN_INFO PFX "We were not authenticated during association, retrying...\n"); | ||
419 | network->authenticated = 0; | ||
420 | /* we don't want to do this more than once ... */ | ||
421 | network->auth_desynced_once = 1; | ||
422 | queue_delayed_work(mac->wq, &mac->associnfo.work, 0); | ||
423 | break; | ||
424 | } | ||
425 | default: | ||
426 | dprintk(KERN_INFO PFX "associating failed (reason: 0x%x)!\n", status); | ||
427 | mac->associnfo.associating = 0; | ||
428 | mac->associnfo.bssvalid = 0; | ||
429 | mac->associnfo.associated = 0; | ||
430 | ieee80211softmac_call_events_locked(mac, IEEE80211SOFTMAC_EVENT_ASSOCIATE_FAILED, network); | ||
431 | } | ||
432 | |||
433 | spin_unlock_irqrestore(&mac->lock, flags); | ||
434 | return 0; | ||
435 | } | ||
436 | |||
437 | void | ||
438 | ieee80211softmac_try_reassoc(struct ieee80211softmac_device *mac) | ||
439 | { | ||
440 | unsigned long flags; | ||
441 | |||
442 | spin_lock_irqsave(&mac->lock, flags); | ||
443 | mac->associnfo.associating = 1; | ||
444 | queue_delayed_work(mac->wq, &mac->associnfo.work, 0); | ||
445 | spin_unlock_irqrestore(&mac->lock, flags); | ||
446 | } | ||
447 | |||
448 | int | ||
449 | ieee80211softmac_handle_disassoc(struct net_device * dev, | ||
450 | struct ieee80211_disassoc *disassoc) | ||
451 | { | ||
452 | struct ieee80211softmac_device *mac = ieee80211_priv(dev); | ||
453 | |||
454 | if (unlikely(!mac->running)) | ||
455 | return -ENODEV; | ||
456 | |||
457 | if (memcmp(disassoc->header.addr2, mac->associnfo.bssid, ETH_ALEN)) | ||
458 | return 0; | ||
459 | |||
460 | if (memcmp(disassoc->header.addr1, mac->dev->dev_addr, ETH_ALEN)) | ||
461 | return 0; | ||
462 | |||
463 | dprintk(KERN_INFO PFX "got disassoc frame\n"); | ||
464 | ieee80211softmac_disassoc(mac); | ||
465 | |||
466 | ieee80211softmac_try_reassoc(mac); | ||
467 | |||
468 | return 0; | ||
469 | } | ||
470 | |||
471 | int | ||
472 | ieee80211softmac_handle_reassoc_req(struct net_device * dev, | ||
473 | struct ieee80211_reassoc_request * resp) | ||
474 | { | ||
475 | struct ieee80211softmac_device *mac = ieee80211_priv(dev); | ||
476 | struct ieee80211softmac_network *network; | ||
477 | |||
478 | if (unlikely(!mac->running)) | ||
479 | return -ENODEV; | ||
480 | |||
481 | network = ieee80211softmac_get_network_by_bssid(mac, resp->header.addr3); | ||
482 | if (!network) { | ||
483 | dprintkl(KERN_INFO PFX "reassoc request from unknown network\n"); | ||
484 | return 0; | ||
485 | } | ||
486 | queue_delayed_work(mac->wq, &mac->associnfo.work, 0); | ||
487 | |||
488 | return 0; | ||
489 | } | ||
diff --git a/net/ieee80211/softmac/ieee80211softmac_auth.c b/net/ieee80211/softmac/ieee80211softmac_auth.c deleted file mode 100644 index 1a96c2572578..000000000000 --- a/net/ieee80211/softmac/ieee80211softmac_auth.c +++ /dev/null | |||
@@ -1,413 +0,0 @@ | |||
1 | /* | ||
2 | * This file contains the softmac's authentication logic. | ||
3 | * | ||
4 | * Copyright (c) 2005, 2006 Johannes Berg <johannes@sipsolutions.net> | ||
5 | * Joseph Jezak <josejx@gentoo.org> | ||
6 | * Larry Finger <Larry.Finger@lwfinger.net> | ||
7 | * Danny van Dyk <kugelfang@gentoo.org> | ||
8 | * Michael Buesch <mbuesch@freenet.de> | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify it | ||
11 | * under the terms of version 2 of the GNU General Public License as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
15 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
17 | * more details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License | ||
20 | * along with this program; if not, write to the Free Software | ||
21 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
22 | * | ||
23 | * The full GNU General Public License is included in this distribution in the | ||
24 | * file called COPYING. | ||
25 | */ | ||
26 | |||
27 | #include "ieee80211softmac_priv.h" | ||
28 | |||
29 | static void ieee80211softmac_auth_queue(struct work_struct *work); | ||
30 | |||
31 | /* Queues an auth request to the desired AP */ | ||
32 | int | ||
33 | ieee80211softmac_auth_req(struct ieee80211softmac_device *mac, | ||
34 | struct ieee80211softmac_network *net) | ||
35 | { | ||
36 | struct ieee80211softmac_auth_queue_item *auth; | ||
37 | unsigned long flags; | ||
38 | DECLARE_MAC_BUF(mac2); | ||
39 | |||
40 | if (net->authenticating || net->authenticated) | ||
41 | return 0; | ||
42 | net->authenticating = 1; | ||
43 | |||
44 | /* Add the network if it's not already added */ | ||
45 | ieee80211softmac_add_network(mac, net); | ||
46 | |||
47 | dprintk(KERN_NOTICE PFX "Queueing Authentication Request to %s\n", print_mac(mac2, net->bssid)); | ||
48 | /* Queue the auth request */ | ||
49 | auth = (struct ieee80211softmac_auth_queue_item *) | ||
50 | kmalloc(sizeof(struct ieee80211softmac_auth_queue_item), GFP_KERNEL); | ||
51 | if(auth == NULL) | ||
52 | return -ENOMEM; | ||
53 | |||
54 | auth->net = net; | ||
55 | auth->mac = mac; | ||
56 | auth->retry = IEEE80211SOFTMAC_AUTH_RETRY_LIMIT; | ||
57 | auth->state = IEEE80211SOFTMAC_AUTH_OPEN_REQUEST; | ||
58 | INIT_DELAYED_WORK(&auth->work, ieee80211softmac_auth_queue); | ||
59 | |||
60 | /* Lock (for list) */ | ||
61 | spin_lock_irqsave(&mac->lock, flags); | ||
62 | |||
63 | /* add to list */ | ||
64 | list_add_tail(&auth->list, &mac->auth_queue); | ||
65 | queue_delayed_work(mac->wq, &auth->work, 0); | ||
66 | spin_unlock_irqrestore(&mac->lock, flags); | ||
67 | |||
68 | return 0; | ||
69 | } | ||
70 | |||
71 | |||
72 | /* Sends an auth request to the desired AP and handles timeouts */ | ||
73 | static void | ||
74 | ieee80211softmac_auth_queue(struct work_struct *work) | ||
75 | { | ||
76 | struct ieee80211softmac_device *mac; | ||
77 | struct ieee80211softmac_auth_queue_item *auth; | ||
78 | struct ieee80211softmac_network *net; | ||
79 | unsigned long flags; | ||
80 | DECLARE_MAC_BUF(mac2); | ||
81 | |||
82 | auth = container_of(work, struct ieee80211softmac_auth_queue_item, | ||
83 | work.work); | ||
84 | net = auth->net; | ||
85 | mac = auth->mac; | ||
86 | |||
87 | if(auth->retry > 0) { | ||
88 | /* Switch to correct channel for this network */ | ||
89 | mac->set_channel(mac->dev, net->channel); | ||
90 | |||
91 | /* Lock and set flags */ | ||
92 | spin_lock_irqsave(&mac->lock, flags); | ||
93 | if (unlikely(!mac->running)) { | ||
94 | /* Prevent reschedule on workqueue flush */ | ||
95 | spin_unlock_irqrestore(&mac->lock, flags); | ||
96 | return; | ||
97 | } | ||
98 | net->authenticated = 0; | ||
99 | /* add a timeout call so we eventually give up waiting for an auth reply */ | ||
100 | queue_delayed_work(mac->wq, &auth->work, IEEE80211SOFTMAC_AUTH_TIMEOUT); | ||
101 | auth->retry--; | ||
102 | spin_unlock_irqrestore(&mac->lock, flags); | ||
103 | if (ieee80211softmac_send_mgt_frame(mac, auth->net, IEEE80211_STYPE_AUTH, auth->state)) | ||
104 | dprintk(KERN_NOTICE PFX "Sending Authentication Request to %s failed (this shouldn't happen, wait for the timeout).\n", | ||
105 | print_mac(mac2, net->bssid)); | ||
106 | else | ||
107 | dprintk(KERN_NOTICE PFX "Sent Authentication Request to %s.\n", print_mac(mac2, net->bssid)); | ||
108 | return; | ||
109 | } | ||
110 | |||
111 | printkl(KERN_WARNING PFX "Authentication timed out with %s\n", print_mac(mac2, net->bssid)); | ||
112 | /* Remove this item from the queue */ | ||
113 | spin_lock_irqsave(&mac->lock, flags); | ||
114 | net->authenticating = 0; | ||
115 | ieee80211softmac_call_events_locked(mac, IEEE80211SOFTMAC_EVENT_AUTH_TIMEOUT, net); | ||
116 | cancel_delayed_work(&auth->work); /* just to make sure... */ | ||
117 | list_del(&auth->list); | ||
118 | spin_unlock_irqrestore(&mac->lock, flags); | ||
119 | /* Free it */ | ||
120 | kfree(auth); | ||
121 | } | ||
122 | |||
123 | /* Sends a response to an auth challenge (for shared key auth). */ | ||
124 | static void | ||
125 | ieee80211softmac_auth_challenge_response(struct work_struct *work) | ||
126 | { | ||
127 | struct ieee80211softmac_auth_queue_item *aq = | ||
128 | container_of(work, struct ieee80211softmac_auth_queue_item, | ||
129 | work.work); | ||
130 | |||
131 | /* Send our response */ | ||
132 | ieee80211softmac_send_mgt_frame(aq->mac, aq->net, IEEE80211_STYPE_AUTH, aq->state); | ||
133 | } | ||
134 | |||
135 | /* Handle the auth response from the AP | ||
136 | * This should be registered with ieee80211 as handle_auth | ||
137 | */ | ||
138 | int | ||
139 | ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *auth) | ||
140 | { | ||
141 | |||
142 | struct list_head *list_ptr; | ||
143 | struct ieee80211softmac_device *mac = ieee80211_priv(dev); | ||
144 | struct ieee80211softmac_auth_queue_item *aq = NULL; | ||
145 | struct ieee80211softmac_network *net = NULL; | ||
146 | unsigned long flags; | ||
147 | u8 * data; | ||
148 | DECLARE_MAC_BUF(mac2); | ||
149 | |||
150 | if (unlikely(!mac->running)) | ||
151 | return -ENODEV; | ||
152 | |||
153 | /* Find correct auth queue item */ | ||
154 | spin_lock_irqsave(&mac->lock, flags); | ||
155 | list_for_each(list_ptr, &mac->auth_queue) { | ||
156 | aq = list_entry(list_ptr, struct ieee80211softmac_auth_queue_item, list); | ||
157 | net = aq->net; | ||
158 | if (!memcmp(net->bssid, auth->header.addr2, ETH_ALEN)) | ||
159 | break; | ||
160 | else | ||
161 | aq = NULL; | ||
162 | } | ||
163 | spin_unlock_irqrestore(&mac->lock, flags); | ||
164 | |||
165 | /* Make sure that we've got an auth queue item for this request */ | ||
166 | if(aq == NULL) | ||
167 | { | ||
168 | dprintkl(KERN_DEBUG PFX "Authentication response received from %s but no queue item exists.\n", print_mac(mac2, auth->header.addr2)); | ||
169 | /* Error #? */ | ||
170 | return -1; | ||
171 | } | ||
172 | |||
173 | /* Check for out of order authentication */ | ||
174 | if(!net->authenticating) | ||
175 | { | ||
176 | dprintkl(KERN_DEBUG PFX "Authentication response received from %s but did not request authentication.\n",print_mac(mac2, auth->header.addr2)); | ||
177 | return -1; | ||
178 | } | ||
179 | |||
180 | /* Parse the auth packet */ | ||
181 | switch(le16_to_cpu(auth->algorithm)) { | ||
182 | case WLAN_AUTH_OPEN: | ||
183 | /* Check the status code of the response */ | ||
184 | |||
185 | switch(le16_to_cpu(auth->status)) { | ||
186 | case WLAN_STATUS_SUCCESS: | ||
187 | /* Update the status to Authenticated */ | ||
188 | spin_lock_irqsave(&mac->lock, flags); | ||
189 | net->authenticating = 0; | ||
190 | net->authenticated = 1; | ||
191 | spin_unlock_irqrestore(&mac->lock, flags); | ||
192 | |||
193 | /* Send event */ | ||
194 | printkl(KERN_NOTICE PFX "Open Authentication completed with %s\n", print_mac(mac2, net->bssid)); | ||
195 | ieee80211softmac_call_events(mac, IEEE80211SOFTMAC_EVENT_AUTHENTICATED, net); | ||
196 | break; | ||
197 | default: | ||
198 | /* Lock and reset flags */ | ||
199 | spin_lock_irqsave(&mac->lock, flags); | ||
200 | net->authenticated = 0; | ||
201 | net->authenticating = 0; | ||
202 | spin_unlock_irqrestore(&mac->lock, flags); | ||
203 | |||
204 | printkl(KERN_NOTICE PFX "Open Authentication with %s failed, error code: %i\n", | ||
205 | print_mac(mac2, net->bssid), le16_to_cpup(&auth->status)); | ||
206 | /* Count the error? */ | ||
207 | break; | ||
208 | } | ||
209 | goto free_aq; | ||
210 | break; | ||
211 | case WLAN_AUTH_SHARED_KEY: | ||
212 | /* Figure out where we are in the process */ | ||
213 | switch(le16_to_cpu(auth->transaction)) { | ||
214 | case IEEE80211SOFTMAC_AUTH_SHARED_CHALLENGE: | ||
215 | /* Check to make sure we have a challenge IE */ | ||
216 | data = (u8 *)auth->info_element; | ||
217 | if (*data++ != MFIE_TYPE_CHALLENGE) { | ||
218 | printkl(KERN_NOTICE PFX "Shared Key Authentication failed due to a missing challenge.\n"); | ||
219 | break; | ||
220 | } | ||
221 | /* Save the challenge */ | ||
222 | spin_lock_irqsave(&mac->lock, flags); | ||
223 | net->challenge_len = *data++; | ||
224 | if (net->challenge_len > WLAN_AUTH_CHALLENGE_LEN) | ||
225 | net->challenge_len = WLAN_AUTH_CHALLENGE_LEN; | ||
226 | kfree(net->challenge); | ||
227 | net->challenge = kmemdup(data, net->challenge_len, | ||
228 | GFP_ATOMIC); | ||
229 | if (net->challenge == NULL) { | ||
230 | printkl(KERN_NOTICE PFX "Shared Key " | ||
231 | "Authentication failed due to " | ||
232 | "memory shortage.\n"); | ||
233 | spin_unlock_irqrestore(&mac->lock, flags); | ||
234 | break; | ||
235 | } | ||
236 | aq->state = IEEE80211SOFTMAC_AUTH_SHARED_RESPONSE; | ||
237 | |||
238 | /* We reuse the work struct from the auth request here. | ||
239 | * It is safe to do so as each one is per-request, and | ||
240 | * at this point (dealing with authentication response) | ||
241 | * we have obviously already sent the initial auth | ||
242 | * request. */ | ||
243 | cancel_delayed_work(&aq->work); | ||
244 | INIT_DELAYED_WORK(&aq->work, &ieee80211softmac_auth_challenge_response); | ||
245 | queue_delayed_work(mac->wq, &aq->work, 0); | ||
246 | spin_unlock_irqrestore(&mac->lock, flags); | ||
247 | return 0; | ||
248 | case IEEE80211SOFTMAC_AUTH_SHARED_PASS: | ||
249 | kfree(net->challenge); | ||
250 | net->challenge = NULL; | ||
251 | net->challenge_len = 0; | ||
252 | /* Check the status code of the response */ | ||
253 | switch (le16_to_cpu(auth->status)) { | ||
254 | case WLAN_STATUS_SUCCESS: | ||
255 | /* Update the status to Authenticated */ | ||
256 | spin_lock_irqsave(&mac->lock, flags); | ||
257 | net->authenticating = 0; | ||
258 | net->authenticated = 1; | ||
259 | spin_unlock_irqrestore(&mac->lock, flags); | ||
260 | printkl(KERN_NOTICE PFX "Shared Key Authentication completed with %s\n", | ||
261 | print_mac(mac2, net->bssid)); | ||
262 | ieee80211softmac_call_events(mac, IEEE80211SOFTMAC_EVENT_AUTHENTICATED, net); | ||
263 | break; | ||
264 | default: | ||
265 | printkl(KERN_NOTICE PFX "Shared Key Authentication with %s failed, error code: %i\n", | ||
266 | print_mac(mac2, net->bssid), le16_to_cpup(&auth->status)); | ||
267 | /* Lock and reset flags */ | ||
268 | spin_lock_irqsave(&mac->lock, flags); | ||
269 | net->authenticating = 0; | ||
270 | net->authenticated = 0; | ||
271 | spin_unlock_irqrestore(&mac->lock, flags); | ||
272 | /* Count the error? */ | ||
273 | break; | ||
274 | } | ||
275 | goto free_aq; | ||
276 | break; | ||
277 | default: | ||
278 | printkl(KERN_WARNING PFX "Unhandled Authentication Step: %i\n", le16_to_cpu(auth->transaction)); | ||
279 | break; | ||
280 | } | ||
281 | goto free_aq; | ||
282 | break; | ||
283 | default: | ||
284 | /* ERROR */ | ||
285 | goto free_aq; | ||
286 | break; | ||
287 | } | ||
288 | return 0; | ||
289 | free_aq: | ||
290 | /* Cancel the timeout */ | ||
291 | spin_lock_irqsave(&mac->lock, flags); | ||
292 | cancel_delayed_work(&aq->work); | ||
293 | /* Remove this item from the queue */ | ||
294 | list_del(&aq->list); | ||
295 | spin_unlock_irqrestore(&mac->lock, flags); | ||
296 | |||
297 | /* Free it */ | ||
298 | kfree(aq); | ||
299 | return 0; | ||
300 | } | ||
301 | |||
302 | /* | ||
303 | * Handle deauthorization | ||
304 | */ | ||
305 | static void | ||
306 | ieee80211softmac_deauth_from_net(struct ieee80211softmac_device *mac, | ||
307 | struct ieee80211softmac_network *net) | ||
308 | { | ||
309 | struct ieee80211softmac_auth_queue_item *aq = NULL; | ||
310 | struct list_head *list_ptr; | ||
311 | unsigned long flags; | ||
312 | |||
313 | /* deauthentication implies disassociation */ | ||
314 | ieee80211softmac_disassoc(mac); | ||
315 | |||
316 | /* Lock and reset status flags */ | ||
317 | spin_lock_irqsave(&mac->lock, flags); | ||
318 | net->authenticating = 0; | ||
319 | net->authenticated = 0; | ||
320 | |||
321 | /* Find correct auth queue item, if it exists */ | ||
322 | list_for_each(list_ptr, &mac->auth_queue) { | ||
323 | aq = list_entry(list_ptr, struct ieee80211softmac_auth_queue_item, list); | ||
324 | if (!memcmp(net->bssid, aq->net->bssid, ETH_ALEN)) | ||
325 | break; | ||
326 | else | ||
327 | aq = NULL; | ||
328 | } | ||
329 | |||
330 | /* Cancel pending work */ | ||
331 | if(aq != NULL) | ||
332 | /* Not entirely safe? What about running work? */ | ||
333 | cancel_delayed_work(&aq->work); | ||
334 | |||
335 | /* Free our network ref */ | ||
336 | ieee80211softmac_del_network_locked(mac, net); | ||
337 | if(net->challenge != NULL) | ||
338 | kfree(net->challenge); | ||
339 | kfree(net); | ||
340 | |||
341 | /* can't transmit data right now... */ | ||
342 | netif_carrier_off(mac->dev); | ||
343 | spin_unlock_irqrestore(&mac->lock, flags); | ||
344 | |||
345 | ieee80211softmac_try_reassoc(mac); | ||
346 | } | ||
347 | |||
348 | /* | ||
349 | * Sends a deauth request to the desired AP | ||
350 | */ | ||
351 | int | ||
352 | ieee80211softmac_deauth_req(struct ieee80211softmac_device *mac, | ||
353 | struct ieee80211softmac_network *net, int reason) | ||
354 | { | ||
355 | int ret; | ||
356 | |||
357 | /* Make sure the network is authenticated */ | ||
358 | if (!net->authenticated) | ||
359 | { | ||
360 | dprintkl(KERN_DEBUG PFX "Can't send deauthentication packet, network is not authenticated.\n"); | ||
361 | /* Error okay? */ | ||
362 | return -EPERM; | ||
363 | } | ||
364 | |||
365 | /* Send the de-auth packet */ | ||
366 | if((ret = ieee80211softmac_send_mgt_frame(mac, net, IEEE80211_STYPE_DEAUTH, reason))) | ||
367 | return ret; | ||
368 | |||
369 | ieee80211softmac_deauth_from_net(mac, net); | ||
370 | return 0; | ||
371 | } | ||
372 | |||
373 | /* | ||
374 | * This should be registered with ieee80211 as handle_deauth | ||
375 | */ | ||
376 | int | ||
377 | ieee80211softmac_deauth_resp(struct net_device *dev, struct ieee80211_deauth *deauth) | ||
378 | { | ||
379 | |||
380 | struct ieee80211softmac_network *net = NULL; | ||
381 | struct ieee80211softmac_device *mac = ieee80211_priv(dev); | ||
382 | DECLARE_MAC_BUF(mac2); | ||
383 | |||
384 | if (unlikely(!mac->running)) | ||
385 | return -ENODEV; | ||
386 | |||
387 | if (!deauth) { | ||
388 | dprintk("deauth without deauth packet. eek!\n"); | ||
389 | return 0; | ||
390 | } | ||
391 | |||
392 | net = ieee80211softmac_get_network_by_bssid(mac, deauth->header.addr2); | ||
393 | |||
394 | if (net == NULL) { | ||
395 | dprintkl(KERN_DEBUG PFX "Received deauthentication packet from %s, but that network is unknown.\n", | ||
396 | print_mac(mac2, deauth->header.addr2)); | ||
397 | return 0; | ||
398 | } | ||
399 | |||
400 | /* Make sure the network is authenticated */ | ||
401 | if(!net->authenticated) | ||
402 | { | ||
403 | dprintkl(KERN_DEBUG PFX "Can't perform deauthentication, network is not authenticated.\n"); | ||
404 | /* Error okay? */ | ||
405 | return -EPERM; | ||
406 | } | ||
407 | |||
408 | ieee80211softmac_deauth_from_net(mac, net); | ||
409 | |||
410 | /* let's try to re-associate */ | ||
411 | queue_delayed_work(mac->wq, &mac->associnfo.work, 0); | ||
412 | return 0; | ||
413 | } | ||
diff --git a/net/ieee80211/softmac/ieee80211softmac_event.c b/net/ieee80211/softmac/ieee80211softmac_event.c deleted file mode 100644 index 8cef05b60f16..000000000000 --- a/net/ieee80211/softmac/ieee80211softmac_event.c +++ /dev/null | |||
@@ -1,189 +0,0 @@ | |||
1 | /* | ||
2 | * Event system | ||
3 | * Also see comments in public header file and longer explanation below. | ||
4 | * | ||
5 | * Copyright (c) 2005, 2006 Johannes Berg <johannes@sipsolutions.net> | ||
6 | * Joseph Jezak <josejx@gentoo.org> | ||
7 | * Larry Finger <Larry.Finger@lwfinger.net> | ||
8 | * Danny van Dyk <kugelfang@gentoo.org> | ||
9 | * Michael Buesch <mbuesch@freenet.de> | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or modify it | ||
12 | * under the terms of version 2 of the GNU General Public License as | ||
13 | * published by the Free Software Foundation. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
16 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
18 | * more details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public License | ||
21 | * along with this program; if not, write to the Free Software | ||
22 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
23 | * | ||
24 | * The full GNU General Public License is included in this distribution in the | ||
25 | * file called COPYING. | ||
26 | */ | ||
27 | |||
28 | #include "ieee80211softmac_priv.h" | ||
29 | |||
30 | /* | ||
31 | * Each event has associated to it | ||
32 | * - an event type (see constants in public header) | ||
33 | * - an event context (see below) | ||
34 | * - the function to be called | ||
35 | * - a context (extra parameter to call the function with) | ||
36 | * - and the softmac struct | ||
37 | * | ||
38 | * The event context is private and can only be used from | ||
39 | * within this module. Its meaning varies with the event | ||
40 | * type: | ||
41 | * SCAN_FINISHED, | ||
42 | * DISASSOCIATED: NULL | ||
43 | * ASSOCIATED, | ||
44 | * ASSOCIATE_FAILED, | ||
45 | * ASSOCIATE_TIMEOUT, | ||
46 | * AUTHENTICATED, | ||
47 | * AUTH_FAILED, | ||
48 | * AUTH_TIMEOUT: a pointer to the network struct | ||
49 | * ... | ||
50 | * Code within this module can use the event context to be only | ||
51 | * called when the event is true for that specific context | ||
52 | * as per above table. | ||
53 | * If the event context is NULL, then the notification is always called, | ||
54 | * regardless of the event context. The event context is not passed to | ||
55 | * the callback, it is assumed that the context suffices. | ||
56 | * | ||
57 | * You can also use the event context only by setting the event type | ||
58 | * to -1 (private use only), in which case you'll be notified | ||
59 | * whenever the event context matches. | ||
60 | */ | ||
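As a quick illustration of the subscription interface the comment above describes, a driver could register a one-shot callback roughly as follows. This is only a sketch: my_driver_assoc_done and my_driver_watch_assoc are hypothetical names, the callback signature is inferred from how ieee80211softmac_notify_callback() invokes event.fun() below, and the public header path is assumed to be <net/ieee80211softmac.h>.

	#include <net/ieee80211softmac.h>

	/* Hypothetical driver callback; signature as invoked by
	 * ieee80211softmac_notify_callback(): (dev, event_type, context). */
	static void my_driver_assoc_done(struct net_device *dev, int event_type,
					 void *context)
	{
		/* event_type reports which event actually fired (useful when
		 * subscribing to -1, "any event"); context is whatever was
		 * passed in at registration time. */
		printk(KERN_INFO "softmac: event %d on %s\n", event_type, dev->name);
	}

	static int my_driver_watch_assoc(struct net_device *dev)
	{
		/* One-shot: the entry is unlinked from mac->events before the
		 * callback work is queued, so it fires at most once. */
		return ieee80211softmac_notify_gfp(dev,
						   IEEE80211SOFTMAC_EVENT_ASSOCIATED,
						   my_driver_assoc_done, NULL,
						   GFP_KERNEL);
	}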
61 | |||
62 | static char *event_descriptions[IEEE80211SOFTMAC_EVENT_LAST+1] = { | ||
63 | NULL, /* scan finished */ | ||
64 | NULL, /* associated */ | ||
65 | "associating failed", | ||
66 | "associating timed out", | ||
67 | "authenticated", | ||
68 | "authenticating failed", | ||
69 | "authenticating timed out", | ||
70 | "associating failed because no suitable network was found", | ||
71 | NULL, /* disassociated */ | ||
72 | }; | ||
73 | |||
74 | |||
75 | static void | ||
76 | ieee80211softmac_notify_callback(struct work_struct *work) | ||
77 | { | ||
78 | struct ieee80211softmac_event *pevent = | ||
79 | container_of(work, struct ieee80211softmac_event, work.work); | ||
80 | struct ieee80211softmac_event event = *pevent; | ||
81 | kfree(pevent); | ||
82 | |||
83 | event.fun(event.mac->dev, event.event_type, event.context); | ||
84 | } | ||
85 | |||
86 | int | ||
87 | ieee80211softmac_notify_internal(struct ieee80211softmac_device *mac, | ||
88 | int event, void *event_context, notify_function_ptr fun, void *context, gfp_t gfp_mask) | ||
89 | { | ||
90 | struct ieee80211softmac_event *eventptr; | ||
91 | unsigned long flags; | ||
92 | |||
93 | if (event < -1 || event > IEEE80211SOFTMAC_EVENT_LAST) | ||
94 | return -ENOSYS; | ||
95 | |||
96 | if (!fun) | ||
97 | return -EINVAL; | ||
98 | |||
99 | eventptr = kmalloc(sizeof(struct ieee80211softmac_event), gfp_mask); | ||
100 | if (!eventptr) | ||
101 | return -ENOMEM; | ||
102 | |||
103 | eventptr->event_type = event; | ||
104 | INIT_DELAYED_WORK(&eventptr->work, ieee80211softmac_notify_callback); | ||
105 | eventptr->fun = fun; | ||
106 | eventptr->context = context; | ||
107 | eventptr->mac = mac; | ||
108 | eventptr->event_context = event_context; | ||
109 | |||
110 | spin_lock_irqsave(&mac->lock, flags); | ||
111 | list_add(&eventptr->list, &mac->events); | ||
112 | spin_unlock_irqrestore(&mac->lock, flags); | ||
113 | |||
114 | return 0; | ||
115 | } | ||
116 | |||
117 | int | ||
118 | ieee80211softmac_notify_gfp(struct net_device *dev, | ||
119 | int event, notify_function_ptr fun, void *context, gfp_t gfp_mask) | ||
120 | { | ||
121 | struct ieee80211softmac_device *mac = ieee80211_priv(dev); | ||
122 | |||
123 | if (event < 0 || event > IEEE80211SOFTMAC_EVENT_LAST) | ||
124 | return -ENOSYS; | ||
125 | |||
126 | return ieee80211softmac_notify_internal(mac, event, NULL, fun, context, gfp_mask); | ||
127 | } | ||
128 | EXPORT_SYMBOL_GPL(ieee80211softmac_notify_gfp); | ||
129 | |||
130 | /* private -- calling all callbacks that were specified */ | ||
131 | void | ||
132 | ieee80211softmac_call_events_locked(struct ieee80211softmac_device *mac, int event, void *event_ctx) | ||
133 | { | ||
134 | struct ieee80211softmac_event *eventptr, *tmp; | ||
135 | struct ieee80211softmac_network *network; | ||
136 | |||
137 | if (event >= 0) { | ||
138 | union iwreq_data wrqu; | ||
139 | int we_event; | ||
140 | char *msg = NULL; | ||
141 | |||
142 | memset(&wrqu, '\0', sizeof (union iwreq_data)); | ||
143 | |||
144 | switch(event) { | ||
145 | case IEEE80211SOFTMAC_EVENT_ASSOCIATED: | ||
146 | network = (struct ieee80211softmac_network *)event_ctx; | ||
147 | memcpy(wrqu.ap_addr.sa_data, &network->bssid[0], ETH_ALEN); | ||
148 | /* fall through */ | ||
149 | case IEEE80211SOFTMAC_EVENT_DISASSOCIATED: | ||
150 | wrqu.ap_addr.sa_family = ARPHRD_ETHER; | ||
151 | we_event = SIOCGIWAP; | ||
152 | break; | ||
153 | case IEEE80211SOFTMAC_EVENT_SCAN_FINISHED: | ||
154 | we_event = SIOCGIWSCAN; | ||
155 | break; | ||
156 | default: | ||
157 | msg = event_descriptions[event]; | ||
158 | if (!msg) | ||
159 | msg = "SOFTMAC EVENT BUG"; | ||
160 | wrqu.data.length = strlen(msg); | ||
161 | we_event = IWEVCUSTOM; | ||
162 | break; | ||
163 | } | ||
164 | wireless_send_event(mac->dev, we_event, &wrqu, msg); | ||
165 | } | ||
166 | |||
167 | if (!list_empty(&mac->events)) | ||
168 | list_for_each_entry_safe(eventptr, tmp, &mac->events, list) { | ||
169 | if ((eventptr->event_type == event || eventptr->event_type == -1) | ||
170 | && (eventptr->event_context == NULL || eventptr->event_context == event_ctx)) { | ||
171 | list_del(&eventptr->list); | ||
172 | /* User may have subscribed to ANY event, so | ||
173 | * we tell them which event triggered it. */ | ||
174 | eventptr->event_type = event; | ||
175 | queue_delayed_work(mac->wq, &eventptr->work, 0); | ||
176 | } | ||
177 | } | ||
178 | } | ||
179 | |||
180 | void | ||
181 | ieee80211softmac_call_events(struct ieee80211softmac_device *mac, int event, void *event_ctx) | ||
182 | { | ||
183 | unsigned long flags; | ||
184 | |||
185 | spin_lock_irqsave(&mac->lock, flags); | ||
186 | ieee80211softmac_call_events_locked(mac, event, event_ctx); | ||
187 | |||
188 | spin_unlock_irqrestore(&mac->lock, flags); | ||
189 | } | ||
diff --git a/net/ieee80211/softmac/ieee80211softmac_io.c b/net/ieee80211/softmac/ieee80211softmac_io.c deleted file mode 100644 index 73b4b13fbd8f..000000000000 --- a/net/ieee80211/softmac/ieee80211softmac_io.c +++ /dev/null | |||
@@ -1,488 +0,0 @@ | |||
1 | /* | ||
2 | * Some parts based on code from net80211 | ||
3 | * Copyright (c) 2001 Atsushi Onoe | ||
4 | * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting | ||
5 | * All rights reserved. | ||
6 | * | ||
7 | * Redistribution and use in source and binary forms, with or without | ||
8 | * modification, are permitted provided that the following conditions | ||
9 | * are met: | ||
10 | * 1. Redistributions of source code must retain the above copyright | ||
11 | * notice, this list of conditions and the following disclaimer. | ||
12 | * 2. Redistributions in binary form must reproduce the above copyright | ||
13 | * notice, this list of conditions and the following disclaimer in the | ||
14 | * documentation and/or other materials provided with the distribution. | ||
15 | * 3. The name of the author may not be used to endorse or promote products | ||
16 | * derived from this software without specific prior written permission. | ||
17 | * | ||
18 | * Alternatively, this software may be distributed under the terms of the | ||
19 | * GNU General Public License ("GPL") version 2 as published by the Free | ||
20 | * Software Foundation. | ||
21 | * | ||
22 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR | ||
23 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES | ||
24 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. | ||
25 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, | ||
26 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | ||
27 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
28 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
29 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
30 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | ||
31 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
32 | * | ||
33 | */ | ||
34 | |||
35 | #include "ieee80211softmac_priv.h" | ||
36 | |||
37 | /* Helper functions for inserting data into the frames */ | ||
38 | |||
39 | /* | ||
40 | * Adds an ESSID element to the frame | ||
41 | * | ||
42 | */ | ||
43 | static u8 * | ||
44 | ieee80211softmac_add_essid(u8 *dst, struct ieee80211softmac_essid *essid) | ||
45 | { | ||
46 | if (essid) { | ||
47 | *dst++ = MFIE_TYPE_SSID; | ||
48 | *dst++ = essid->len; | ||
49 | memcpy(dst, essid->data, essid->len); | ||
50 | return dst+essid->len; | ||
51 | } else { | ||
52 | *dst++ = MFIE_TYPE_SSID; | ||
53 | *dst++ = 0; | ||
54 | return dst; | ||
55 | } | ||
56 | } | ||
57 | |||
58 | /* Adds Supported Rates and if required Extended Rates Information Element | ||
59 | * to the frame, ASSUMES WE HAVE A SORTED LIST OF RATES */ | ||
60 | static u8 * | ||
61 | ieee80211softmac_frame_add_rates(u8 *dst, const struct ieee80211softmac_ratesinfo *r) | ||
62 | { | ||
63 | int cck_len, ofdm_len; | ||
64 | *dst++ = MFIE_TYPE_RATES; | ||
65 | |||
66 | for (cck_len = 0; cck_len < r->count && ieee80211_is_cck_rate(r->rates[cck_len]); cck_len++); | ||
67 | |||
68 | if(cck_len > IEEE80211SOFTMAC_MAX_RATES_LEN) | ||
69 | cck_len = IEEE80211SOFTMAC_MAX_RATES_LEN; | ||
70 | *dst++ = cck_len; | ||
71 | memcpy(dst, r->rates, cck_len); | ||
72 | dst += cck_len; | ||
73 | |||
74 | if(cck_len < r->count){ | ||
75 | for (ofdm_len = 0; (ofdm_len + cck_len < r->count) && ieee80211_is_ofdm_rate(r->rates[ofdm_len + cck_len]); ofdm_len++); | ||
76 | if (ofdm_len > 0) { | ||
77 | if (ofdm_len > IEEE80211SOFTMAC_MAX_EX_RATES_LEN) | ||
78 | ofdm_len = IEEE80211SOFTMAC_MAX_EX_RATES_LEN; | ||
79 | *dst++ = MFIE_TYPE_RATES_EX; | ||
80 | *dst++ = ofdm_len; | ||
81 | memcpy(dst, r->rates + cck_len, ofdm_len); | ||
82 | dst += ofdm_len; | ||
83 | } | ||
84 | } | ||
85 | return dst; | ||
86 | } | ||
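For a concrete picture of what the helper above emits: with a typical sorted b/g rate set, the Supported Rates element (ID 1) carries the CCK rates and the Extended Supported Rates element (ID 50) carries the OFDM rates, each rate encoded in units of 500 kb/s (basic rates may additionally have the 0x80 flag set). The bytes below are illustrative only; the real code uses the MFIE_TYPE_* constants and whatever rates are stored in ratesinfo.

	/* Illustrative only: IE bytes for 1/2/5.5/11 Mb/s (CCK)
	 * plus 6..54 Mb/s (OFDM), rates in units of 500 kb/s. */
	static const u8 example_rates_ie[] = {
		0x01, 0x04,			/* Supported Rates, length 4 */
		0x02, 0x04, 0x0b, 0x16,		/* 1, 2, 5.5, 11 Mb/s */
	};

	static const u8 example_ext_rates_ie[] = {
		0x32, 0x08,			/* Extended Supported Rates, length 8 */
		0x0c, 0x12, 0x18, 0x24,		/* 6, 9, 12, 18 Mb/s */
		0x30, 0x48, 0x60, 0x6c,		/* 24, 36, 48, 54 Mb/s */
	};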
87 | |||
88 | /* Allocate a management frame */ | ||
89 | static u8 * | ||
90 | ieee80211softmac_alloc_mgt(u32 size) | ||
91 | { | ||
92 | u8 * data; | ||
93 | |||
94 | /* Add the header and FCS to the size */ | ||
95 | size = size + IEEE80211_3ADDR_LEN; | ||
96 | if(size > IEEE80211_DATA_LEN) | ||
97 | return NULL; | ||
98 | /* Allocate the frame */ | ||
99 | data = kzalloc(size, GFP_ATOMIC); | ||
100 | return data; | ||
101 | } | ||
102 | |||
103 | /* | ||
104 | * Add a 2 Address Header | ||
105 | */ | ||
106 | static void | ||
107 | ieee80211softmac_hdr_2addr(struct ieee80211softmac_device *mac, | ||
108 | struct ieee80211_hdr_2addr *header, u32 type, u8 *dest) | ||
109 | { | ||
110 | /* Fill in the frame control flags */ | ||
111 | header->frame_ctl = cpu_to_le16(type); | ||
112 | /* Control packets always have WEP turned off */ | ||
113 | if(type > IEEE80211_STYPE_CFENDACK && type < IEEE80211_STYPE_PSPOLL) | ||
114 | header->frame_ctl |= mac->ieee->sec.level ? cpu_to_le16(IEEE80211_FCTL_PROTECTED) : 0; | ||
115 | |||
116 | /* Fill in the duration */ | ||
117 | header->duration_id = 0; | ||
118 | /* FIXME: How do I find this? | ||
119 | * calculate. But most drivers just fill in 0 (except if it's a station id of course) */ | ||
120 | |||
121 | /* Fill in the Destination Address */ | ||
122 | if(dest == NULL) | ||
123 | memset(header->addr1, 0xFF, ETH_ALEN); | ||
124 | else | ||
125 | memcpy(header->addr1, dest, ETH_ALEN); | ||
126 | /* Fill in the Source Address */ | ||
127 | memcpy(header->addr2, mac->ieee->dev->dev_addr, ETH_ALEN); | ||
128 | |||
129 | } | ||
130 | |||
131 | |||
132 | /* Add a 3 Address Header */ | ||
133 | static void | ||
134 | ieee80211softmac_hdr_3addr(struct ieee80211softmac_device *mac, | ||
135 | struct ieee80211_hdr_3addr *header, u32 type, u8 *dest, u8 *bssid) | ||
136 | { | ||
137 | /* This is common with 2addr, so use that instead */ | ||
138 | ieee80211softmac_hdr_2addr(mac, (struct ieee80211_hdr_2addr *)header, type, dest); | ||
139 | |||
140 | /* Fill in the BSS ID */ | ||
141 | if(bssid == NULL) | ||
142 | memset(header->addr3, 0xFF, ETH_ALEN); | ||
143 | else | ||
144 | memcpy(header->addr3, bssid, ETH_ALEN); | ||
145 | |||
146 | /* Fill in the sequence # */ | ||
147 | /* FIXME: I need to add this to the softmac struct | ||
148 | * shouldn't the sequence number be in ieee80211? */ | ||
149 | } | ||
150 | |||
151 | static __le16 | ||
152 | ieee80211softmac_capabilities(struct ieee80211softmac_device *mac, | ||
153 | struct ieee80211softmac_network *net) | ||
154 | { | ||
155 | __le16 capability = 0; | ||
156 | |||
157 | /* ESS and IBSS bits are set according to the current mode */ | ||
158 | switch (mac->ieee->iw_mode) { | ||
159 | case IW_MODE_INFRA: | ||
160 | capability = cpu_to_le16(WLAN_CAPABILITY_ESS); | ||
161 | break; | ||
162 | case IW_MODE_ADHOC: | ||
163 | capability = cpu_to_le16(WLAN_CAPABILITY_IBSS); | ||
164 | break; | ||
165 | case IW_MODE_AUTO: | ||
166 | capability = cpu_to_le16(net->capabilities & | ||
167 | (WLAN_CAPABILITY_ESS|WLAN_CAPABILITY_IBSS)); | ||
168 | break; | ||
169 | default: | ||
170 | /* bleh. we don't ever go to these modes */ | ||
171 | printk(KERN_ERR PFX "invalid iw_mode!\n"); | ||
172 | break; | ||
173 | } | ||
174 | |||
175 | /* CF Pollable / CF Poll Request */ | ||
176 | /* Needs to be implemented, for now, the 0's == not supported */ | ||
177 | |||
178 | /* Privacy Bit */ | ||
179 | capability |= mac->ieee->sec.level ? | ||
180 | cpu_to_le16(WLAN_CAPABILITY_PRIVACY) : 0; | ||
181 | |||
182 | /* Short Preamble */ | ||
183 | /* Always supported: we probably won't ever be powering devices which | ||
184 | * don't support this... */ | ||
185 | capability |= cpu_to_le16(WLAN_CAPABILITY_SHORT_PREAMBLE); | ||
186 | |||
187 | /* PBCC */ | ||
188 | /* Not widely used */ | ||
189 | |||
190 | /* Channel Agility */ | ||
191 | /* Not widely used */ | ||
192 | |||
193 | /* Short Slot */ | ||
194 | /* Will be implemented later */ | ||
195 | |||
196 | /* DSSS-OFDM */ | ||
197 | /* Not widely used */ | ||
198 | |||
199 | return capability; | ||
200 | } | ||
201 | |||
202 | /***************************************************************************** | ||
203 | * Create Management packets | ||
204 | *****************************************************************************/ | ||
205 | |||
206 | /* Creates an association request packet */ | ||
207 | static u32 | ||
208 | ieee80211softmac_assoc_req(struct ieee80211_assoc_request **pkt, | ||
209 | struct ieee80211softmac_device *mac, struct ieee80211softmac_network *net) | ||
210 | { | ||
211 | u8 *data; | ||
212 | (*pkt) = (struct ieee80211_assoc_request *)ieee80211softmac_alloc_mgt( | ||
213 | 2 + /* Capability Info */ | ||
214 | 2 + /* Listen Interval */ | ||
215 | /* SSID IE */ | ||
216 | 1 + 1 + IW_ESSID_MAX_SIZE + | ||
217 | /* Rates IE */ | ||
218 | 1 + 1 + IEEE80211SOFTMAC_MAX_RATES_LEN + | ||
219 | /* Extended Rates IE */ | ||
220 | 1 + 1 + IEEE80211SOFTMAC_MAX_EX_RATES_LEN + | ||
221 | /* WPA IE if present */ | ||
222 | mac->wpa.IElen | ||
223 | /* Other IE's? Optional? | ||
224 | * Yeah, probably need an extra IE parameter -- lots of vendors like to | ||
225 | * fill in their own IEs */ | ||
226 | ); | ||
227 | if (unlikely((*pkt) == NULL)) | ||
228 | return 0; | ||
229 | ieee80211softmac_hdr_3addr(mac, &((*pkt)->header), IEEE80211_STYPE_ASSOC_REQ, net->bssid, net->bssid); | ||
230 | |||
231 | /* Fill in the capabilities */ | ||
232 | (*pkt)->capability = ieee80211softmac_capabilities(mac, net); | ||
233 | |||
234 | /* Fill in Listen Interval (?) */ | ||
235 | (*pkt)->listen_interval = cpu_to_le16(10); | ||
236 | |||
237 | data = (u8 *)(*pkt)->info_element; | ||
238 | /* Add SSID */ | ||
239 | data = ieee80211softmac_add_essid(data, &net->essid); | ||
240 | /* Add Rates */ | ||
241 | data = ieee80211softmac_frame_add_rates(data, &mac->ratesinfo); | ||
242 | /* Add WPA IE */ | ||
243 | if (mac->wpa.IElen && mac->wpa.IE) { | ||
244 | memcpy(data, mac->wpa.IE, mac->wpa.IElen); | ||
245 | data += mac->wpa.IElen; | ||
246 | } | ||
247 | /* Return the number of used bytes */ | ||
248 | return (data - (u8*)(*pkt)); | ||
249 | } | ||
250 | |||
251 | /* Create a reassociation request packet */ | ||
252 | static u32 | ||
253 | ieee80211softmac_reassoc_req(struct ieee80211_reassoc_request **pkt, | ||
254 | struct ieee80211softmac_device *mac, struct ieee80211softmac_network *net) | ||
255 | { | ||
256 | u8 *data; | ||
257 | (*pkt) = (struct ieee80211_reassoc_request *)ieee80211softmac_alloc_mgt( | ||
258 | 2 + /* Capability Info */ | ||
259 | 2 + /* Listen Interval */ | ||
260 | ETH_ALEN + /* AP MAC */ | ||
261 | /* SSID IE */ | ||
262 | 1 + 1 + IW_ESSID_MAX_SIZE + | ||
263 | /* Rates IE */ | ||
264 | 1 + 1 + IEEE80211SOFTMAC_MAX_RATES_LEN + | ||
265 | /* Extended Rates IE */ | ||
266 | 1 + 1 + IEEE80211SOFTMAC_MAX_EX_RATES_LEN | ||
267 | /* Other IE's? */ | ||
268 | ); | ||
269 | if (unlikely((*pkt) == NULL)) | ||
270 | return 0; | ||
271 | ieee80211softmac_hdr_3addr(mac, &((*pkt)->header), IEEE80211_STYPE_REASSOC_REQ, net->bssid, net->bssid); | ||
272 | |||
273 | /* Fill in the capabilities */ | ||
274 | (*pkt)->capability = ieee80211softmac_capabilities(mac, net); | ||
275 | |||
276 | /* Fill in Listen Interval (?) */ | ||
277 | (*pkt)->listen_interval = cpu_to_le16(10); | ||
278 | /* Fill in the current AP MAC */ | ||
279 | memcpy((*pkt)->current_ap, mac->ieee->bssid, ETH_ALEN); | ||
280 | |||
281 | data = (u8 *)(*pkt)->info_element; | ||
282 | /* Add SSID */ | ||
283 | data = ieee80211softmac_add_essid(data, &net->essid); | ||
284 | /* Add Rates */ | ||
285 | data = ieee80211softmac_frame_add_rates(data, &mac->ratesinfo); | ||
286 | /* Return packet size */ | ||
287 | return (data - (u8 *)(*pkt)); | ||
288 | } | ||
289 | |||
290 | /* Create an authentication packet */ | ||
291 | static u32 | ||
292 | ieee80211softmac_auth(struct ieee80211_auth **pkt, | ||
293 | struct ieee80211softmac_device *mac, struct ieee80211softmac_network *net, | ||
294 | u16 transaction, u16 status, int *encrypt_mpdu) | ||
295 | { | ||
296 | u8 *data; | ||
297 | int auth_mode = mac->ieee->sec.auth_mode; | ||
298 | int is_shared_response = (auth_mode == WLAN_AUTH_SHARED_KEY | ||
299 | && transaction == IEEE80211SOFTMAC_AUTH_SHARED_RESPONSE); | ||
300 | |||
301 | /* Allocate Packet */ | ||
302 | (*pkt) = (struct ieee80211_auth *)ieee80211softmac_alloc_mgt( | ||
303 | 2 + /* Auth Algorithm */ | ||
304 | 2 + /* Auth Transaction Seq */ | ||
305 | 2 + /* Status Code */ | ||
306 | /* Challenge Text IE */ | ||
307 | (is_shared_response ? 1 + 1 + net->challenge_len : 0) | ||
308 | ); | ||
309 | if (unlikely((*pkt) == NULL)) | ||
310 | return 0; | ||
311 | ieee80211softmac_hdr_3addr(mac, &((*pkt)->header), IEEE80211_STYPE_AUTH, net->bssid, net->bssid); | ||
312 | |||
313 | /* Algorithm */ | ||
314 | (*pkt)->algorithm = cpu_to_le16(auth_mode); | ||
315 | /* Transaction */ | ||
316 | (*pkt)->transaction = cpu_to_le16(transaction); | ||
317 | /* Status */ | ||
318 | (*pkt)->status = cpu_to_le16(status); | ||
319 | |||
320 | data = (u8 *)(*pkt)->info_element; | ||
321 | /* Challenge Text */ | ||
322 | if (is_shared_response) { | ||
323 | *data = MFIE_TYPE_CHALLENGE; | ||
324 | data++; | ||
325 | |||
326 | /* Copy the challenge in */ | ||
327 | *data = net->challenge_len; | ||
328 | data++; | ||
329 | memcpy(data, net->challenge, net->challenge_len); | ||
330 | data += net->challenge_len; | ||
331 | |||
332 | /* Make sure this frame gets encrypted with the shared key */ | ||
333 | *encrypt_mpdu = 1; | ||
334 | } else | ||
335 | *encrypt_mpdu = 0; | ||
336 | |||
337 | /* Return the packet size */ | ||
338 | return (data - (u8 *)(*pkt)); | ||
339 | } | ||
340 | |||
341 | /* Create a disassociation or deauthentication packet */ | ||
342 | static u32 | ||
343 | ieee80211softmac_disassoc_deauth(struct ieee80211_disassoc **pkt, | ||
344 | struct ieee80211softmac_device *mac, struct ieee80211softmac_network *net, | ||
345 | u16 type, u16 reason) | ||
346 | { | ||
347 | /* Allocate Packet */ | ||
348 | (*pkt) = (struct ieee80211_disassoc *)ieee80211softmac_alloc_mgt(2); | ||
349 | if (unlikely((*pkt) == NULL)) | ||
350 | return 0; | ||
351 | ieee80211softmac_hdr_3addr(mac, &((*pkt)->header), type, net->bssid, net->bssid); | ||
352 | /* Reason */ | ||
353 | (*pkt)->reason = cpu_to_le16(reason); | ||
354 | /* Return the packet size */ | ||
355 | return (2 + IEEE80211_3ADDR_LEN); | ||
356 | } | ||
357 | |||
358 | /* Create a probe request packet */ | ||
359 | static u32 | ||
360 | ieee80211softmac_probe_req(struct ieee80211_probe_request **pkt, | ||
361 | struct ieee80211softmac_device *mac, struct ieee80211softmac_essid *essid) | ||
362 | { | ||
363 | u8 *data; | ||
364 | /* Allocate Packet */ | ||
365 | (*pkt) = (struct ieee80211_probe_request *)ieee80211softmac_alloc_mgt( | ||
366 | /* SSID of requested network */ | ||
367 | 1 + 1 + IW_ESSID_MAX_SIZE + | ||
368 | /* Rates IE */ | ||
369 | 1 + 1 + IEEE80211SOFTMAC_MAX_RATES_LEN + | ||
370 | /* Extended Rates IE */ | ||
371 | 1 + 1 + IEEE80211SOFTMAC_MAX_EX_RATES_LEN | ||
372 | ); | ||
373 | if (unlikely((*pkt) == NULL)) | ||
374 | return 0; | ||
375 | ieee80211softmac_hdr_3addr(mac, &((*pkt)->header), IEEE80211_STYPE_PROBE_REQ, NULL, NULL); | ||
376 | |||
377 | data = (u8 *)(*pkt)->info_element; | ||
378 | /* Add ESSID (can be NULL) */ | ||
379 | data = ieee80211softmac_add_essid(data, essid); | ||
380 | /* Add Rates */ | ||
381 | data = ieee80211softmac_frame_add_rates(data, &mac->ratesinfo); | ||
382 | /* Return packet size */ | ||
383 | return (data - (u8 *)(*pkt)); | ||
384 | } | ||
385 | |||
386 | /* Create a probe response packet */ | ||
387 | /* FIXME: Not complete */ | ||
388 | static u32 | ||
389 | ieee80211softmac_probe_resp(struct ieee80211_probe_response **pkt, | ||
390 | struct ieee80211softmac_device *mac, struct ieee80211softmac_network *net) | ||
391 | { | ||
392 | u8 *data; | ||
393 | /* Allocate Packet */ | ||
394 | (*pkt) = (struct ieee80211_probe_response *)ieee80211softmac_alloc_mgt( | ||
395 | 8 + /* Timestamp */ | ||
396 | 2 + /* Beacon Interval */ | ||
397 | 2 + /* Capability Info */ | ||
398 | /* SSID IE */ | ||
399 | 1 + 1 + IW_ESSID_MAX_SIZE + | ||
400 | 7 + /* FH Parameter Set */ | ||
401 | 2 + /* DS Parameter Set */ | ||
402 | 8 + /* CF Parameter Set */ | ||
403 | 4 /* IBSS Parameter Set */ | ||
404 | ); | ||
405 | if (unlikely((*pkt) == NULL)) | ||
406 | return 0; | ||
407 | ieee80211softmac_hdr_3addr(mac, &((*pkt)->header), IEEE80211_STYPE_PROBE_RESP, net->bssid, net->bssid); | ||
408 | data = (u8 *)(*pkt)->info_element; | ||
409 | |||
410 | /* Return the packet size */ | ||
411 | return (data - (u8 *)(*pkt)); | ||
412 | } | ||
413 | |||
414 | |||
415 | /* Sends a management packet | ||
416 | * FIXME: document the use of the arg parameter | ||
417 | * for _AUTH: (transaction #) | (status << 16) | ||
418 | */ | ||
419 | int | ||
420 | ieee80211softmac_send_mgt_frame(struct ieee80211softmac_device *mac, | ||
421 | void *ptrarg, u32 type, u32 arg) | ||
422 | { | ||
423 | void *pkt = NULL; | ||
424 | u32 pkt_size = 0; | ||
425 | int encrypt_mpdu = 0; | ||
426 | |||
427 | switch(type) { | ||
428 | case IEEE80211_STYPE_ASSOC_REQ: | ||
429 | pkt_size = ieee80211softmac_assoc_req((struct ieee80211_assoc_request **)(&pkt), mac, (struct ieee80211softmac_network *)ptrarg); | ||
430 | break; | ||
431 | case IEEE80211_STYPE_REASSOC_REQ: | ||
432 | pkt_size = ieee80211softmac_reassoc_req((struct ieee80211_reassoc_request **)(&pkt), mac, (struct ieee80211softmac_network *)ptrarg); | ||
433 | break; | ||
434 | case IEEE80211_STYPE_AUTH: | ||
435 | pkt_size = ieee80211softmac_auth((struct ieee80211_auth **)(&pkt), mac, (struct ieee80211softmac_network *)ptrarg, (u16)(arg & 0xFFFF), (u16) (arg >> 16), &encrypt_mpdu); | ||
436 | break; | ||
437 | case IEEE80211_STYPE_DISASSOC: | ||
438 | case IEEE80211_STYPE_DEAUTH: | ||
439 | pkt_size = ieee80211softmac_disassoc_deauth((struct ieee80211_disassoc **)(&pkt), mac, (struct ieee80211softmac_network *)ptrarg, type, (u16)(arg & 0xFFFF)); | ||
440 | break; | ||
441 | case IEEE80211_STYPE_PROBE_REQ: | ||
442 | pkt_size = ieee80211softmac_probe_req((struct ieee80211_probe_request **)(&pkt), mac, (struct ieee80211softmac_essid *)ptrarg); | ||
443 | break; | ||
444 | case IEEE80211_STYPE_PROBE_RESP: | ||
445 | pkt_size = ieee80211softmac_probe_resp((struct ieee80211_probe_response **)(&pkt), mac, (struct ieee80211softmac_network *)ptrarg); | ||
446 | break; | ||
447 | default: | ||
448 | printkl(KERN_DEBUG PFX "Unsupported Management Frame type: %i\n", type); | ||
449 | return -EINVAL; | ||
450 | } | ||
451 | |||
452 | if(pkt_size == 0 || pkt == NULL) { | ||
453 | printkl(KERN_DEBUG PFX "Error, packet is nonexistent or 0 length\n"); | ||
454 | return -ENOMEM; | ||
455 | } | ||
456 | |||
457 | /* Send the packet to the ieee80211 layer for tx */ | ||
458 | /* we defined softmac->mgmt_xmit for this. Should we keep it | ||
459 | * as it is (that means we'd need to wrap this into a txb), | ||
460 | * modify the prototype (so it matches this function), | ||
461 | * or get rid of it altogether? | ||
462 | * Does this work for you now? | ||
463 | */ | ||
464 | ieee80211_tx_frame(mac->ieee, (struct ieee80211_hdr *)pkt, | ||
465 | IEEE80211_3ADDR_LEN, pkt_size, encrypt_mpdu); | ||
466 | |||
467 | kfree(pkt); | ||
468 | return 0; | ||
469 | } | ||
470 | |||
471 | /* Beacon handling */ | ||
472 | int ieee80211softmac_handle_beacon(struct net_device *dev, | ||
473 | struct ieee80211_beacon *beacon, | ||
474 | struct ieee80211_network *network) | ||
475 | { | ||
476 | struct ieee80211softmac_device *mac = ieee80211_priv(dev); | ||
477 | |||
478 | /* This might race, but we don't really care and it's not worth | ||
479 | * adding heavyweight locking in this fastpath. | ||
480 | */ | ||
481 | if (mac->associnfo.associated) { | ||
482 | if (memcmp(network->bssid, mac->associnfo.bssid, ETH_ALEN) == 0) | ||
483 | ieee80211softmac_process_erp(mac, network->erp_value); | ||
484 | } | ||
485 | |||
486 | return 0; | ||
487 | } | ||
488 | |||
diff --git a/net/ieee80211/softmac/ieee80211softmac_module.c b/net/ieee80211/softmac/ieee80211softmac_module.c deleted file mode 100644 index 07505ca859af..000000000000 --- a/net/ieee80211/softmac/ieee80211softmac_module.c +++ /dev/null | |||
@@ -1,568 +0,0 @@ | |||
1 | /* | ||
2 | * Contains some basic softmac functions along with module registration code etc. | ||
3 | * | ||
4 | * Copyright (c) 2005, 2006 Johannes Berg <johannes@sipsolutions.net> | ||
5 | * Joseph Jezak <josejx@gentoo.org> | ||
6 | * Larry Finger <Larry.Finger@lwfinger.net> | ||
7 | * Danny van Dyk <kugelfang@gentoo.org> | ||
8 | * Michael Buesch <mbuesch@freenet.de> | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify it | ||
11 | * under the terms of version 2 of the GNU General Public License as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
15 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
17 | * more details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License | ||
20 | * along with this program; if not, write to the Free Software | ||
21 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
22 | * | ||
23 | * The full GNU General Public License is included in this distribution in the | ||
24 | * file called COPYING. | ||
25 | */ | ||
26 | |||
27 | #include "ieee80211softmac_priv.h" | ||
28 | #include <linux/sort.h> | ||
29 | #include <linux/etherdevice.h> | ||
30 | |||
31 | struct net_device *alloc_ieee80211softmac(int sizeof_priv) | ||
32 | { | ||
33 | struct ieee80211softmac_device *softmac; | ||
34 | struct net_device *dev; | ||
35 | |||
36 | dev = alloc_ieee80211(sizeof(*softmac) + sizeof_priv); | ||
37 | if (!dev) | ||
38 | return NULL; | ||
39 | softmac = ieee80211_priv(dev); | ||
40 | softmac->wq = create_freezeable_workqueue("softmac"); | ||
41 | if (!softmac->wq) { | ||
42 | free_ieee80211(dev); | ||
43 | return NULL; | ||
44 | } | ||
45 | |||
46 | softmac->dev = dev; | ||
47 | softmac->ieee = netdev_priv(dev); | ||
48 | spin_lock_init(&softmac->lock); | ||
49 | |||
50 | softmac->ieee->handle_auth = ieee80211softmac_auth_resp; | ||
51 | softmac->ieee->handle_deauth = ieee80211softmac_deauth_resp; | ||
52 | softmac->ieee->handle_assoc_response = ieee80211softmac_handle_assoc_response; | ||
53 | softmac->ieee->handle_reassoc_request = ieee80211softmac_handle_reassoc_req; | ||
54 | softmac->ieee->handle_disassoc = ieee80211softmac_handle_disassoc; | ||
55 | softmac->ieee->handle_beacon = ieee80211softmac_handle_beacon; | ||
56 | softmac->scaninfo = NULL; | ||
57 | |||
58 | softmac->associnfo.scan_retry = IEEE80211SOFTMAC_ASSOC_SCAN_RETRY_LIMIT; | ||
59 | |||
60 | /* TODO: initialise all the other callbacks in the ieee struct | ||
61 | * (once they're written) | ||
62 | */ | ||
63 | |||
64 | INIT_LIST_HEAD(&softmac->auth_queue); | ||
65 | INIT_LIST_HEAD(&softmac->network_list); | ||
66 | INIT_LIST_HEAD(&softmac->events); | ||
67 | |||
68 | mutex_init(&softmac->associnfo.mutex); | ||
69 | INIT_DELAYED_WORK(&softmac->associnfo.work, ieee80211softmac_assoc_work); | ||
70 | INIT_DELAYED_WORK(&softmac->associnfo.timeout, ieee80211softmac_assoc_timeout); | ||
71 | softmac->start_scan = ieee80211softmac_start_scan_implementation; | ||
72 | softmac->wait_for_scan = ieee80211softmac_wait_for_scan_implementation; | ||
73 | softmac->stop_scan = ieee80211softmac_stop_scan_implementation; | ||
74 | |||
75 | /* to start with, we can't send anything ... */ | ||
76 | netif_carrier_off(dev); | ||
77 | |||
78 | return dev; | ||
79 | } | ||
80 | EXPORT_SYMBOL_GPL(alloc_ieee80211softmac); | ||
81 | |||
82 | /* Clears the pending work queue items, stops all scans, etc. */ | ||
83 | void | ||
84 | ieee80211softmac_clear_pending_work(struct ieee80211softmac_device *sm) | ||
85 | { | ||
86 | unsigned long flags; | ||
87 | struct ieee80211softmac_event *eventptr, *eventtmp; | ||
88 | struct ieee80211softmac_auth_queue_item *authptr, *authtmp; | ||
89 | struct ieee80211softmac_network *netptr, *nettmp; | ||
90 | |||
91 | ieee80211softmac_stop_scan(sm); | ||
92 | ieee80211softmac_wait_for_scan(sm); | ||
93 | |||
94 | spin_lock_irqsave(&sm->lock, flags); | ||
95 | sm->running = 0; | ||
96 | |||
97 | /* Free all pending assoc work items */ | ||
98 | cancel_delayed_work(&sm->associnfo.work); | ||
99 | |||
100 | /* Free all pending scan work items */ | ||
101 | if(sm->scaninfo != NULL) | ||
102 | cancel_delayed_work(&sm->scaninfo->softmac_scan); | ||
103 | |||
104 | /* Free all pending auth work items */ | ||
105 | list_for_each_entry(authptr, &sm->auth_queue, list) | ||
106 | cancel_delayed_work(&authptr->work); | ||
107 | |||
108 | /* delete all pending event calls and work items */ | ||
109 | list_for_each_entry_safe(eventptr, eventtmp, &sm->events, list) | ||
110 | cancel_delayed_work(&eventptr->work); | ||
111 | |||
112 | spin_unlock_irqrestore(&sm->lock, flags); | ||
113 | flush_workqueue(sm->wq); | ||
114 | |||
115 | /* now we should be safe and no longer need locking... */ | ||
116 | spin_lock_irqsave(&sm->lock, flags); | ||
117 | /* Free all pending auth work items */ | ||
118 | list_for_each_entry_safe(authptr, authtmp, &sm->auth_queue, list) { | ||
119 | list_del(&authptr->list); | ||
120 | kfree(authptr); | ||
121 | } | ||
122 | |||
123 | /* delete all pending event calls and work items */ | ||
124 | list_for_each_entry_safe(eventptr, eventtmp, &sm->events, list) { | ||
125 | list_del(&eventptr->list); | ||
126 | kfree(eventptr); | ||
127 | } | ||
128 | |||
129 | /* Free all networks */ | ||
130 | list_for_each_entry_safe(netptr, nettmp, &sm->network_list, list) { | ||
131 | ieee80211softmac_del_network_locked(sm, netptr); | ||
132 | if(netptr->challenge != NULL) | ||
133 | kfree(netptr->challenge); | ||
134 | kfree(netptr); | ||
135 | } | ||
136 | |||
137 | spin_unlock_irqrestore(&sm->lock, flags); | ||
138 | } | ||
139 | EXPORT_SYMBOL_GPL(ieee80211softmac_clear_pending_work); | ||
140 | |||
141 | void free_ieee80211softmac(struct net_device *dev) | ||
142 | { | ||
143 | struct ieee80211softmac_device *sm = ieee80211_priv(dev); | ||
144 | ieee80211softmac_clear_pending_work(sm); | ||
145 | kfree(sm->scaninfo); | ||
146 | kfree(sm->wpa.IE); | ||
147 | destroy_workqueue(sm->wq); | ||
148 | free_ieee80211(dev); | ||
149 | } | ||
150 | EXPORT_SYMBOL_GPL(free_ieee80211softmac); | ||
151 | |||
152 | static void ieee80211softmac_start_check_rates(struct ieee80211softmac_device *mac) | ||
153 | { | ||
154 | struct ieee80211softmac_ratesinfo *ri = &mac->ratesinfo; | ||
155 | /* I took out the sorting check; we're separating by modulation now. */ | ||
156 | if (ri->count) | ||
157 | return; | ||
158 | /* otherwise assume we hav'em all! */ | ||
159 | if (mac->ieee->modulation & IEEE80211_CCK_MODULATION) { | ||
160 | ri->rates[ri->count++] = IEEE80211_CCK_RATE_1MB; | ||
161 | ri->rates[ri->count++] = IEEE80211_CCK_RATE_2MB; | ||
162 | ri->rates[ri->count++] = IEEE80211_CCK_RATE_5MB; | ||
163 | ri->rates[ri->count++] = IEEE80211_CCK_RATE_11MB; | ||
164 | } | ||
165 | if (mac->ieee->modulation & IEEE80211_OFDM_MODULATION) { | ||
166 | ri->rates[ri->count++] = IEEE80211_OFDM_RATE_6MB; | ||
167 | ri->rates[ri->count++] = IEEE80211_OFDM_RATE_9MB; | ||
168 | ri->rates[ri->count++] = IEEE80211_OFDM_RATE_12MB; | ||
169 | ri->rates[ri->count++] = IEEE80211_OFDM_RATE_18MB; | ||
170 | ri->rates[ri->count++] = IEEE80211_OFDM_RATE_24MB; | ||
171 | ri->rates[ri->count++] = IEEE80211_OFDM_RATE_36MB; | ||
172 | ri->rates[ri->count++] = IEEE80211_OFDM_RATE_48MB; | ||
173 | ri->rates[ri->count++] = IEEE80211_OFDM_RATE_54MB; | ||
174 | } | ||
175 | } | ||
176 | |||
177 | int ieee80211softmac_ratesinfo_rate_supported(struct ieee80211softmac_ratesinfo *ri, u8 rate) | ||
178 | { | ||
179 | int search; | ||
180 | u8 search_rate; | ||
181 | |||
182 | for (search = 0; search < ri->count; search++) { | ||
183 | search_rate = ri->rates[search]; | ||
184 | search_rate &= ~IEEE80211_BASIC_RATE_MASK; | ||
185 | if (rate == search_rate) | ||
186 | return 1; | ||
187 | } | ||
188 | |||
189 | return 0; | ||
190 | } | ||
191 | |||
192 | u8 ieee80211softmac_highest_supported_rate(struct ieee80211softmac_device *mac, | ||
193 | struct ieee80211softmac_ratesinfo *ri, int basic_only) | ||
194 | { | ||
195 | u8 user_rate = mac->txrates.user_rate; | ||
196 | int i; | ||
197 | |||
198 | if (ri->count == 0) | ||
199 | return IEEE80211_CCK_RATE_1MB; | ||
200 | |||
201 | for (i = ri->count - 1; i >= 0; i--) { | ||
202 | u8 rate = ri->rates[i]; | ||
203 | if (basic_only && !(rate & IEEE80211_BASIC_RATE_MASK)) | ||
204 | continue; | ||
205 | rate &= ~IEEE80211_BASIC_RATE_MASK; | ||
206 | if (rate > user_rate) | ||
207 | continue; | ||
208 | if (ieee80211softmac_ratesinfo_rate_supported(&mac->ratesinfo, rate)) | ||
209 | return rate; | ||
210 | } | ||
211 | |||
212 | /* If we haven't found a suitable rate by now, just trust the user */ | ||
213 | return user_rate; | ||
214 | } | ||
215 | EXPORT_SYMBOL_GPL(ieee80211softmac_highest_supported_rate); | ||
216 | |||
217 | void ieee80211softmac_process_erp(struct ieee80211softmac_device *mac, | ||
218 | u8 erp_value) | ||
219 | { | ||
220 | int use_protection; | ||
221 | int short_preamble; | ||
222 | u32 changes = 0; | ||
223 | |||
224 | /* Barker preamble mode */ | ||
225 | short_preamble = ((erp_value & WLAN_ERP_BARKER_PREAMBLE) == 0 | ||
226 | && mac->associnfo.short_preamble_available) ? 1 : 0; | ||
227 | |||
228 | /* Protection needed? */ | ||
229 | use_protection = (erp_value & WLAN_ERP_USE_PROTECTION) != 0; | ||
230 | |||
231 | if (mac->bssinfo.short_preamble != short_preamble) { | ||
232 | changes |= IEEE80211SOFTMAC_BSSINFOCHG_SHORT_PREAMBLE; | ||
233 | mac->bssinfo.short_preamble = short_preamble; | ||
234 | } | ||
235 | |||
236 | if (mac->bssinfo.use_protection != use_protection) { | ||
237 | changes |= IEEE80211SOFTMAC_BSSINFOCHG_PROTECTION; | ||
238 | mac->bssinfo.use_protection = use_protection; | ||
239 | } | ||
240 | |||
241 | if (mac->bssinfo_change && changes) | ||
242 | mac->bssinfo_change(mac->dev, changes); | ||
243 | } | ||
244 | |||
245 | void ieee80211softmac_recalc_txrates(struct ieee80211softmac_device *mac) | ||
246 | { | ||
247 | struct ieee80211softmac_txrates *txrates = &mac->txrates; | ||
248 | u32 change = 0; | ||
249 | |||
250 | change |= IEEE80211SOFTMAC_TXRATECHG_DEFAULT; | ||
251 | txrates->default_rate = ieee80211softmac_highest_supported_rate(mac, &mac->bssinfo.supported_rates, 0); | ||
252 | |||
253 | change |= IEEE80211SOFTMAC_TXRATECHG_DEFAULT_FBACK; | ||
254 | txrates->default_fallback = lower_rate(mac, txrates->default_rate); | ||
255 | |||
256 | change |= IEEE80211SOFTMAC_TXRATECHG_MCAST; | ||
257 | txrates->mcast_rate = ieee80211softmac_highest_supported_rate(mac, &mac->bssinfo.supported_rates, 1); | ||
258 | |||
259 | if (mac->txrates_change) | ||
260 | mac->txrates_change(mac->dev, change); | ||
261 | |||
262 | } | ||
263 | |||
264 | void ieee80211softmac_init_bss(struct ieee80211softmac_device *mac) | ||
265 | { | ||
266 | struct ieee80211_device *ieee = mac->ieee; | ||
267 | u32 change = 0; | ||
268 | struct ieee80211softmac_txrates *txrates = &mac->txrates; | ||
269 | struct ieee80211softmac_bss_info *bssinfo = &mac->bssinfo; | ||
270 | |||
271 | /* TODO: We need some kind of state machine to lower the default rates | ||
272 | * if we lose too many packets. | ||
273 | */ | ||
274 | /* Change the default txrate to the highest possible value. | ||
275 | * The txrate machine will lower it, if it is too high. | ||
276 | */ | ||
277 | if (ieee->modulation & IEEE80211_OFDM_MODULATION) | ||
278 | txrates->user_rate = IEEE80211_OFDM_RATE_24MB; | ||
279 | else | ||
280 | txrates->user_rate = IEEE80211_CCK_RATE_11MB; | ||
281 | |||
282 | txrates->default_rate = IEEE80211_CCK_RATE_1MB; | ||
283 | change |= IEEE80211SOFTMAC_TXRATECHG_DEFAULT; | ||
284 | |||
285 | txrates->default_fallback = IEEE80211_CCK_RATE_1MB; | ||
286 | change |= IEEE80211SOFTMAC_TXRATECHG_DEFAULT_FBACK; | ||
287 | |||
288 | txrates->mcast_rate = IEEE80211_CCK_RATE_1MB; | ||
289 | change |= IEEE80211SOFTMAC_TXRATECHG_MCAST; | ||
290 | |||
291 | txrates->mgt_mcast_rate = IEEE80211_CCK_RATE_1MB; | ||
292 | change |= IEEE80211SOFTMAC_TXRATECHG_MGT_MCAST; | ||
293 | |||
294 | if (mac->txrates_change) | ||
295 | mac->txrates_change(mac->dev, change); | ||
296 | |||
297 | change = 0; | ||
298 | |||
299 | bssinfo->supported_rates.count = 0; | ||
300 | memset(bssinfo->supported_rates.rates, 0, | ||
301 | sizeof(bssinfo->supported_rates.rates)); | ||
302 | change |= IEEE80211SOFTMAC_BSSINFOCHG_RATES; | ||
303 | |||
304 | bssinfo->short_preamble = 0; | ||
305 | change |= IEEE80211SOFTMAC_BSSINFOCHG_SHORT_PREAMBLE; | ||
306 | |||
307 | bssinfo->use_protection = 0; | ||
308 | change |= IEEE80211SOFTMAC_BSSINFOCHG_PROTECTION; | ||
309 | |||
310 | if (mac->bssinfo_change) | ||
311 | mac->bssinfo_change(mac->dev, change); | ||
312 | |||
313 | mac->running = 1; | ||
314 | } | ||
315 | |||
316 | void ieee80211softmac_start(struct net_device *dev) | ||
317 | { | ||
318 | struct ieee80211softmac_device *mac = ieee80211_priv(dev); | ||
319 | |||
320 | ieee80211softmac_start_check_rates(mac); | ||
321 | ieee80211softmac_init_bss(mac); | ||
322 | } | ||
323 | EXPORT_SYMBOL_GPL(ieee80211softmac_start); | ||
324 | |||
325 | void ieee80211softmac_stop(struct net_device *dev) | ||
326 | { | ||
327 | struct ieee80211softmac_device *mac = ieee80211_priv(dev); | ||
328 | |||
329 | ieee80211softmac_clear_pending_work(mac); | ||
330 | } | ||
331 | EXPORT_SYMBOL_GPL(ieee80211softmac_stop); | ||
332 | |||
333 | void ieee80211softmac_set_rates(struct net_device *dev, u8 count, u8 *rates) | ||
334 | { | ||
335 | struct ieee80211softmac_device *mac = ieee80211_priv(dev); | ||
336 | unsigned long flags; | ||
337 | |||
338 | spin_lock_irqsave(&mac->lock, flags); | ||
339 | memcpy(mac->ratesinfo.rates, rates, count); | ||
340 | mac->ratesinfo.count = count; | ||
341 | spin_unlock_irqrestore(&mac->lock, flags); | ||
342 | } | ||
343 | EXPORT_SYMBOL_GPL(ieee80211softmac_set_rates); | ||
344 | |||
345 | static u8 raise_rate(struct ieee80211softmac_device *mac, u8 rate) | ||
346 | { | ||
347 | int i; | ||
348 | struct ieee80211softmac_ratesinfo *ri = &mac->ratesinfo; | ||
349 | |||
350 | for (i=0; i<ri->count-1; i++) { | ||
351 | if (ri->rates[i] == rate) | ||
352 | return ri->rates[i+1]; | ||
353 | } | ||
354 | /* I guess we can't go any higher... */ | ||
355 | return ri->rates[ri->count - 1]; | ||
356 | } | ||
357 | |||
358 | u8 ieee80211softmac_lower_rate_delta(struct ieee80211softmac_device *mac, u8 rate, int delta) | ||
359 | { | ||
360 | int i; | ||
361 | struct ieee80211softmac_ratesinfo *ri = &mac->ratesinfo; | ||
362 | |||
363 | for (i=delta; i<ri->count; i++) { | ||
364 | if (ri->rates[i] == rate) | ||
365 | return ri->rates[i-delta]; | ||
366 | } | ||
367 | /* I guess we can't go any lower... */ | ||
368 | return ri->rates[0]; | ||
369 | } | ||
370 | |||
371 | static void ieee80211softmac_add_txrates_badness(struct ieee80211softmac_device *mac, | ||
372 | int amount) | ||
373 | { | ||
374 | u8 default_rate = mac->txrates.default_rate; | ||
375 | u8 default_fallback = mac->txrates.default_fallback; | ||
376 | u32 changes = 0; | ||
377 | |||
378 | //TODO: This is highly experimental code. | ||
379 | // Maybe the dynamic rate selection does not work | ||
380 | // and it has to be removed again. | ||
381 | |||
382 | printk("badness %d\n", mac->txrate_badness); | ||
383 | mac->txrate_badness += amount; | ||
384 | if (mac->txrate_badness <= -1000) { | ||
385 | /* Very small badness. Try a faster bitrate. */ | ||
386 | default_rate = raise_rate(mac, default_rate); | ||
387 | changes |= IEEE80211SOFTMAC_TXRATECHG_DEFAULT; | ||
388 | default_fallback = get_fallback_rate(mac, default_rate); | ||
389 | changes |= IEEE80211SOFTMAC_TXRATECHG_DEFAULT_FBACK; | ||
390 | mac->txrate_badness = 0; | ||
391 | printk("Bitrate raised to %u\n", default_rate); | ||
392 | } else if (mac->txrate_badness >= 10000) { | ||
393 | /* Very high badness. Try a slower bitrate. */ | ||
394 | default_rate = lower_rate(mac, default_rate); | ||
395 | changes |= IEEE80211SOFTMAC_TXRATECHG_DEFAULT; | ||
396 | default_fallback = get_fallback_rate(mac, default_rate); | ||
397 | changes |= IEEE80211SOFTMAC_TXRATECHG_DEFAULT_FBACK; | ||
398 | mac->txrate_badness = 0; | ||
399 | printk("Bitrate lowered to %u\n", default_rate); | ||
400 | } | ||
401 | |||
402 | mac->txrates.default_rate = default_rate; | ||
403 | mac->txrates.default_fallback = default_fallback; | ||
404 | |||
405 | if (changes && mac->txrates_change) | ||
406 | mac->txrates_change(mac->dev, changes); | ||
407 | } | ||
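The badness counter above acts as a hysteresis: a lost fragment adds 1000 (see ieee80211softmac_fragment_lost() below), a decrement path is still marked TODO, and the bitrate only moves once the counter crosses +10000 (slow down) or -1000 (speed up), resetting afterwards. A minimal stand-alone sketch of that behaviour, using an invented 0..3 rate index instead of real rate codes:

        #include <stdio.h>

        /* Illustrative only: thresholds copied from the function above, but the
         * rate "table" is just an index 0..3, not real ieee80211 rate codes. */
        static int badness;
        static int rate_idx = 3;        /* 0 = slowest, 3 = fastest */

        static void add_badness(int amount)
        {
                badness += amount;
                if (badness <= -1000) {         /* going well: try a faster rate */
                        if (rate_idx < 3)
                                rate_idx++;
                        badness = 0;
                } else if (badness >= 10000) {  /* too many losses: slow down */
                        if (rate_idx > 0)
                                rate_idx--;
                        badness = 0;
                }
        }

        int main(void)
        {
                int i;

                for (i = 0; i < 12; i++)
                        add_badness(1000);      /* one lost fragment adds 1000 */
                printf("rate index after 12 lost fragments: %d\n", rate_idx);   /* 2 */
                return 0;
        }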
408 | |||
409 | void ieee80211softmac_fragment_lost(struct net_device *dev, | ||
410 | u16 wl_seq) | ||
411 | { | ||
412 | struct ieee80211softmac_device *mac = ieee80211_priv(dev); | ||
413 | unsigned long flags; | ||
414 | |||
415 | spin_lock_irqsave(&mac->lock, flags); | ||
416 | ieee80211softmac_add_txrates_badness(mac, 1000); | ||
417 | //TODO | ||
418 | |||
419 | spin_unlock_irqrestore(&mac->lock, flags); | ||
420 | } | ||
421 | EXPORT_SYMBOL_GPL(ieee80211softmac_fragment_lost); | ||
422 | |||
423 | static int rate_cmp(const void *a_, const void *b_) { | ||
424 | u8 *a, *b; | ||
425 | a = (u8*)a_; | ||
426 | b = (u8*)b_; | ||
427 | return ((*a & ~IEEE80211_BASIC_RATE_MASK) - (*b & ~IEEE80211_BASIC_RATE_MASK)); | ||
428 | } | ||
429 | |||
430 | /* Allocate a softmac network struct and fill it from a network */ | ||
431 | struct ieee80211softmac_network * | ||
432 | ieee80211softmac_create_network(struct ieee80211softmac_device *mac, | ||
433 | struct ieee80211_network *net) | ||
434 | { | ||
435 | struct ieee80211softmac_network *softnet; | ||
436 | softnet = kzalloc(sizeof(struct ieee80211softmac_network), GFP_ATOMIC); | ||
437 | if(softnet == NULL) | ||
438 | return NULL; | ||
439 | memcpy(softnet->bssid, net->bssid, ETH_ALEN); | ||
440 | softnet->channel = net->channel; | ||
441 | softnet->essid.len = net->ssid_len; | ||
442 | memcpy(softnet->essid.data, net->ssid, softnet->essid.len); | ||
443 | |||
444 | /* copy rates over */ | ||
445 | softnet->supported_rates.count = net->rates_len; | ||
446 | memcpy(&softnet->supported_rates.rates[0], net->rates, net->rates_len); | ||
447 | memcpy(&softnet->supported_rates.rates[softnet->supported_rates.count], net->rates_ex, net->rates_ex_len); | ||
448 | softnet->supported_rates.count += net->rates_ex_len; | ||
449 | sort(softnet->supported_rates.rates, softnet->supported_rates.count, sizeof(softnet->supported_rates.rates[0]), rate_cmp, NULL); | ||
450 | |||
451 | /* we save the ERP value because it is needed at association time, and | ||
452 | * many APs do not include an ERP IE in the association response. */ | ||
453 | softnet->erp_value = net->erp_value; | ||
454 | |||
455 | softnet->capabilities = net->capability; | ||
456 | return softnet; | ||
457 | } | ||
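The copy above concatenates the plain and extended rate sets and sorts them with rate_cmp(), which masks off the basic-rate flag so ordering is by speed alone. A user-space sketch of the same comparator, assuming the common encoding where rates are given in 500 kbit/s units and the high bit (0x80) marks a basic rate:

        #include <stdio.h>
        #include <stdlib.h>

        #define BASIC_RATE_MASK 0x80    /* assumed: high bit flags a basic rate */

        /* compare rates by value, ignoring the basic-rate flag bit */
        static int rate_cmp(const void *a_, const void *b_)
        {
                const unsigned char *a = a_, *b = b_;

                return (*a & ~BASIC_RATE_MASK) - (*b & ~BASIC_RATE_MASK);
        }

        int main(void)
        {
                /* 11, 1, 5.5 and 2 Mb/s in 500 kbit/s units; some flagged basic */
                unsigned char rates[] = { 0x96, 0x82, 0x0b, 0x84 };
                size_t i, n = sizeof(rates) / sizeof(rates[0]);

                qsort(rates, n, sizeof(rates[0]), rate_cmp);
                for (i = 0; i < n; i++)
                        printf("0x%02x ", rates[i]);
                printf("\n");
                return 0;
        }

Sorted output is 0x82 0x84 0x0b 0x96, i.e. 1, 2, 5.5 and 11 Mb/s in ascending order regardless of which entries carry the basic-rate flag.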
458 | |||
459 | |||
460 | /* Add a network to the list, while locked */ | ||
461 | void | ||
462 | ieee80211softmac_add_network_locked(struct ieee80211softmac_device *mac, | ||
463 | struct ieee80211softmac_network *add_net) | ||
464 | { | ||
465 | struct ieee80211softmac_network *softmac_net; | ||
466 | |||
467 | list_for_each_entry(softmac_net, &mac->network_list, list) { | ||
468 | if(!memcmp(softmac_net->bssid, add_net->bssid, ETH_ALEN)) | ||
469 | return; | ||
470 | } | ||
471 | list_add(&(add_net->list), &mac->network_list); | ||
472 | } | ||
473 | |||
474 | /* Add a network to the list, with locking */ | ||
475 | void | ||
476 | ieee80211softmac_add_network(struct ieee80211softmac_device *mac, | ||
477 | struct ieee80211softmac_network *add_net) | ||
478 | { | ||
479 | unsigned long flags; | ||
480 | spin_lock_irqsave(&mac->lock, flags); | ||
481 | ieee80211softmac_add_network_locked(mac, add_net); | ||
482 | spin_unlock_irqrestore(&mac->lock, flags); | ||
483 | } | ||
484 | |||
485 | |||
486 | /* Delete a network from the list, while locked */ | ||
487 | void | ||
488 | ieee80211softmac_del_network_locked(struct ieee80211softmac_device *mac, | ||
489 | struct ieee80211softmac_network *del_net) | ||
490 | { | ||
491 | list_del(&(del_net->list)); | ||
492 | } | ||
493 | |||
494 | /* Delete a network from the list with locking */ | ||
495 | void | ||
496 | ieee80211softmac_del_network(struct ieee80211softmac_device *mac, | ||
497 | struct ieee80211softmac_network *del_net) | ||
498 | { | ||
499 | unsigned long flags; | ||
500 | spin_lock_irqsave(&mac->lock, flags); | ||
501 | ieee80211softmac_del_network_locked(mac, del_net); | ||
502 | spin_unlock_irqrestore(&mac->lock, flags); | ||
503 | } | ||
504 | |||
505 | /* Get a network from the list by BSSID while locked */ | ||
506 | struct ieee80211softmac_network * | ||
507 | ieee80211softmac_get_network_by_bssid_locked(struct ieee80211softmac_device *mac, | ||
508 | u8 *bssid) | ||
509 | { | ||
510 | struct ieee80211softmac_network *softmac_net; | ||
511 | |||
512 | list_for_each_entry(softmac_net, &mac->network_list, list) { | ||
513 | if(!memcmp(softmac_net->bssid, bssid, ETH_ALEN)) | ||
514 | return softmac_net; | ||
515 | } | ||
516 | return NULL; | ||
517 | } | ||
518 | |||
519 | /* Get a network from the list by BSSID with locking */ | ||
520 | struct ieee80211softmac_network * | ||
521 | ieee80211softmac_get_network_by_bssid(struct ieee80211softmac_device *mac, | ||
522 | u8 *bssid) | ||
523 | { | ||
524 | unsigned long flags; | ||
525 | struct ieee80211softmac_network *softmac_net; | ||
526 | |||
527 | spin_lock_irqsave(&mac->lock, flags); | ||
528 | softmac_net = ieee80211softmac_get_network_by_bssid_locked(mac, bssid); | ||
529 | spin_unlock_irqrestore(&mac->lock, flags); | ||
530 | return softmac_net; | ||
531 | } | ||
532 | |||
533 | /* Get a network from the list by ESSID while locked */ | ||
534 | struct ieee80211softmac_network * | ||
535 | ieee80211softmac_get_network_by_essid_locked(struct ieee80211softmac_device *mac, | ||
536 | struct ieee80211softmac_essid *essid) | ||
537 | { | ||
538 | struct ieee80211softmac_network *softmac_net; | ||
539 | |||
540 | list_for_each_entry(softmac_net, &mac->network_list, list) { | ||
541 | if (softmac_net->essid.len == essid->len && | ||
542 | !memcmp(softmac_net->essid.data, essid->data, essid->len)) | ||
543 | return softmac_net; | ||
544 | } | ||
545 | return NULL; | ||
546 | } | ||
547 | |||
548 | /* Get a network from the list by ESSID with locking */ | ||
549 | struct ieee80211softmac_network * | ||
550 | ieee80211softmac_get_network_by_essid(struct ieee80211softmac_device *mac, | ||
551 | struct ieee80211softmac_essid *essid) | ||
552 | { | ||
553 | unsigned long flags; | ||
554 | struct ieee80211softmac_network *softmac_net = NULL; | ||
555 | |||
556 | spin_lock_irqsave(&mac->lock, flags); | ||
557 | softmac_net = ieee80211softmac_get_network_by_essid_locked(mac, essid); | ||
558 | spin_unlock_irqrestore(&mac->lock, flags); | ||
559 | return softmac_net; | ||
560 | } | ||
561 | |||
562 | MODULE_LICENSE("GPL"); | ||
563 | MODULE_AUTHOR("Johannes Berg"); | ||
564 | MODULE_AUTHOR("Joseph Jezak"); | ||
565 | MODULE_AUTHOR("Larry Finger"); | ||
566 | MODULE_AUTHOR("Danny van Dyk"); | ||
567 | MODULE_AUTHOR("Michael Buesch"); | ||
568 | MODULE_DESCRIPTION("802.11 software MAC"); | ||
diff --git a/net/ieee80211/softmac/ieee80211softmac_priv.h b/net/ieee80211/softmac/ieee80211softmac_priv.h deleted file mode 100644 index c43b189634df..000000000000 --- a/net/ieee80211/softmac/ieee80211softmac_priv.h +++ /dev/null | |||
@@ -1,244 +0,0 @@ | |||
1 | /* | ||
2 | * Internal softmac API definitions. | ||
3 | * | ||
4 | * Copyright (c) 2005, 2006 Johannes Berg <johannes@sipsolutions.net> | ||
5 | * Joseph Jezak <josejx@gentoo.org> | ||
6 | * Larry Finger <Larry.Finger@lwfinger.net> | ||
7 | * Danny van Dyk <kugelfang@gentoo.org> | ||
8 | * Michael Buesch <mbuesch@freenet.de> | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify it | ||
11 | * under the terms of version 2 of the GNU General Public License as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
15 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
17 | * more details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License | ||
20 | * along with this program; if not, write to the Free Software | ||
21 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
22 | * | ||
23 | * The full GNU General Public License is included in this distribution in the | ||
24 | * file called COPYING. | ||
25 | */ | ||
26 | |||
27 | #ifndef IEEE80211SOFTMAC_PRIV_H_ | ||
28 | #define IEEE80211SOFTMAC_PRIV_H_ | ||
29 | |||
30 | #include <net/ieee80211softmac.h> | ||
31 | #include <net/ieee80211softmac_wx.h> | ||
32 | #include <linux/kernel.h> | ||
33 | #include <linux/stringify.h> | ||
34 | |||
35 | |||
36 | #define PFX "SoftMAC: " | ||
37 | |||
38 | #ifdef assert | ||
39 | # undef assert | ||
40 | #endif | ||
41 | #ifdef CONFIG_IEEE80211_SOFTMAC_DEBUG | ||
42 | #define assert(expr) \ | ||
43 | do { \ | ||
44 | if (unlikely(!(expr))) { \ | ||
45 | printkl(KERN_ERR PFX "ASSERTION FAILED (%s) at: %s:%d:%s()\n", #expr, \ | ||
46 | __FILE__, __LINE__, __FUNCTION__); \ | ||
47 | } \ | ||
48 | } while (0) | ||
49 | #else | ||
50 | #define assert(expr) do {} while (0) | ||
51 | #endif | ||
52 | |||
53 | /* rate limited printk(). */ | ||
54 | #ifdef printkl | ||
55 | # undef printkl | ||
56 | #endif | ||
57 | #define printkl(f, x...) do { if (printk_ratelimit()) printk(f ,##x); } while (0) | ||
58 | /* rate limited printk() for debugging */ | ||
59 | #ifdef dprintkl | ||
60 | # undef dprintkl | ||
61 | #endif | ||
62 | #ifdef CONFIG_IEEE80211_SOFTMAC_DEBUG | ||
63 | # define dprintkl printkl | ||
64 | #else | ||
65 | # define dprintkl(f, x...) do { /* nothing */ } while (0) | ||
66 | #endif | ||
67 | |||
68 | /* debugging printk() */ | ||
69 | #ifdef dprintk | ||
70 | # undef dprintk | ||
71 | #endif | ||
72 | #ifdef CONFIG_IEEE80211_SOFTMAC_DEBUG | ||
73 | # define dprintk(f, x...) do { printk(f ,##x); } while (0) | ||
74 | #else | ||
75 | # define dprintk(f, x...) do { /* nothing */ } while (0) | ||
76 | #endif | ||
77 | |||
78 | /* private definitions and prototypes */ | ||
79 | |||
80 | /*** prototypes from _scan.c */ | ||
81 | void ieee80211softmac_scan(struct work_struct *work); | ||
82 | /* for internal use if scanning is needed */ | ||
83 | int ieee80211softmac_start_scan(struct ieee80211softmac_device *mac); | ||
84 | void ieee80211softmac_stop_scan(struct ieee80211softmac_device *mac); | ||
85 | void ieee80211softmac_wait_for_scan(struct ieee80211softmac_device *mac); | ||
86 | |||
87 | /* for use by _module.c to assign to the callbacks */ | ||
88 | int ieee80211softmac_start_scan_implementation(struct net_device *dev); | ||
89 | void ieee80211softmac_stop_scan_implementation(struct net_device *dev); | ||
90 | void ieee80211softmac_wait_for_scan_implementation(struct net_device *dev); | ||
91 | |||
92 | /*** Network prototypes from _module.c */ | ||
93 | struct ieee80211softmac_network * ieee80211softmac_create_network( | ||
94 | struct ieee80211softmac_device *mac, struct ieee80211_network *net); | ||
95 | void ieee80211softmac_add_network_locked(struct ieee80211softmac_device *mac, | ||
96 | struct ieee80211softmac_network *net); | ||
97 | void ieee80211softmac_add_network(struct ieee80211softmac_device *mac, | ||
98 | struct ieee80211softmac_network *net); | ||
99 | void ieee80211softmac_del_network_locked(struct ieee80211softmac_device *mac, | ||
100 | struct ieee80211softmac_network *net); | ||
101 | void ieee80211softmac_del_network(struct ieee80211softmac_device *mac, | ||
102 | struct ieee80211softmac_network *net); | ||
103 | struct ieee80211softmac_network * ieee80211softmac_get_network_by_bssid_locked( | ||
104 | struct ieee80211softmac_device *mac, u8 *ea); | ||
105 | struct ieee80211softmac_network * ieee80211softmac_get_network_by_bssid( | ||
106 | struct ieee80211softmac_device *mac, u8 *ea); | ||
107 | struct ieee80211softmac_network * ieee80211softmac_get_network_by_ssid_locked( | ||
108 | struct ieee80211softmac_device *mac, u8 *ssid, u8 ssid_len); | ||
109 | struct ieee80211softmac_network * ieee80211softmac_get_network_by_ssid( | ||
110 | struct ieee80211softmac_device *mac, u8 *ssid, u8 ssid_len); | ||
111 | struct ieee80211softmac_network * | ||
112 | ieee80211softmac_get_network_by_essid_locked(struct ieee80211softmac_device *mac, | ||
113 | struct ieee80211softmac_essid *essid); | ||
114 | struct ieee80211softmac_network * | ||
115 | ieee80211softmac_get_network_by_essid(struct ieee80211softmac_device *mac, | ||
116 | struct ieee80211softmac_essid *essid); | ||
117 | |||
118 | /* Rates related */ | ||
119 | void ieee80211softmac_process_erp(struct ieee80211softmac_device *mac, | ||
120 | u8 erp_value); | ||
121 | int ieee80211softmac_ratesinfo_rate_supported(struct ieee80211softmac_ratesinfo *ri, u8 rate); | ||
122 | u8 ieee80211softmac_lower_rate_delta(struct ieee80211softmac_device *mac, u8 rate, int delta); | ||
123 | void ieee80211softmac_init_bss(struct ieee80211softmac_device *mac); | ||
124 | void ieee80211softmac_recalc_txrates(struct ieee80211softmac_device *mac); | ||
125 | static inline u8 lower_rate(struct ieee80211softmac_device *mac, u8 rate) { | ||
126 | return ieee80211softmac_lower_rate_delta(mac, rate, 1); | ||
127 | } | ||
128 | |||
129 | static inline u8 get_fallback_rate(struct ieee80211softmac_device *mac, u8 rate) | ||
130 | { | ||
131 | return ieee80211softmac_lower_rate_delta(mac, rate, 2); | ||
132 | } | ||
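Both inline helpers above step down the sorted rate table held in mac->ratesinfo: one entry for lower_rate(), two for get_fallback_rate(), clamping at the slowest entry. A stand-alone sketch with a hypothetical 1/2/5.5/11 Mb/s table (rate codes in 500 kbit/s units):

        #include <stdio.h>

        /* hypothetical sorted rate table: 1, 2, 5.5, 11 Mb/s in 500 kbit/s units */
        static const unsigned char rates[] = { 2, 4, 11, 22 };
        static const int count = sizeof(rates) / sizeof(rates[0]);

        /* step 'delta' entries down the table; clamp at the slowest rate */
        static unsigned char lower_rate_delta(unsigned char rate, int delta)
        {
                int i;

                for (i = delta; i < count; i++)
                        if (rates[i] == rate)
                                return rates[i - delta];
                return rates[0];
        }

        int main(void)
        {
                unsigned char rate = 22;        /* 11 Mb/s */

                printf("lower: %u, fallback: %u\n",
                       lower_rate_delta(rate, 1),       /* 11 -> 5.5 Mb/s (code 11) */
                       lower_rate_delta(rate, 2));      /* 11 -> 2   Mb/s (code 4)  */
                return 0;
        }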
133 | |||
134 | |||
135 | /*** prototypes from _io.c */ | ||
136 | int ieee80211softmac_send_mgt_frame(struct ieee80211softmac_device *mac, | ||
137 | void* ptrarg, u32 type, u32 arg); | ||
138 | int ieee80211softmac_handle_beacon(struct net_device *dev, | ||
139 | struct ieee80211_beacon *beacon, | ||
140 | struct ieee80211_network *network); | ||
141 | |||
142 | /*** prototypes from _auth.c */ | ||
143 | /* do these have to go into the public header? */ | ||
144 | int ieee80211softmac_auth_req(struct ieee80211softmac_device *mac, struct ieee80211softmac_network *net); | ||
145 | int ieee80211softmac_deauth_req(struct ieee80211softmac_device *mac, struct ieee80211softmac_network *net, int reason); | ||
146 | |||
147 | /* for use by _module.c to assign to the callbacks */ | ||
148 | int ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *auth); | ||
149 | int ieee80211softmac_deauth_resp(struct net_device *dev, struct ieee80211_deauth *deauth); | ||
150 | |||
151 | /*** prototypes from _assoc.c */ | ||
152 | void ieee80211softmac_assoc_work(struct work_struct *work); | ||
153 | int ieee80211softmac_handle_assoc_response(struct net_device * dev, | ||
154 | struct ieee80211_assoc_response * resp, | ||
155 | struct ieee80211_network * network); | ||
156 | int ieee80211softmac_handle_disassoc(struct net_device * dev, | ||
157 | struct ieee80211_disassoc * disassoc); | ||
158 | int ieee80211softmac_handle_reassoc_req(struct net_device * dev, | ||
159 | struct ieee80211_reassoc_request * reassoc); | ||
160 | void ieee80211softmac_assoc_timeout(struct work_struct *work); | ||
161 | void ieee80211softmac_send_disassoc_req(struct ieee80211softmac_device *mac, u16 reason); | ||
162 | void ieee80211softmac_disassoc(struct ieee80211softmac_device *mac); | ||
163 | |||
164 | /* some helper functions */ | ||
165 | static inline int ieee80211softmac_scan_handlers_check_self(struct ieee80211softmac_device *sm) | ||
166 | { | ||
167 | return (sm->start_scan == ieee80211softmac_start_scan_implementation) && | ||
168 | (sm->stop_scan == ieee80211softmac_stop_scan_implementation) && | ||
169 | (sm->wait_for_scan == ieee80211softmac_wait_for_scan_implementation); | ||
170 | } | ||
171 | |||
172 | static inline int ieee80211softmac_scan_sanity_check(struct ieee80211softmac_device *sm) | ||
173 | { | ||
174 | return ((sm->start_scan != ieee80211softmac_start_scan_implementation) && | ||
175 | (sm->stop_scan != ieee80211softmac_stop_scan_implementation) && | ||
176 | (sm->wait_for_scan != ieee80211softmac_wait_for_scan_implementation) | ||
177 | ) || ieee80211softmac_scan_handlers_check_self(sm); | ||
178 | } | ||
179 | |||
180 | #define IEEE80211SOFTMAC_PROBE_DELAY HZ/50 | ||
181 | #define IEEE80211SOFTMAC_WORKQUEUE_NAME_LEN (17 + IFNAMSIZ) | ||
182 | |||
183 | struct ieee80211softmac_network { | ||
184 | struct list_head list; /* List */ | ||
185 | /* Network information copied from ieee80211_network */ | ||
186 | u8 bssid[ETH_ALEN]; | ||
187 | u8 channel; | ||
188 | struct ieee80211softmac_essid essid; | ||
189 | |||
190 | struct ieee80211softmac_ratesinfo supported_rates; | ||
191 | |||
192 | /* SoftMAC specific */ | ||
193 | u16 authenticating:1, /* Status Flags */ | ||
194 | authenticated:1, | ||
195 | auth_desynced_once:1; | ||
196 | |||
197 | u8 erp_value; /* Saved ERP value */ | ||
198 | u16 capabilities; /* Capabilities bitfield */ | ||
199 | u8 challenge_len; /* Auth Challenge length */ | ||
200 | char *challenge; /* Challenge Text */ | ||
201 | }; | ||
202 | |||
203 | /* structure used to keep track of networks we're auth'ing to */ | ||
204 | struct ieee80211softmac_auth_queue_item { | ||
205 | struct list_head list; /* List head */ | ||
206 | struct ieee80211softmac_network *net; /* Network to auth */ | ||
207 | struct ieee80211softmac_device *mac; /* SoftMAC device */ | ||
208 | u8 retry; /* Retry limit */ | ||
209 | u8 state; /* Auth State */ | ||
210 | struct delayed_work work; /* Work queue */ | ||
211 | }; | ||
212 | |||
213 | /* scanning information */ | ||
214 | struct ieee80211softmac_scaninfo { | ||
215 | u8 current_channel_idx, | ||
216 | number_channels; | ||
217 | struct ieee80211_channel *channels; | ||
218 | u8 started:1, | ||
219 | stop:1; | ||
220 | u8 skip_flags; | ||
221 | struct completion finished; | ||
222 | struct delayed_work softmac_scan; | ||
223 | struct ieee80211softmac_device *mac; | ||
224 | }; | ||
225 | |||
226 | /* private event struct */ | ||
227 | struct ieee80211softmac_event { | ||
228 | struct list_head list; | ||
229 | int event_type; | ||
230 | void *event_context; | ||
231 | struct delayed_work work; | ||
232 | notify_function_ptr fun; | ||
233 | void *context; | ||
234 | struct ieee80211softmac_device *mac; | ||
235 | }; | ||
236 | |||
237 | void ieee80211softmac_call_events(struct ieee80211softmac_device *mac, int event, void *event_context); | ||
238 | void ieee80211softmac_call_events_locked(struct ieee80211softmac_device *mac, int event, void *event_context); | ||
239 | int ieee80211softmac_notify_internal(struct ieee80211softmac_device *mac, | ||
240 | int event, void *event_context, notify_function_ptr fun, void *context, gfp_t gfp_mask); | ||
241 | |||
242 | void ieee80211softmac_try_reassoc(struct ieee80211softmac_device *mac); | ||
243 | |||
244 | #endif /* IEEE80211SOFTMAC_PRIV_H_ */ | ||
diff --git a/net/ieee80211/softmac/ieee80211softmac_scan.c b/net/ieee80211/softmac/ieee80211softmac_scan.c deleted file mode 100644 index bfab8d7db88f..000000000000 --- a/net/ieee80211/softmac/ieee80211softmac_scan.c +++ /dev/null | |||
@@ -1,254 +0,0 @@ | |||
1 | /* | ||
2 | * Scanning routines. | ||
3 | * | ||
4 | * These are not exported because they're assigned to the function pointers. | ||
5 | * | ||
6 | * Copyright (c) 2005, 2006 Johannes Berg <johannes@sipsolutions.net> | ||
7 | * Joseph Jezak <josejx@gentoo.org> | ||
8 | * Larry Finger <Larry.Finger@lwfinger.net> | ||
9 | * Danny van Dyk <kugelfang@gentoo.org> | ||
10 | * Michael Buesch <mbuesch@freenet.de> | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or modify it | ||
13 | * under the terms of version 2 of the GNU General Public License as | ||
14 | * published by the Free Software Foundation. | ||
15 | * | ||
16 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
17 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
18 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
19 | * more details. | ||
20 | * | ||
21 | * You should have received a copy of the GNU General Public License | ||
22 | * along with this program; if not, write to the Free Software | ||
23 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
24 | * | ||
25 | * The full GNU General Public License is included in this distribution in the | ||
26 | * file called COPYING. | ||
27 | */ | ||
28 | |||
29 | #include <linux/completion.h> | ||
30 | #include "ieee80211softmac_priv.h" | ||
31 | |||
32 | /* internal, use to trigger scanning if needed. | ||
33 | * Returns -EINPROGRESS if already scanning, | ||
34 | * result of start_scan otherwise */ | ||
35 | int | ||
36 | ieee80211softmac_start_scan(struct ieee80211softmac_device *sm) | ||
37 | { | ||
38 | unsigned long flags; | ||
39 | int ret; | ||
40 | |||
41 | spin_lock_irqsave(&sm->lock, flags); | ||
42 | if (sm->scanning) | ||
43 | { | ||
44 | spin_unlock_irqrestore(&sm->lock, flags); | ||
45 | return -EINPROGRESS; | ||
46 | } | ||
47 | sm->scanning = 1; | ||
48 | spin_unlock_irqrestore(&sm->lock, flags); | ||
49 | |||
50 | ret = sm->start_scan(sm->dev); | ||
51 | if (ret) { | ||
52 | spin_lock_irqsave(&sm->lock, flags); | ||
53 | sm->scanning = 0; | ||
54 | spin_unlock_irqrestore(&sm->lock, flags); | ||
55 | } | ||
56 | return ret; | ||
57 | } | ||
58 | |||
59 | void | ||
60 | ieee80211softmac_stop_scan(struct ieee80211softmac_device *sm) | ||
61 | { | ||
62 | unsigned long flags; | ||
63 | |||
64 | spin_lock_irqsave(&sm->lock, flags); | ||
65 | |||
66 | if (!sm->scanning) { | ||
67 | spin_unlock_irqrestore(&sm->lock, flags); | ||
68 | return; | ||
69 | } | ||
70 | |||
71 | spin_unlock_irqrestore(&sm->lock, flags); | ||
72 | sm->stop_scan(sm->dev); | ||
73 | } | ||
74 | |||
75 | void | ||
76 | ieee80211softmac_wait_for_scan(struct ieee80211softmac_device *sm) | ||
77 | { | ||
78 | unsigned long flags; | ||
79 | |||
80 | spin_lock_irqsave(&sm->lock, flags); | ||
81 | |||
82 | if (!sm->scanning) { | ||
83 | spin_unlock_irqrestore(&sm->lock, flags); | ||
84 | return; | ||
85 | } | ||
86 | |||
87 | spin_unlock_irqrestore(&sm->lock, flags); | ||
88 | sm->wait_for_scan(sm->dev); | ||
89 | } | ||
90 | |||
91 | |||
92 | /* internal scanning implementation follows */ | ||
93 | void ieee80211softmac_scan(struct work_struct *work) | ||
94 | { | ||
95 | int invalid_channel; | ||
96 | u8 current_channel_idx; | ||
97 | struct ieee80211softmac_scaninfo *si = | ||
98 | container_of(work, struct ieee80211softmac_scaninfo, | ||
99 | softmac_scan.work); | ||
100 | struct ieee80211softmac_device *sm = si->mac; | ||
101 | unsigned long flags; | ||
102 | |||
103 | while (!(si->stop) && (si->current_channel_idx < si->number_channels)) { | ||
104 | current_channel_idx = si->current_channel_idx; | ||
105 | si->current_channel_idx++; /* go to the next channel */ | ||
106 | |||
107 | invalid_channel = (si->skip_flags & si->channels[current_channel_idx].flags); | ||
108 | |||
109 | if (!invalid_channel) { | ||
110 | sm->set_channel(sm->dev, si->channels[current_channel_idx].channel); | ||
111 | // FIXME make this user configurable (active/passive) | ||
112 | if(ieee80211softmac_send_mgt_frame(sm, NULL, IEEE80211_STYPE_PROBE_REQ, 0)) | ||
113 | printkl(KERN_DEBUG PFX "Sending Probe Request Failed\n"); | ||
114 | |||
115 | /* also send directed management frame for the network we're looking for */ | ||
116 | // TODO: is this if correct, or should we do this only if scanning from assoc request? | ||
117 | if (sm->associnfo.req_essid.len) | ||
118 | ieee80211softmac_send_mgt_frame(sm, &sm->associnfo.req_essid, IEEE80211_STYPE_PROBE_REQ, 0); | ||
119 | |||
120 | spin_lock_irqsave(&sm->lock, flags); | ||
121 | if (unlikely(!sm->running)) { | ||
122 | /* Prevent reschedule on workqueue flush */ | ||
123 | spin_unlock_irqrestore(&sm->lock, flags); | ||
124 | break; | ||
125 | } | ||
126 | queue_delayed_work(sm->wq, &si->softmac_scan, IEEE80211SOFTMAC_PROBE_DELAY); | ||
127 | spin_unlock_irqrestore(&sm->lock, flags); | ||
128 | return; | ||
129 | } else { | ||
130 | dprintk(PFX "Not probing Channel %d (not allowed here)\n", si->channels[current_channel_idx].channel); | ||
131 | } | ||
132 | } | ||
133 | |||
134 | spin_lock_irqsave(&sm->lock, flags); | ||
135 | cancel_delayed_work(&si->softmac_scan); | ||
136 | si->started = 0; | ||
137 | spin_unlock_irqrestore(&sm->lock, flags); | ||
138 | |||
139 | dprintk(PFX "Scanning finished: scanned %d channels starting with channel %d\n", | ||
140 | sm->scaninfo->number_channels, sm->scaninfo->channels[0].channel); | ||
141 | ieee80211softmac_scan_finished(sm); | ||
142 | complete_all(&sm->scaninfo->finished); | ||
143 | } | ||
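The work function above probes at most one allowed channel per invocation and re-queues itself after IEEE80211SOFTMAC_PROBE_DELAY, so a scan is a chain of short delayed-work callbacks rather than one long blocking loop. A user-space caricature of that pattern, with invented names and a plain loop standing in for the workqueue:

        #include <stdio.h>

        struct scaninfo {
                int current_channel_idx;
                int number_channels;
                int stop;
        };

        /* one "work item": handle a single channel, then ask to be re-queued */
        static int scan_step(struct scaninfo *si)
        {
                if (si->stop || si->current_channel_idx >= si->number_channels)
                        return 0;               /* scan finished */

                printf("probing channel index %d\n", si->current_channel_idx);
                si->current_channel_idx++;
                return 1;                       /* re-queue after the probe delay */
        }

        int main(void)
        {
                struct scaninfo si = { .number_channels = 3 };

                while (scan_step(&si))
                        ;                       /* the workqueue would insert a delay here */
                printf("scan finished\n");
                return 0;
        }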
144 | |||
145 | static inline struct ieee80211softmac_scaninfo *allocate_scaninfo(struct ieee80211softmac_device *mac) | ||
146 | { | ||
147 | /* ugh. can we call this without having the spinlock held? */ | ||
148 | struct ieee80211softmac_scaninfo *info = kmalloc(sizeof(struct ieee80211softmac_scaninfo), GFP_ATOMIC); | ||
149 | if (unlikely(!info)) | ||
150 | return NULL; | ||
151 | INIT_DELAYED_WORK(&info->softmac_scan, ieee80211softmac_scan); | ||
152 | info->mac = mac; | ||
153 | init_completion(&info->finished); | ||
154 | return info; | ||
155 | } | ||
156 | |||
157 | int ieee80211softmac_start_scan_implementation(struct net_device *dev) | ||
158 | { | ||
159 | struct ieee80211softmac_device *sm = ieee80211_priv(dev); | ||
160 | unsigned long flags; | ||
161 | |||
162 | if (!(dev->flags & IFF_UP)) | ||
163 | return -ENODEV; | ||
164 | |||
165 | assert(ieee80211softmac_scan_handlers_check_self(sm)); | ||
166 | if (!ieee80211softmac_scan_handlers_check_self(sm)) | ||
167 | return -EINVAL; | ||
168 | |||
169 | spin_lock_irqsave(&sm->lock, flags); | ||
170 | /* it looks like we need to hold the lock here | ||
171 | * to make sure we don't allocate two of these... */ | ||
172 | if (unlikely(!sm->scaninfo)) | ||
173 | sm->scaninfo = allocate_scaninfo(sm); | ||
174 | if (unlikely(!sm->scaninfo)) { | ||
175 | spin_unlock_irqrestore(&sm->lock, flags); | ||
176 | return -ENOMEM; | ||
177 | } | ||
178 | |||
179 | sm->scaninfo->skip_flags = IEEE80211_CH_INVALID; | ||
180 | if (0 /* not scanning in IEEE802.11b */)//TODO | ||
181 | sm->scaninfo->skip_flags |= IEEE80211_CH_B_ONLY; | ||
182 | if (0 /* IEEE802.11a */) {//TODO | ||
183 | sm->scaninfo->channels = sm->ieee->geo.a; | ||
184 | sm->scaninfo->number_channels = sm->ieee->geo.a_channels; | ||
185 | } else { | ||
186 | sm->scaninfo->channels = sm->ieee->geo.bg; | ||
187 | sm->scaninfo->number_channels = sm->ieee->geo.bg_channels; | ||
188 | } | ||
189 | sm->scaninfo->current_channel_idx = 0; | ||
190 | sm->scaninfo->started = 1; | ||
191 | sm->scaninfo->stop = 0; | ||
192 | INIT_COMPLETION(sm->scaninfo->finished); | ||
193 | queue_delayed_work(sm->wq, &sm->scaninfo->softmac_scan, 0); | ||
194 | spin_unlock_irqrestore(&sm->lock, flags); | ||
195 | return 0; | ||
196 | } | ||
197 | |||
198 | void ieee80211softmac_stop_scan_implementation(struct net_device *dev) | ||
199 | { | ||
200 | struct ieee80211softmac_device *sm = ieee80211_priv(dev); | ||
201 | unsigned long flags; | ||
202 | |||
203 | assert(ieee80211softmac_scan_handlers_check_self(sm)); | ||
204 | if (!ieee80211softmac_scan_handlers_check_self(sm)) | ||
205 | return; | ||
206 | |||
207 | spin_lock_irqsave(&sm->lock, flags); | ||
208 | assert(sm->scaninfo != NULL); | ||
209 | if (sm->scaninfo) { | ||
210 | if (sm->scaninfo->started) | ||
211 | sm->scaninfo->stop = 1; | ||
212 | else | ||
213 | complete_all(&sm->scaninfo->finished); | ||
214 | } | ||
215 | spin_unlock_irqrestore(&sm->lock, flags); | ||
216 | } | ||
217 | |||
218 | void ieee80211softmac_wait_for_scan_implementation(struct net_device *dev) | ||
219 | { | ||
220 | struct ieee80211softmac_device *sm = ieee80211_priv(dev); | ||
221 | unsigned long flags; | ||
222 | |||
223 | assert(ieee80211softmac_scan_handlers_check_self(sm)); | ||
224 | if (!ieee80211softmac_scan_handlers_check_self(sm)) | ||
225 | return; | ||
226 | |||
227 | spin_lock_irqsave(&sm->lock, flags); | ||
228 | if (!sm->scaninfo->started) { | ||
229 | spin_unlock_irqrestore(&sm->lock, flags); | ||
230 | return; | ||
231 | } | ||
232 | spin_unlock_irqrestore(&sm->lock, flags); | ||
233 | wait_for_completion(&sm->scaninfo->finished); | ||
234 | } | ||
235 | |||
236 | /* this is what drivers (that do scanning) call when they're done */ | ||
237 | void ieee80211softmac_scan_finished(struct ieee80211softmac_device *sm) | ||
238 | { | ||
239 | unsigned long flags; | ||
240 | |||
241 | spin_lock_irqsave(&sm->lock, flags); | ||
242 | sm->scanning = 0; | ||
243 | spin_unlock_irqrestore(&sm->lock, flags); | ||
244 | |||
245 | if (sm->associnfo.bssvalid) { | ||
246 | struct ieee80211softmac_network *net; | ||
247 | |||
248 | net = ieee80211softmac_get_network_by_bssid(sm, sm->associnfo.bssid); | ||
249 | if (net) | ||
250 | sm->set_channel(sm->dev, net->channel); | ||
251 | } | ||
252 | ieee80211softmac_call_events(sm, IEEE80211SOFTMAC_EVENT_SCAN_FINISHED, NULL); | ||
253 | } | ||
254 | EXPORT_SYMBOL_GPL(ieee80211softmac_scan_finished); | ||
diff --git a/net/ieee80211/softmac/ieee80211softmac_wx.c b/net/ieee80211/softmac/ieee80211softmac_wx.c deleted file mode 100644 index e01b59aedc54..000000000000 --- a/net/ieee80211/softmac/ieee80211softmac_wx.c +++ /dev/null | |||
@@ -1,508 +0,0 @@ | |||
1 | /* | ||
2 | * This file contains our _wx handlers. Make sure you EXPORT_SYMBOL_GPL them | ||
3 | * | ||
4 | * Copyright (c) 2005, 2006 Johannes Berg <johannes@sipsolutions.net> | ||
5 | * Joseph Jezak <josejx@gentoo.org> | ||
6 | * Larry Finger <Larry.Finger@lwfinger.net> | ||
7 | * Danny van Dyk <kugelfang@gentoo.org> | ||
8 | * Michael Buesch <mbuesch@freenet.de> | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify it | ||
11 | * under the terms of version 2 of the GNU General Public License as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
15 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
17 | * more details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License | ||
20 | * along with this program; if not, write to the Free Software | ||
21 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
22 | * | ||
23 | * The full GNU General Public License is included in this distribution in the | ||
24 | * file called COPYING. | ||
25 | */ | ||
26 | |||
27 | #include "ieee80211softmac_priv.h" | ||
28 | |||
29 | #include <net/iw_handler.h> | ||
30 | /* for is_broadcast_ether_addr and is_zero_ether_addr */ | ||
31 | #include <linux/etherdevice.h> | ||
32 | |||
33 | int | ||
34 | ieee80211softmac_wx_trigger_scan(struct net_device *net_dev, | ||
35 | struct iw_request_info *info, | ||
36 | union iwreq_data *data, | ||
37 | char *extra) | ||
38 | { | ||
39 | struct ieee80211softmac_device *sm = ieee80211_priv(net_dev); | ||
40 | return ieee80211softmac_start_scan(sm); | ||
41 | } | ||
42 | EXPORT_SYMBOL_GPL(ieee80211softmac_wx_trigger_scan); | ||
43 | |||
44 | |||
45 | /* if we're still scanning, return -EAGAIN so that userspace tools | ||
46 | * can get the complete scan results, otherwise return the scan list. */ | ||
47 | int | ||
48 | ieee80211softmac_wx_get_scan_results(struct net_device *net_dev, | ||
49 | struct iw_request_info *info, | ||
50 | union iwreq_data *data, | ||
51 | char *extra) | ||
52 | { | ||
53 | unsigned long flags; | ||
54 | struct ieee80211softmac_device *sm = ieee80211_priv(net_dev); | ||
55 | |||
56 | spin_lock_irqsave(&sm->lock, flags); | ||
57 | if (sm->scanning) { | ||
58 | spin_unlock_irqrestore(&sm->lock, flags); | ||
59 | return -EAGAIN; | ||
60 | } | ||
61 | spin_unlock_irqrestore(&sm->lock, flags); | ||
62 | return ieee80211_wx_get_scan(sm->ieee, info, data, extra); | ||
63 | } | ||
64 | EXPORT_SYMBOL_GPL(ieee80211softmac_wx_get_scan_results); | ||
65 | |||
66 | int | ||
67 | ieee80211softmac_wx_set_essid(struct net_device *net_dev, | ||
68 | struct iw_request_info *info, | ||
69 | union iwreq_data *data, | ||
70 | char *extra) | ||
71 | { | ||
72 | struct ieee80211softmac_device *sm = ieee80211_priv(net_dev); | ||
73 | struct ieee80211softmac_auth_queue_item *authptr; | ||
74 | int length = 0; | ||
75 | DECLARE_MAC_BUF(mac); | ||
76 | |||
77 | check_assoc_again: | ||
78 | mutex_lock(&sm->associnfo.mutex); | ||
79 | if((sm->associnfo.associating || sm->associnfo.associated) && | ||
80 | (data->essid.flags && data->essid.length)) { | ||
81 | dprintk(KERN_INFO PFX "Canceling existing associate request!\n"); | ||
82 | /* Cancel assoc work */ | ||
83 | cancel_delayed_work(&sm->associnfo.work); | ||
84 | /* We don't have to do this, but it's a little cleaner */ | ||
85 | list_for_each_entry(authptr, &sm->auth_queue, list) | ||
86 | cancel_delayed_work(&authptr->work); | ||
87 | sm->associnfo.bssvalid = 0; | ||
88 | sm->associnfo.bssfixed = 0; | ||
89 | sm->associnfo.associating = 0; | ||
90 | sm->associnfo.associated = 0; | ||
91 | /* We must unlock to avoid deadlocks with the assoc workqueue | ||
92 | * on the associnfo.mutex */ | ||
93 | mutex_unlock(&sm->associnfo.mutex); | ||
94 | flush_workqueue(sm->wq); | ||
95 | /* Avoid race! Check assoc status again. Maybe someone started an | ||
96 | * association while we flushed. */ | ||
97 | goto check_assoc_again; | ||
98 | } | ||
99 | |||
100 | sm->associnfo.static_essid = 0; | ||
101 | sm->associnfo.assoc_wait = 0; | ||
102 | |||
103 | if (data->essid.flags && data->essid.length) { | ||
104 | length = min((int)data->essid.length, IW_ESSID_MAX_SIZE); | ||
105 | if (length) { | ||
106 | memcpy(sm->associnfo.req_essid.data, extra, length); | ||
107 | sm->associnfo.static_essid = 1; | ||
108 | } | ||
109 | } | ||
110 | |||
111 | /* set our requested ESSID length. | ||
112 | * If applicable, we have already copied the data in */ | ||
113 | sm->associnfo.req_essid.len = length; | ||
114 | |||
115 | sm->associnfo.associating = 1; | ||
116 | /* queue lower level code to do work (if necessary) */ | ||
117 | queue_delayed_work(sm->wq, &sm->associnfo.work, 0); | ||
118 | |||
119 | mutex_unlock(&sm->associnfo.mutex); | ||
120 | |||
121 | return 0; | ||
122 | } | ||
123 | EXPORT_SYMBOL_GPL(ieee80211softmac_wx_set_essid); | ||
124 | |||
125 | int | ||
126 | ieee80211softmac_wx_get_essid(struct net_device *net_dev, | ||
127 | struct iw_request_info *info, | ||
128 | union iwreq_data *data, | ||
129 | char *extra) | ||
130 | { | ||
131 | struct ieee80211softmac_device *sm = ieee80211_priv(net_dev); | ||
132 | |||
133 | mutex_lock(&sm->associnfo.mutex); | ||
134 | /* If all fails, return ANY (empty) */ | ||
135 | data->essid.length = 0; | ||
136 | data->essid.flags = 0; /* active */ | ||
137 | |||
138 | /* If we have a statically configured ESSID then return it */ | ||
139 | if (sm->associnfo.static_essid) { | ||
140 | data->essid.length = sm->associnfo.req_essid.len; | ||
141 | data->essid.flags = 1; /* active */ | ||
142 | memcpy(extra, sm->associnfo.req_essid.data, sm->associnfo.req_essid.len); | ||
143 | dprintk(KERN_INFO PFX "Getting essid from req_essid\n"); | ||
144 | } else if (sm->associnfo.associated || sm->associnfo.associating) { | ||
145 | /* If we're associating/associated, return that */ | ||
146 | data->essid.length = sm->associnfo.associate_essid.len; | ||
147 | data->essid.flags = 1; /* active */ | ||
148 | memcpy(extra, sm->associnfo.associate_essid.data, sm->associnfo.associate_essid.len); | ||
149 | dprintk(KERN_INFO PFX "Getting essid from associate_essid\n"); | ||
150 | } | ||
151 | mutex_unlock(&sm->associnfo.mutex); | ||
152 | |||
153 | return 0; | ||
154 | } | ||
155 | EXPORT_SYMBOL_GPL(ieee80211softmac_wx_get_essid); | ||
156 | |||
157 | int | ||
158 | ieee80211softmac_wx_set_rate(struct net_device *net_dev, | ||
159 | struct iw_request_info *info, | ||
160 | union iwreq_data *data, | ||
161 | char *extra) | ||
162 | { | ||
163 | struct ieee80211softmac_device *mac = ieee80211_priv(net_dev); | ||
164 | struct ieee80211_device *ieee = mac->ieee; | ||
165 | unsigned long flags; | ||
166 | s32 in_rate = data->bitrate.value; | ||
167 | u8 rate; | ||
168 | int is_ofdm = 0; | ||
169 | int err = -EINVAL; | ||
170 | |||
171 | if (in_rate == -1) { | ||
172 | if (ieee->modulation & IEEE80211_OFDM_MODULATION) | ||
173 | in_rate = 24000000; | ||
174 | else | ||
175 | in_rate = 11000000; | ||
176 | } | ||
177 | |||
178 | switch (in_rate) { | ||
179 | case 1000000: | ||
180 | rate = IEEE80211_CCK_RATE_1MB; | ||
181 | break; | ||
182 | case 2000000: | ||
183 | rate = IEEE80211_CCK_RATE_2MB; | ||
184 | break; | ||
185 | case 5500000: | ||
186 | rate = IEEE80211_CCK_RATE_5MB; | ||
187 | break; | ||
188 | case 11000000: | ||
189 | rate = IEEE80211_CCK_RATE_11MB; | ||
190 | break; | ||
191 | case 6000000: | ||
192 | rate = IEEE80211_OFDM_RATE_6MB; | ||
193 | is_ofdm = 1; | ||
194 | break; | ||
195 | case 9000000: | ||
196 | rate = IEEE80211_OFDM_RATE_9MB; | ||
197 | is_ofdm = 1; | ||
198 | break; | ||
199 | case 12000000: | ||
200 | rate = IEEE80211_OFDM_RATE_12MB; | ||
201 | is_ofdm = 1; | ||
202 | break; | ||
203 | case 18000000: | ||
204 | rate = IEEE80211_OFDM_RATE_18MB; | ||
205 | is_ofdm = 1; | ||
206 | break; | ||
207 | case 24000000: | ||
208 | rate = IEEE80211_OFDM_RATE_24MB; | ||
209 | is_ofdm = 1; | ||
210 | break; | ||
211 | case 36000000: | ||
212 | rate = IEEE80211_OFDM_RATE_36MB; | ||
213 | is_ofdm = 1; | ||
214 | break; | ||
215 | case 48000000: | ||
216 | rate = IEEE80211_OFDM_RATE_48MB; | ||
217 | is_ofdm = 1; | ||
218 | break; | ||
219 | case 54000000: | ||
220 | rate = IEEE80211_OFDM_RATE_54MB; | ||
221 | is_ofdm = 1; | ||
222 | break; | ||
223 | default: | ||
224 | goto out; | ||
225 | } | ||
226 | |||
227 | spin_lock_irqsave(&mac->lock, flags); | ||
228 | |||
229 | /* Check if correct modulation for this PHY. */ | ||
230 | if (is_ofdm && !(ieee->modulation & IEEE80211_OFDM_MODULATION)) | ||
231 | goto out_unlock; | ||
232 | |||
233 | mac->txrates.user_rate = rate; | ||
234 | ieee80211softmac_recalc_txrates(mac); | ||
235 | err = 0; | ||
236 | |||
237 | out_unlock: | ||
238 | spin_unlock_irqrestore(&mac->lock, flags); | ||
239 | out: | ||
240 | return err; | ||
241 | } | ||
242 | EXPORT_SYMBOL_GPL(ieee80211softmac_wx_set_rate); | ||
243 | |||
244 | int | ||
245 | ieee80211softmac_wx_get_rate(struct net_device *net_dev, | ||
246 | struct iw_request_info *info, | ||
247 | union iwreq_data *data, | ||
248 | char *extra) | ||
249 | { | ||
250 | struct ieee80211softmac_device *mac = ieee80211_priv(net_dev); | ||
251 | unsigned long flags; | ||
252 | int err = -EINVAL; | ||
253 | |||
254 | spin_lock_irqsave(&mac->lock, flags); | ||
255 | |||
256 | if (unlikely(!mac->running)) { | ||
257 | err = -ENODEV; | ||
258 | goto out_unlock; | ||
259 | } | ||
260 | |||
261 | switch (mac->txrates.default_rate) { | ||
262 | case IEEE80211_CCK_RATE_1MB: | ||
263 | data->bitrate.value = 1000000; | ||
264 | break; | ||
265 | case IEEE80211_CCK_RATE_2MB: | ||
266 | data->bitrate.value = 2000000; | ||
267 | break; | ||
268 | case IEEE80211_CCK_RATE_5MB: | ||
269 | data->bitrate.value = 5500000; | ||
270 | break; | ||
271 | case IEEE80211_CCK_RATE_11MB: | ||
272 | data->bitrate.value = 11000000; | ||
273 | break; | ||
274 | case IEEE80211_OFDM_RATE_6MB: | ||
275 | data->bitrate.value = 6000000; | ||
276 | break; | ||
277 | case IEEE80211_OFDM_RATE_9MB: | ||
278 | data->bitrate.value = 9000000; | ||
279 | break; | ||
280 | case IEEE80211_OFDM_RATE_12MB: | ||
281 | data->bitrate.value = 12000000; | ||
282 | break; | ||
283 | case IEEE80211_OFDM_RATE_18MB: | ||
284 | data->bitrate.value = 18000000; | ||
285 | break; | ||
286 | case IEEE80211_OFDM_RATE_24MB: | ||
287 | data->bitrate.value = 24000000; | ||
288 | break; | ||
289 | case IEEE80211_OFDM_RATE_36MB: | ||
290 | data->bitrate.value = 36000000; | ||
291 | break; | ||
292 | case IEEE80211_OFDM_RATE_48MB: | ||
293 | data->bitrate.value = 48000000; | ||
294 | break; | ||
295 | case IEEE80211_OFDM_RATE_54MB: | ||
296 | data->bitrate.value = 54000000; | ||
297 | break; | ||
298 | default: | ||
299 | assert(0); | ||
300 | goto out_unlock; | ||
301 | } | ||
302 | err = 0; | ||
303 | out_unlock: | ||
304 | spin_unlock_irqrestore(&mac->lock, flags); | ||
305 | |||
306 | return err; | ||
307 | } | ||
308 | EXPORT_SYMBOL_GPL(ieee80211softmac_wx_get_rate); | ||
309 | |||
310 | int | ||
311 | ieee80211softmac_wx_get_wap(struct net_device *net_dev, | ||
312 | struct iw_request_info *info, | ||
313 | union iwreq_data *data, | ||
314 | char *extra) | ||
315 | { | ||
316 | struct ieee80211softmac_device *mac = ieee80211_priv(net_dev); | ||
317 | int err = 0; | ||
318 | |||
319 | mutex_lock(&mac->associnfo.mutex); | ||
320 | if (mac->associnfo.bssvalid) | ||
321 | memcpy(data->ap_addr.sa_data, mac->associnfo.bssid, ETH_ALEN); | ||
322 | else | ||
323 | memset(data->ap_addr.sa_data, 0xff, ETH_ALEN); | ||
324 | data->ap_addr.sa_family = ARPHRD_ETHER; | ||
325 | mutex_unlock(&mac->associnfo.mutex); | ||
326 | |||
327 | return err; | ||
328 | } | ||
329 | EXPORT_SYMBOL_GPL(ieee80211softmac_wx_get_wap); | ||
330 | |||
331 | int | ||
332 | ieee80211softmac_wx_set_wap(struct net_device *net_dev, | ||
333 | struct iw_request_info *info, | ||
334 | union iwreq_data *data, | ||
335 | char *extra) | ||
336 | { | ||
337 | struct ieee80211softmac_device *mac = ieee80211_priv(net_dev); | ||
338 | |||
339 | /* sanity check */ | ||
340 | if (data->ap_addr.sa_family != ARPHRD_ETHER) { | ||
341 | return -EINVAL; | ||
342 | } | ||
343 | |||
344 | mutex_lock(&mac->associnfo.mutex); | ||
345 | if (is_broadcast_ether_addr(data->ap_addr.sa_data)) { | ||
346 | /* the bssid we have is not to be fixed any longer, | ||
347 | * and we should reassociate to the best AP. */ | ||
348 | mac->associnfo.bssfixed = 0; | ||
349 | /* force reassociation */ | ||
350 | mac->associnfo.bssvalid = 0; | ||
351 | if (mac->associnfo.associated) | ||
352 | queue_delayed_work(mac->wq, &mac->associnfo.work, 0); | ||
353 | } else if (is_zero_ether_addr(data->ap_addr.sa_data)) { | ||
354 | /* the bssid we have is no longer fixed */ | ||
355 | mac->associnfo.bssfixed = 0; | ||
356 | } else { | ||
357 | if (!memcmp(mac->associnfo.bssid, data->ap_addr.sa_data, ETH_ALEN)) { | ||
358 | if (mac->associnfo.associating || mac->associnfo.associated) { | ||
359 | /* bssid unchanged and associated or associating - just return */ | ||
360 | goto out; | ||
361 | } | ||
362 | } else { | ||
363 | /* copy new value in data->ap_addr.sa_data to bssid */ | ||
364 | memcpy(mac->associnfo.bssid, data->ap_addr.sa_data, ETH_ALEN); | ||
365 | } | ||
366 | /* tell the other code that this bssid should be used no matter what */ | ||
367 | mac->associnfo.bssfixed = 1; | ||
368 | /* queue associate if new bssid or (old one again and not associated) */ | ||
369 | queue_delayed_work(mac->wq, &mac->associnfo.work, 0); | ||
370 | } | ||
371 | |||
372 | out: | ||
373 | mutex_unlock(&mac->associnfo.mutex); | ||
374 | |||
375 | return 0; | ||
376 | } | ||
377 | EXPORT_SYMBOL_GPL(ieee80211softmac_wx_set_wap); | ||
378 | |||
379 | int | ||
380 | ieee80211softmac_wx_set_genie(struct net_device *dev, | ||
381 | struct iw_request_info *info, | ||
382 | union iwreq_data *wrqu, | ||
383 | char *extra) | ||
384 | { | ||
385 | struct ieee80211softmac_device *mac = ieee80211_priv(dev); | ||
386 | unsigned long flags; | ||
387 | int err = 0; | ||
388 | char *buf; | ||
389 | int i; | ||
390 | |||
391 | mutex_lock(&mac->associnfo.mutex); | ||
392 | spin_lock_irqsave(&mac->lock, flags); | ||
393 | /* bleh. shouldn't be locked for that kmalloc... */ | ||
394 | |||
395 | if (wrqu->data.length) { | ||
396 | if ((wrqu->data.length < 2) || (extra[1]+2 != wrqu->data.length)) { | ||
397 | /* this is an IE, so the length must be | ||
398 | * correct. Is it possible though that | ||
399 | * more than one IE is passed in? | ||
400 | */ | ||
401 | err = -EINVAL; | ||
402 | goto out; | ||
403 | } | ||
404 | if (mac->wpa.IEbuflen <= wrqu->data.length) { | ||
405 | buf = kmalloc(wrqu->data.length, GFP_ATOMIC); | ||
406 | if (!buf) { | ||
407 | err = -ENOMEM; | ||
408 | goto out; | ||
409 | } | ||
410 | kfree(mac->wpa.IE); | ||
411 | mac->wpa.IE = buf; | ||
412 | mac->wpa.IEbuflen = wrqu->data.length; | ||
413 | } | ||
414 | memcpy(mac->wpa.IE, extra, wrqu->data.length); | ||
415 | dprintk(KERN_INFO PFX "generic IE set to "); | ||
416 | for (i=0;i<wrqu->data.length;i++) | ||
417 | dprintk("%.2x", (u8)mac->wpa.IE[i]); | ||
418 | dprintk("\n"); | ||
419 | mac->wpa.IElen = wrqu->data.length; | ||
420 | } else { | ||
421 | kfree(mac->wpa.IE); | ||
422 | mac->wpa.IE = NULL; | ||
423 | mac->wpa.IElen = 0; | ||
424 | mac->wpa.IEbuflen = 0; | ||
425 | } | ||
426 | |||
427 | out: | ||
428 | spin_unlock_irqrestore(&mac->lock, flags); | ||
429 | mutex_unlock(&mac->associnfo.mutex); | ||
430 | |||
431 | return err; | ||
432 | } | ||
433 | EXPORT_SYMBOL_GPL(ieee80211softmac_wx_set_genie); | ||
434 | |||
435 | int | ||
436 | ieee80211softmac_wx_get_genie(struct net_device *dev, | ||
437 | struct iw_request_info *info, | ||
438 | union iwreq_data *wrqu, | ||
439 | char *extra) | ||
440 | { | ||
441 | struct ieee80211softmac_device *mac = ieee80211_priv(dev); | ||
442 | unsigned long flags; | ||
443 | int err = 0; | ||
444 | int space = wrqu->data.length; | ||
445 | |||
446 | mutex_lock(&mac->associnfo.mutex); | ||
447 | spin_lock_irqsave(&mac->lock, flags); | ||
448 | |||
449 | wrqu->data.length = 0; | ||
450 | |||
451 | if (mac->wpa.IE && mac->wpa.IElen) { | ||
452 | wrqu->data.length = mac->wpa.IElen; | ||
453 | if (mac->wpa.IElen <= space) | ||
454 | memcpy(extra, mac->wpa.IE, mac->wpa.IElen); | ||
455 | else | ||
456 | err = -E2BIG; | ||
457 | } | ||
458 | spin_unlock_irqrestore(&mac->lock, flags); | ||
459 | mutex_unlock(&mac->associnfo.mutex); | ||
460 | |||
461 | return err; | ||
462 | } | ||
463 | EXPORT_SYMBOL_GPL(ieee80211softmac_wx_get_genie); | ||
464 | |||
465 | int | ||
466 | ieee80211softmac_wx_set_mlme(struct net_device *dev, | ||
467 | struct iw_request_info *info, | ||
468 | union iwreq_data *wrqu, | ||
469 | char *extra) | ||
470 | { | ||
471 | struct ieee80211softmac_device *mac = ieee80211_priv(dev); | ||
472 | struct iw_mlme *mlme = (struct iw_mlme *)extra; | ||
473 | u16 reason = mlme->reason_code; | ||
474 | struct ieee80211softmac_network *net; | ||
475 | int err = -EINVAL; | ||
476 | |||
477 | mutex_lock(&mac->associnfo.mutex); | ||
478 | |||
479 | if (memcmp(mac->associnfo.bssid, mlme->addr.sa_data, ETH_ALEN)) { | ||
480 | printk(KERN_DEBUG PFX "wx_set_mlme: requested operation on net we don't use\n"); | ||
481 | goto out; | ||
482 | } | ||
483 | |||
484 | switch (mlme->cmd) { | ||
485 | case IW_MLME_DEAUTH: | ||
486 | net = ieee80211softmac_get_network_by_bssid_locked(mac, mlme->addr.sa_data); | ||
487 | if (!net) { | ||
488 | printk(KERN_DEBUG PFX "wx_set_mlme: we should know the net here...\n"); | ||
489 | goto out; | ||
490 | } | ||
491 | err = ieee80211softmac_deauth_req(mac, net, reason); | ||
492 | goto out; | ||
493 | case IW_MLME_DISASSOC: | ||
494 | ieee80211softmac_send_disassoc_req(mac, reason); | ||
495 | mac->associnfo.associated = 0; | ||
496 | mac->associnfo.associating = 0; | ||
497 | err = 0; | ||
498 | goto out; | ||
499 | default: | ||
500 | err = -EOPNOTSUPP; | ||
501 | } | ||
502 | |||
503 | out: | ||
504 | mutex_unlock(&mac->associnfo.mutex); | ||
505 | |||
506 | return err; | ||
507 | } | ||
508 | EXPORT_SYMBOL_GPL(ieee80211softmac_wx_set_mlme); | ||
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 0d109504ed86..44f5ce1fbfa4 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c | |||
@@ -784,6 +784,7 @@ int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) | |||
784 | { | 784 | { |
785 | struct sock *sk = sock->sk; | 785 | struct sock *sk = sock->sk; |
786 | int err = 0; | 786 | int err = 0; |
787 | struct net *net = sk->sk_net; | ||
787 | 788 | ||
788 | switch (cmd) { | 789 | switch (cmd) { |
789 | case SIOCGSTAMP: | 790 | case SIOCGSTAMP: |
@@ -795,12 +796,12 @@ int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) | |||
795 | case SIOCADDRT: | 796 | case SIOCADDRT: |
796 | case SIOCDELRT: | 797 | case SIOCDELRT: |
797 | case SIOCRTMSG: | 798 | case SIOCRTMSG: |
798 | err = ip_rt_ioctl(sk->sk_net, cmd, (void __user *)arg); | 799 | err = ip_rt_ioctl(net, cmd, (void __user *)arg); |
799 | break; | 800 | break; |
800 | case SIOCDARP: | 801 | case SIOCDARP: |
801 | case SIOCGARP: | 802 | case SIOCGARP: |
802 | case SIOCSARP: | 803 | case SIOCSARP: |
803 | err = arp_ioctl(sk->sk_net, cmd, (void __user *)arg); | 804 | err = arp_ioctl(net, cmd, (void __user *)arg); |
804 | break; | 805 | break; |
805 | case SIOCGIFADDR: | 806 | case SIOCGIFADDR: |
806 | case SIOCSIFADDR: | 807 | case SIOCSIFADDR: |
@@ -813,7 +814,7 @@ int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) | |||
813 | case SIOCSIFPFLAGS: | 814 | case SIOCSIFPFLAGS: |
814 | case SIOCGIFPFLAGS: | 815 | case SIOCGIFPFLAGS: |
815 | case SIOCSIFFLAGS: | 816 | case SIOCSIFFLAGS: |
816 | err = devinet_ioctl(cmd, (void __user *)arg); | 817 | err = devinet_ioctl(net, cmd, (void __user *)arg); |
817 | break; | 818 | break; |
818 | default: | 819 | default: |
819 | if (sk->sk_prot->ioctl) | 820 | if (sk->sk_prot->ioctl) |
@@ -1059,7 +1060,7 @@ static int inet_sk_reselect_saddr(struct sock *sk) | |||
1059 | if (sysctl_ip_dynaddr > 1) { | 1060 | if (sysctl_ip_dynaddr > 1) { |
1060 | printk(KERN_INFO "%s(): shifting inet->" | 1061 | printk(KERN_INFO "%s(): shifting inet->" |
1061 | "saddr from %d.%d.%d.%d to %d.%d.%d.%d\n", | 1062 | "saddr from %d.%d.%d.%d to %d.%d.%d.%d\n", |
1062 | __FUNCTION__, | 1063 | __func__, |
1063 | NIPQUAD(old_saddr), | 1064 | NIPQUAD(old_saddr), |
1064 | NIPQUAD(new_saddr)); | 1065 | NIPQUAD(new_saddr)); |
1065 | } | 1066 | } |
@@ -1414,7 +1415,7 @@ static int __init inet_init(void) | |||
1414 | 1415 | ||
1415 | ip_init(); | 1416 | ip_init(); |
1416 | 1417 | ||
1417 | tcp_v4_init(&inet_family_ops); | 1418 | tcp_v4_init(); |
1418 | 1419 | ||
1419 | /* Setup TCP slab cache for open requests. */ | 1420 | /* Setup TCP slab cache for open requests. */ |
1420 | tcp_init(); | 1421 | tcp_init(); |
@@ -1429,7 +1430,8 @@ static int __init inet_init(void) | |||
1429 | * Set the ICMP layer up | 1430 | * Set the ICMP layer up |
1430 | */ | 1431 | */ |
1431 | 1432 | ||
1432 | icmp_init(&inet_family_ops); | 1433 | if (icmp_init() < 0) |
1434 | panic("Failed to create the ICMP control socket.\n"); | ||
1433 | 1435 | ||
1434 | /* | 1436 | /* |
1435 | * Initialise the multicast router | 1437 | * Initialise the multicast router |
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index 8e17f65f4002..efe01df8fc0e 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c | |||
@@ -475,7 +475,7 @@ int arp_find(unsigned char *haddr, struct sk_buff *skb) | |||
475 | return 1; | 475 | return 1; |
476 | } | 476 | } |
477 | 477 | ||
478 | paddr = ((struct rtable*)skb->dst)->rt_gateway; | 478 | paddr = skb->rtable->rt_gateway; |
479 | 479 | ||
480 | if (arp_set_predefined(inet_addr_type(&init_net, paddr), haddr, paddr, dev)) | 480 | if (arp_set_predefined(inet_addr_type(&init_net, paddr), haddr, paddr, dev)) |
481 | return 0; | 481 | return 0; |
@@ -570,14 +570,13 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip, | |||
570 | * Allocate a buffer | 570 | * Allocate a buffer |
571 | */ | 571 | */ |
572 | 572 | ||
573 | skb = alloc_skb(sizeof(struct arphdr)+ 2*(dev->addr_len+4) | 573 | skb = alloc_skb(arp_hdr_len(dev) + LL_RESERVED_SPACE(dev), GFP_ATOMIC); |
574 | + LL_RESERVED_SPACE(dev), GFP_ATOMIC); | ||
575 | if (skb == NULL) | 574 | if (skb == NULL) |
576 | return NULL; | 575 | return NULL; |
577 | 576 | ||
578 | skb_reserve(skb, LL_RESERVED_SPACE(dev)); | 577 | skb_reserve(skb, LL_RESERVED_SPACE(dev)); |
579 | skb_reset_network_header(skb); | 578 | skb_reset_network_header(skb); |
580 | arp = (struct arphdr *) skb_put(skb,sizeof(struct arphdr) + 2*(dev->addr_len+4)); | 579 | arp = (struct arphdr *) skb_put(skb, arp_hdr_len(dev)); |
581 | skb->dev = dev; | 580 | skb->dev = dev; |
582 | skb->protocol = htons(ETH_P_ARP); | 581 | skb->protocol = htons(ETH_P_ARP); |
583 | if (src_hw == NULL) | 582 | if (src_hw == NULL) |
@@ -815,7 +814,7 @@ static int arp_process(struct sk_buff *skb) | |||
815 | if (arp->ar_op == htons(ARPOP_REQUEST) && | 814 | if (arp->ar_op == htons(ARPOP_REQUEST) && |
816 | ip_route_input(skb, tip, sip, 0, dev) == 0) { | 815 | ip_route_input(skb, tip, sip, 0, dev) == 0) { |
817 | 816 | ||
818 | rt = (struct rtable*)skb->dst; | 817 | rt = skb->rtable; |
819 | addr_type = rt->rt_type; | 818 | addr_type = rt->rt_type; |
820 | 819 | ||
821 | if (addr_type == RTN_LOCAL) { | 820 | if (addr_type == RTN_LOCAL) { |
@@ -916,9 +915,7 @@ static int arp_rcv(struct sk_buff *skb, struct net_device *dev, | |||
916 | goto freeskb; | 915 | goto freeskb; |
917 | 916 | ||
918 | /* ARP header, plus 2 device addresses, plus 2 IP addresses. */ | 917 | /* ARP header, plus 2 device addresses, plus 2 IP addresses. */ |
919 | if (!pskb_may_pull(skb, (sizeof(struct arphdr) + | 918 | if (!pskb_may_pull(skb, arp_hdr_len(dev))) |
920 | (2 * dev->addr_len) + | ||
921 | (2 * sizeof(u32))))) | ||
922 | goto freeskb; | 919 | goto freeskb; |
923 | 920 | ||
924 | arp = arp_hdr(skb); | 921 | arp = arp_hdr(skb); |
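Note on the arp.c hunks above: three call sites (alloc_skb, skb_put, pskb_may_pull) drop the open-coded "ARP header, plus 2 device addresses, plus 2 IP addresses" arithmetic in favour of an arp_hdr_len() helper. A minimal standalone sketch of what that helper is assumed to compute; the real helper takes a struct net_device * and reads dev->addr_len, this sketch takes the address length directly:

	#include <stdio.h>
	#include <stdint.h>

	/* Wire layout of the fixed ARP header: 2+2+1+1+2 = 8 bytes, no padding. */
	struct arphdr {
		uint16_t ar_hrd;	/* hardware address format */
		uint16_t ar_pro;	/* protocol address format */
		uint8_t  ar_hln;	/* hardware address length */
		uint8_t  ar_pln;	/* protocol address length */
		uint16_t ar_op;		/* ARP opcode (request/reply) */
	};

	/* Assumed equivalent of arp_hdr_len(dev): fixed header plus two hardware
	 * addresses plus two IPv4 addresses, exactly the expression the old
	 * alloc_skb()/pskb_may_pull() call sites spelled out by hand. */
	static size_t arp_hdr_len(unsigned int addr_len)
	{
		return sizeof(struct arphdr) + 2 * (addr_len + 4);
	}

	int main(void)
	{
		/* Ethernet (6-byte MAC): 8 + 2 * (6 + 4) = 28 bytes on the wire. */
		printf("%zu\n", arp_hdr_len(6));
		return 0;
	}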
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 87490f7bb0f7..4a10dbbbe0a1 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c | |||
@@ -446,9 +446,6 @@ static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg | |||
446 | 446 | ||
447 | ASSERT_RTNL(); | 447 | ASSERT_RTNL(); |
448 | 448 | ||
449 | if (net != &init_net) | ||
450 | return -EINVAL; | ||
451 | |||
452 | err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv4_policy); | 449 | err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv4_policy); |
453 | if (err < 0) | 450 | if (err < 0) |
454 | goto errout; | 451 | goto errout; |
@@ -560,9 +557,6 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg | |||
560 | 557 | ||
561 | ASSERT_RTNL(); | 558 | ASSERT_RTNL(); |
562 | 559 | ||
563 | if (net != &init_net) | ||
564 | return -EINVAL; | ||
565 | |||
566 | ifa = rtm_to_ifaddr(net, nlh); | 560 | ifa = rtm_to_ifaddr(net, nlh); |
567 | if (IS_ERR(ifa)) | 561 | if (IS_ERR(ifa)) |
568 | return PTR_ERR(ifa); | 562 | return PTR_ERR(ifa); |
@@ -595,7 +589,7 @@ static __inline__ int inet_abc_len(__be32 addr) | |||
595 | } | 589 | } |
596 | 590 | ||
597 | 591 | ||
598 | int devinet_ioctl(unsigned int cmd, void __user *arg) | 592 | int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg) |
599 | { | 593 | { |
600 | struct ifreq ifr; | 594 | struct ifreq ifr; |
601 | struct sockaddr_in sin_orig; | 595 | struct sockaddr_in sin_orig; |
@@ -624,7 +618,7 @@ int devinet_ioctl(unsigned int cmd, void __user *arg) | |||
624 | *colon = 0; | 618 | *colon = 0; |
625 | 619 | ||
626 | #ifdef CONFIG_KMOD | 620 | #ifdef CONFIG_KMOD |
627 | dev_load(&init_net, ifr.ifr_name); | 621 | dev_load(net, ifr.ifr_name); |
628 | #endif | 622 | #endif |
629 | 623 | ||
630 | switch (cmd) { | 624 | switch (cmd) { |
@@ -665,7 +659,7 @@ int devinet_ioctl(unsigned int cmd, void __user *arg) | |||
665 | rtnl_lock(); | 659 | rtnl_lock(); |
666 | 660 | ||
667 | ret = -ENODEV; | 661 | ret = -ENODEV; |
668 | if ((dev = __dev_get_by_name(&init_net, ifr.ifr_name)) == NULL) | 662 | if ((dev = __dev_get_by_name(net, ifr.ifr_name)) == NULL) |
669 | goto done; | 663 | goto done; |
670 | 664 | ||
671 | if (colon) | 665 | if (colon) |
@@ -878,6 +872,7 @@ __be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope) | |||
878 | { | 872 | { |
879 | __be32 addr = 0; | 873 | __be32 addr = 0; |
880 | struct in_device *in_dev; | 874 | struct in_device *in_dev; |
875 | struct net *net = dev->nd_net; | ||
881 | 876 | ||
882 | rcu_read_lock(); | 877 | rcu_read_lock(); |
883 | in_dev = __in_dev_get_rcu(dev); | 878 | in_dev = __in_dev_get_rcu(dev); |
@@ -906,7 +901,7 @@ no_in_dev: | |||
906 | */ | 901 | */ |
907 | read_lock(&dev_base_lock); | 902 | read_lock(&dev_base_lock); |
908 | rcu_read_lock(); | 903 | rcu_read_lock(); |
909 | for_each_netdev(&init_net, dev) { | 904 | for_each_netdev(net, dev) { |
910 | if ((in_dev = __in_dev_get_rcu(dev)) == NULL) | 905 | if ((in_dev = __in_dev_get_rcu(dev)) == NULL) |
911 | continue; | 906 | continue; |
912 | 907 | ||
@@ -1045,9 +1040,6 @@ static int inetdev_event(struct notifier_block *this, unsigned long event, | |||
1045 | struct net_device *dev = ptr; | 1040 | struct net_device *dev = ptr; |
1046 | struct in_device *in_dev = __in_dev_get_rtnl(dev); | 1041 | struct in_device *in_dev = __in_dev_get_rtnl(dev); |
1047 | 1042 | ||
1048 | if (dev->nd_net != &init_net) | ||
1049 | return NOTIFY_DONE; | ||
1050 | |||
1051 | ASSERT_RTNL(); | 1043 | ASSERT_RTNL(); |
1052 | 1044 | ||
1053 | if (!in_dev) { | 1045 | if (!in_dev) { |
@@ -1173,9 +1165,6 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb) | |||
1173 | struct in_ifaddr *ifa; | 1165 | struct in_ifaddr *ifa; |
1174 | int s_ip_idx, s_idx = cb->args[0]; | 1166 | int s_ip_idx, s_idx = cb->args[0]; |
1175 | 1167 | ||
1176 | if (net != &init_net) | ||
1177 | return 0; | ||
1178 | |||
1179 | s_ip_idx = ip_idx = cb->args[1]; | 1168 | s_ip_idx = ip_idx = cb->args[1]; |
1180 | idx = 0; | 1169 | idx = 0; |
1181 | for_each_netdev(net, dev) { | 1170 | for_each_netdev(net, dev) { |
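Note on the devinet.c hunks above: devinet_ioctl() now receives the namespace explicitly, and the lookups that used to hard-code &init_net (dev_load, __dev_get_by_name, for_each_netdev in inet_select_addr) run against it, which is why the "net != &init_net" bail-outs in the rtnetlink handlers and the notifier can go away. A sketch of where that struct net pointer comes from elsewhere in this patch, using only field names visible in the surrounding hunks; this is kernel-internal code, not compilable on its own:

	/* Namespace of a socket: the igmp.c guards further down check sk->sk_net,
	 * and inet_ioctl() presumably derives its net argument the same way. */
	static struct net *net_of_sock(struct sock *sk)
	{
		return sk->sk_net;
	}

	/* Namespace of a device: used by inet_select_addr() above. */
	static struct net *net_of_dev(struct net_device *dev)
	{
		return dev->nd_net;
	}

	/* Namespace of a cached route: used by the icmp.c hunks below. */
	static struct net *net_of_rtable(struct rtable *rt)
	{
		return rt->u.dst.dev->nd_net;
	}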
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index a13c074dac09..ff9a8e643fcc 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c | |||
@@ -229,14 +229,16 @@ static const struct icmp_control icmp_pointers[NR_ICMP_TYPES+1]; | |||
229 | * | 229 | * |
230 | * On SMP we have one ICMP socket per-cpu. | 230 | * On SMP we have one ICMP socket per-cpu. |
231 | */ | 231 | */ |
232 | static DEFINE_PER_CPU(struct socket *, __icmp_socket) = NULL; | 232 | static struct sock *icmp_sk(struct net *net) |
233 | #define icmp_socket __get_cpu_var(__icmp_socket) | 233 | { |
234 | return net->ipv4.icmp_sk[smp_processor_id()]; | ||
235 | } | ||
234 | 236 | ||
235 | static inline int icmp_xmit_lock(void) | 237 | static inline int icmp_xmit_lock(struct sock *sk) |
236 | { | 238 | { |
237 | local_bh_disable(); | 239 | local_bh_disable(); |
238 | 240 | ||
239 | if (unlikely(!spin_trylock(&icmp_socket->sk->sk_lock.slock))) { | 241 | if (unlikely(!spin_trylock(&sk->sk_lock.slock))) { |
240 | /* This can happen if the output path signals a | 242 | /* This can happen if the output path signals a |
241 | * dst_link_failure() for an outgoing ICMP packet. | 243 | * dst_link_failure() for an outgoing ICMP packet. |
242 | */ | 244 | */ |
@@ -246,9 +248,9 @@ static inline int icmp_xmit_lock(void) | |||
246 | return 0; | 248 | return 0; |
247 | } | 249 | } |
248 | 250 | ||
249 | static inline void icmp_xmit_unlock(void) | 251 | static inline void icmp_xmit_unlock(struct sock *sk) |
250 | { | 252 | { |
251 | spin_unlock_bh(&icmp_socket->sk->sk_lock.slock); | 253 | spin_unlock_bh(&sk->sk_lock.slock); |
252 | } | 254 | } |
253 | 255 | ||
254 | /* | 256 | /* |
@@ -346,19 +348,21 @@ static int icmp_glue_bits(void *from, char *to, int offset, int len, int odd, | |||
346 | static void icmp_push_reply(struct icmp_bxm *icmp_param, | 348 | static void icmp_push_reply(struct icmp_bxm *icmp_param, |
347 | struct ipcm_cookie *ipc, struct rtable *rt) | 349 | struct ipcm_cookie *ipc, struct rtable *rt) |
348 | { | 350 | { |
351 | struct sock *sk; | ||
349 | struct sk_buff *skb; | 352 | struct sk_buff *skb; |
350 | 353 | ||
351 | if (ip_append_data(icmp_socket->sk, icmp_glue_bits, icmp_param, | 354 | sk = icmp_sk(rt->u.dst.dev->nd_net); |
355 | if (ip_append_data(sk, icmp_glue_bits, icmp_param, | ||
352 | icmp_param->data_len+icmp_param->head_len, | 356 | icmp_param->data_len+icmp_param->head_len, |
353 | icmp_param->head_len, | 357 | icmp_param->head_len, |
354 | ipc, rt, MSG_DONTWAIT) < 0) | 358 | ipc, rt, MSG_DONTWAIT) < 0) |
355 | ip_flush_pending_frames(icmp_socket->sk); | 359 | ip_flush_pending_frames(sk); |
356 | else if ((skb = skb_peek(&icmp_socket->sk->sk_write_queue)) != NULL) { | 360 | else if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) { |
357 | struct icmphdr *icmph = icmp_hdr(skb); | 361 | struct icmphdr *icmph = icmp_hdr(skb); |
358 | __wsum csum = 0; | 362 | __wsum csum = 0; |
359 | struct sk_buff *skb1; | 363 | struct sk_buff *skb1; |
360 | 364 | ||
361 | skb_queue_walk(&icmp_socket->sk->sk_write_queue, skb1) { | 365 | skb_queue_walk(&sk->sk_write_queue, skb1) { |
362 | csum = csum_add(csum, skb1->csum); | 366 | csum = csum_add(csum, skb1->csum); |
363 | } | 367 | } |
364 | csum = csum_partial_copy_nocheck((void *)&icmp_param->data, | 368 | csum = csum_partial_copy_nocheck((void *)&icmp_param->data, |
@@ -366,7 +370,7 @@ static void icmp_push_reply(struct icmp_bxm *icmp_param, | |||
366 | icmp_param->head_len, csum); | 370 | icmp_param->head_len, csum); |
367 | icmph->checksum = csum_fold(csum); | 371 | icmph->checksum = csum_fold(csum); |
368 | skb->ip_summed = CHECKSUM_NONE; | 372 | skb->ip_summed = CHECKSUM_NONE; |
369 | ip_push_pending_frames(icmp_socket->sk); | 373 | ip_push_pending_frames(sk); |
370 | } | 374 | } |
371 | } | 375 | } |
372 | 376 | ||
@@ -376,16 +380,17 @@ static void icmp_push_reply(struct icmp_bxm *icmp_param, | |||
376 | 380 | ||
377 | static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb) | 381 | static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb) |
378 | { | 382 | { |
379 | struct sock *sk = icmp_socket->sk; | ||
380 | struct inet_sock *inet = inet_sk(sk); | ||
381 | struct ipcm_cookie ipc; | 383 | struct ipcm_cookie ipc; |
382 | struct rtable *rt = (struct rtable *)skb->dst; | 384 | struct rtable *rt = skb->rtable; |
385 | struct net *net = rt->u.dst.dev->nd_net; | ||
386 | struct sock *sk = icmp_sk(net); | ||
387 | struct inet_sock *inet = inet_sk(sk); | ||
383 | __be32 daddr; | 388 | __be32 daddr; |
384 | 389 | ||
385 | if (ip_options_echo(&icmp_param->replyopts, skb)) | 390 | if (ip_options_echo(&icmp_param->replyopts, skb)) |
386 | return; | 391 | return; |
387 | 392 | ||
388 | if (icmp_xmit_lock()) | 393 | if (icmp_xmit_lock(sk)) |
389 | return; | 394 | return; |
390 | 395 | ||
391 | icmp_param->data.icmph.checksum = 0; | 396 | icmp_param->data.icmph.checksum = 0; |
@@ -405,7 +410,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb) | |||
405 | .tos = RT_TOS(ip_hdr(skb)->tos) } }, | 410 | .tos = RT_TOS(ip_hdr(skb)->tos) } }, |
406 | .proto = IPPROTO_ICMP }; | 411 | .proto = IPPROTO_ICMP }; |
407 | security_skb_classify_flow(skb, &fl); | 412 | security_skb_classify_flow(skb, &fl); |
408 | if (ip_route_output_key(rt->u.dst.dev->nd_net, &rt, &fl)) | 413 | if (ip_route_output_key(net, &rt, &fl)) |
409 | goto out_unlock; | 414 | goto out_unlock; |
410 | } | 415 | } |
411 | if (icmpv4_xrlim_allow(rt, icmp_param->data.icmph.type, | 416 | if (icmpv4_xrlim_allow(rt, icmp_param->data.icmph.type, |
@@ -413,7 +418,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb) | |||
413 | icmp_push_reply(icmp_param, &ipc, rt); | 418 | icmp_push_reply(icmp_param, &ipc, rt); |
414 | ip_rt_put(rt); | 419 | ip_rt_put(rt); |
415 | out_unlock: | 420 | out_unlock: |
416 | icmp_xmit_unlock(); | 421 | icmp_xmit_unlock(sk); |
417 | } | 422 | } |
418 | 423 | ||
419 | 424 | ||
@@ -433,15 +438,17 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info) | |||
433 | struct iphdr *iph; | 438 | struct iphdr *iph; |
434 | int room; | 439 | int room; |
435 | struct icmp_bxm icmp_param; | 440 | struct icmp_bxm icmp_param; |
436 | struct rtable *rt = (struct rtable *)skb_in->dst; | 441 | struct rtable *rt = skb_in->rtable; |
437 | struct ipcm_cookie ipc; | 442 | struct ipcm_cookie ipc; |
438 | __be32 saddr; | 443 | __be32 saddr; |
439 | u8 tos; | 444 | u8 tos; |
440 | struct net *net; | 445 | struct net *net; |
446 | struct sock *sk; | ||
441 | 447 | ||
442 | if (!rt) | 448 | if (!rt) |
443 | goto out; | 449 | goto out; |
444 | net = rt->u.dst.dev->nd_net; | 450 | net = rt->u.dst.dev->nd_net; |
451 | sk = icmp_sk(net); | ||
445 | 452 | ||
446 | /* | 453 | /* |
447 | * Find the original header. It is expected to be valid, of course. | 454 | * Find the original header. It is expected to be valid, of course. |
@@ -505,7 +512,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info) | |||
505 | } | 512 | } |
506 | } | 513 | } |
507 | 514 | ||
508 | if (icmp_xmit_lock()) | 515 | if (icmp_xmit_lock(sk)) |
509 | return; | 516 | return; |
510 | 517 | ||
511 | /* | 518 | /* |
@@ -544,7 +551,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info) | |||
544 | icmp_param.data.icmph.checksum = 0; | 551 | icmp_param.data.icmph.checksum = 0; |
545 | icmp_param.skb = skb_in; | 552 | icmp_param.skb = skb_in; |
546 | icmp_param.offset = skb_network_offset(skb_in); | 553 | icmp_param.offset = skb_network_offset(skb_in); |
547 | inet_sk(icmp_socket->sk)->tos = tos; | 554 | inet_sk(sk)->tos = tos; |
548 | ipc.addr = iph->saddr; | 555 | ipc.addr = iph->saddr; |
549 | ipc.opt = &icmp_param.replyopts; | 556 | ipc.opt = &icmp_param.replyopts; |
550 | 557 | ||
@@ -609,7 +616,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info) | |||
609 | RT_TOS(tos), rt2->u.dst.dev); | 616 | RT_TOS(tos), rt2->u.dst.dev); |
610 | 617 | ||
611 | dst_release(&rt2->u.dst); | 618 | dst_release(&rt2->u.dst); |
612 | rt2 = (struct rtable *)skb_in->dst; | 619 | rt2 = skb_in->rtable; |
613 | skb_in->dst = odst; | 620 | skb_in->dst = odst; |
614 | } | 621 | } |
615 | 622 | ||
@@ -652,7 +659,7 @@ route_done: | |||
652 | ende: | 659 | ende: |
653 | ip_rt_put(rt); | 660 | ip_rt_put(rt); |
654 | out_unlock: | 661 | out_unlock: |
655 | icmp_xmit_unlock(); | 662 | icmp_xmit_unlock(sk); |
656 | out:; | 663 | out:; |
657 | } | 664 | } |
658 | 665 | ||
@@ -936,7 +943,7 @@ static void icmp_address(struct sk_buff *skb) | |||
936 | 943 | ||
937 | static void icmp_address_reply(struct sk_buff *skb) | 944 | static void icmp_address_reply(struct sk_buff *skb) |
938 | { | 945 | { |
939 | struct rtable *rt = (struct rtable *)skb->dst; | 946 | struct rtable *rt = skb->rtable; |
940 | struct net_device *dev = skb->dev; | 947 | struct net_device *dev = skb->dev; |
941 | struct in_device *in_dev; | 948 | struct in_device *in_dev; |
942 | struct in_ifaddr *ifa; | 949 | struct in_ifaddr *ifa; |
@@ -981,7 +988,7 @@ static void icmp_discard(struct sk_buff *skb) | |||
981 | int icmp_rcv(struct sk_buff *skb) | 988 | int icmp_rcv(struct sk_buff *skb) |
982 | { | 989 | { |
983 | struct icmphdr *icmph; | 990 | struct icmphdr *icmph; |
984 | struct rtable *rt = (struct rtable *)skb->dst; | 991 | struct rtable *rt = skb->rtable; |
985 | 992 | ||
986 | if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { | 993 | if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { |
987 | int nh; | 994 | int nh; |
@@ -1139,29 +1146,46 @@ static const struct icmp_control icmp_pointers[NR_ICMP_TYPES + 1] = { | |||
1139 | }, | 1146 | }, |
1140 | }; | 1147 | }; |
1141 | 1148 | ||
1142 | void __init icmp_init(struct net_proto_family *ops) | 1149 | static void __net_exit icmp_sk_exit(struct net *net) |
1143 | { | 1150 | { |
1144 | struct inet_sock *inet; | ||
1145 | int i; | 1151 | int i; |
1146 | 1152 | ||
1147 | for_each_possible_cpu(i) { | 1153 | for_each_possible_cpu(i) |
1148 | int err; | 1154 | sk_release_kernel(net->ipv4.icmp_sk[i]); |
1155 | kfree(net->ipv4.icmp_sk); | ||
1156 | net->ipv4.icmp_sk = NULL; | ||
1157 | } | ||
1149 | 1158 | ||
1150 | err = sock_create_kern(PF_INET, SOCK_RAW, IPPROTO_ICMP, | 1159 | int __net_init icmp_sk_init(struct net *net) |
1151 | &per_cpu(__icmp_socket, i)); | 1160 | { |
1161 | int i, err; | ||
1152 | 1162 | ||
1163 | net->ipv4.icmp_sk = | ||
1164 | kzalloc(nr_cpu_ids * sizeof(struct sock *), GFP_KERNEL); | ||
1165 | if (net->ipv4.icmp_sk == NULL) | ||
1166 | return -ENOMEM; | ||
1167 | |||
1168 | for_each_possible_cpu(i) { | ||
1169 | struct sock *sk; | ||
1170 | struct socket *sock; | ||
1171 | struct inet_sock *inet; | ||
1172 | |||
1173 | err = sock_create_kern(PF_INET, SOCK_RAW, IPPROTO_ICMP, &sock); | ||
1153 | if (err < 0) | 1174 | if (err < 0) |
1154 | panic("Failed to create the ICMP control socket.\n"); | 1175 | goto fail; |
1176 | |||
1177 | net->ipv4.icmp_sk[i] = sk = sock->sk; | ||
1178 | sk_change_net(sk, net); | ||
1155 | 1179 | ||
1156 | per_cpu(__icmp_socket, i)->sk->sk_allocation = GFP_ATOMIC; | 1180 | sk->sk_allocation = GFP_ATOMIC; |
1157 | 1181 | ||
1158 | /* Enough space for 2 64K ICMP packets, including | 1182 | /* Enough space for 2 64K ICMP packets, including |
1159 | * sk_buff struct overhead. | 1183 | * sk_buff struct overhead. |
1160 | */ | 1184 | */ |
1161 | per_cpu(__icmp_socket, i)->sk->sk_sndbuf = | 1185 | sk->sk_sndbuf = |
1162 | (2 * ((64 * 1024) + sizeof(struct sk_buff))); | 1186 | (2 * ((64 * 1024) + sizeof(struct sk_buff))); |
1163 | 1187 | ||
1164 | inet = inet_sk(per_cpu(__icmp_socket, i)->sk); | 1188 | inet = inet_sk(sk); |
1165 | inet->uc_ttl = -1; | 1189 | inet->uc_ttl = -1; |
1166 | inet->pmtudisc = IP_PMTUDISC_DONT; | 1190 | inet->pmtudisc = IP_PMTUDISC_DONT; |
1167 | 1191 | ||
@@ -1169,8 +1193,25 @@ void __init icmp_init(struct net_proto_family *ops) | |||
1169 | * see it, we do not wish this socket to see incoming | 1193 | * see it, we do not wish this socket to see incoming |
1170 | * packets. | 1194 | * packets. |
1171 | */ | 1195 | */ |
1172 | per_cpu(__icmp_socket, i)->sk->sk_prot->unhash(per_cpu(__icmp_socket, i)->sk); | 1196 | sk->sk_prot->unhash(sk); |
1173 | } | 1197 | } |
1198 | return 0; | ||
1199 | |||
1200 | fail: | ||
1201 | for_each_possible_cpu(i) | ||
1202 | sk_release_kernel(net->ipv4.icmp_sk[i]); | ||
1203 | kfree(net->ipv4.icmp_sk); | ||
1204 | return err; | ||
1205 | } | ||
1206 | |||
1207 | static struct pernet_operations __net_initdata icmp_sk_ops = { | ||
1208 | .init = icmp_sk_init, | ||
1209 | .exit = icmp_sk_exit, | ||
1210 | }; | ||
1211 | |||
1212 | int __init icmp_init(void) | ||
1213 | { | ||
1214 | return register_pernet_device(&icmp_sk_ops); | ||
1174 | } | 1215 | } |
1175 | 1216 | ||
1176 | EXPORT_SYMBOL(icmp_err_convert); | 1217 | EXPORT_SYMBOL(icmp_err_convert); |
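Note on the icmp.c conversion above: the boot-time per-CPU __icmp_socket array becomes per-namespace state (net->ipv4.icmp_sk[]) that is created and torn down through pernet_operations, and icmp_init() shrinks to a registration call whose return value af_inet.c now checks. The skeleton of that pattern, reduced to its registration plumbing; the foo_ names are placeholders and the hook bodies are elided:

	static int __net_init foo_net_init(struct net *net)
	{
		/* Allocate this namespace's private state and hang it off *net;
		 * return 0 on success or a negative errno, which aborts the
		 * creation of the namespace. */
		return 0;
	}

	static void __net_exit foo_net_exit(struct net *net)
	{
		/* Release whatever foo_net_init() allocated for this namespace. */
	}

	static struct pernet_operations foo_net_ops = {
		.init = foo_net_init,
		.exit = foo_net_exit,
	};

	static int __init foo_init(void)
	{
		/* Runs .init for the namespaces that already exist and arranges
		 * for it to run again whenever a new namespace is created. */
		return register_pernet_device(&foo_net_ops);
	}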
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 732cd07e6071..6a4ee8da6994 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c | |||
@@ -948,7 +948,7 @@ int igmp_rcv(struct sk_buff *skb) | |||
948 | case IGMPV2_HOST_MEMBERSHIP_REPORT: | 948 | case IGMPV2_HOST_MEMBERSHIP_REPORT: |
949 | case IGMPV3_HOST_MEMBERSHIP_REPORT: | 949 | case IGMPV3_HOST_MEMBERSHIP_REPORT: |
950 | /* Is it our report looped back? */ | 950 | /* Is it our report looped back? */ |
951 | if (((struct rtable*)skb->dst)->fl.iif == 0) | 951 | if (skb->rtable->fl.iif == 0) |
952 | break; | 952 | break; |
953 | /* don't rely on MC router hearing unicast reports */ | 953 | /* don't rely on MC router hearing unicast reports */ |
954 | if (skb->pkt_type == PACKET_MULTICAST || | 954 | if (skb->pkt_type == PACKET_MULTICAST || |
@@ -1198,6 +1198,9 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr) | |||
1198 | 1198 | ||
1199 | ASSERT_RTNL(); | 1199 | ASSERT_RTNL(); |
1200 | 1200 | ||
1201 | if (in_dev->dev->nd_net != &init_net) | ||
1202 | return; | ||
1203 | |||
1201 | for (im=in_dev->mc_list; im; im=im->next) { | 1204 | for (im=in_dev->mc_list; im; im=im->next) { |
1202 | if (im->multiaddr == addr) { | 1205 | if (im->multiaddr == addr) { |
1203 | im->users++; | 1206 | im->users++; |
@@ -1277,6 +1280,9 @@ void ip_mc_dec_group(struct in_device *in_dev, __be32 addr) | |||
1277 | 1280 | ||
1278 | ASSERT_RTNL(); | 1281 | ASSERT_RTNL(); |
1279 | 1282 | ||
1283 | if (in_dev->dev->nd_net != &init_net) | ||
1284 | return; | ||
1285 | |||
1280 | for (ip=&in_dev->mc_list; (i=*ip)!=NULL; ip=&i->next) { | 1286 | for (ip=&in_dev->mc_list; (i=*ip)!=NULL; ip=&i->next) { |
1281 | if (i->multiaddr==addr) { | 1287 | if (i->multiaddr==addr) { |
1282 | if (--i->users == 0) { | 1288 | if (--i->users == 0) { |
@@ -1304,6 +1310,9 @@ void ip_mc_down(struct in_device *in_dev) | |||
1304 | 1310 | ||
1305 | ASSERT_RTNL(); | 1311 | ASSERT_RTNL(); |
1306 | 1312 | ||
1313 | if (in_dev->dev->nd_net != &init_net) | ||
1314 | return; | ||
1315 | |||
1307 | for (i=in_dev->mc_list; i; i=i->next) | 1316 | for (i=in_dev->mc_list; i; i=i->next) |
1308 | igmp_group_dropped(i); | 1317 | igmp_group_dropped(i); |
1309 | 1318 | ||
@@ -1324,6 +1333,9 @@ void ip_mc_init_dev(struct in_device *in_dev) | |||
1324 | { | 1333 | { |
1325 | ASSERT_RTNL(); | 1334 | ASSERT_RTNL(); |
1326 | 1335 | ||
1336 | if (in_dev->dev->nd_net != &init_net) | ||
1337 | return; | ||
1338 | |||
1327 | in_dev->mc_tomb = NULL; | 1339 | in_dev->mc_tomb = NULL; |
1328 | #ifdef CONFIG_IP_MULTICAST | 1340 | #ifdef CONFIG_IP_MULTICAST |
1329 | in_dev->mr_gq_running = 0; | 1341 | in_dev->mr_gq_running = 0; |
@@ -1347,6 +1359,9 @@ void ip_mc_up(struct in_device *in_dev) | |||
1347 | 1359 | ||
1348 | ASSERT_RTNL(); | 1360 | ASSERT_RTNL(); |
1349 | 1361 | ||
1362 | if (in_dev->dev->nd_net != &init_net) | ||
1363 | return; | ||
1364 | |||
1350 | ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS); | 1365 | ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS); |
1351 | 1366 | ||
1352 | for (i=in_dev->mc_list; i; i=i->next) | 1367 | for (i=in_dev->mc_list; i; i=i->next) |
@@ -1363,6 +1378,9 @@ void ip_mc_destroy_dev(struct in_device *in_dev) | |||
1363 | 1378 | ||
1364 | ASSERT_RTNL(); | 1379 | ASSERT_RTNL(); |
1365 | 1380 | ||
1381 | if (in_dev->dev->nd_net != &init_net) | ||
1382 | return; | ||
1383 | |||
1366 | /* Deactivate timers */ | 1384 | /* Deactivate timers */ |
1367 | ip_mc_down(in_dev); | 1385 | ip_mc_down(in_dev); |
1368 | 1386 | ||
@@ -1744,6 +1762,9 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr) | |||
1744 | if (!ipv4_is_multicast(addr)) | 1762 | if (!ipv4_is_multicast(addr)) |
1745 | return -EINVAL; | 1763 | return -EINVAL; |
1746 | 1764 | ||
1765 | if (sk->sk_net != &init_net) | ||
1766 | return -EPROTONOSUPPORT; | ||
1767 | |||
1747 | rtnl_lock(); | 1768 | rtnl_lock(); |
1748 | 1769 | ||
1749 | in_dev = ip_mc_find_dev(imr); | 1770 | in_dev = ip_mc_find_dev(imr); |
@@ -1812,6 +1833,9 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr) | |||
1812 | u32 ifindex; | 1833 | u32 ifindex; |
1813 | int ret = -EADDRNOTAVAIL; | 1834 | int ret = -EADDRNOTAVAIL; |
1814 | 1835 | ||
1836 | if (sk->sk_net != &init_net) | ||
1837 | return -EPROTONOSUPPORT; | ||
1838 | |||
1815 | rtnl_lock(); | 1839 | rtnl_lock(); |
1816 | in_dev = ip_mc_find_dev(imr); | 1840 | in_dev = ip_mc_find_dev(imr); |
1817 | ifindex = imr->imr_ifindex; | 1841 | ifindex = imr->imr_ifindex; |
@@ -1857,6 +1881,9 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct | |||
1857 | if (!ipv4_is_multicast(addr)) | 1881 | if (!ipv4_is_multicast(addr)) |
1858 | return -EINVAL; | 1882 | return -EINVAL; |
1859 | 1883 | ||
1884 | if (sk->sk_net != &init_net) | ||
1885 | return -EPROTONOSUPPORT; | ||
1886 | |||
1860 | rtnl_lock(); | 1887 | rtnl_lock(); |
1861 | 1888 | ||
1862 | imr.imr_multiaddr.s_addr = mreqs->imr_multiaddr; | 1889 | imr.imr_multiaddr.s_addr = mreqs->imr_multiaddr; |
@@ -1990,6 +2017,9 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex) | |||
1990 | msf->imsf_fmode != MCAST_EXCLUDE) | 2017 | msf->imsf_fmode != MCAST_EXCLUDE) |
1991 | return -EINVAL; | 2018 | return -EINVAL; |
1992 | 2019 | ||
2020 | if (sk->sk_net != &init_net) | ||
2021 | return -EPROTONOSUPPORT; | ||
2022 | |||
1993 | rtnl_lock(); | 2023 | rtnl_lock(); |
1994 | 2024 | ||
1995 | imr.imr_multiaddr.s_addr = msf->imsf_multiaddr; | 2025 | imr.imr_multiaddr.s_addr = msf->imsf_multiaddr; |
@@ -2070,6 +2100,9 @@ int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf, | |||
2070 | if (!ipv4_is_multicast(addr)) | 2100 | if (!ipv4_is_multicast(addr)) |
2071 | return -EINVAL; | 2101 | return -EINVAL; |
2072 | 2102 | ||
2103 | if (sk->sk_net != &init_net) | ||
2104 | return -EPROTONOSUPPORT; | ||
2105 | |||
2073 | rtnl_lock(); | 2106 | rtnl_lock(); |
2074 | 2107 | ||
2075 | imr.imr_multiaddr.s_addr = msf->imsf_multiaddr; | 2108 | imr.imr_multiaddr.s_addr = msf->imsf_multiaddr; |
@@ -2132,6 +2165,9 @@ int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf, | |||
2132 | if (!ipv4_is_multicast(addr)) | 2165 | if (!ipv4_is_multicast(addr)) |
2133 | return -EINVAL; | 2166 | return -EINVAL; |
2134 | 2167 | ||
2168 | if (sk->sk_net != &init_net) | ||
2169 | return -EPROTONOSUPPORT; | ||
2170 | |||
2135 | rtnl_lock(); | 2171 | rtnl_lock(); |
2136 | 2172 | ||
2137 | err = -EADDRNOTAVAIL; | 2173 | err = -EADDRNOTAVAIL; |
@@ -2216,6 +2252,9 @@ void ip_mc_drop_socket(struct sock *sk) | |||
2216 | if (inet->mc_list == NULL) | 2252 | if (inet->mc_list == NULL) |
2217 | return; | 2253 | return; |
2218 | 2254 | ||
2255 | if (sk->sk_net != &init_net) | ||
2256 | return; | ||
2257 | |||
2219 | rtnl_lock(); | 2258 | rtnl_lock(); |
2220 | while ((iml = inet->mc_list) != NULL) { | 2259 | while ((iml = inet->mc_list) != NULL) { |
2221 | struct in_device *in_dev; | 2260 | struct in_device *in_dev; |
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index b189278c7bc1..c0e0fa03fce1 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c | |||
@@ -463,7 +463,7 @@ void inet_csk_reqsk_queue_prune(struct sock *parent, | |||
463 | if (time_after_eq(now, req->expires)) { | 463 | if (time_after_eq(now, req->expires)) { |
464 | if ((req->retrans < thresh || | 464 | if ((req->retrans < thresh || |
465 | (inet_rsk(req)->acked && req->retrans < max_retries)) | 465 | (inet_rsk(req)->acked && req->retrans < max_retries)) |
466 | && !req->rsk_ops->rtx_syn_ack(parent, req, NULL)) { | 466 | && !req->rsk_ops->rtx_syn_ack(parent, req)) { |
467 | unsigned long timeo; | 467 | unsigned long timeo; |
468 | 468 | ||
469 | if (req->retrans++ == 0) | 469 | if (req->retrans++ == 0) |
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c index 0b3b328d82db..9d6d3befd854 100644 --- a/net/ipv4/ip_forward.c +++ b/net/ipv4/ip_forward.c | |||
@@ -80,7 +80,7 @@ int ip_forward(struct sk_buff *skb) | |||
80 | if (!xfrm4_route_forward(skb)) | 80 | if (!xfrm4_route_forward(skb)) |
81 | goto drop; | 81 | goto drop; |
82 | 82 | ||
83 | rt = (struct rtable*)skb->dst; | 83 | rt = skb->rtable; |
84 | 84 | ||
85 | if (opt->is_strictroute && rt->rt_dst != rt->rt_gateway) | 85 | if (opt->is_strictroute && rt->rt_dst != rt->rt_gateway) |
86 | goto sr_failed; | 86 | goto sr_failed; |
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index e7821ba7a9a0..f9ee84420cb3 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c | |||
@@ -619,7 +619,7 @@ static int ipgre_rcv(struct sk_buff *skb) | |||
619 | #ifdef CONFIG_NET_IPGRE_BROADCAST | 619 | #ifdef CONFIG_NET_IPGRE_BROADCAST |
620 | if (ipv4_is_multicast(iph->daddr)) { | 620 | if (ipv4_is_multicast(iph->daddr)) { |
621 | /* Looped back packet, drop it! */ | 621 | /* Looped back packet, drop it! */ |
622 | if (((struct rtable*)skb->dst)->fl.iif == 0) | 622 | if (skb->rtable->fl.iif == 0) |
623 | goto drop; | 623 | goto drop; |
624 | tunnel->stat.multicast++; | 624 | tunnel->stat.multicast++; |
625 | skb->pkt_type = PACKET_BROADCAST; | 625 | skb->pkt_type = PACKET_BROADCAST; |
@@ -699,7 +699,7 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
699 | } | 699 | } |
700 | 700 | ||
701 | if (skb->protocol == htons(ETH_P_IP)) { | 701 | if (skb->protocol == htons(ETH_P_IP)) { |
702 | rt = (struct rtable*)skb->dst; | 702 | rt = skb->rtable; |
703 | if ((dst = rt->rt_gateway) == 0) | 703 | if ((dst = rt->rt_gateway) == 0) |
704 | goto tx_error_icmp; | 704 | goto tx_error_icmp; |
705 | } | 705 | } |
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index 65631391d479..d36e310b314d 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c | |||
@@ -351,7 +351,7 @@ static int ip_rcv_finish(struct sk_buff *skb) | |||
351 | if (iph->ihl > 5 && ip_rcv_options(skb)) | 351 | if (iph->ihl > 5 && ip_rcv_options(skb)) |
352 | goto drop; | 352 | goto drop; |
353 | 353 | ||
354 | rt = (struct rtable*)skb->dst; | 354 | rt = skb->rtable; |
355 | if (rt->rt_type == RTN_MULTICAST) | 355 | if (rt->rt_type == RTN_MULTICAST) |
356 | IP_INC_STATS_BH(IPSTATS_MIB_INMCASTPKTS); | 356 | IP_INC_STATS_BH(IPSTATS_MIB_INMCASTPKTS); |
357 | else if (rt->rt_type == RTN_BROADCAST) | 357 | else if (rt->rt_type == RTN_BROADCAST) |
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c index 4d315158fd3c..df93a9c2efda 100644 --- a/net/ipv4/ip_options.c +++ b/net/ipv4/ip_options.c | |||
@@ -107,10 +107,7 @@ int ip_options_echo(struct ip_options * dopt, struct sk_buff * skb) | |||
107 | sptr = skb_network_header(skb); | 107 | sptr = skb_network_header(skb); |
108 | dptr = dopt->__data; | 108 | dptr = dopt->__data; |
109 | 109 | ||
110 | if (skb->dst) | 110 | daddr = skb->rtable->rt_spec_dst; |
111 | daddr = ((struct rtable*)skb->dst)->rt_spec_dst; | ||
112 | else | ||
113 | daddr = ip_hdr(skb)->daddr; | ||
114 | 111 | ||
115 | if (sopt->rr) { | 112 | if (sopt->rr) { |
116 | optlen = sptr[sopt->rr+1]; | 113 | optlen = sptr[sopt->rr+1]; |
@@ -261,7 +258,7 @@ int ip_options_compile(struct ip_options * opt, struct sk_buff * skb) | |||
261 | unsigned char * optptr; | 258 | unsigned char * optptr; |
262 | int optlen; | 259 | int optlen; |
263 | unsigned char * pp_ptr = NULL; | 260 | unsigned char * pp_ptr = NULL; |
264 | struct rtable *rt = skb ? (struct rtable*)skb->dst : NULL; | 261 | struct rtable *rt = skb ? skb->rtable : NULL; |
265 | 262 | ||
266 | if (!opt) { | 263 | if (!opt) { |
267 | opt = &(IPCB(skb)->opt); | 264 | opt = &(IPCB(skb)->opt); |
@@ -561,7 +558,7 @@ void ip_forward_options(struct sk_buff *skb) | |||
561 | { | 558 | { |
562 | struct ip_options * opt = &(IPCB(skb)->opt); | 559 | struct ip_options * opt = &(IPCB(skb)->opt); |
563 | unsigned char * optptr; | 560 | unsigned char * optptr; |
564 | struct rtable *rt = (struct rtable*)skb->dst; | 561 | struct rtable *rt = skb->rtable; |
565 | unsigned char *raw = skb_network_header(skb); | 562 | unsigned char *raw = skb_network_header(skb); |
566 | 563 | ||
567 | if (opt->rr_needaddr) { | 564 | if (opt->rr_needaddr) { |
@@ -609,7 +606,7 @@ int ip_options_rcv_srr(struct sk_buff *skb) | |||
609 | __be32 nexthop; | 606 | __be32 nexthop; |
610 | struct iphdr *iph = ip_hdr(skb); | 607 | struct iphdr *iph = ip_hdr(skb); |
611 | unsigned char *optptr = skb_network_header(skb) + opt->srr; | 608 | unsigned char *optptr = skb_network_header(skb) + opt->srr; |
612 | struct rtable *rt = (struct rtable*)skb->dst; | 609 | struct rtable *rt = skb->rtable; |
613 | struct rtable *rt2; | 610 | struct rtable *rt2; |
614 | int err; | 611 | int err; |
615 | 612 | ||
@@ -634,13 +631,13 @@ int ip_options_rcv_srr(struct sk_buff *skb) | |||
634 | } | 631 | } |
635 | memcpy(&nexthop, &optptr[srrptr-1], 4); | 632 | memcpy(&nexthop, &optptr[srrptr-1], 4); |
636 | 633 | ||
637 | rt = (struct rtable*)skb->dst; | 634 | rt = skb->rtable; |
638 | skb->dst = NULL; | 635 | skb->rtable = NULL; |
639 | err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, skb->dev); | 636 | err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, skb->dev); |
640 | rt2 = (struct rtable*)skb->dst; | 637 | rt2 = skb->rtable; |
641 | if (err || (rt2->rt_type != RTN_UNICAST && rt2->rt_type != RTN_LOCAL)) { | 638 | if (err || (rt2->rt_type != RTN_UNICAST && rt2->rt_type != RTN_LOCAL)) { |
642 | ip_rt_put(rt2); | 639 | ip_rt_put(rt2); |
643 | skb->dst = &rt->u.dst; | 640 | skb->rtable = rt; |
644 | return -EINVAL; | 641 | return -EINVAL; |
645 | } | 642 | } |
646 | ip_rt_put(rt); | 643 | ip_rt_put(rt); |
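Note on the skb->rtable conversions (ip_options.c above and many other files in this patch): the (struct rtable *)skb->dst casts are replaced by a direct skb->rtable access. The sk_buff change that introduces the field is not part of this section; the sketch below shows its assumed shape, an IPv4-typed alias of the existing dst pointer, which is what lets the NULL/restore sequence in ip_options_rcv_srr() keep operating on the same underlying pointer:

	/* Assumption: skb->rtable aliases skb->dst in an anonymous union rather
	 * than being a second, independently managed pointer. Structural sketch
	 * only, not the actual sk_buff definition. */
	struct sk_buff {
		/* ... existing members ... */
		union {
			struct dst_entry *dst;
			struct rtable    *rtable;	/* IPv4 view of the same pointer */
		};
		/* ... */
	};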
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 341779e685d9..dc494ea594a7 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c | |||
@@ -142,7 +142,7 @@ int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk, | |||
142 | __be32 saddr, __be32 daddr, struct ip_options *opt) | 142 | __be32 saddr, __be32 daddr, struct ip_options *opt) |
143 | { | 143 | { |
144 | struct inet_sock *inet = inet_sk(sk); | 144 | struct inet_sock *inet = inet_sk(sk); |
145 | struct rtable *rt = (struct rtable *)skb->dst; | 145 | struct rtable *rt = skb->rtable; |
146 | struct iphdr *iph; | 146 | struct iphdr *iph; |
147 | 147 | ||
148 | /* Build the IP header. */ | 148 | /* Build the IP header. */ |
@@ -240,7 +240,7 @@ static int ip_finish_output(struct sk_buff *skb) | |||
240 | int ip_mc_output(struct sk_buff *skb) | 240 | int ip_mc_output(struct sk_buff *skb) |
241 | { | 241 | { |
242 | struct sock *sk = skb->sk; | 242 | struct sock *sk = skb->sk; |
243 | struct rtable *rt = (struct rtable*)skb->dst; | 243 | struct rtable *rt = skb->rtable; |
244 | struct net_device *dev = rt->u.dst.dev; | 244 | struct net_device *dev = rt->u.dst.dev; |
245 | 245 | ||
246 | /* | 246 | /* |
@@ -321,7 +321,7 @@ int ip_queue_xmit(struct sk_buff *skb, int ipfragok) | |||
321 | /* Skip all of this if the packet is already routed, | 321 | /* Skip all of this if the packet is already routed, |
322 | * f.e. by something like SCTP. | 322 | * f.e. by something like SCTP. |
323 | */ | 323 | */ |
324 | rt = (struct rtable *) skb->dst; | 324 | rt = skb->rtable; |
325 | if (rt != NULL) | 325 | if (rt != NULL) |
326 | goto packet_routed; | 326 | goto packet_routed; |
327 | 327 | ||
@@ -441,7 +441,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*)) | |||
441 | unsigned int mtu, hlen, left, len, ll_rs, pad; | 441 | unsigned int mtu, hlen, left, len, ll_rs, pad; |
442 | int offset; | 442 | int offset; |
443 | __be16 not_last_frag; | 443 | __be16 not_last_frag; |
444 | struct rtable *rt = (struct rtable*)skb->dst; | 444 | struct rtable *rt = skb->rtable; |
445 | int err = 0; | 445 | int err = 0; |
446 | 446 | ||
447 | dev = rt->u.dst.dev; | 447 | dev = rt->u.dst.dev; |
@@ -1357,7 +1357,7 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *ar | |||
1357 | } replyopts; | 1357 | } replyopts; |
1358 | struct ipcm_cookie ipc; | 1358 | struct ipcm_cookie ipc; |
1359 | __be32 daddr; | 1359 | __be32 daddr; |
1360 | struct rtable *rt = (struct rtable*)skb->dst; | 1360 | struct rtable *rt = skb->rtable; |
1361 | 1361 | ||
1362 | if (ip_options_echo(&replyopts.opt, skb)) | 1362 | if (ip_options_echo(&replyopts.opt, skb)) |
1363 | return; | 1363 | return; |
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index f72457b4b0a7..bb3cbe5ec36d 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c | |||
@@ -57,7 +57,7 @@ | |||
57 | static void ip_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb) | 57 | static void ip_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb) |
58 | { | 58 | { |
59 | struct in_pktinfo info; | 59 | struct in_pktinfo info; |
60 | struct rtable *rt = (struct rtable *)skb->dst; | 60 | struct rtable *rt = skb->rtable; |
61 | 61 | ||
62 | info.ipi_addr.s_addr = ip_hdr(skb)->daddr; | 62 | info.ipi_addr.s_addr = ip_hdr(skb)->daddr; |
63 | if (rt) { | 63 | if (rt) { |
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c index 7c992fbbc2c3..96138b128de8 100644 --- a/net/ipv4/ipconfig.c +++ b/net/ipv4/ipconfig.c | |||
@@ -292,7 +292,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg) | |||
292 | 292 | ||
293 | mm_segment_t oldfs = get_fs(); | 293 | mm_segment_t oldfs = get_fs(); |
294 | set_fs(get_ds()); | 294 | set_fs(get_ds()); |
295 | res = devinet_ioctl(cmd, (struct ifreq __user *) arg); | 295 | res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg); |
296 | set_fs(oldfs); | 296 | set_fs(oldfs); |
297 | return res; | 297 | return res; |
298 | } | 298 | } |
@@ -460,10 +460,7 @@ ic_rarp_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt | |||
460 | if (rarp->ar_pro != htons(ETH_P_IP)) | 460 | if (rarp->ar_pro != htons(ETH_P_IP)) |
461 | goto drop; | 461 | goto drop; |
462 | 462 | ||
463 | if (!pskb_may_pull(skb, | 463 | if (!pskb_may_pull(skb, arp_hdr_len(dev))) |
464 | sizeof(struct arphdr) + | ||
465 | (2 * dev->addr_len) + | ||
466 | (2 * 4))) | ||
467 | goto drop; | 464 | goto drop; |
468 | 465 | ||
469 | /* OK, it is all there and looks valid, process... */ | 466 | /* OK, it is all there and looks valid, process... */ |
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index dbaed69de06a..894bce96284a 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c | |||
@@ -528,7 +528,7 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
528 | 528 | ||
529 | if (!dst) { | 529 | if (!dst) { |
530 | /* NBMA tunnel */ | 530 | /* NBMA tunnel */ |
531 | if ((rt = (struct rtable*)skb->dst) == NULL) { | 531 | if ((rt = skb->rtable) == NULL) { |
532 | tunnel->stat.tx_fifo_errors++; | 532 | tunnel->stat.tx_fifo_errors++; |
533 | goto tx_error; | 533 | goto tx_error; |
534 | } | 534 | } |
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index a94f52c207a7..7d63d74ef62a 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c | |||
@@ -1283,7 +1283,7 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local | |||
1283 | if (vif_table[vif].dev != skb->dev) { | 1283 | if (vif_table[vif].dev != skb->dev) { |
1284 | int true_vifi; | 1284 | int true_vifi; |
1285 | 1285 | ||
1286 | if (((struct rtable*)skb->dst)->fl.iif == 0) { | 1286 | if (skb->rtable->fl.iif == 0) { |
1287 | /* It is our own packet, looped back. | 1287 | /* It is our own packet, looped back. |
1288 | Very complicated situation... | 1288 | Very complicated situation... |
1289 | 1289 | ||
@@ -1357,7 +1357,7 @@ dont_forward: | |||
1357 | int ip_mr_input(struct sk_buff *skb) | 1357 | int ip_mr_input(struct sk_buff *skb) |
1358 | { | 1358 | { |
1359 | struct mfc_cache *cache; | 1359 | struct mfc_cache *cache; |
1360 | int local = ((struct rtable*)skb->dst)->rt_flags&RTCF_LOCAL; | 1360 | int local = skb->rtable->rt_flags&RTCF_LOCAL; |
1361 | 1361 | ||
1362 | /* Packet is looped back after forward, it should not be | 1362 | /* Packet is looped back after forward, it should not be |
1363 | forwarded second time, but still can be delivered locally. | 1363 | forwarded second time, but still can be delivered locally. |
@@ -1594,7 +1594,7 @@ int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait) | |||
1594 | { | 1594 | { |
1595 | int err; | 1595 | int err; |
1596 | struct mfc_cache *cache; | 1596 | struct mfc_cache *cache; |
1597 | struct rtable *rt = (struct rtable*)skb->dst; | 1597 | struct rtable *rt = skb->rtable; |
1598 | 1598 | ||
1599 | read_lock(&mrt_lock); | 1599 | read_lock(&mrt_lock); |
1600 | cache = ipmr_cache_find(rt->rt_src, rt->rt_dst); | 1600 | cache = ipmr_cache_find(rt->rt_src, rt->rt_dst); |
diff --git a/net/ipv4/ipvs/ip_vs_proto_tcp.c b/net/ipv4/ipvs/ip_vs_proto_tcp.c index 12dc0d640b6d..620e40ff79a9 100644 --- a/net/ipv4/ipvs/ip_vs_proto_tcp.c +++ b/net/ipv4/ipvs/ip_vs_proto_tcp.c | |||
@@ -550,7 +550,7 @@ tcp_app_conn_bind(struct ip_vs_conn *cp) | |||
550 | 550 | ||
551 | IP_VS_DBG(9, "%s: Binding conn %u.%u.%u.%u:%u->" | 551 | IP_VS_DBG(9, "%s: Binding conn %u.%u.%u.%u:%u->" |
552 | "%u.%u.%u.%u:%u to app %s on port %u\n", | 552 | "%u.%u.%u.%u:%u to app %s on port %u\n", |
553 | __FUNCTION__, | 553 | __func__, |
554 | NIPQUAD(cp->caddr), ntohs(cp->cport), | 554 | NIPQUAD(cp->caddr), ntohs(cp->cport), |
555 | NIPQUAD(cp->vaddr), ntohs(cp->vport), | 555 | NIPQUAD(cp->vaddr), ntohs(cp->vport), |
556 | inc->name, ntohs(inc->port)); | 556 | inc->name, ntohs(inc->port)); |
diff --git a/net/ipv4/ipvs/ip_vs_proto_udp.c b/net/ipv4/ipvs/ip_vs_proto_udp.c index 1fa7b330b9ac..1caa2908373f 100644 --- a/net/ipv4/ipvs/ip_vs_proto_udp.c +++ b/net/ipv4/ipvs/ip_vs_proto_udp.c | |||
@@ -344,7 +344,7 @@ static int udp_app_conn_bind(struct ip_vs_conn *cp) | |||
344 | 344 | ||
345 | IP_VS_DBG(9, "%s: Binding conn %u.%u.%u.%u:%u->" | 345 | IP_VS_DBG(9, "%s: Binding conn %u.%u.%u.%u:%u->" |
346 | "%u.%u.%u.%u:%u to app %s on port %u\n", | 346 | "%u.%u.%u.%u:%u to app %s on port %u\n", |
347 | __FUNCTION__, | 347 | __func__, |
348 | NIPQUAD(cp->caddr), ntohs(cp->cport), | 348 | NIPQUAD(cp->caddr), ntohs(cp->cport), |
349 | NIPQUAD(cp->vaddr), ntohs(cp->vport), | 349 | NIPQUAD(cp->vaddr), ntohs(cp->vport), |
350 | inc->name, ntohs(inc->port)); | 350 | inc->name, ntohs(inc->port)); |
diff --git a/net/ipv4/ipvs/ip_vs_sync.c b/net/ipv4/ipvs/ip_vs_sync.c index 948378d0a755..69c56663cc9a 100644 --- a/net/ipv4/ipvs/ip_vs_sync.c +++ b/net/ipv4/ipvs/ip_vs_sync.c | |||
@@ -916,7 +916,7 @@ int start_sync_thread(int state, char *mcast_ifn, __u8 syncid) | |||
916 | if (!tinfo) | 916 | if (!tinfo) |
917 | return -ENOMEM; | 917 | return -ENOMEM; |
918 | 918 | ||
919 | IP_VS_DBG(7, "%s: pid %d\n", __FUNCTION__, task_pid_nr(current)); | 919 | IP_VS_DBG(7, "%s: pid %d\n", __func__, task_pid_nr(current)); |
920 | IP_VS_DBG(7, "Each ip_vs_sync_conn entry need %Zd bytes\n", | 920 | IP_VS_DBG(7, "Each ip_vs_sync_conn entry need %Zd bytes\n", |
921 | sizeof(struct ip_vs_sync_conn)); | 921 | sizeof(struct ip_vs_sync_conn)); |
922 | 922 | ||
@@ -956,7 +956,7 @@ int stop_sync_thread(int state) | |||
956 | (state == IP_VS_STATE_BACKUP && !sync_backup_pid)) | 956 | (state == IP_VS_STATE_BACKUP && !sync_backup_pid)) |
957 | return -ESRCH; | 957 | return -ESRCH; |
958 | 958 | ||
959 | IP_VS_DBG(7, "%s: pid %d\n", __FUNCTION__, task_pid_nr(current)); | 959 | IP_VS_DBG(7, "%s: pid %d\n", __func__, task_pid_nr(current)); |
960 | IP_VS_INFO("stopping sync thread %d ...\n", | 960 | IP_VS_INFO("stopping sync thread %d ...\n", |
961 | (state == IP_VS_STATE_MASTER) ? | 961 | (state == IP_VS_STATE_MASTER) ? |
962 | sync_master_pid : sync_backup_pid); | 962 | sync_master_pid : sync_backup_pid); |
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index a7591ce344d2..756bc0e1a7c6 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c | |||
@@ -52,7 +52,7 @@ MODULE_DESCRIPTION("arptables core"); | |||
52 | do { \ | 52 | do { \ |
53 | if (!(x)) \ | 53 | if (!(x)) \ |
54 | printk("ARP_NF_ASSERT: %s:%s:%u\n", \ | 54 | printk("ARP_NF_ASSERT: %s:%s:%u\n", \ |
55 | __FUNCTION__, __FILE__, __LINE__); \ | 55 | __func__, __FILE__, __LINE__); \ |
56 | } while(0) | 56 | } while(0) |
57 | #else | 57 | #else |
58 | #define ARP_NF_ASSERT(x) | 58 | #define ARP_NF_ASSERT(x) |
@@ -233,10 +233,7 @@ unsigned int arpt_do_table(struct sk_buff *skb, | |||
233 | void *table_base; | 233 | void *table_base; |
234 | struct xt_table_info *private; | 234 | struct xt_table_info *private; |
235 | 235 | ||
236 | /* ARP header, plus 2 device addresses, plus 2 IP addresses. */ | 236 | if (!pskb_may_pull(skb, arp_hdr_len(skb->dev))) |
237 | if (!pskb_may_pull(skb, (sizeof(struct arphdr) + | ||
238 | (2 * skb->dev->addr_len) + | ||
239 | (2 * sizeof(u32))))) | ||
240 | return NF_DROP; | 237 | return NF_DROP; |
241 | 238 | ||
242 | indev = in ? in->name : nulldevname; | 239 | indev = in ? in->name : nulldevname; |
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 600737f122d2..85a75e186b4b 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c | |||
@@ -53,7 +53,7 @@ MODULE_DESCRIPTION("IPv4 packet filter"); | |||
53 | do { \ | 53 | do { \ |
54 | if (!(x)) \ | 54 | if (!(x)) \ |
55 | printk("IP_NF_ASSERT: %s:%s:%u\n", \ | 55 | printk("IP_NF_ASSERT: %s:%s:%u\n", \ |
56 | __FUNCTION__, __FILE__, __LINE__); \ | 56 | __func__, __FILE__, __LINE__); \ |
57 | } while(0) | 57 | } while(0) |
58 | #else | 58 | #else |
59 | #define IP_NF_ASSERT(x) | 59 | #define IP_NF_ASSERT(x) |
diff --git a/net/ipv4/netfilter/ipt_MASQUERADE.c b/net/ipv4/netfilter/ipt_MASQUERADE.c index d80fee8327e4..c6817b18366a 100644 --- a/net/ipv4/netfilter/ipt_MASQUERADE.c +++ b/net/ipv4/netfilter/ipt_MASQUERADE.c | |||
@@ -77,7 +77,7 @@ masquerade_tg(struct sk_buff *skb, const struct net_device *in, | |||
77 | return NF_ACCEPT; | 77 | return NF_ACCEPT; |
78 | 78 | ||
79 | mr = targinfo; | 79 | mr = targinfo; |
80 | rt = (struct rtable *)skb->dst; | 80 | rt = skb->rtable; |
81 | newsrc = inet_select_addr(out, rt->rt_gateway, RT_SCOPE_UNIVERSE); | 81 | newsrc = inet_select_addr(out, rt->rt_gateway, RT_SCOPE_UNIVERSE); |
82 | if (!newsrc) { | 82 | if (!newsrc) { |
83 | printk("MASQUERADE: %s ate my IP address\n", out->name); | 83 | printk("MASQUERADE: %s ate my IP address\n", out->name); |
@@ -139,18 +139,8 @@ static int masq_inet_event(struct notifier_block *this, | |||
139 | unsigned long event, | 139 | unsigned long event, |
140 | void *ptr) | 140 | void *ptr) |
141 | { | 141 | { |
142 | const struct net_device *dev = ((struct in_ifaddr *)ptr)->ifa_dev->dev; | 142 | struct net_device *dev = ((struct in_ifaddr *)ptr)->ifa_dev->dev; |
143 | 143 | return masq_device_event(this, event, dev); | |
144 | if (event == NETDEV_DOWN) { | ||
145 | /* IP address was deleted. Search entire table for | ||
146 | conntracks which were associated with that device, | ||
147 | and forget them. */ | ||
148 | NF_CT_ASSERT(dev->ifindex != 0); | ||
149 | |||
150 | nf_ct_iterate_cleanup(device_cmp, (void *)(long)dev->ifindex); | ||
151 | } | ||
152 | |||
153 | return NOTIFY_DONE; | ||
154 | } | 144 | } |
155 | 145 | ||
156 | static struct notifier_block masq_dev_notifier = { | 146 | static struct notifier_block masq_dev_notifier = { |
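Note on the MASQUERADE hunk above: the inet-address notifier no longer duplicates the cleanup logic; masq_inet_event() now digs the device out of the in_ifaddr and forwards to masq_device_event(), so the flush of conntrack entries bound to a downed ifindex lives in one handler. A sketch of that single remaining handler, reconstructed from the lines deleted above; the actual masq_device_event() body is outside this hunk and is assumed to match:

	static int masq_device_event(struct notifier_block *this,
				     unsigned long event, void *ptr)
	{
		const struct net_device *dev = ptr;

		if (event == NETDEV_DOWN) {
			/* Device (or its last address) went away: forget every
			 * conntrack entry masqueraded via this ifindex. */
			NF_CT_ASSERT(dev->ifindex != 0);
			nf_ct_iterate_cleanup(device_cmp, (void *)(long)dev->ifindex);
		}

		return NOTIFY_DONE;
	}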
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c index 089252e82c01..9668c3a23efe 100644 --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c | |||
@@ -379,7 +379,7 @@ static const struct file_operations ct_cpu_seq_fops = { | |||
379 | .open = ct_cpu_seq_open, | 379 | .open = ct_cpu_seq_open, |
380 | .read = seq_read, | 380 | .read = seq_read, |
381 | .llseek = seq_lseek, | 381 | .llseek = seq_lseek, |
382 | .release = seq_release_private, | 382 | .release = seq_release, |
383 | }; | 383 | }; |
384 | 384 | ||
385 | int __init nf_conntrack_ipv4_compat_init(void) | 385 | int __init nf_conntrack_ipv4_compat_init(void) |
diff --git a/net/ipv4/netfilter/nf_nat_helper.c b/net/ipv4/netfilter/nf_nat_helper.c index ca57f47bbd25..2fca727aa8ba 100644 --- a/net/ipv4/netfilter/nf_nat_helper.c +++ b/net/ipv4/netfilter/nf_nat_helper.c | |||
@@ -139,7 +139,7 @@ nf_nat_mangle_tcp_packet(struct sk_buff *skb, | |||
139 | const char *rep_buffer, | 139 | const char *rep_buffer, |
140 | unsigned int rep_len) | 140 | unsigned int rep_len) |
141 | { | 141 | { |
142 | struct rtable *rt = (struct rtable *)skb->dst; | 142 | struct rtable *rt = skb->rtable; |
143 | struct iphdr *iph; | 143 | struct iphdr *iph; |
144 | struct tcphdr *tcph; | 144 | struct tcphdr *tcph; |
145 | int oldlen, datalen; | 145 | int oldlen, datalen; |
@@ -217,7 +217,7 @@ nf_nat_mangle_udp_packet(struct sk_buff *skb, | |||
217 | const char *rep_buffer, | 217 | const char *rep_buffer, |
218 | unsigned int rep_len) | 218 | unsigned int rep_len) |
219 | { | 219 | { |
220 | struct rtable *rt = (struct rtable *)skb->dst; | 220 | struct rtable *rt = skb->rtable; |
221 | struct iphdr *iph; | 221 | struct iphdr *iph; |
222 | struct udphdr *udph; | 222 | struct udphdr *udph; |
223 | int datalen, oldlen; | 223 | int datalen, oldlen; |
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 7b5e8e1d94be..1051326c36b2 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -273,6 +273,7 @@ static unsigned int rt_hash_code(u32 daddr, u32 saddr) | |||
273 | 273 | ||
274 | #ifdef CONFIG_PROC_FS | 274 | #ifdef CONFIG_PROC_FS |
275 | struct rt_cache_iter_state { | 275 | struct rt_cache_iter_state { |
276 | struct seq_net_private p; | ||
276 | int bucket; | 277 | int bucket; |
277 | int genid; | 278 | int genid; |
278 | }; | 279 | }; |
@@ -285,7 +286,8 @@ static struct rtable *rt_cache_get_first(struct rt_cache_iter_state *st) | |||
285 | rcu_read_lock_bh(); | 286 | rcu_read_lock_bh(); |
286 | r = rcu_dereference(rt_hash_table[st->bucket].chain); | 287 | r = rcu_dereference(rt_hash_table[st->bucket].chain); |
287 | while (r) { | 288 | while (r) { |
288 | if (r->rt_genid == st->genid) | 289 | if (r->u.dst.dev->nd_net == st->p.net && |
290 | r->rt_genid == st->genid) | ||
289 | return r; | 291 | return r; |
290 | r = rcu_dereference(r->u.dst.rt_next); | 292 | r = rcu_dereference(r->u.dst.rt_next); |
291 | } | 293 | } |
@@ -294,7 +296,8 @@ static struct rtable *rt_cache_get_first(struct rt_cache_iter_state *st) | |||
294 | return r; | 296 | return r; |
295 | } | 297 | } |
296 | 298 | ||
297 | static struct rtable *rt_cache_get_next(struct rt_cache_iter_state *st, struct rtable *r) | 299 | static struct rtable *__rt_cache_get_next(struct rt_cache_iter_state *st, |
300 | struct rtable *r) | ||
298 | { | 301 | { |
299 | r = r->u.dst.rt_next; | 302 | r = r->u.dst.rt_next; |
300 | while (!r) { | 303 | while (!r) { |
@@ -307,16 +310,25 @@ static struct rtable *rt_cache_get_next(struct rt_cache_iter_state *st, struct r | |||
307 | return rcu_dereference(r); | 310 | return rcu_dereference(r); |
308 | } | 311 | } |
309 | 312 | ||
313 | static struct rtable *rt_cache_get_next(struct rt_cache_iter_state *st, | ||
314 | struct rtable *r) | ||
315 | { | ||
316 | while ((r = __rt_cache_get_next(st, r)) != NULL) { | ||
317 | if (r->u.dst.dev->nd_net != st->p.net) | ||
318 | continue; | ||
319 | if (r->rt_genid == st->genid) | ||
320 | break; | ||
321 | } | ||
322 | return r; | ||
323 | } | ||
324 | |||
310 | static struct rtable *rt_cache_get_idx(struct rt_cache_iter_state *st, loff_t pos) | 325 | static struct rtable *rt_cache_get_idx(struct rt_cache_iter_state *st, loff_t pos) |
311 | { | 326 | { |
312 | struct rtable *r = rt_cache_get_first(st); | 327 | struct rtable *r = rt_cache_get_first(st); |
313 | 328 | ||
314 | if (r) | 329 | if (r) |
315 | while (pos && (r = rt_cache_get_next(st, r))) { | 330 | while (pos && (r = rt_cache_get_next(st, r))) |
316 | if (r->rt_genid != st->genid) | ||
317 | continue; | ||
318 | --pos; | 331 | --pos; |
319 | } | ||
320 | return pos ? NULL : r; | 332 | return pos ? NULL : r; |
321 | } | 333 | } |
322 | 334 | ||
@@ -390,7 +402,7 @@ static const struct seq_operations rt_cache_seq_ops = { | |||
390 | 402 | ||
391 | static int rt_cache_seq_open(struct inode *inode, struct file *file) | 403 | static int rt_cache_seq_open(struct inode *inode, struct file *file) |
392 | { | 404 | { |
393 | return seq_open_private(file, &rt_cache_seq_ops, | 405 | return seq_open_net(inode, file, &rt_cache_seq_ops, |
394 | sizeof(struct rt_cache_iter_state)); | 406 | sizeof(struct rt_cache_iter_state)); |
395 | } | 407 | } |
396 | 408 | ||
@@ -399,7 +411,7 @@ static const struct file_operations rt_cache_seq_fops = { | |||
399 | .open = rt_cache_seq_open, | 411 | .open = rt_cache_seq_open, |
400 | .read = seq_read, | 412 | .read = seq_read, |
401 | .llseek = seq_lseek, | 413 | .llseek = seq_lseek, |
402 | .release = seq_release_private, | 414 | .release = seq_release_net, |
403 | }; | 415 | }; |
404 | 416 | ||
405 | 417 | ||
@@ -533,7 +545,7 @@ static int ip_rt_acct_read(char *buffer, char **start, off_t offset, | |||
533 | } | 545 | } |
534 | #endif | 546 | #endif |
535 | 547 | ||
536 | static __init int ip_rt_proc_init(struct net *net) | 548 | static int __net_init ip_rt_do_proc_init(struct net *net) |
537 | { | 549 | { |
538 | struct proc_dir_entry *pde; | 550 | struct proc_dir_entry *pde; |
539 | 551 | ||
@@ -564,8 +576,26 @@ err2: | |||
564 | err1: | 576 | err1: |
565 | return -ENOMEM; | 577 | return -ENOMEM; |
566 | } | 578 | } |
579 | |||
580 | static void __net_exit ip_rt_do_proc_exit(struct net *net) | ||
581 | { | ||
582 | remove_proc_entry("rt_cache", net->proc_net_stat); | ||
583 | remove_proc_entry("rt_cache", net->proc_net); | ||
584 | remove_proc_entry("rt_acct", net->proc_net); | ||
585 | } | ||
586 | |||
587 | static struct pernet_operations ip_rt_proc_ops __net_initdata = { | ||
588 | .init = ip_rt_do_proc_init, | ||
589 | .exit = ip_rt_do_proc_exit, | ||
590 | }; | ||
591 | |||
592 | static int __init ip_rt_proc_init(void) | ||
593 | { | ||
594 | return register_pernet_subsys(&ip_rt_proc_ops); | ||
595 | } | ||
596 | |||
567 | #else | 597 | #else |
568 | static inline int ip_rt_proc_init(struct net *net) | 598 | static inline int ip_rt_proc_init(void) |
569 | { | 599 | { |
570 | return 0; | 600 | return 0; |
571 | } | 601 | } |
@@ -1131,10 +1161,12 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw, | |||
1131 | __be32 skeys[2] = { saddr, 0 }; | 1161 | __be32 skeys[2] = { saddr, 0 }; |
1132 | int ikeys[2] = { dev->ifindex, 0 }; | 1162 | int ikeys[2] = { dev->ifindex, 0 }; |
1133 | struct netevent_redirect netevent; | 1163 | struct netevent_redirect netevent; |
1164 | struct net *net; | ||
1134 | 1165 | ||
1135 | if (!in_dev) | 1166 | if (!in_dev) |
1136 | return; | 1167 | return; |
1137 | 1168 | ||
1169 | net = dev->nd_net; | ||
1138 | if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) | 1170 | if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) |
1139 | || ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) | 1171 | || ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) |
1140 | || ipv4_is_zeronet(new_gw)) | 1172 | || ipv4_is_zeronet(new_gw)) |
@@ -1146,7 +1178,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw, | |||
1146 | if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev)) | 1178 | if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev)) |
1147 | goto reject_redirect; | 1179 | goto reject_redirect; |
1148 | } else { | 1180 | } else { |
1149 | if (inet_addr_type(&init_net, new_gw) != RTN_UNICAST) | 1181 | if (inet_addr_type(net, new_gw) != RTN_UNICAST) |
1150 | goto reject_redirect; | 1182 | goto reject_redirect; |
1151 | } | 1183 | } |
1152 | 1184 | ||
@@ -1164,7 +1196,8 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw, | |||
1164 | rth->fl.fl4_src != skeys[i] || | 1196 | rth->fl.fl4_src != skeys[i] || |
1165 | rth->fl.oif != ikeys[k] || | 1197 | rth->fl.oif != ikeys[k] || |
1166 | rth->fl.iif != 0 || | 1198 | rth->fl.iif != 0 || |
1167 | rth->rt_genid != atomic_read(&rt_genid)) { | 1199 | rth->rt_genid != atomic_read(&rt_genid) || |
1200 | rth->u.dst.dev->nd_net != net) { | ||
1168 | rthp = &rth->u.dst.rt_next; | 1201 | rthp = &rth->u.dst.rt_next; |
1169 | continue; | 1202 | continue; |
1170 | } | 1203 | } |
@@ -1256,7 +1289,7 @@ reject_redirect: | |||
1256 | 1289 | ||
1257 | static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst) | 1290 | static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst) |
1258 | { | 1291 | { |
1259 | struct rtable *rt = (struct rtable*)dst; | 1292 | struct rtable *rt = (struct rtable *)dst; |
1260 | struct dst_entry *ret = dst; | 1293 | struct dst_entry *ret = dst; |
1261 | 1294 | ||
1262 | if (rt) { | 1295 | if (rt) { |
@@ -1297,7 +1330,7 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst) | |||
1297 | 1330 | ||
1298 | void ip_rt_send_redirect(struct sk_buff *skb) | 1331 | void ip_rt_send_redirect(struct sk_buff *skb) |
1299 | { | 1332 | { |
1300 | struct rtable *rt = (struct rtable*)skb->dst; | 1333 | struct rtable *rt = skb->rtable; |
1301 | struct in_device *in_dev = in_dev_get(rt->u.dst.dev); | 1334 | struct in_device *in_dev = in_dev_get(rt->u.dst.dev); |
1302 | 1335 | ||
1303 | if (!in_dev) | 1336 | if (!in_dev) |
@@ -1346,7 +1379,7 @@ out: | |||
1346 | 1379 | ||
1347 | static int ip_error(struct sk_buff *skb) | 1380 | static int ip_error(struct sk_buff *skb) |
1348 | { | 1381 | { |
1349 | struct rtable *rt = (struct rtable*)skb->dst; | 1382 | struct rtable *rt = skb->rtable; |
1350 | unsigned long now; | 1383 | unsigned long now; |
1351 | int code; | 1384 | int code; |
1352 | 1385 | ||
@@ -1515,7 +1548,7 @@ static void ipv4_link_failure(struct sk_buff *skb) | |||
1515 | 1548 | ||
1516 | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0); | 1549 | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0); |
1517 | 1550 | ||
1518 | rt = (struct rtable *) skb->dst; | 1551 | rt = skb->rtable; |
1519 | if (rt) | 1552 | if (rt) |
1520 | dst_set_expires(&rt->u.dst, 0); | 1553 | dst_set_expires(&rt->u.dst, 0); |
1521 | } | 1554 | } |
@@ -1675,7 +1708,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, | |||
1675 | 1708 | ||
1676 | in_dev_put(in_dev); | 1709 | in_dev_put(in_dev); |
1677 | hash = rt_hash(daddr, saddr, dev->ifindex); | 1710 | hash = rt_hash(daddr, saddr, dev->ifindex); |
1678 | return rt_intern_hash(hash, rth, (struct rtable**) &skb->dst); | 1711 | return rt_intern_hash(hash, rth, &skb->rtable); |
1679 | 1712 | ||
1680 | e_nobufs: | 1713 | e_nobufs: |
1681 | in_dev_put(in_dev); | 1714 | in_dev_put(in_dev); |
@@ -1836,7 +1869,7 @@ static inline int ip_mkroute_input(struct sk_buff *skb, | |||
1836 | 1869 | ||
1837 | /* put it into the cache */ | 1870 | /* put it into the cache */ |
1838 | hash = rt_hash(daddr, saddr, fl->iif); | 1871 | hash = rt_hash(daddr, saddr, fl->iif); |
1839 | return rt_intern_hash(hash, rth, (struct rtable**)&skb->dst); | 1872 | return rt_intern_hash(hash, rth, &skb->rtable); |
1840 | } | 1873 | } |
1841 | 1874 | ||
1842 | /* | 1875 | /* |
@@ -1992,7 +2025,7 @@ local_input: | |||
1992 | } | 2025 | } |
1993 | rth->rt_type = res.type; | 2026 | rth->rt_type = res.type; |
1994 | hash = rt_hash(daddr, saddr, fl.iif); | 2027 | hash = rt_hash(daddr, saddr, fl.iif); |
1995 | err = rt_intern_hash(hash, rth, (struct rtable**)&skb->dst); | 2028 | err = rt_intern_hash(hash, rth, &skb->rtable); |
1996 | goto done; | 2029 | goto done; |
1997 | 2030 | ||
1998 | no_route: | 2031 | no_route: |
@@ -2058,7 +2091,7 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr, | |||
2058 | dst_use(&rth->u.dst, jiffies); | 2091 | dst_use(&rth->u.dst, jiffies); |
2059 | RT_CACHE_STAT_INC(in_hit); | 2092 | RT_CACHE_STAT_INC(in_hit); |
2060 | rcu_read_unlock(); | 2093 | rcu_read_unlock(); |
2061 | skb->dst = (struct dst_entry*)rth; | 2094 | skb->rtable = rth; |
2062 | return 0; | 2095 | return 0; |
2063 | } | 2096 | } |
2064 | RT_CACHE_STAT_INC(in_hlist_search); | 2097 | RT_CACHE_STAT_INC(in_hlist_search); |
@@ -2565,7 +2598,7 @@ int ip_route_output_key(struct net *net, struct rtable **rp, struct flowi *flp) | |||
2565 | static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event, | 2598 | static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event, |
2566 | int nowait, unsigned int flags) | 2599 | int nowait, unsigned int flags) |
2567 | { | 2600 | { |
2568 | struct rtable *rt = (struct rtable*)skb->dst; | 2601 | struct rtable *rt = skb->rtable; |
2569 | struct rtmsg *r; | 2602 | struct rtmsg *r; |
2570 | struct nlmsghdr *nlh; | 2603 | struct nlmsghdr *nlh; |
2571 | long expires; | 2604 | long expires; |
@@ -2668,9 +2701,6 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void | |||
2668 | int err; | 2701 | int err; |
2669 | struct sk_buff *skb; | 2702 | struct sk_buff *skb; |
2670 | 2703 | ||
2671 | if (net != &init_net) | ||
2672 | return -EINVAL; | ||
2673 | |||
2674 | err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy); | 2704 | err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy); |
2675 | if (err < 0) | 2705 | if (err < 0) |
2676 | goto errout; | 2706 | goto errout; |
@@ -2700,7 +2730,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void | |||
2700 | if (iif) { | 2730 | if (iif) { |
2701 | struct net_device *dev; | 2731 | struct net_device *dev; |
2702 | 2732 | ||
2703 | dev = __dev_get_by_index(&init_net, iif); | 2733 | dev = __dev_get_by_index(net, iif); |
2704 | if (dev == NULL) { | 2734 | if (dev == NULL) { |
2705 | err = -ENODEV; | 2735 | err = -ENODEV; |
2706 | goto errout_free; | 2736 | goto errout_free; |
@@ -2712,7 +2742,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void | |||
2712 | err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev); | 2742 | err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev); |
2713 | local_bh_enable(); | 2743 | local_bh_enable(); |
2714 | 2744 | ||
2715 | rt = (struct rtable*) skb->dst; | 2745 | rt = skb->rtable; |
2716 | if (err == 0 && rt->u.dst.error) | 2746 | if (err == 0 && rt->u.dst.error) |
2717 | err = -rt->u.dst.error; | 2747 | err = -rt->u.dst.error; |
2718 | } else { | 2748 | } else { |
@@ -2726,22 +2756,22 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void | |||
2726 | }, | 2756 | }, |
2727 | .oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0, | 2757 | .oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0, |
2728 | }; | 2758 | }; |
2729 | err = ip_route_output_key(&init_net, &rt, &fl); | 2759 | err = ip_route_output_key(net, &rt, &fl); |
2730 | } | 2760 | } |
2731 | 2761 | ||
2732 | if (err) | 2762 | if (err) |
2733 | goto errout_free; | 2763 | goto errout_free; |
2734 | 2764 | ||
2735 | skb->dst = &rt->u.dst; | 2765 | skb->rtable = rt; |
2736 | if (rtm->rtm_flags & RTM_F_NOTIFY) | 2766 | if (rtm->rtm_flags & RTM_F_NOTIFY) |
2737 | rt->rt_flags |= RTCF_NOTIFY; | 2767 | rt->rt_flags |= RTCF_NOTIFY; |
2738 | 2768 | ||
2739 | err = rt_fill_info(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq, | 2769 | err = rt_fill_info(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq, |
2740 | RTM_NEWROUTE, 0, 0); | 2770 | RTM_NEWROUTE, 0, 0); |
2741 | if (err <= 0) | 2771 | if (err <= 0) |
2742 | goto errout_free; | 2772 | goto errout_free; |
2743 | 2773 | ||
2744 | err = rtnl_unicast(skb, &init_net, NETLINK_CB(in_skb).pid); | 2774 | err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid); |
2745 | errout: | 2775 | errout: |
2746 | return err; | 2776 | return err; |
2747 | 2777 | ||
@@ -2755,6 +2785,9 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
2755 | struct rtable *rt; | 2785 | struct rtable *rt; |
2756 | int h, s_h; | 2786 | int h, s_h; |
2757 | int idx, s_idx; | 2787 | int idx, s_idx; |
2788 | struct net *net; | ||
2789 | |||
2790 | net = skb->sk->sk_net; | ||
2758 | 2791 | ||
2759 | s_h = cb->args[0]; | 2792 | s_h = cb->args[0]; |
2760 | if (s_h < 0) | 2793 | if (s_h < 0) |
@@ -2764,7 +2797,7 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
2764 | rcu_read_lock_bh(); | 2797 | rcu_read_lock_bh(); |
2765 | for (rt = rcu_dereference(rt_hash_table[h].chain), idx = 0; rt; | 2798 | for (rt = rcu_dereference(rt_hash_table[h].chain), idx = 0; rt; |
2766 | rt = rcu_dereference(rt->u.dst.rt_next), idx++) { | 2799 | rt = rcu_dereference(rt->u.dst.rt_next), idx++) { |
2767 | if (idx < s_idx) | 2800 | if (rt->u.dst.dev->nd_net != net || idx < s_idx) |
2768 | continue; | 2801 | continue; |
2769 | if (rt->rt_genid != atomic_read(&rt_genid)) | 2802 | if (rt->rt_genid != atomic_read(&rt_genid)) |
2770 | continue; | 2803 | continue; |
@@ -3040,7 +3073,7 @@ int __init ip_rt_init(void) | |||
3040 | ip_rt_secret_interval; | 3073 | ip_rt_secret_interval; |
3041 | add_timer(&rt_secret_timer); | 3074 | add_timer(&rt_secret_timer); |
3042 | 3075 | ||
3043 | if (ip_rt_proc_init(&init_net)) | 3076 | if (ip_rt_proc_init()) |
3044 | printk(KERN_ERR "Unable to create route proc files\n"); | 3077 | printk(KERN_ERR "Unable to create route proc files\n"); |
3045 | #ifdef CONFIG_XFRM | 3078 | #ifdef CONFIG_XFRM |
3046 | xfrm_init(); | 3079 | xfrm_init(); |
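The route.c hunks above share one theme: cache lookups, redirect handling and dumps become namespace-aware by comparing the owning device's nd_net against the relevant struct net, and the repeated (struct rtable *)skb->dst casts give way to the typed skb->rtable field. As a rough illustration of the chain-walk filter only (toy types, not the kernel structures), a minimal C sketch:

#include <stdio.h>

struct net { int id; };                    /* stand-in for the kernel's struct net */

struct cached_route {
    struct net *owner;                     /* namespace that owns this entry */
    const char *dst;
    struct cached_route *next;
};

/* Walk one hash chain and report only entries visible from 'net',
 * mirroring the "rth->u.dst.dev->nd_net != net" skip added above. */
static void dump_chain(struct cached_route *chain, struct net *net)
{
    for (struct cached_route *r = chain; r; r = r->next) {
        if (r->owner != net)
            continue;                      /* foreign namespace: skip */
        printf("route to %s\n", r->dst);
    }
}

int main(void)
{
    struct net a = { 1 }, b = { 2 };
    struct cached_route r2 = { &b, "10.0.0.2", NULL };
    struct cached_route r1 = { &a, "10.0.0.1", &r2 };

    dump_chain(&r1, &a);                   /* prints only 10.0.0.1 */
    return 0;
}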
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index f470fe4511db..4704f27f6c0b 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c | |||
@@ -10,8 +10,6 @@ | |||
10 | * 2 of the License, or (at your option) any later version. | 10 | * 2 of the License, or (at your option) any later version. |
11 | * | 11 | * |
12 | * $Id: syncookies.c,v 1.18 2002/02/01 22:01:04 davem Exp $ | 12 | * $Id: syncookies.c,v 1.18 2002/02/01 22:01:04 davem Exp $ |
13 | * | ||
14 | * Missing: IPv6 support. | ||
15 | */ | 13 | */ |
16 | 14 | ||
17 | #include <linux/tcp.h> | 15 | #include <linux/tcp.h> |
@@ -23,22 +21,25 @@ | |||
23 | 21 | ||
24 | extern int sysctl_tcp_syncookies; | 22 | extern int sysctl_tcp_syncookies; |
25 | 23 | ||
26 | static __u32 syncookie_secret[2][16-3+SHA_DIGEST_WORDS]; | 24 | __u32 syncookie_secret[2][16-3+SHA_DIGEST_WORDS]; |
25 | EXPORT_SYMBOL(syncookie_secret); | ||
27 | 26 | ||
28 | static __init int init_syncookies(void) | 27 | static __init int init_syncookies(void) |
29 | { | 28 | { |
30 | get_random_bytes(syncookie_secret, sizeof(syncookie_secret)); | 29 | get_random_bytes(syncookie_secret, sizeof(syncookie_secret)); |
31 | return 0; | 30 | return 0; |
32 | } | 31 | } |
33 | module_init(init_syncookies); | 32 | __initcall(init_syncookies); |
34 | 33 | ||
35 | #define COOKIEBITS 24 /* Upper bits store count */ | 34 | #define COOKIEBITS 24 /* Upper bits store count */ |
36 | #define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1) | 35 | #define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1) |
37 | 36 | ||
37 | static DEFINE_PER_CPU(__u32, cookie_scratch)[16 + 5 + SHA_WORKSPACE_WORDS]; | ||
38 | |||
38 | static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport, | 39 | static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport, |
39 | u32 count, int c) | 40 | u32 count, int c) |
40 | { | 41 | { |
41 | __u32 tmp[16 + 5 + SHA_WORKSPACE_WORDS]; | 42 | __u32 *tmp = __get_cpu_var(cookie_scratch); |
42 | 43 | ||
43 | memcpy(tmp + 3, syncookie_secret[c], sizeof(syncookie_secret[c])); | 44 | memcpy(tmp + 3, syncookie_secret[c], sizeof(syncookie_secret[c])); |
44 | tmp[0] = (__force u32)saddr; | 45 | tmp[0] = (__force u32)saddr; |
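The syncookies change above also replaces the large on-stack SHA workspace (on the order of a hundred 32-bit words) with a per-CPU scratch array fetched via __get_cpu_var(), and exports the secret so other code can share it. A loose userspace analogue of that stack-to-static move, using a per-thread buffer since the per-CPU machinery is kernel-only; sizes and the hash body here are placeholders, not the real algorithm:

#include <stdio.h>
#include <string.h>

#define SCRATCH_WORDS (16 + 5 + 80)        /* roughly the size of the real workspace */

/* One buffer per thread (GCC/Clang __thread); the kernel keeps one per CPU
 * instead of putting ~400 bytes on the stack of every call. */
static __thread unsigned int cookie_scratch[SCRATCH_WORDS];

static unsigned int toy_hash(unsigned int saddr, unsigned int daddr)
{
    unsigned int *tmp = cookie_scratch;    /* was: unsigned int tmp[SCRATCH_WORDS]; */

    memset(tmp, 0, sizeof(cookie_scratch));
    tmp[0] = saddr;
    tmp[1] = daddr;
    return tmp[0] ^ tmp[1];                /* placeholder for the real SHA transform */
}

int main(void)
{
    printf("%u\n", toy_hash(0x0a000001u, 0x0a000002u));
    return 0;
}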
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c index 3aa0b23c1ea0..eb5b9854c8c7 100644 --- a/net/ipv4/tcp_cubic.c +++ b/net/ipv4/tcp_cubic.c | |||
@@ -1,12 +1,13 @@ | |||
1 | /* | 1 | /* |
2 | * TCP CUBIC: Binary Increase Congestion control for TCP v2.1 | 2 | * TCP CUBIC: Binary Increase Congestion control for TCP v2.2 |
3 | * | 3 | * Home page: |
4 | * http://netsrv.csc.ncsu.edu/twiki/bin/view/Main/BIC | ||
4 | * This is from the implementation of CUBIC TCP in | 5 | * This is from the implementation of CUBIC TCP in |
5 | * Injong Rhee, Lisong Xu. | 6 | * Injong Rhee, Lisong Xu. |
6 | * "CUBIC: A New TCP-Friendly High-Speed TCP Variant | 7 | * "CUBIC: A New TCP-Friendly High-Speed TCP Variant |
7 | * in PFLDnet 2005 | 8 | * in PFLDnet 2005 |
8 | * Available from: | 9 | * Available from: |
9 | * http://www.csc.ncsu.edu/faculty/rhee/export/bitcp/cubic-paper.pdf | 10 | * http://netsrv.csc.ncsu.edu/export/cubic-paper.pdf |
10 | * | 11 | * |
11 | * Unless CUBIC is enabled and congestion window is large | 12 | * Unless CUBIC is enabled and congestion window is large |
12 | * this behaves the same as the original Reno. | 13 | * this behaves the same as the original Reno. |
@@ -20,15 +21,10 @@ | |||
20 | #define BICTCP_BETA_SCALE 1024 /* Scale factor beta calculation | 21 | #define BICTCP_BETA_SCALE 1024 /* Scale factor beta calculation |
21 | * max_cwnd = snd_cwnd * beta | 22 | * max_cwnd = snd_cwnd * beta |
22 | */ | 23 | */ |
23 | #define BICTCP_B 4 /* | ||
24 | * In binary search, | ||
25 | * go to point (max+min)/N | ||
26 | */ | ||
27 | #define BICTCP_HZ 10 /* BIC HZ 2^10 = 1024 */ | 24 | #define BICTCP_HZ 10 /* BIC HZ 2^10 = 1024 */ |
28 | 25 | ||
29 | static int fast_convergence __read_mostly = 1; | 26 | static int fast_convergence __read_mostly = 1; |
30 | static int max_increment __read_mostly = 16; | 27 | static int beta __read_mostly = 717; /* = 717/1024 (BICTCP_BETA_SCALE) */ |
31 | static int beta __read_mostly = 819; /* = 819/1024 (BICTCP_BETA_SCALE) */ | ||
32 | static int initial_ssthresh __read_mostly; | 28 | static int initial_ssthresh __read_mostly; |
33 | static int bic_scale __read_mostly = 41; | 29 | static int bic_scale __read_mostly = 41; |
34 | static int tcp_friendliness __read_mostly = 1; | 30 | static int tcp_friendliness __read_mostly = 1; |
@@ -40,9 +36,7 @@ static u64 cube_factor __read_mostly; | |||
40 | /* Note parameters that are used for precomputing scale factors are read-only */ | 36 | /* Note parameters that are used for precomputing scale factors are read-only */ |
41 | module_param(fast_convergence, int, 0644); | 37 | module_param(fast_convergence, int, 0644); |
42 | MODULE_PARM_DESC(fast_convergence, "turn on/off fast convergence"); | 38 | MODULE_PARM_DESC(fast_convergence, "turn on/off fast convergence"); |
43 | module_param(max_increment, int, 0644); | 39 | module_param(beta, int, 0644); |
44 | MODULE_PARM_DESC(max_increment, "Limit on increment allowed during binary search"); | ||
45 | module_param(beta, int, 0444); | ||
46 | MODULE_PARM_DESC(beta, "beta for multiplicative increase"); | 40 | MODULE_PARM_DESC(beta, "beta for multiplicative increase"); |
47 | module_param(initial_ssthresh, int, 0644); | 41 | module_param(initial_ssthresh, int, 0644); |
48 | MODULE_PARM_DESC(initial_ssthresh, "initial value of slow start threshold"); | 42 | MODULE_PARM_DESC(initial_ssthresh, "initial value of slow start threshold"); |
@@ -145,7 +139,7 @@ static u32 cubic_root(u64 a) | |||
145 | static inline void bictcp_update(struct bictcp *ca, u32 cwnd) | 139 | static inline void bictcp_update(struct bictcp *ca, u32 cwnd) |
146 | { | 140 | { |
147 | u64 offs; | 141 | u64 offs; |
148 | u32 delta, t, bic_target, min_cnt, max_cnt; | 142 | u32 delta, t, bic_target, max_cnt; |
149 | 143 | ||
150 | ca->ack_cnt++; /* count the number of ACKs */ | 144 | ca->ack_cnt++; /* count the number of ACKs */ |
151 | 145 | ||
@@ -211,19 +205,6 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd) | |||
211 | ca->cnt = 100 * cwnd; /* very small increment*/ | 205 | ca->cnt = 100 * cwnd; /* very small increment*/ |
212 | } | 206 | } |
213 | 207 | ||
214 | if (ca->delay_min > 0) { | ||
215 | /* max increment = Smax * rtt / 0.1 */ | ||
216 | min_cnt = (cwnd * HZ * 8)/(10 * max_increment * ca->delay_min); | ||
217 | |||
218 | /* use concave growth when the target is above the origin */ | ||
219 | if (ca->cnt < min_cnt && t >= ca->bic_K) | ||
220 | ca->cnt = min_cnt; | ||
221 | } | ||
222 | |||
223 | /* slow start and low utilization */ | ||
224 | if (ca->loss_cwnd == 0) /* could be aggressive in slow start */ | ||
225 | ca->cnt = 50; | ||
226 | |||
227 | /* TCP Friendly */ | 208 | /* TCP Friendly */ |
228 | if (tcp_friendliness) { | 209 | if (tcp_friendliness) { |
229 | u32 scale = beta_scale; | 210 | u32 scale = beta_scale; |
@@ -391,4 +372,4 @@ module_exit(cubictcp_unregister); | |||
391 | MODULE_AUTHOR("Sangtae Ha, Stephen Hemminger"); | 372 | MODULE_AUTHOR("Sangtae Ha, Stephen Hemminger"); |
392 | MODULE_LICENSE("GPL"); | 373 | MODULE_LICENSE("GPL"); |
393 | MODULE_DESCRIPTION("CUBIC TCP"); | 374 | MODULE_DESCRIPTION("CUBIC TCP"); |
394 | MODULE_VERSION("2.1"); | 375 | MODULE_VERSION("2.2"); |
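The CUBIC 2.2 update above drops the leftover BIC binary-search knobs (max_increment, BICTCP_B) and lowers beta from 819/1024 to 717/1024; beta is the fixed-point fraction of the window kept after a loss, scaled by BICTCP_BETA_SCALE, so the post-loss window moves from roughly 0.8*cwnd to roughly 0.7*cwnd. A small worked check of that scaling (the cwnd value is made up for illustration):

#include <stdio.h>

#define BICTCP_BETA_SCALE 1024

/* Window kept after a loss event under multiplicative decrease: cwnd * beta / 1024. */
static unsigned int after_loss(unsigned int cwnd, unsigned int beta)
{
    return (cwnd * beta) / BICTCP_BETA_SCALE;
}

int main(void)
{
    unsigned int cwnd = 1000;              /* illustrative congestion window, in packets */

    printf("old beta 819: %u\n", after_loss(cwnd, 819));  /* 799, about 0.8 * cwnd */
    printf("new beta 717: %u\n", after_loss(cwnd, 717));  /* 700, about 0.7 * cwnd */
    return 0;
}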
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 7facdb0f6960..9cf446427cc2 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -3561,7 +3561,7 @@ static void tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th) | |||
3561 | * cases we should never reach this piece of code. | 3561 | * cases we should never reach this piece of code. |
3562 | */ | 3562 | */ |
3563 | printk(KERN_ERR "%s: Impossible, sk->sk_state=%d\n", | 3563 | printk(KERN_ERR "%s: Impossible, sk->sk_state=%d\n", |
3564 | __FUNCTION__, sk->sk_state); | 3564 | __func__, sk->sk_state); |
3565 | break; | 3565 | break; |
3566 | } | 3566 | } |
3567 | 3567 | ||
@@ -5330,6 +5330,7 @@ discard: | |||
5330 | 5330 | ||
5331 | EXPORT_SYMBOL(sysctl_tcp_ecn); | 5331 | EXPORT_SYMBOL(sysctl_tcp_ecn); |
5332 | EXPORT_SYMBOL(sysctl_tcp_reordering); | 5332 | EXPORT_SYMBOL(sysctl_tcp_reordering); |
5333 | EXPORT_SYMBOL(sysctl_tcp_adv_win_scale); | ||
5333 | EXPORT_SYMBOL(tcp_parse_options); | 5334 | EXPORT_SYMBOL(tcp_parse_options); |
5334 | EXPORT_SYMBOL(tcp_rcv_established); | 5335 | EXPORT_SYMBOL(tcp_rcv_established); |
5335 | EXPORT_SYMBOL(tcp_rcv_state_process); | 5336 | EXPORT_SYMBOL(tcp_rcv_state_process); |
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 00156bf421ca..a79e324638eb 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -552,7 +552,7 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb) | |||
552 | if (th->rst) | 552 | if (th->rst) |
553 | return; | 553 | return; |
554 | 554 | ||
555 | if (((struct rtable *)skb->dst)->rt_type != RTN_LOCAL) | 555 | if (skb->rtable->rt_type != RTN_LOCAL) |
556 | return; | 556 | return; |
557 | 557 | ||
558 | /* Swap the send and the receive. */ | 558 | /* Swap the send and the receive. */ |
@@ -723,8 +723,8 @@ static void tcp_v4_reqsk_send_ack(struct sk_buff *skb, | |||
723 | * This still operates on a request_sock only, not on a big | 723 | * This still operates on a request_sock only, not on a big |
724 | * socket. | 724 | * socket. |
725 | */ | 725 | */ |
726 | static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req, | 726 | static int __tcp_v4_send_synack(struct sock *sk, struct request_sock *req, |
727 | struct dst_entry *dst) | 727 | struct dst_entry *dst) |
728 | { | 728 | { |
729 | const struct inet_request_sock *ireq = inet_rsk(req); | 729 | const struct inet_request_sock *ireq = inet_rsk(req); |
730 | int err = -1; | 730 | int err = -1; |
@@ -732,7 +732,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req, | |||
732 | 732 | ||
733 | /* First, grab a route. */ | 733 | /* First, grab a route. */ |
734 | if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL) | 734 | if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL) |
735 | goto out; | 735 | return -1; |
736 | 736 | ||
737 | skb = tcp_make_synack(sk, dst, req); | 737 | skb = tcp_make_synack(sk, dst, req); |
738 | 738 | ||
@@ -751,11 +751,15 @@ static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req, | |||
751 | err = net_xmit_eval(err); | 751 | err = net_xmit_eval(err); |
752 | } | 752 | } |
753 | 753 | ||
754 | out: | ||
755 | dst_release(dst); | 754 | dst_release(dst); |
756 | return err; | 755 | return err; |
757 | } | 756 | } |
758 | 757 | ||
758 | static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req) | ||
759 | { | ||
760 | return __tcp_v4_send_synack(sk, req, NULL); | ||
761 | } | ||
762 | |||
759 | /* | 763 | /* |
760 | * IPv4 request_sock destructor. | 764 | * IPv4 request_sock destructor. |
761 | */ | 765 | */ |
@@ -1258,8 +1262,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) | |||
1258 | #endif | 1262 | #endif |
1259 | 1263 | ||
1260 | /* Never answer to SYNs sent to broadcast or multicast */ | 1264 | if (skb->rtable->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) |
1261 | if (((struct rtable *)skb->dst)->rt_flags & | 1265 | if (skb->rtable->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) |
1262 | (RTCF_BROADCAST | RTCF_MULTICAST)) | ||
1263 | goto drop; | 1266 | goto drop; |
1264 | 1267 | ||
1265 | /* TW buckets are converted to open requests without | 1268 | /* TW buckets are converted to open requests without |
@@ -1351,8 +1354,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) | |||
1351 | (s32)(peer->tcp_ts - req->ts_recent) > | 1354 | (s32)(peer->tcp_ts - req->ts_recent) > |
1352 | TCP_PAWS_WINDOW) { | 1355 | TCP_PAWS_WINDOW) { |
1353 | NET_INC_STATS_BH(LINUX_MIB_PAWSPASSIVEREJECTED); | 1356 | NET_INC_STATS_BH(LINUX_MIB_PAWSPASSIVEREJECTED); |
1354 | dst_release(dst); | 1357 | goto drop_and_release; |
1355 | goto drop_and_free; | ||
1356 | } | 1358 | } |
1357 | } | 1359 | } |
1358 | /* Kill the following clause, if you dislike this way. */ | 1360 | /* Kill the following clause, if you dislike this way. */ |
@@ -1372,24 +1374,21 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) | |||
1372 | "request from %u.%u.%u.%u/%u\n", | 1374 | "request from %u.%u.%u.%u/%u\n", |
1373 | NIPQUAD(saddr), | 1375 | NIPQUAD(saddr), |
1374 | ntohs(tcp_hdr(skb)->source)); | 1376 | ntohs(tcp_hdr(skb)->source)); |
1375 | dst_release(dst); | 1377 | goto drop_and_release; |
1376 | goto drop_and_free; | ||
1377 | } | 1378 | } |
1378 | 1379 | ||
1379 | isn = tcp_v4_init_sequence(skb); | 1380 | isn = tcp_v4_init_sequence(skb); |
1380 | } | 1381 | } |
1381 | tcp_rsk(req)->snt_isn = isn; | 1382 | tcp_rsk(req)->snt_isn = isn; |
1382 | 1383 | ||
1383 | if (tcp_v4_send_synack(sk, req, dst)) | 1384 | if (__tcp_v4_send_synack(sk, req, dst) || want_cookie) |
1384 | goto drop_and_free; | 1385 | goto drop_and_free; |
1385 | 1386 | ||
1386 | if (want_cookie) { | 1387 | inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT); |
1387 | reqsk_free(req); | ||
1388 | } else { | ||
1389 | inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT); | ||
1390 | } | ||
1391 | return 0; | 1388 | return 0; |
1392 | 1389 | ||
1390 | drop_and_release: | ||
1391 | dst_release(dst); | ||
1393 | drop_and_free: | 1392 | drop_and_free: |
1394 | reqsk_free(req); | 1393 | reqsk_free(req); |
1395 | drop: | 1394 | drop: |
@@ -2443,7 +2442,7 @@ struct proto tcp_prot = { | |||
2443 | REF_PROTO_INUSE(tcp) | 2442 | REF_PROTO_INUSE(tcp) |
2444 | }; | 2443 | }; |
2445 | 2444 | ||
2446 | void __init tcp_v4_init(struct net_proto_family *ops) | 2445 | void __init tcp_v4_init(void) |
2447 | { | 2446 | { |
2448 | if (inet_csk_ctl_sock_create(&tcp_socket, PF_INET, SOCK_RAW, | 2447 | if (inet_csk_ctl_sock_create(&tcp_socket, PF_INET, SOCK_RAW, |
2449 | IPPROTO_TCP) < 0) | 2448 | IPPROTO_TCP) < 0) |
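Two related cleanups run through the tcp_ipv4.c hunks above: the SYN-ACK sender is split into __tcp_v4_send_synack(), which takes the route explicitly and releases it on every path, plus a thin tcp_v4_send_synack() wrapper; and the duplicated "release the route, then free the request" exits in tcp_v4_conn_request() collapse into a drop_and_release: label that falls through to drop_and_free:. A generic, self-contained sketch of that error-path shape (resource names are invented, this is not the TCP code):

#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for the two resources the real code juggles: route and request. */
struct route   { int id; };
struct request { int id; };

static void route_release(struct route *rt)   { free(rt); }
static void request_free(struct request *req) { free(req); }

/* Mirrors the helper's contract: the route is consumed (released) whether
 * or not the send succeeds. */
static int send_synack(struct route *rt, int fail)
{
    route_release(rt);
    return fail ? -1 : 0;
}

static int conn_request(int paws_reject, int synack_fails)
{
    struct request *req = malloc(sizeof(*req));
    struct route   *rt;

    if (!req)
        goto drop;

    rt = malloc(sizeof(*rt));                /* "First, grab a route." */
    if (!rt)
        goto drop_and_free;

    if (paws_reject)
        goto drop_and_release;               /* must drop the route AND the request */

    if (send_synack(rt, synack_fails))       /* route consumed either way */
        goto drop_and_free;

    request_free(req);                       /* the real code queues the request instead */
    return 0;

drop_and_release:
    route_release(rt);                       /* extra step, then fall through */
drop_and_free:
    request_free(req);
drop:
    return -1;
}

int main(void)
{
    printf("%d %d %d\n", conn_request(0, 0), conn_request(1, 0), conn_request(0, 1));
    return 0;
}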
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index b61b76847ad9..8245247a6ceb 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c | |||
@@ -35,6 +35,8 @@ | |||
35 | #endif | 35 | #endif |
36 | 36 | ||
37 | int sysctl_tcp_syncookies __read_mostly = SYNC_INIT; | 37 | int sysctl_tcp_syncookies __read_mostly = SYNC_INIT; |
38 | EXPORT_SYMBOL(sysctl_tcp_syncookies); | ||
39 | |||
38 | int sysctl_tcp_abort_on_overflow __read_mostly; | 40 | int sysctl_tcp_abort_on_overflow __read_mostly; |
39 | 41 | ||
40 | struct inet_timewait_death_row tcp_death_row = { | 42 | struct inet_timewait_death_row tcp_death_row = { |
@@ -536,7 +538,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb, | |||
536 | * Enforce "SYN-ACK" according to figure 8, figure 6 | 538 | * Enforce "SYN-ACK" according to figure 8, figure 6 |
537 | * of RFC793, fixed by RFC1122. | 539 | * of RFC793, fixed by RFC1122. |
538 | */ | 540 | */ |
539 | req->rsk_ops->rtx_syn_ack(sk, req, NULL); | 541 | req->rsk_ops->rtx_syn_ack(sk, req); |
540 | return NULL; | 542 | return NULL; |
541 | } | 543 | } |
542 | 544 | ||
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 01578f544ad6..67f84f5035c4 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -2568,6 +2568,7 @@ void tcp_send_probe0(struct sock *sk) | |||
2568 | } | 2568 | } |
2569 | } | 2569 | } |
2570 | 2570 | ||
2571 | EXPORT_SYMBOL(tcp_select_initial_window); | ||
2571 | EXPORT_SYMBOL(tcp_connect); | 2572 | EXPORT_SYMBOL(tcp_connect); |
2572 | EXPORT_SYMBOL(tcp_make_synack); | 2573 | EXPORT_SYMBOL(tcp_make_synack); |
2573 | EXPORT_SYMBOL(tcp_simple_retransmit); | 2574 | EXPORT_SYMBOL(tcp_simple_retransmit); |
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c index 001b881ca36f..d49c6d68c8a9 100644 --- a/net/ipv4/udplite.c +++ b/net/ipv4/udplite.c | |||
@@ -106,14 +106,14 @@ void __init udplite4_register(void) | |||
106 | 106 | ||
107 | #ifdef CONFIG_PROC_FS | 107 | #ifdef CONFIG_PROC_FS |
108 | if (udp_proc_register(&udplite4_seq_afinfo)) /* udplite4_proc_init() */ | 108 | if (udp_proc_register(&udplite4_seq_afinfo)) /* udplite4_proc_init() */ |
109 | printk(KERN_ERR "%s: Cannot register /proc!\n", __FUNCTION__); | 109 | printk(KERN_ERR "%s: Cannot register /proc!\n", __func__); |
110 | #endif | 110 | #endif |
111 | return; | 111 | return; |
112 | 112 | ||
113 | out_unregister_proto: | 113 | out_unregister_proto: |
114 | proto_unregister(&udplite_prot); | 114 | proto_unregister(&udplite_prot); |
115 | out_register_err: | 115 | out_register_err: |
116 | printk(KERN_CRIT "%s: Cannot add UDP-Lite protocol.\n", __FUNCTION__); | 116 | printk(KERN_CRIT "%s: Cannot add UDP-Lite protocol.\n", __func__); |
117 | } | 117 | } |
118 | 118 | ||
119 | EXPORT_SYMBOL(udplite_hash); | 119 | EXPORT_SYMBOL(udplite_hash); |
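The __FUNCTION__ to __func__ substitutions seen in udplite.c and throughout this patch swap GCC's older spelling for the predefined identifier standardized in C99; both expand to the name of the enclosing function, so the log output does not change. A trivial standalone check:

#include <stdio.h>

static void report(void)
{
    /* __func__ is the C99 predefined identifier; __FUNCTION__ is the
     * legacy GCC spelling of the same thing. */
    printf("%s: hello\n", __func__);
}

int main(void)
{
    report();                              /* prints "report: hello" */
    return 0;
}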
diff --git a/net/ipv6/Makefile b/net/ipv6/Makefile index 24f3aa0f2a35..ae14617e607f 100644 --- a/net/ipv6/Makefile +++ b/net/ipv6/Makefile | |||
@@ -16,6 +16,7 @@ ipv6-$(CONFIG_XFRM) += xfrm6_policy.o xfrm6_state.o xfrm6_input.o \ | |||
16 | ipv6-$(CONFIG_NETFILTER) += netfilter.o | 16 | ipv6-$(CONFIG_NETFILTER) += netfilter.o |
17 | ipv6-$(CONFIG_IPV6_MULTIPLE_TABLES) += fib6_rules.o | 17 | ipv6-$(CONFIG_IPV6_MULTIPLE_TABLES) += fib6_rules.o |
18 | ipv6-$(CONFIG_PROC_FS) += proc.o | 18 | ipv6-$(CONFIG_PROC_FS) += proc.o |
19 | ipv6-$(CONFIG_SYN_COOKIES) += syncookies.o | ||
19 | 20 | ||
20 | ipv6-objs += $(ipv6-y) | 21 | ipv6-objs += $(ipv6-y) |
21 | 22 | ||
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 101e0e70ba27..4b86d388bf63 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -349,7 +349,7 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev) | |||
349 | if (snmp6_alloc_dev(ndev) < 0) { | 349 | if (snmp6_alloc_dev(ndev) < 0) { |
350 | ADBG((KERN_WARNING | 350 | ADBG((KERN_WARNING |
351 | "%s(): cannot allocate memory for statistics; dev=%s.\n", | 351 | "%s(): cannot allocate memory for statistics; dev=%s.\n", |
352 | __FUNCTION__, dev->name)); | 352 | __func__, dev->name)); |
353 | neigh_parms_release(&nd_tbl, ndev->nd_parms); | 353 | neigh_parms_release(&nd_tbl, ndev->nd_parms); |
354 | ndev->dead = 1; | 354 | ndev->dead = 1; |
355 | in6_dev_finish_destroy(ndev); | 355 | in6_dev_finish_destroy(ndev); |
@@ -359,7 +359,7 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev) | |||
359 | if (snmp6_register_dev(ndev) < 0) { | 359 | if (snmp6_register_dev(ndev) < 0) { |
360 | ADBG((KERN_WARNING | 360 | ADBG((KERN_WARNING |
361 | "%s(): cannot create /proc/net/dev_snmp6/%s\n", | 361 | "%s(): cannot create /proc/net/dev_snmp6/%s\n", |
362 | __FUNCTION__, dev->name)); | 362 | __func__, dev->name)); |
363 | neigh_parms_release(&nd_tbl, ndev->nd_parms); | 363 | neigh_parms_release(&nd_tbl, ndev->nd_parms); |
364 | ndev->dead = 1; | 364 | ndev->dead = 1; |
365 | in6_dev_finish_destroy(ndev); | 365 | in6_dev_finish_destroy(ndev); |
@@ -493,7 +493,7 @@ static void addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old) | |||
493 | dev_forward_change((struct inet6_dev *)table->extra1); | 493 | dev_forward_change((struct inet6_dev *)table->extra1); |
494 | 494 | ||
495 | if (*p) | 495 | if (*p) |
496 | rt6_purge_dflt_routers(); | 496 | rt6_purge_dflt_routers(net); |
497 | } | 497 | } |
498 | #endif | 498 | #endif |
499 | 499 | ||
@@ -561,7 +561,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen, | |||
561 | write_lock(&addrconf_hash_lock); | 561 | write_lock(&addrconf_hash_lock); |
562 | 562 | ||
563 | /* Ignore adding duplicate addresses on an interface */ | 563 | /* Ignore adding duplicate addresses on an interface */ |
564 | if (ipv6_chk_same_addr(&init_net, addr, idev->dev)) { | 564 | if (ipv6_chk_same_addr(idev->dev->nd_net, addr, idev->dev)) { |
565 | ADBG(("ipv6_add_addr: already assigned\n")); | 565 | ADBG(("ipv6_add_addr: already assigned\n")); |
566 | err = -EEXIST; | 566 | err = -EEXIST; |
567 | goto out; | 567 | goto out; |
@@ -751,9 +751,9 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp) | |||
751 | if ((ifp->flags & IFA_F_PERMANENT) && onlink < 1) { | 751 | if ((ifp->flags & IFA_F_PERMANENT) && onlink < 1) { |
752 | struct in6_addr prefix; | 752 | struct in6_addr prefix; |
753 | struct rt6_info *rt; | 753 | struct rt6_info *rt; |
754 | 754 | struct net *net = ifp->idev->dev->nd_net; | |
755 | ipv6_addr_prefix(&prefix, &ifp->addr, ifp->prefix_len); | 755 | ipv6_addr_prefix(&prefix, &ifp->addr, ifp->prefix_len); |
756 | rt = rt6_lookup(&prefix, NULL, ifp->idev->dev->ifindex, 1); | 756 | rt = rt6_lookup(net, &prefix, NULL, ifp->idev->dev->ifindex, 1); |
757 | 757 | ||
758 | if (rt && ((rt->rt6i_flags & (RTF_GATEWAY | RTF_DEFAULT)) == 0)) { | 758 | if (rt && ((rt->rt6i_flags & (RTF_GATEWAY | RTF_DEFAULT)) == 0)) { |
759 | if (onlink == 0) { | 759 | if (onlink == 0) { |
@@ -905,6 +905,7 @@ int ipv6_dev_get_saddr(struct net_device *daddr_dev, | |||
905 | { | 905 | { |
906 | struct ipv6_saddr_score hiscore; | 906 | struct ipv6_saddr_score hiscore; |
907 | struct inet6_ifaddr *ifa_result = NULL; | 907 | struct inet6_ifaddr *ifa_result = NULL; |
908 | struct net *net = daddr_dev->nd_net; | ||
908 | int daddr_type = __ipv6_addr_type(daddr); | 909 | int daddr_type = __ipv6_addr_type(daddr); |
909 | int daddr_scope = __ipv6_addr_src_scope(daddr_type); | 910 | int daddr_scope = __ipv6_addr_src_scope(daddr_type); |
910 | int daddr_ifindex = daddr_dev ? daddr_dev->ifindex : 0; | 911 | int daddr_ifindex = daddr_dev ? daddr_dev->ifindex : 0; |
@@ -916,7 +917,7 @@ int ipv6_dev_get_saddr(struct net_device *daddr_dev, | |||
916 | read_lock(&dev_base_lock); | 917 | read_lock(&dev_base_lock); |
917 | rcu_read_lock(); | 918 | rcu_read_lock(); |
918 | 919 | ||
919 | for_each_netdev(&init_net, dev) { | 920 | for_each_netdev(net, dev) { |
920 | struct inet6_dev *idev; | 921 | struct inet6_dev *idev; |
921 | struct inet6_ifaddr *ifa; | 922 | struct inet6_ifaddr *ifa; |
922 | 923 | ||
@@ -1125,6 +1126,11 @@ int ipv6_dev_get_saddr(struct net_device *daddr_dev, | |||
1125 | if (hiscore.rule < 7) | 1126 | if (hiscore.rule < 7) |
1126 | hiscore.rule++; | 1127 | hiscore.rule++; |
1127 | #endif | 1128 | #endif |
1129 | |||
1130 | /* Skip rule 8 for orchid -> non-orchid address pairs. */ | ||
1131 | if (ipv6_addr_orchid(&ifa->addr) && !ipv6_addr_orchid(daddr)) | ||
1132 | continue; | ||
1133 | |||
1128 | /* Rule 8: Use longest matching prefix */ | 1134 | /* Rule 8: Use longest matching prefix */ |
1129 | if (hiscore.rule < 8) { | 1135 | if (hiscore.rule < 8) { |
1130 | hiscore.matchlen = ipv6_addr_diff(&ifa_result->addr, daddr); | 1136 | hiscore.matchlen = ipv6_addr_diff(&ifa_result->addr, daddr); |
@@ -1162,14 +1168,7 @@ record_it: | |||
1162 | return 0; | 1168 | return 0; |
1163 | } | 1169 | } |
1164 | 1170 | ||
1165 | 1171 | EXPORT_SYMBOL(ipv6_dev_get_saddr); | |
1166 | int ipv6_get_saddr(struct dst_entry *dst, | ||
1167 | struct in6_addr *daddr, struct in6_addr *saddr) | ||
1168 | { | ||
1169 | return ipv6_dev_get_saddr(dst ? ip6_dst_idev(dst)->dev : NULL, daddr, saddr); | ||
1170 | } | ||
1171 | |||
1172 | EXPORT_SYMBOL(ipv6_get_saddr); | ||
1173 | 1172 | ||
1174 | int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr, | 1173 | int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr, |
1175 | unsigned char banned_flags) | 1174 | unsigned char banned_flags) |
@@ -1557,7 +1556,7 @@ addrconf_prefix_route(struct in6_addr *pfx, int plen, struct net_device *dev, | |||
1557 | .fc_expires = expires, | 1556 | .fc_expires = expires, |
1558 | .fc_dst_len = plen, | 1557 | .fc_dst_len = plen, |
1559 | .fc_flags = RTF_UP | flags, | 1558 | .fc_flags = RTF_UP | flags, |
1560 | .fc_nlinfo.nl_net = &init_net, | 1559 | .fc_nlinfo.nl_net = dev->nd_net, |
1561 | }; | 1560 | }; |
1562 | 1561 | ||
1563 | ipv6_addr_copy(&cfg.fc_dst, pfx); | 1562 | ipv6_addr_copy(&cfg.fc_dst, pfx); |
@@ -1584,7 +1583,7 @@ static void addrconf_add_mroute(struct net_device *dev) | |||
1584 | .fc_ifindex = dev->ifindex, | 1583 | .fc_ifindex = dev->ifindex, |
1585 | .fc_dst_len = 8, | 1584 | .fc_dst_len = 8, |
1586 | .fc_flags = RTF_UP, | 1585 | .fc_flags = RTF_UP, |
1587 | .fc_nlinfo.nl_net = &init_net, | 1586 | .fc_nlinfo.nl_net = dev->nd_net, |
1588 | }; | 1587 | }; |
1589 | 1588 | ||
1590 | ipv6_addr_set(&cfg.fc_dst, htonl(0xFF000000), 0, 0, 0); | 1589 | ipv6_addr_set(&cfg.fc_dst, htonl(0xFF000000), 0, 0, 0); |
@@ -1601,7 +1600,7 @@ static void sit_route_add(struct net_device *dev) | |||
1601 | .fc_ifindex = dev->ifindex, | 1600 | .fc_ifindex = dev->ifindex, |
1602 | .fc_dst_len = 96, | 1601 | .fc_dst_len = 96, |
1603 | .fc_flags = RTF_UP | RTF_NONEXTHOP, | 1602 | .fc_flags = RTF_UP | RTF_NONEXTHOP, |
1604 | .fc_nlinfo.nl_net = &init_net, | 1603 | .fc_nlinfo.nl_net = dev->nd_net, |
1605 | }; | 1604 | }; |
1606 | 1605 | ||
1607 | /* prefix length - 96 bits "::d.d.d.d" */ | 1606 | /* prefix length - 96 bits "::d.d.d.d" */ |
@@ -1702,7 +1701,8 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len) | |||
1702 | 1701 | ||
1703 | if (pinfo->onlink) { | 1702 | if (pinfo->onlink) { |
1704 | struct rt6_info *rt; | 1703 | struct rt6_info *rt; |
1705 | rt = rt6_lookup(&pinfo->prefix, NULL, dev->ifindex, 1); | 1704 | rt = rt6_lookup(dev->nd_net, &pinfo->prefix, NULL, |
1705 | dev->ifindex, 1); | ||
1706 | 1706 | ||
1707 | if (rt && ((rt->rt6i_flags & (RTF_GATEWAY | RTF_DEFAULT)) == 0)) { | 1707 | if (rt && ((rt->rt6i_flags & (RTF_GATEWAY | RTF_DEFAULT)) == 0)) { |
1708 | if (rt->rt6i_flags&RTF_EXPIRES) { | 1708 | if (rt->rt6i_flags&RTF_EXPIRES) { |
@@ -1745,7 +1745,7 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len) | |||
1745 | 1745 | ||
1746 | ok: | 1746 | ok: |
1747 | 1747 | ||
1748 | ifp = ipv6_get_ifaddr(&init_net, &addr, dev, 1); | 1748 | ifp = ipv6_get_ifaddr(dev->nd_net, &addr, dev, 1); |
1749 | 1749 | ||
1750 | if (ifp == NULL && valid_lft) { | 1750 | if (ifp == NULL && valid_lft) { |
1751 | int max_addresses = in6_dev->cnf.max_addresses; | 1751 | int max_addresses = in6_dev->cnf.max_addresses; |
@@ -1868,7 +1868,7 @@ ok: | |||
1868 | * Special case for SIT interfaces where we create a new "virtual" | 1868 | * Special case for SIT interfaces where we create a new "virtual" |
1869 | * device. | 1869 | * device. |
1870 | */ | 1870 | */ |
1871 | int addrconf_set_dstaddr(void __user *arg) | 1871 | int addrconf_set_dstaddr(struct net *net, void __user *arg) |
1872 | { | 1872 | { |
1873 | struct in6_ifreq ireq; | 1873 | struct in6_ifreq ireq; |
1874 | struct net_device *dev; | 1874 | struct net_device *dev; |
@@ -1880,7 +1880,7 @@ int addrconf_set_dstaddr(void __user *arg) | |||
1880 | if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq))) | 1880 | if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq))) |
1881 | goto err_exit; | 1881 | goto err_exit; |
1882 | 1882 | ||
1883 | dev = __dev_get_by_index(&init_net, ireq.ifr6_ifindex); | 1883 | dev = __dev_get_by_index(net, ireq.ifr6_ifindex); |
1884 | 1884 | ||
1885 | err = -ENODEV; | 1885 | err = -ENODEV; |
1886 | if (dev == NULL) | 1886 | if (dev == NULL) |
@@ -1911,7 +1911,8 @@ int addrconf_set_dstaddr(void __user *arg) | |||
1911 | 1911 | ||
1912 | if (err == 0) { | 1912 | if (err == 0) { |
1913 | err = -ENOBUFS; | 1913 | err = -ENOBUFS; |
1914 | if ((dev = __dev_get_by_name(&init_net, p.name)) == NULL) | 1914 | dev = __dev_get_by_name(net, p.name); |
1915 | if (!dev) | ||
1915 | goto err_exit; | 1916 | goto err_exit; |
1916 | err = dev_open(dev); | 1917 | err = dev_open(dev); |
1917 | } | 1918 | } |
@@ -1926,8 +1927,9 @@ err_exit: | |||
1926 | /* | 1927 | /* |
1927 | * Manual configuration of address on an interface | 1928 | * Manual configuration of address on an interface |
1928 | */ | 1929 | */ |
1929 | static int inet6_addr_add(int ifindex, struct in6_addr *pfx, int plen, | 1930 | static int inet6_addr_add(struct net *net, int ifindex, struct in6_addr *pfx, |
1930 | __u8 ifa_flags, __u32 prefered_lft, __u32 valid_lft) | 1931 | int plen, __u8 ifa_flags, __u32 prefered_lft, |
1932 | __u32 valid_lft) | ||
1931 | { | 1933 | { |
1932 | struct inet6_ifaddr *ifp; | 1934 | struct inet6_ifaddr *ifp; |
1933 | struct inet6_dev *idev; | 1935 | struct inet6_dev *idev; |
@@ -1941,7 +1943,8 @@ static int inet6_addr_add(int ifindex, struct in6_addr *pfx, int plen, | |||
1941 | if (!valid_lft || prefered_lft > valid_lft) | 1943 | if (!valid_lft || prefered_lft > valid_lft) |
1942 | return -EINVAL; | 1944 | return -EINVAL; |
1943 | 1945 | ||
1944 | if ((dev = __dev_get_by_index(&init_net, ifindex)) == NULL) | 1946 | dev = __dev_get_by_index(net, ifindex); |
1947 | if (!dev) | ||
1945 | return -ENODEV; | 1948 | return -ENODEV; |
1946 | 1949 | ||
1947 | if ((idev = addrconf_add_dev(dev)) == NULL) | 1950 | if ((idev = addrconf_add_dev(dev)) == NULL) |
@@ -1986,13 +1989,15 @@ static int inet6_addr_add(int ifindex, struct in6_addr *pfx, int plen, | |||
1986 | return PTR_ERR(ifp); | 1989 | return PTR_ERR(ifp); |
1987 | } | 1990 | } |
1988 | 1991 | ||
1989 | static int inet6_addr_del(int ifindex, struct in6_addr *pfx, int plen) | 1992 | static int inet6_addr_del(struct net *net, int ifindex, struct in6_addr *pfx, |
1993 | int plen) | ||
1990 | { | 1994 | { |
1991 | struct inet6_ifaddr *ifp; | 1995 | struct inet6_ifaddr *ifp; |
1992 | struct inet6_dev *idev; | 1996 | struct inet6_dev *idev; |
1993 | struct net_device *dev; | 1997 | struct net_device *dev; |
1994 | 1998 | ||
1995 | if ((dev = __dev_get_by_index(&init_net, ifindex)) == NULL) | 1999 | dev = __dev_get_by_index(net, ifindex); |
2000 | if (!dev) | ||
1996 | return -ENODEV; | 2001 | return -ENODEV; |
1997 | 2002 | ||
1998 | if ((idev = __in6_dev_get(dev)) == NULL) | 2003 | if ((idev = __in6_dev_get(dev)) == NULL) |
@@ -2020,7 +2025,7 @@ static int inet6_addr_del(int ifindex, struct in6_addr *pfx, int plen) | |||
2020 | } | 2025 | } |
2021 | 2026 | ||
2022 | 2027 | ||
2023 | int addrconf_add_ifaddr(void __user *arg) | 2028 | int addrconf_add_ifaddr(struct net *net, void __user *arg) |
2024 | { | 2029 | { |
2025 | struct in6_ifreq ireq; | 2030 | struct in6_ifreq ireq; |
2026 | int err; | 2031 | int err; |
@@ -2032,13 +2037,14 @@ int addrconf_add_ifaddr(void __user *arg) | |||
2032 | return -EFAULT; | 2037 | return -EFAULT; |
2033 | 2038 | ||
2034 | rtnl_lock(); | 2039 | rtnl_lock(); |
2035 | err = inet6_addr_add(ireq.ifr6_ifindex, &ireq.ifr6_addr, ireq.ifr6_prefixlen, | 2040 | err = inet6_addr_add(net, ireq.ifr6_ifindex, &ireq.ifr6_addr, |
2036 | IFA_F_PERMANENT, INFINITY_LIFE_TIME, INFINITY_LIFE_TIME); | 2041 | ireq.ifr6_prefixlen, IFA_F_PERMANENT, |
2042 | INFINITY_LIFE_TIME, INFINITY_LIFE_TIME); | ||
2037 | rtnl_unlock(); | 2043 | rtnl_unlock(); |
2038 | return err; | 2044 | return err; |
2039 | } | 2045 | } |
2040 | 2046 | ||
2041 | int addrconf_del_ifaddr(void __user *arg) | 2047 | int addrconf_del_ifaddr(struct net *net, void __user *arg) |
2042 | { | 2048 | { |
2043 | struct in6_ifreq ireq; | 2049 | struct in6_ifreq ireq; |
2044 | int err; | 2050 | int err; |
@@ -2050,7 +2056,8 @@ int addrconf_del_ifaddr(void __user *arg) | |||
2050 | return -EFAULT; | 2056 | return -EFAULT; |
2051 | 2057 | ||
2052 | rtnl_lock(); | 2058 | rtnl_lock(); |
2053 | err = inet6_addr_del(ireq.ifr6_ifindex, &ireq.ifr6_addr, ireq.ifr6_prefixlen); | 2059 | err = inet6_addr_del(net, ireq.ifr6_ifindex, &ireq.ifr6_addr, |
2060 | ireq.ifr6_prefixlen); | ||
2054 | rtnl_unlock(); | 2061 | rtnl_unlock(); |
2055 | return err; | 2062 | return err; |
2056 | } | 2063 | } |
@@ -2061,6 +2068,7 @@ static void sit_add_v4_addrs(struct inet6_dev *idev) | |||
2061 | struct inet6_ifaddr * ifp; | 2068 | struct inet6_ifaddr * ifp; |
2062 | struct in6_addr addr; | 2069 | struct in6_addr addr; |
2063 | struct net_device *dev; | 2070 | struct net_device *dev; |
2071 | struct net *net = idev->dev->nd_net; | ||
2064 | int scope; | 2072 | int scope; |
2065 | 2073 | ||
2066 | ASSERT_RTNL(); | 2074 | ASSERT_RTNL(); |
@@ -2087,7 +2095,7 @@ static void sit_add_v4_addrs(struct inet6_dev *idev) | |||
2087 | return; | 2095 | return; |
2088 | } | 2096 | } |
2089 | 2097 | ||
2090 | for_each_netdev(&init_net, dev) { | 2098 | for_each_netdev(net, dev) { |
2091 | struct in_device * in_dev = __in_dev_get_rtnl(dev); | 2099 | struct in_device * in_dev = __in_dev_get_rtnl(dev); |
2092 | if (in_dev && (dev->flags & IFF_UP)) { | 2100 | if (in_dev && (dev->flags & IFF_UP)) { |
2093 | struct in_ifaddr * ifa; | 2101 | struct in_ifaddr * ifa; |
@@ -2250,15 +2258,16 @@ ipv6_inherit_linklocal(struct inet6_dev *idev, struct net_device *link_dev) | |||
2250 | static void ip6_tnl_add_linklocal(struct inet6_dev *idev) | 2258 | static void ip6_tnl_add_linklocal(struct inet6_dev *idev) |
2251 | { | 2259 | { |
2252 | struct net_device *link_dev; | 2260 | struct net_device *link_dev; |
2261 | struct net *net = idev->dev->nd_net; | ||
2253 | 2262 | ||
2254 | /* first try to inherit the link-local address from the link device */ | 2263 | /* first try to inherit the link-local address from the link device */ |
2255 | if (idev->dev->iflink && | 2264 | if (idev->dev->iflink && |
2256 | (link_dev = __dev_get_by_index(&init_net, idev->dev->iflink))) { | 2265 | (link_dev = __dev_get_by_index(net, idev->dev->iflink))) { |
2257 | if (!ipv6_inherit_linklocal(idev, link_dev)) | 2266 | if (!ipv6_inherit_linklocal(idev, link_dev)) |
2258 | return; | 2267 | return; |
2259 | } | 2268 | } |
2260 | /* then try to inherit it from any device */ | 2269 | /* then try to inherit it from any device */ |
2261 | for_each_netdev(&init_net, link_dev) { | 2270 | for_each_netdev(net, link_dev) { |
2262 | if (!ipv6_inherit_linklocal(idev, link_dev)) | 2271 | if (!ipv6_inherit_linklocal(idev, link_dev)) |
2263 | return; | 2272 | return; |
2264 | } | 2273 | } |
@@ -2291,9 +2300,6 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, | |||
2291 | int run_pending = 0; | 2300 | int run_pending = 0; |
2292 | int err; | 2301 | int err; |
2293 | 2302 | ||
2294 | if (dev->nd_net != &init_net) | ||
2295 | return NOTIFY_DONE; | ||
2296 | |||
2297 | switch(event) { | 2303 | switch(event) { |
2298 | case NETDEV_REGISTER: | 2304 | case NETDEV_REGISTER: |
2299 | if (!idev && dev->mtu >= IPV6_MIN_MTU) { | 2305 | if (!idev && dev->mtu >= IPV6_MIN_MTU) { |
@@ -2433,6 +2439,7 @@ static int addrconf_ifdown(struct net_device *dev, int how) | |||
2433 | { | 2439 | { |
2434 | struct inet6_dev *idev; | 2440 | struct inet6_dev *idev; |
2435 | struct inet6_ifaddr *ifa, **bifa; | 2441 | struct inet6_ifaddr *ifa, **bifa; |
2442 | struct net *net = dev->nd_net; | ||
2436 | int i; | 2443 | int i; |
2437 | 2444 | ||
2438 | ASSERT_RTNL(); | 2445 | ASSERT_RTNL(); |
@@ -2440,7 +2447,7 @@ static int addrconf_ifdown(struct net_device *dev, int how) | |||
2440 | if (dev == init_net.loopback_dev && how == 1) | 2447 | if (dev == init_net.loopback_dev && how == 1) |
2441 | how = 0; | 2448 | how = 0; |
2442 | 2449 | ||
2443 | rt6_ifdown(dev); | 2450 | rt6_ifdown(net, dev); |
2444 | neigh_ifdown(&nd_tbl, dev); | 2451 | neigh_ifdown(&nd_tbl, dev); |
2445 | 2452 | ||
2446 | idev = __in6_dev_get(dev); | 2453 | idev = __in6_dev_get(dev); |
@@ -3050,9 +3057,6 @@ inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) | |||
3050 | struct in6_addr *pfx; | 3057 | struct in6_addr *pfx; |
3051 | int err; | 3058 | int err; |
3052 | 3059 | ||
3053 | if (net != &init_net) | ||
3054 | return -EINVAL; | ||
3055 | |||
3056 | err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy); | 3060 | err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy); |
3057 | if (err < 0) | 3061 | if (err < 0) |
3058 | return err; | 3062 | return err; |
@@ -3062,7 +3066,7 @@ inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) | |||
3062 | if (pfx == NULL) | 3066 | if (pfx == NULL) |
3063 | return -EINVAL; | 3067 | return -EINVAL; |
3064 | 3068 | ||
3065 | return inet6_addr_del(ifm->ifa_index, pfx, ifm->ifa_prefixlen); | 3069 | return inet6_addr_del(net, ifm->ifa_index, pfx, ifm->ifa_prefixlen); |
3066 | } | 3070 | } |
3067 | 3071 | ||
3068 | static int inet6_addr_modify(struct inet6_ifaddr *ifp, u8 ifa_flags, | 3072 | static int inet6_addr_modify(struct inet6_ifaddr *ifp, u8 ifa_flags, |
@@ -3115,9 +3119,6 @@ inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) | |||
3115 | u8 ifa_flags; | 3119 | u8 ifa_flags; |
3116 | int err; | 3120 | int err; |
3117 | 3121 | ||
3118 | if (net != &init_net) | ||
3119 | return -EINVAL; | ||
3120 | |||
3121 | err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy); | 3122 | err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy); |
3122 | if (err < 0) | 3123 | if (err < 0) |
3123 | return err; | 3124 | return err; |
@@ -3138,7 +3139,7 @@ inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) | |||
3138 | valid_lft = INFINITY_LIFE_TIME; | 3139 | valid_lft = INFINITY_LIFE_TIME; |
3139 | } | 3140 | } |
3140 | 3141 | ||
3141 | dev = __dev_get_by_index(&init_net, ifm->ifa_index); | 3142 | dev = __dev_get_by_index(net, ifm->ifa_index); |
3142 | if (dev == NULL) | 3143 | if (dev == NULL) |
3143 | return -ENODEV; | 3144 | return -ENODEV; |
3144 | 3145 | ||
@@ -3151,8 +3152,9 @@ inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) | |||
3151 | * It would be best to check for !NLM_F_CREATE here but | 3152 | * It would be best to check for !NLM_F_CREATE here but |
3152 | * userspace already relies on not having to provide this. | 3153 | * userspace already relies on not having to provide this. |
3153 | */ | 3154 | */ |
3154 | return inet6_addr_add(ifm->ifa_index, pfx, ifm->ifa_prefixlen, | 3155 | return inet6_addr_add(net, ifm->ifa_index, pfx, |
3155 | ifa_flags, preferred_lft, valid_lft); | 3156 | ifm->ifa_prefixlen, ifa_flags, |
3157 | preferred_lft, valid_lft); | ||
3156 | } | 3158 | } |
3157 | 3159 | ||
3158 | if (nlh->nlmsg_flags & NLM_F_EXCL || | 3160 | if (nlh->nlmsg_flags & NLM_F_EXCL || |
@@ -3317,12 +3319,13 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb, | |||
3317 | struct inet6_ifaddr *ifa; | 3319 | struct inet6_ifaddr *ifa; |
3318 | struct ifmcaddr6 *ifmca; | 3320 | struct ifmcaddr6 *ifmca; |
3319 | struct ifacaddr6 *ifaca; | 3321 | struct ifacaddr6 *ifaca; |
3322 | struct net *net = skb->sk->sk_net; | ||
3320 | 3323 | ||
3321 | s_idx = cb->args[0]; | 3324 | s_idx = cb->args[0]; |
3322 | s_ip_idx = ip_idx = cb->args[1]; | 3325 | s_ip_idx = ip_idx = cb->args[1]; |
3323 | 3326 | ||
3324 | idx = 0; | 3327 | idx = 0; |
3325 | for_each_netdev(&init_net, dev) { | 3328 | for_each_netdev(net, dev) { |
3326 | if (idx < s_idx) | 3329 | if (idx < s_idx) |
3327 | goto cont; | 3330 | goto cont; |
3328 | if (idx > s_idx) | 3331 | if (idx > s_idx) |
@@ -3389,35 +3392,23 @@ cont: | |||
3389 | 3392 | ||
3390 | static int inet6_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb) | 3393 | static int inet6_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb) |
3391 | { | 3394 | { |
3392 | struct net *net = skb->sk->sk_net; | ||
3393 | enum addr_type_t type = UNICAST_ADDR; | 3395 | enum addr_type_t type = UNICAST_ADDR; |
3394 | 3396 | ||
3395 | if (net != &init_net) | ||
3396 | return 0; | ||
3397 | |||
3398 | return inet6_dump_addr(skb, cb, type); | 3397 | return inet6_dump_addr(skb, cb, type); |
3399 | } | 3398 | } |
3400 | 3399 | ||
3401 | static int inet6_dump_ifmcaddr(struct sk_buff *skb, struct netlink_callback *cb) | 3400 | static int inet6_dump_ifmcaddr(struct sk_buff *skb, struct netlink_callback *cb) |
3402 | { | 3401 | { |
3403 | struct net *net = skb->sk->sk_net; | ||
3404 | enum addr_type_t type = MULTICAST_ADDR; | 3402 | enum addr_type_t type = MULTICAST_ADDR; |
3405 | 3403 | ||
3406 | if (net != &init_net) | ||
3407 | return 0; | ||
3408 | |||
3409 | return inet6_dump_addr(skb, cb, type); | 3404 | return inet6_dump_addr(skb, cb, type); |
3410 | } | 3405 | } |
3411 | 3406 | ||
3412 | 3407 | ||
3413 | static int inet6_dump_ifacaddr(struct sk_buff *skb, struct netlink_callback *cb) | 3408 | static int inet6_dump_ifacaddr(struct sk_buff *skb, struct netlink_callback *cb) |
3414 | { | 3409 | { |
3415 | struct net *net = skb->sk->sk_net; | ||
3416 | enum addr_type_t type = ANYCAST_ADDR; | 3410 | enum addr_type_t type = ANYCAST_ADDR; |
3417 | 3411 | ||
3418 | if (net != &init_net) | ||
3419 | return 0; | ||
3420 | |||
3421 | return inet6_dump_addr(skb, cb, type); | 3412 | return inet6_dump_addr(skb, cb, type); |
3422 | } | 3413 | } |
3423 | 3414 | ||
@@ -3433,9 +3424,6 @@ static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr* nlh, | |||
3433 | struct sk_buff *skb; | 3424 | struct sk_buff *skb; |
3434 | int err; | 3425 | int err; |
3435 | 3426 | ||
3436 | if (net != &init_net) | ||
3437 | return -EINVAL; | ||
3438 | |||
3439 | err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy); | 3427 | err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy); |
3440 | if (err < 0) | 3428 | if (err < 0) |
3441 | goto errout; | 3429 | goto errout; |
@@ -3448,7 +3436,7 @@ static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr* nlh, | |||
3448 | 3436 | ||
3449 | ifm = nlmsg_data(nlh); | 3437 | ifm = nlmsg_data(nlh); |
3450 | if (ifm->ifa_index) | 3438 | if (ifm->ifa_index) |
3451 | dev = __dev_get_by_index(&init_net, ifm->ifa_index); | 3439 | dev = __dev_get_by_index(net, ifm->ifa_index); |
3452 | 3440 | ||
3453 | if ((ifa = ipv6_get_ifaddr(net, addr, dev, 1)) == NULL) { | 3441 | if ((ifa = ipv6_get_ifaddr(net, addr, dev, 1)) == NULL) { |
3454 | err = -EADDRNOTAVAIL; | 3442 | err = -EADDRNOTAVAIL; |
@@ -3468,7 +3456,7 @@ static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr* nlh, | |||
3468 | kfree_skb(skb); | 3456 | kfree_skb(skb); |
3469 | goto errout_ifa; | 3457 | goto errout_ifa; |
3470 | } | 3458 | } |
3471 | err = rtnl_unicast(skb, &init_net, NETLINK_CB(in_skb).pid); | 3459 | err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid); |
3472 | errout_ifa: | 3460 | errout_ifa: |
3473 | in6_ifa_put(ifa); | 3461 | in6_ifa_put(ifa); |
3474 | errout: | 3462 | errout: |
@@ -3478,6 +3466,7 @@ errout: | |||
3478 | static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa) | 3466 | static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa) |
3479 | { | 3467 | { |
3480 | struct sk_buff *skb; | 3468 | struct sk_buff *skb; |
3469 | struct net *net = ifa->idev->dev->nd_net; | ||
3481 | int err = -ENOBUFS; | 3470 | int err = -ENOBUFS; |
3482 | 3471 | ||
3483 | skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_ATOMIC); | 3472 | skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_ATOMIC); |
@@ -3491,10 +3480,10 @@ static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa) | |||
3491 | kfree_skb(skb); | 3480 | kfree_skb(skb); |
3492 | goto errout; | 3481 | goto errout; |
3493 | } | 3482 | } |
3494 | err = rtnl_notify(skb, &init_net, 0, RTNLGRP_IPV6_IFADDR, NULL, GFP_ATOMIC); | 3483 | err = rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFADDR, NULL, GFP_ATOMIC); |
3495 | errout: | 3484 | errout: |
3496 | if (err < 0) | 3485 | if (err < 0) |
3497 | rtnl_set_sk_err(&init_net, RTNLGRP_IPV6_IFADDR, err); | 3486 | rtnl_set_sk_err(net, RTNLGRP_IPV6_IFADDR, err); |
3498 | } | 3487 | } |
3499 | 3488 | ||
3500 | static inline void ipv6_store_devconf(struct ipv6_devconf *cnf, | 3489 | static inline void ipv6_store_devconf(struct ipv6_devconf *cnf, |
@@ -3659,12 +3648,9 @@ static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) | |||
3659 | struct net_device *dev; | 3648 | struct net_device *dev; |
3660 | struct inet6_dev *idev; | 3649 | struct inet6_dev *idev; |
3661 | 3650 | ||
3662 | if (net != &init_net) | ||
3663 | return 0; | ||
3664 | |||
3665 | read_lock(&dev_base_lock); | 3651 | read_lock(&dev_base_lock); |
3666 | idx = 0; | 3652 | idx = 0; |
3667 | for_each_netdev(&init_net, dev) { | 3653 | for_each_netdev(net, dev) { |
3668 | if (idx < s_idx) | 3654 | if (idx < s_idx) |
3669 | goto cont; | 3655 | goto cont; |
3670 | if ((idev = in6_dev_get(dev)) == NULL) | 3656 | if ((idev = in6_dev_get(dev)) == NULL) |
@@ -3686,6 +3672,7 @@ cont: | |||
3686 | void inet6_ifinfo_notify(int event, struct inet6_dev *idev) | 3672 | void inet6_ifinfo_notify(int event, struct inet6_dev *idev) |
3687 | { | 3673 | { |
3688 | struct sk_buff *skb; | 3674 | struct sk_buff *skb; |
3675 | struct net *net = idev->dev->nd_net; | ||
3689 | int err = -ENOBUFS; | 3676 | int err = -ENOBUFS; |
3690 | 3677 | ||
3691 | skb = nlmsg_new(inet6_if_nlmsg_size(), GFP_ATOMIC); | 3678 | skb = nlmsg_new(inet6_if_nlmsg_size(), GFP_ATOMIC); |
@@ -3699,10 +3686,10 @@ void inet6_ifinfo_notify(int event, struct inet6_dev *idev) | |||
3699 | kfree_skb(skb); | 3686 | kfree_skb(skb); |
3700 | goto errout; | 3687 | goto errout; |
3701 | } | 3688 | } |
3702 | err = rtnl_notify(skb, &init_net, 0, RTNLGRP_IPV6_IFADDR, NULL, GFP_ATOMIC); | 3689 | err = rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFADDR, NULL, GFP_ATOMIC); |
3703 | errout: | 3690 | errout: |
3704 | if (err < 0) | 3691 | if (err < 0) |
3705 | rtnl_set_sk_err(&init_net, RTNLGRP_IPV6_IFADDR, err); | 3692 | rtnl_set_sk_err(net, RTNLGRP_IPV6_IFADDR, err); |
3706 | } | 3693 | } |
3707 | 3694 | ||
3708 | static inline size_t inet6_prefix_nlmsg_size(void) | 3695 | static inline size_t inet6_prefix_nlmsg_size(void) |
@@ -3755,6 +3742,7 @@ static void inet6_prefix_notify(int event, struct inet6_dev *idev, | |||
3755 | struct prefix_info *pinfo) | 3742 | struct prefix_info *pinfo) |
3756 | { | 3743 | { |
3757 | struct sk_buff *skb; | 3744 | struct sk_buff *skb; |
3745 | struct net *net = idev->dev->nd_net; | ||
3758 | int err = -ENOBUFS; | 3746 | int err = -ENOBUFS; |
3759 | 3747 | ||
3760 | skb = nlmsg_new(inet6_prefix_nlmsg_size(), GFP_ATOMIC); | 3748 | skb = nlmsg_new(inet6_prefix_nlmsg_size(), GFP_ATOMIC); |
@@ -3768,10 +3756,10 @@ static void inet6_prefix_notify(int event, struct inet6_dev *idev, | |||
3768 | kfree_skb(skb); | 3756 | kfree_skb(skb); |
3769 | goto errout; | 3757 | goto errout; |
3770 | } | 3758 | } |
3771 | err = rtnl_notify(skb, &init_net, 0, RTNLGRP_IPV6_PREFIX, NULL, GFP_ATOMIC); | 3759 | err = rtnl_notify(skb, net, 0, RTNLGRP_IPV6_PREFIX, NULL, GFP_ATOMIC); |
3772 | errout: | 3760 | errout: |
3773 | if (err < 0) | 3761 | if (err < 0) |
3774 | rtnl_set_sk_err(&init_net, RTNLGRP_IPV6_PREFIX, err); | 3762 | rtnl_set_sk_err(net, RTNLGRP_IPV6_PREFIX, err); |
3775 | } | 3763 | } |
3776 | 3764 | ||
3777 | static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp) | 3765 | static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp) |
@@ -4261,6 +4249,32 @@ int unregister_inet6addr_notifier(struct notifier_block *nb) | |||
4261 | 4249 | ||
4262 | EXPORT_SYMBOL(unregister_inet6addr_notifier); | 4250 | EXPORT_SYMBOL(unregister_inet6addr_notifier); |
4263 | 4251 | ||
4252 | |||
4253 | static int addrconf_net_init(struct net *net) | ||
4254 | { | ||
4255 | return 0; | ||
4256 | } | ||
4257 | |||
4258 | static void addrconf_net_exit(struct net *net) | ||
4259 | { | ||
4260 | struct net_device *dev; | ||
4261 | |||
4262 | rtnl_lock(); | ||
4263 | /* clean dev list */ | ||
4264 | for_each_netdev(net, dev) { | ||
4265 | if (__in6_dev_get(dev) == NULL) | ||
4266 | continue; | ||
4267 | addrconf_ifdown(dev, 1); | ||
4268 | } | ||
4269 | addrconf_ifdown(net->loopback_dev, 2); | ||
4270 | rtnl_unlock(); | ||
4271 | } | ||
4272 | |||
4273 | static struct pernet_operations addrconf_net_ops = { | ||
4274 | .init = addrconf_net_init, | ||
4275 | .exit = addrconf_net_exit, | ||
4276 | }; | ||
4277 | |||
4264 | /* | 4278 | /* |
4265 | * Init / cleanup code | 4279 | * Init / cleanup code |
4266 | */ | 4280 | */ |
@@ -4302,14 +4316,9 @@ int __init addrconf_init(void) | |||
4302 | if (err) | 4316 | if (err) |
4303 | goto errlo; | 4317 | goto errlo; |
4304 | 4318 | ||
4305 | ip6_null_entry.u.dst.dev = init_net.loopback_dev; | 4319 | err = register_pernet_device(&addrconf_net_ops); |
4306 | ip6_null_entry.rt6i_idev = in6_dev_get(init_net.loopback_dev); | 4320 | if (err) |
4307 | #ifdef CONFIG_IPV6_MULTIPLE_TABLES | 4321 | return err; |
4308 | ip6_prohibit_entry.u.dst.dev = init_net.loopback_dev; | ||
4309 | ip6_prohibit_entry.rt6i_idev = in6_dev_get(init_net.loopback_dev); | ||
4310 | ip6_blk_hole_entry.u.dst.dev = init_net.loopback_dev; | ||
4311 | ip6_blk_hole_entry.rt6i_idev = in6_dev_get(init_net.loopback_dev); | ||
4312 | #endif | ||
4313 | 4322 | ||
4314 | register_netdevice_notifier(&ipv6_dev_notf); | 4323 | register_netdevice_notifier(&ipv6_dev_notf); |
4315 | 4324 | ||
@@ -4339,31 +4348,19 @@ errlo: | |||
4339 | 4348 | ||
4340 | void addrconf_cleanup(void) | 4349 | void addrconf_cleanup(void) |
4341 | { | 4350 | { |
4342 | struct net_device *dev; | ||
4343 | struct inet6_ifaddr *ifa; | 4351 | struct inet6_ifaddr *ifa; |
4344 | int i; | 4352 | int i; |
4345 | 4353 | ||
4346 | unregister_netdevice_notifier(&ipv6_dev_notf); | 4354 | unregister_netdevice_notifier(&ipv6_dev_notf); |
4355 | unregister_pernet_device(&addrconf_net_ops); | ||
4347 | 4356 | ||
4348 | unregister_pernet_subsys(&addrconf_ops); | 4357 | unregister_pernet_subsys(&addrconf_ops); |
4349 | 4358 | ||
4350 | rtnl_lock(); | 4359 | rtnl_lock(); |
4351 | 4360 | ||
4352 | /* | 4361 | /* |
4353 | * clean dev list. | ||
4354 | */ | ||
4355 | |||
4356 | for_each_netdev(&init_net, dev) { | ||
4357 | if (__in6_dev_get(dev) == NULL) | ||
4358 | continue; | ||
4359 | addrconf_ifdown(dev, 1); | ||
4360 | } | ||
4361 | addrconf_ifdown(init_net.loopback_dev, 2); | ||
4362 | |||
4363 | /* | ||
4364 | * Check hash table. | 4362 | * Check hash table. |
4365 | */ | 4363 | */ |
4366 | |||
4367 | write_lock_bh(&addrconf_hash_lock); | 4364 | write_lock_bh(&addrconf_hash_lock); |
4368 | for (i=0; i < IN6_ADDR_HSIZE; i++) { | 4365 | for (i=0; i < IN6_ADDR_HSIZE; i++) { |
4369 | for (ifa=inet6_addr_lst[i]; ifa; ) { | 4366 | for (ifa=inet6_addr_lst[i]; ifa; ) { |
@@ -4380,6 +4377,7 @@ void addrconf_cleanup(void) | |||
4380 | write_unlock_bh(&addrconf_hash_lock); | 4377 | write_unlock_bh(&addrconf_hash_lock); |
4381 | 4378 | ||
4382 | del_timer(&addr_chk_timer); | 4379 | del_timer(&addr_chk_timer); |
4383 | |||
4384 | rtnl_unlock(); | 4380 | rtnl_unlock(); |
4381 | |||
4382 | unregister_pernet_subsys(&addrconf_net_ops); | ||
4385 | } | 4383 | } |
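The addrconf.c changes above hang IPv6 address configuration off each device's own namespace (dev->nd_net instead of &init_net) and move the "shut down every inet6 device" loop out of the global addrconf_cleanup() into the exit hook of a registered pernet_operations block. A stripped-down userspace model of that init/exit callback pattern, with toy struct net and ops types rather than the kernel API:

#include <stdio.h>

struct net { const char *name; };

/* Same shape as the kernel's pernet_operations: an init hook when a
 * namespace is created and an exit hook when it is destroyed. */
struct pernet_ops {
    int  (*init)(struct net *net);
    void (*exit)(struct net *net);
};

static int addrconf_net_init(struct net *net)
{
    (void)net;                             /* nothing to set up in this sketch */
    return 0;
}

static void addrconf_net_exit(struct net *net)
{
    /* the real hook walks for_each_netdev(net, dev) and calls addrconf_ifdown() */
    printf("cleaning up IPv6 devices in %s\n", net->name);
}

static struct pernet_ops addrconf_net_ops = {
    .init = addrconf_net_init,
    .exit = addrconf_net_exit,
};

int main(void)
{
    struct net ns = { "netns0" };

    if (addrconf_net_ops.init(&ns) == 0)   /* namespace created */
        addrconf_net_ops.exit(&ns);        /* namespace destroyed */
    return 0;
}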
diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c index a3c5a72218fd..de371b5997fe 100644 --- a/net/ipv6/addrlabel.c +++ b/net/ipv6/addrlabel.c | |||
@@ -58,6 +58,7 @@ static struct ip6addrlbl_table | |||
58 | * ::ffff:0:0/96 V4MAPPED 4 | 58 | * ::ffff:0:0/96 V4MAPPED 4 |
59 | * fc00::/7 N/A 5 ULA (RFC 4193) | 59 | * fc00::/7 N/A 5 ULA (RFC 4193) |
60 | * 2001::/32 N/A 6 Teredo (RFC 4380) | 60 | * 2001::/32 N/A 6 Teredo (RFC 4380) |
61 | * 2001:10::/28 N/A 7 ORCHID (RFC 4843) | ||
61 | * | 62 | * |
62 | * Note: 0xffffffff is used if we do not have any policies. | 63 | * Note: 0xffffffff is used if we do not have any policies. |
63 | */ | 64 | */ |
@@ -85,6 +86,10 @@ static const __initdata struct ip6addrlbl_init_table | |||
85 | .prefix = &(struct in6_addr){{{ 0x20, 0x01 }}}, | 86 | .prefix = &(struct in6_addr){{{ 0x20, 0x01 }}}, |
86 | .prefixlen = 32, | 87 | .prefixlen = 32, |
87 | .label = 6, | 88 | .label = 6, |
89 | },{ /* 2001:10::/28 */ | ||
90 | .prefix = &(struct in6_addr){{{ 0x20, 0x01, 0x00, 0x10 }}}, | ||
91 | .prefixlen = 28, | ||
92 | .label = 7, | ||
88 | },{ /* ::ffff:0:0 */ | 93 | },{ /* ::ffff:0:0 */ |
89 | .prefix = &(struct in6_addr){{{ [10] = 0xff, [11] = 0xff }}}, | 94 | .prefix = &(struct in6_addr){{{ [10] = 0xff, [11] = 0xff }}}, |
90 | .prefixlen = 96, | 95 | .prefixlen = 96, |
@@ -161,7 +166,7 @@ u32 ipv6_addr_label(const struct in6_addr *addr, int type, int ifindex) | |||
161 | rcu_read_unlock(); | 166 | rcu_read_unlock(); |
162 | 167 | ||
163 | ADDRLABEL(KERN_DEBUG "%s(addr=" NIP6_FMT ", type=%d, ifindex=%d) => %08x\n", | 168 | ADDRLABEL(KERN_DEBUG "%s(addr=" NIP6_FMT ", type=%d, ifindex=%d) => %08x\n", |
164 | __FUNCTION__, | 169 | __func__, |
165 | NIP6(*addr), type, ifindex, | 170 | NIP6(*addr), type, ifindex, |
166 | label); | 171 | label); |
167 | 172 | ||
@@ -177,7 +182,7 @@ static struct ip6addrlbl_entry *ip6addrlbl_alloc(const struct in6_addr *prefix, | |||
177 | int addrtype; | 182 | int addrtype; |
178 | 183 | ||
179 | ADDRLABEL(KERN_DEBUG "%s(prefix=" NIP6_FMT ", prefixlen=%d, ifindex=%d, label=%u)\n", | 184 | ADDRLABEL(KERN_DEBUG "%s(prefix=" NIP6_FMT ", prefixlen=%d, ifindex=%d, label=%u)\n", |
180 | __FUNCTION__, | 185 | __func__, |
181 | NIP6(*prefix), prefixlen, | 186 | NIP6(*prefix), prefixlen, |
182 | ifindex, | 187 | ifindex, |
183 | (unsigned int)label); | 188 | (unsigned int)label); |
@@ -221,7 +226,7 @@ static int __ip6addrlbl_add(struct ip6addrlbl_entry *newp, int replace) | |||
221 | int ret = 0; | 226 | int ret = 0; |
222 | 227 | ||
223 | ADDRLABEL(KERN_DEBUG "%s(newp=%p, replace=%d)\n", | 228 | ADDRLABEL(KERN_DEBUG "%s(newp=%p, replace=%d)\n", |
224 | __FUNCTION__, | 229 | __func__, |
225 | newp, replace); | 230 | newp, replace); |
226 | 231 | ||
227 | if (hlist_empty(&ip6addrlbl_table.head)) { | 232 | if (hlist_empty(&ip6addrlbl_table.head)) { |
@@ -263,7 +268,7 @@ static int ip6addrlbl_add(const struct in6_addr *prefix, int prefixlen, | |||
263 | int ret = 0; | 268 | int ret = 0; |
264 | 269 | ||
265 | ADDRLABEL(KERN_DEBUG "%s(prefix=" NIP6_FMT ", prefixlen=%d, ifindex=%d, label=%u, replace=%d)\n", | 270 | ADDRLABEL(KERN_DEBUG "%s(prefix=" NIP6_FMT ", prefixlen=%d, ifindex=%d, label=%u, replace=%d)\n", |
266 | __FUNCTION__, | 271 | __func__, |
267 | NIP6(*prefix), prefixlen, | 272 | NIP6(*prefix), prefixlen, |
268 | ifindex, | 273 | ifindex, |
269 | (unsigned int)label, | 274 | (unsigned int)label, |
@@ -289,7 +294,7 @@ static int __ip6addrlbl_del(const struct in6_addr *prefix, int prefixlen, | |||
289 | int ret = -ESRCH; | 294 | int ret = -ESRCH; |
290 | 295 | ||
291 | ADDRLABEL(KERN_DEBUG "%s(prefix=" NIP6_FMT ", prefixlen=%d, ifindex=%d)\n", | 296 | ADDRLABEL(KERN_DEBUG "%s(prefix=" NIP6_FMT ", prefixlen=%d, ifindex=%d)\n", |
292 | __FUNCTION__, | 297 | __func__, |
293 | NIP6(*prefix), prefixlen, | 298 | NIP6(*prefix), prefixlen, |
294 | ifindex); | 299 | ifindex); |
295 | 300 | ||
@@ -313,7 +318,7 @@ static int ip6addrlbl_del(const struct in6_addr *prefix, int prefixlen, | |||
313 | int ret; | 318 | int ret; |
314 | 319 | ||
315 | ADDRLABEL(KERN_DEBUG "%s(prefix=" NIP6_FMT ", prefixlen=%d, ifindex=%d)\n", | 320 | ADDRLABEL(KERN_DEBUG "%s(prefix=" NIP6_FMT ", prefixlen=%d, ifindex=%d)\n", |
316 | __FUNCTION__, | 321 | __func__, |
317 | NIP6(*prefix), prefixlen, | 322 | NIP6(*prefix), prefixlen, |
318 | ifindex); | 323 | ifindex); |
319 | 324 | ||
@@ -330,7 +335,7 @@ static __init int ip6addrlbl_init(void) | |||
330 | int err = 0; | 335 | int err = 0; |
331 | int i; | 336 | int i; |
332 | 337 | ||
333 | ADDRLABEL(KERN_DEBUG "%s()\n", __FUNCTION__); | 338 | ADDRLABEL(KERN_DEBUG "%s()\n", __func__); |
334 | 339 | ||
335 | for (i = 0; i < ARRAY_SIZE(ip6addrlbl_init_table); i++) { | 340 | for (i = 0; i < ARRAY_SIZE(ip6addrlbl_init_table); i++) { |
336 | int ret = ip6addrlbl_add(ip6addrlbl_init_table[i].prefix, | 341 | int ret = ip6addrlbl_add(ip6addrlbl_init_table[i].prefix, |
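The entry added above gives the ORCHID range its own default label. Because .prefixlen is 28, only the top 28 bits take part in the match, so the four bytes spelled out in the compound literal fully describe 2001:10::/28. A standalone restatement of that initializer for reference; the variable name is illustrative:

/* 2001:10::/28 (ORCHID, RFC 4843): only the leading 28 bits are compared. */
static const struct in6_addr orchid_prefix = {
	{ { 0x20, 0x01, 0x00, 0x10 } }	/* remaining 12 bytes default to zero */
};
/* paired with .prefixlen = 28 and .label = 7 in ip6addrlbl_init_table above */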
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index f0aa97738746..730a861b8f41 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c | |||
@@ -92,9 +92,6 @@ static int inet6_create(struct net *net, struct socket *sock, int protocol) | |||
92 | int try_loading_module = 0; | 92 | int try_loading_module = 0; |
93 | int err; | 93 | int err; |
94 | 94 | ||
95 | if (net != &init_net) | ||
96 | return -EAFNOSUPPORT; | ||
97 | |||
98 | if (sock->type != SOCK_RAW && | 95 | if (sock->type != SOCK_RAW && |
99 | sock->type != SOCK_DGRAM && | 96 | sock->type != SOCK_DGRAM && |
100 | !inet_ehash_secret) | 97 | !inet_ehash_secret) |
@@ -248,6 +245,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |||
248 | struct sock *sk = sock->sk; | 245 | struct sock *sk = sock->sk; |
249 | struct inet_sock *inet = inet_sk(sk); | 246 | struct inet_sock *inet = inet_sk(sk); |
250 | struct ipv6_pinfo *np = inet6_sk(sk); | 247 | struct ipv6_pinfo *np = inet6_sk(sk); |
248 | struct net *net = sk->sk_net; | ||
251 | __be32 v4addr = 0; | 249 | __be32 v4addr = 0; |
252 | unsigned short snum; | 250 | unsigned short snum; |
253 | int addr_type = 0; | 251 | int addr_type = 0; |
@@ -278,7 +276,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |||
278 | /* Check if the address belongs to the host. */ | 276 | /* Check if the address belongs to the host. */ |
279 | if (addr_type == IPV6_ADDR_MAPPED) { | 277 | if (addr_type == IPV6_ADDR_MAPPED) { |
280 | v4addr = addr->sin6_addr.s6_addr32[3]; | 278 | v4addr = addr->sin6_addr.s6_addr32[3]; |
281 | if (inet_addr_type(&init_net, v4addr) != RTN_LOCAL) { | 279 | if (inet_addr_type(net, v4addr) != RTN_LOCAL) { |
282 | err = -EADDRNOTAVAIL; | 280 | err = -EADDRNOTAVAIL; |
283 | goto out; | 281 | goto out; |
284 | } | 282 | } |
@@ -300,7 +298,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |||
300 | err = -EINVAL; | 298 | err = -EINVAL; |
301 | goto out; | 299 | goto out; |
302 | } | 300 | } |
303 | dev = dev_get_by_index(&init_net, sk->sk_bound_dev_if); | 301 | dev = dev_get_by_index(net, sk->sk_bound_dev_if); |
304 | if (!dev) { | 302 | if (!dev) { |
305 | err = -ENODEV; | 303 | err = -ENODEV; |
306 | goto out; | 304 | goto out; |
@@ -312,7 +310,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |||
312 | */ | 310 | */ |
313 | v4addr = LOOPBACK4_IPV6; | 311 | v4addr = LOOPBACK4_IPV6; |
314 | if (!(addr_type & IPV6_ADDR_MULTICAST)) { | 312 | if (!(addr_type & IPV6_ADDR_MULTICAST)) { |
315 | if (!ipv6_chk_addr(&init_net, &addr->sin6_addr, | 313 | if (!ipv6_chk_addr(net, &addr->sin6_addr, |
316 | dev, 0)) { | 314 | dev, 0)) { |
317 | if (dev) | 315 | if (dev) |
318 | dev_put(dev); | 316 | dev_put(dev); |
@@ -440,6 +438,7 @@ EXPORT_SYMBOL(inet6_getname); | |||
440 | int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) | 438 | int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) |
441 | { | 439 | { |
442 | struct sock *sk = sock->sk; | 440 | struct sock *sk = sock->sk; |
441 | struct net *net = sk->sk_net; | ||
443 | 442 | ||
444 | switch(cmd) | 443 | switch(cmd) |
445 | { | 444 | { |
@@ -452,14 +451,14 @@ int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) | |||
452 | case SIOCADDRT: | 451 | case SIOCADDRT: |
453 | case SIOCDELRT: | 452 | case SIOCDELRT: |
454 | 453 | ||
455 | return(ipv6_route_ioctl(cmd,(void __user *)arg)); | 454 | return(ipv6_route_ioctl(net, cmd, (void __user *)arg)); |
456 | 455 | ||
457 | case SIOCSIFADDR: | 456 | case SIOCSIFADDR: |
458 | return addrconf_add_ifaddr((void __user *) arg); | 457 | return addrconf_add_ifaddr(net, (void __user *) arg); |
459 | case SIOCDIFADDR: | 458 | case SIOCDIFADDR: |
460 | return addrconf_del_ifaddr((void __user *) arg); | 459 | return addrconf_del_ifaddr(net, (void __user *) arg); |
461 | case SIOCSIFDSTADDR: | 460 | case SIOCSIFDSTADDR: |
462 | return addrconf_set_dstaddr((void __user *) arg); | 461 | return addrconf_set_dstaddr(net, (void __user *) arg); |
463 | default: | 462 | default: |
464 | if (!sk->sk_prot->ioctl) | 463 | if (!sk->sk_prot->ioctl) |
465 | return -ENOIOCTLCMD; | 464 | return -ENOIOCTLCMD; |
@@ -678,6 +677,129 @@ int ipv6_opt_accepted(struct sock *sk, struct sk_buff *skb) | |||
678 | 677 | ||
679 | EXPORT_SYMBOL_GPL(ipv6_opt_accepted); | 678 | EXPORT_SYMBOL_GPL(ipv6_opt_accepted); |
680 | 679 | ||
680 | static struct inet6_protocol *ipv6_gso_pull_exthdrs(struct sk_buff *skb, | ||
681 | int proto) | ||
682 | { | ||
683 | struct inet6_protocol *ops = NULL; | ||
684 | |||
685 | for (;;) { | ||
686 | struct ipv6_opt_hdr *opth; | ||
687 | int len; | ||
688 | |||
689 | if (proto != NEXTHDR_HOP) { | ||
690 | ops = rcu_dereference(inet6_protos[proto]); | ||
691 | |||
692 | if (unlikely(!ops)) | ||
693 | break; | ||
694 | |||
695 | if (!(ops->flags & INET6_PROTO_GSO_EXTHDR)) | ||
696 | break; | ||
697 | } | ||
698 | |||
699 | if (unlikely(!pskb_may_pull(skb, 8))) | ||
700 | break; | ||
701 | |||
702 | opth = (void *)skb->data; | ||
703 | len = ipv6_optlen(opth); | ||
704 | |||
705 | if (unlikely(!pskb_may_pull(skb, len))) | ||
706 | break; | ||
707 | |||
708 | proto = opth->nexthdr; | ||
709 | __skb_pull(skb, len); | ||
710 | } | ||
711 | |||
712 | return ops; | ||
713 | } | ||
714 | |||
715 | static int ipv6_gso_send_check(struct sk_buff *skb) | ||
716 | { | ||
717 | struct ipv6hdr *ipv6h; | ||
718 | struct inet6_protocol *ops; | ||
719 | int err = -EINVAL; | ||
720 | |||
721 | if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h)))) | ||
722 | goto out; | ||
723 | |||
724 | ipv6h = ipv6_hdr(skb); | ||
725 | __skb_pull(skb, sizeof(*ipv6h)); | ||
726 | err = -EPROTONOSUPPORT; | ||
727 | |||
728 | rcu_read_lock(); | ||
729 | ops = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr); | ||
730 | if (likely(ops && ops->gso_send_check)) { | ||
731 | skb_reset_transport_header(skb); | ||
732 | err = ops->gso_send_check(skb); | ||
733 | } | ||
734 | rcu_read_unlock(); | ||
735 | |||
736 | out: | ||
737 | return err; | ||
738 | } | ||
739 | |||
740 | static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features) | ||
741 | { | ||
742 | struct sk_buff *segs = ERR_PTR(-EINVAL); | ||
743 | struct ipv6hdr *ipv6h; | ||
744 | struct inet6_protocol *ops; | ||
745 | |||
746 | if (!(features & NETIF_F_V6_CSUM)) | ||
747 | features &= ~NETIF_F_SG; | ||
748 | |||
749 | if (unlikely(skb_shinfo(skb)->gso_type & | ||
750 | ~(SKB_GSO_UDP | | ||
751 | SKB_GSO_DODGY | | ||
752 | SKB_GSO_TCP_ECN | | ||
753 | SKB_GSO_TCPV6 | | ||
754 | 0))) | ||
755 | goto out; | ||
756 | |||
757 | if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h)))) | ||
758 | goto out; | ||
759 | |||
760 | ipv6h = ipv6_hdr(skb); | ||
761 | __skb_pull(skb, sizeof(*ipv6h)); | ||
762 | segs = ERR_PTR(-EPROTONOSUPPORT); | ||
763 | |||
764 | rcu_read_lock(); | ||
765 | ops = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr); | ||
766 | if (likely(ops && ops->gso_segment)) { | ||
767 | skb_reset_transport_header(skb); | ||
768 | segs = ops->gso_segment(skb, features); | ||
769 | } | ||
770 | rcu_read_unlock(); | ||
771 | |||
772 | if (unlikely(IS_ERR(segs))) | ||
773 | goto out; | ||
774 | |||
775 | for (skb = segs; skb; skb = skb->next) { | ||
776 | ipv6h = ipv6_hdr(skb); | ||
777 | ipv6h->payload_len = htons(skb->len - skb->mac_len - | ||
778 | sizeof(*ipv6h)); | ||
779 | } | ||
780 | |||
781 | out: | ||
782 | return segs; | ||
783 | } | ||
784 | |||
785 | static struct packet_type ipv6_packet_type = { | ||
786 | .type = __constant_htons(ETH_P_IPV6), | ||
787 | .func = ipv6_rcv, | ||
788 | .gso_send_check = ipv6_gso_send_check, | ||
789 | .gso_segment = ipv6_gso_segment, | ||
790 | }; | ||
791 | |||
792 | static int __init ipv6_packet_init(void) | ||
793 | { | ||
794 | dev_add_pack(&ipv6_packet_type); | ||
795 | return 0; | ||
796 | } | ||
797 | |||
798 | static void ipv6_packet_cleanup(void) | ||
799 | { | ||
800 | dev_remove_pack(&ipv6_packet_type); | ||
801 | } | ||
802 | |||
681 | static int __init init_ipv6_mibs(void) | 803 | static int __init init_ipv6_mibs(void) |
682 | { | 804 | { |
683 | if (snmp_mib_init((void **)ipv6_statistics, | 805 | if (snmp_mib_init((void **)ipv6_statistics, |
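ipv6_gso_pull_exthdrs() above walks the extension-header chain before handing a GSO packet to the transport handler: hop-by-hop is always skipped, any other header only if its protocol is flagged INET6_PROTO_GSO_EXTHDR, and each header is pulled by ipv6_optlen() bytes; ipv6_gso_segment() then rewrites payload_len on every produced segment as skb->len - mac_len - sizeof(struct ipv6hdr). As a reminder of the sizing rule the walk depends on (hedged: in this era of the tree ipv6_optlen() expands to ((hdrlen + 1) << 3)):

/* Illustrative helper only, not part of the patch: IPv6 extension headers
 * advertise their length in 8-octet units, excluding the first 8 octets,
 * which is why the walk above can pull ipv6_optlen(opth) bytes and then read
 * the next protocol number from the following header. */
static inline int exthdr_bytes(unsigned int hdrlen_field)
{
	return (hdrlen_field + 1) << 3;	/* hdrlen 0 -> 8 bytes, 1 -> 16, ... */
}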
@@ -802,19 +924,13 @@ static int __init inet6_init(void) | |||
802 | err = register_pernet_subsys(&inet6_net_ops); | 924 | err = register_pernet_subsys(&inet6_net_ops); |
803 | if (err) | 925 | if (err) |
804 | goto register_pernet_fail; | 926 | goto register_pernet_fail; |
805 | 927 | err = icmpv6_init(); | |
806 | #ifdef CONFIG_SYSCTL | ||
807 | err = ipv6_sysctl_register(); | ||
808 | if (err) | ||
809 | goto sysctl_fail; | ||
810 | #endif | ||
811 | err = icmpv6_init(&inet6_family_ops); | ||
812 | if (err) | 928 | if (err) |
813 | goto icmp_fail; | 929 | goto icmp_fail; |
814 | err = ndisc_init(&inet6_family_ops); | 930 | err = ndisc_init(); |
815 | if (err) | 931 | if (err) |
816 | goto ndisc_fail; | 932 | goto ndisc_fail; |
817 | err = igmp6_init(&inet6_family_ops); | 933 | err = igmp6_init(); |
818 | if (err) | 934 | if (err) |
819 | goto igmp_fail; | 935 | goto igmp_fail; |
820 | err = ipv6_netfilter_init(); | 936 | err = ipv6_netfilter_init(); |
@@ -874,9 +990,19 @@ static int __init inet6_init(void) | |||
874 | err = ipv6_packet_init(); | 990 | err = ipv6_packet_init(); |
875 | if (err) | 991 | if (err) |
876 | goto ipv6_packet_fail; | 992 | goto ipv6_packet_fail; |
993 | |||
994 | #ifdef CONFIG_SYSCTL | ||
995 | err = ipv6_sysctl_register(); | ||
996 | if (err) | ||
997 | goto sysctl_fail; | ||
998 | #endif | ||
877 | out: | 999 | out: |
878 | return err; | 1000 | return err; |
879 | 1001 | ||
1002 | #ifdef CONFIG_SYSCTL | ||
1003 | sysctl_fail: | ||
1004 | ipv6_packet_cleanup(); | ||
1005 | #endif | ||
880 | ipv6_packet_fail: | 1006 | ipv6_packet_fail: |
881 | tcpv6_exit(); | 1007 | tcpv6_exit(); |
882 | tcpv6_fail: | 1008 | tcpv6_fail: |
@@ -918,10 +1044,6 @@ igmp_fail: | |||
918 | ndisc_fail: | 1044 | ndisc_fail: |
919 | icmpv6_cleanup(); | 1045 | icmpv6_cleanup(); |
920 | icmp_fail: | 1046 | icmp_fail: |
921 | #ifdef CONFIG_SYSCTL | ||
922 | ipv6_sysctl_unregister(); | ||
923 | sysctl_fail: | ||
924 | #endif | ||
925 | unregister_pernet_subsys(&inet6_net_ops); | 1047 | unregister_pernet_subsys(&inet6_net_ops); |
926 | register_pernet_fail: | 1048 | register_pernet_fail: |
927 | cleanup_ipv6_mibs(); | 1049 | cleanup_ipv6_mibs(); |
@@ -949,6 +1071,9 @@ static void __exit inet6_exit(void) | |||
949 | /* Disallow any further netlink messages */ | 1071 | /* Disallow any further netlink messages */ |
950 | rtnl_unregister_all(PF_INET6); | 1072 | rtnl_unregister_all(PF_INET6); |
951 | 1073 | ||
1074 | #ifdef CONFIG_SYSCTL | ||
1075 | ipv6_sysctl_unregister(); | ||
1076 | #endif | ||
952 | udpv6_exit(); | 1077 | udpv6_exit(); |
953 | udplitev6_exit(); | 1078 | udplitev6_exit(); |
954 | tcpv6_exit(); | 1079 | tcpv6_exit(); |
@@ -976,9 +1101,7 @@ static void __exit inet6_exit(void) | |||
976 | ndisc_cleanup(); | 1101 | ndisc_cleanup(); |
977 | icmpv6_cleanup(); | 1102 | icmpv6_cleanup(); |
978 | rawv6_exit(); | 1103 | rawv6_exit(); |
979 | #ifdef CONFIG_SYSCTL | 1104 | |
980 | ipv6_sysctl_unregister(); | ||
981 | #endif | ||
982 | unregister_pernet_subsys(&inet6_net_ops); | 1105 | unregister_pernet_subsys(&inet6_net_ops); |
983 | cleanup_ipv6_mibs(); | 1106 | cleanup_ipv6_mibs(); |
984 | proto_unregister(&rawv6_prot); | 1107 | proto_unregister(&rawv6_prot); |
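With sysctl registration moved to the tail of inet6_init(), the error labels keep unwinding strictly in reverse registration order: a sysctl failure now falls through sysctl_fail into ipv6_packet_cleanup() and the earlier labels, and inet6_exit() correspondingly unregisters the sysctls first. A sketch of that register-last / unwind-first convention; step_a and step_b are placeholders, not symbols from the patch:

/* Sketch of the goto-based unwind ordering used by inet6_init() above. */
static int __init example_init(void)
{
	int err;

	err = step_a_register();
	if (err)
		goto out;
	err = step_b_register();
	if (err)
		goto b_fail;		/* undo everything set up before step_b */
	return 0;

b_fail:
	step_a_unregister();
out:
	return err;
}

static void __exit example_exit(void)
{
	step_b_unregister();		/* last registered, first removed */
	step_a_unregister();
}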
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c index 9c7f83fbc3a1..96868b994b37 100644 --- a/net/ipv6/anycast.c +++ b/net/ipv6/anycast.c | |||
@@ -101,7 +101,7 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, struct in6_addr *addr) | |||
101 | if (ifindex == 0) { | 101 | if (ifindex == 0) { |
102 | struct rt6_info *rt; | 102 | struct rt6_info *rt; |
103 | 103 | ||
104 | rt = rt6_lookup(addr, NULL, 0, 0); | 104 | rt = rt6_lookup(&init_net, addr, NULL, 0, 0); |
105 | if (rt) { | 105 | if (rt) { |
106 | dev = rt->rt6i_dev; | 106 | dev = rt->rt6i_dev; |
107 | dev_hold(dev); | 107 | dev_hold(dev); |
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c index 695c0ca8a417..55137408f054 100644 --- a/net/ipv6/fib6_rules.c +++ b/net/ipv6/fib6_rules.c | |||
@@ -29,24 +29,22 @@ struct fib6_rule | |||
29 | u8 tclass; | 29 | u8 tclass; |
30 | }; | 30 | }; |
31 | 31 | ||
32 | static struct fib_rules_ops fib6_rules_ops; | 32 | struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi *fl, |
33 | 33 | int flags, pol_lookup_t lookup) | |
34 | struct dst_entry *fib6_rule_lookup(struct flowi *fl, int flags, | ||
35 | pol_lookup_t lookup) | ||
36 | { | 34 | { |
37 | struct fib_lookup_arg arg = { | 35 | struct fib_lookup_arg arg = { |
38 | .lookup_ptr = lookup, | 36 | .lookup_ptr = lookup, |
39 | }; | 37 | }; |
40 | 38 | ||
41 | fib_rules_lookup(&fib6_rules_ops, fl, flags, &arg); | 39 | fib_rules_lookup(net->ipv6.fib6_rules_ops, fl, flags, &arg); |
42 | if (arg.rule) | 40 | if (arg.rule) |
43 | fib_rule_put(arg.rule); | 41 | fib_rule_put(arg.rule); |
44 | 42 | ||
45 | if (arg.result) | 43 | if (arg.result) |
46 | return arg.result; | 44 | return arg.result; |
47 | 45 | ||
48 | dst_hold(&ip6_null_entry.u.dst); | 46 | dst_hold(&net->ipv6.ip6_null_entry->u.dst); |
49 | return &ip6_null_entry.u.dst; | 47 | return &net->ipv6.ip6_null_entry->u.dst; |
50 | } | 48 | } |
51 | 49 | ||
52 | static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp, | 50 | static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp, |
@@ -54,28 +52,29 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp, | |||
54 | { | 52 | { |
55 | struct rt6_info *rt = NULL; | 53 | struct rt6_info *rt = NULL; |
56 | struct fib6_table *table; | 54 | struct fib6_table *table; |
55 | struct net *net = rule->fr_net; | ||
57 | pol_lookup_t lookup = arg->lookup_ptr; | 56 | pol_lookup_t lookup = arg->lookup_ptr; |
58 | 57 | ||
59 | switch (rule->action) { | 58 | switch (rule->action) { |
60 | case FR_ACT_TO_TBL: | 59 | case FR_ACT_TO_TBL: |
61 | break; | 60 | break; |
62 | case FR_ACT_UNREACHABLE: | 61 | case FR_ACT_UNREACHABLE: |
63 | rt = &ip6_null_entry; | 62 | rt = net->ipv6.ip6_null_entry; |
64 | goto discard_pkt; | 63 | goto discard_pkt; |
65 | default: | 64 | default: |
66 | case FR_ACT_BLACKHOLE: | 65 | case FR_ACT_BLACKHOLE: |
67 | rt = &ip6_blk_hole_entry; | 66 | rt = net->ipv6.ip6_blk_hole_entry; |
68 | goto discard_pkt; | 67 | goto discard_pkt; |
69 | case FR_ACT_PROHIBIT: | 68 | case FR_ACT_PROHIBIT: |
70 | rt = &ip6_prohibit_entry; | 69 | rt = net->ipv6.ip6_prohibit_entry; |
71 | goto discard_pkt; | 70 | goto discard_pkt; |
72 | } | 71 | } |
73 | 72 | ||
74 | table = fib6_get_table(rule->table); | 73 | table = fib6_get_table(net, rule->table); |
75 | if (table) | 74 | if (table) |
76 | rt = lookup(table, flp, flags); | 75 | rt = lookup(net, table, flp, flags); |
77 | 76 | ||
78 | if (rt != &ip6_null_entry) { | 77 | if (rt != net->ipv6.ip6_null_entry) { |
79 | struct fib6_rule *r = (struct fib6_rule *)rule; | 78 | struct fib6_rule *r = (struct fib6_rule *)rule; |
80 | 79 | ||
81 | /* | 80 | /* |
@@ -85,8 +84,8 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp, | |||
85 | if ((rule->flags & FIB_RULE_FIND_SADDR) && | 84 | if ((rule->flags & FIB_RULE_FIND_SADDR) && |
86 | r->src.plen && !(flags & RT6_LOOKUP_F_HAS_SADDR)) { | 85 | r->src.plen && !(flags & RT6_LOOKUP_F_HAS_SADDR)) { |
87 | struct in6_addr saddr; | 86 | struct in6_addr saddr; |
88 | if (ipv6_get_saddr(&rt->u.dst, &flp->fl6_dst, | 87 | if (ipv6_dev_get_saddr(ip6_dst_idev(&rt->u.dst)->dev, |
89 | &saddr)) | 88 | &flp->fl6_dst, &saddr)) |
90 | goto again; | 89 | goto again; |
91 | if (!ipv6_prefix_equal(&saddr, &r->src.addr, | 90 | if (!ipv6_prefix_equal(&saddr, &r->src.addr, |
92 | r->src.plen)) | 91 | r->src.plen)) |
@@ -145,13 +144,14 @@ static int fib6_rule_configure(struct fib_rule *rule, struct sk_buff *skb, | |||
145 | struct nlattr **tb) | 144 | struct nlattr **tb) |
146 | { | 145 | { |
147 | int err = -EINVAL; | 146 | int err = -EINVAL; |
147 | struct net *net = skb->sk->sk_net; | ||
148 | struct fib6_rule *rule6 = (struct fib6_rule *) rule; | 148 | struct fib6_rule *rule6 = (struct fib6_rule *) rule; |
149 | 149 | ||
150 | if (rule->action == FR_ACT_TO_TBL) { | 150 | if (rule->action == FR_ACT_TO_TBL) { |
151 | if (rule->table == RT6_TABLE_UNSPEC) | 151 | if (rule->table == RT6_TABLE_UNSPEC) |
152 | goto errout; | 152 | goto errout; |
153 | 153 | ||
154 | if (fib6_new_table(rule->table) == NULL) { | 154 | if (fib6_new_table(net, rule->table) == NULL) { |
155 | err = -ENOBUFS; | 155 | err = -ENOBUFS; |
156 | goto errout; | 156 | goto errout; |
157 | } | 157 | } |
@@ -234,7 +234,7 @@ static size_t fib6_rule_nlmsg_payload(struct fib_rule *rule) | |||
234 | + nla_total_size(16); /* src */ | 234 | + nla_total_size(16); /* src */ |
235 | } | 235 | } |
236 | 236 | ||
237 | static struct fib_rules_ops fib6_rules_ops = { | 237 | static struct fib_rules_ops fib6_rules_ops_template = { |
238 | .family = AF_INET6, | 238 | .family = AF_INET6, |
239 | .rule_size = sizeof(struct fib6_rule), | 239 | .rule_size = sizeof(struct fib6_rule), |
240 | .addr_size = sizeof(struct in6_addr), | 240 | .addr_size = sizeof(struct in6_addr), |
@@ -247,45 +247,64 @@ static struct fib_rules_ops fib6_rules_ops = { | |||
247 | .nlmsg_payload = fib6_rule_nlmsg_payload, | 247 | .nlmsg_payload = fib6_rule_nlmsg_payload, |
248 | .nlgroup = RTNLGRP_IPV6_RULE, | 248 | .nlgroup = RTNLGRP_IPV6_RULE, |
249 | .policy = fib6_rule_policy, | 249 | .policy = fib6_rule_policy, |
250 | .rules_list = LIST_HEAD_INIT(fib6_rules_ops.rules_list), | ||
251 | .owner = THIS_MODULE, | 250 | .owner = THIS_MODULE, |
252 | .fro_net = &init_net, | 251 | .fro_net = &init_net, |
253 | }; | 252 | }; |
254 | 253 | ||
255 | static int __init fib6_default_rules_init(void) | 254 | static int fib6_rules_net_init(struct net *net) |
256 | { | 255 | { |
257 | int err; | 256 | int err = -ENOMEM; |
258 | 257 | ||
259 | err = fib_default_rule_add(&fib6_rules_ops, 0, | 258 | net->ipv6.fib6_rules_ops = kmemdup(&fib6_rules_ops_template, |
260 | RT6_TABLE_LOCAL, FIB_RULE_PERMANENT); | 259 | sizeof(*net->ipv6.fib6_rules_ops), |
261 | if (err < 0) | 260 | GFP_KERNEL); |
262 | return err; | 261 | if (!net->ipv6.fib6_rules_ops) |
263 | err = fib_default_rule_add(&fib6_rules_ops, 0x7FFE, RT6_TABLE_MAIN, 0); | 262 | goto out; |
264 | if (err < 0) | ||
265 | return err; | ||
266 | return 0; | ||
267 | } | ||
268 | 263 | ||
269 | int __init fib6_rules_init(void) | 264 | net->ipv6.fib6_rules_ops->fro_net = net; |
270 | { | 265 | INIT_LIST_HEAD(&net->ipv6.fib6_rules_ops->rules_list); |
271 | int ret; | ||
272 | 266 | ||
273 | ret = fib6_default_rules_init(); | 267 | err = fib_default_rule_add(net->ipv6.fib6_rules_ops, 0, |
274 | if (ret) | 268 | RT6_TABLE_LOCAL, FIB_RULE_PERMANENT); |
275 | goto out; | 269 | if (err) |
270 | goto out_fib6_rules_ops; | ||
276 | 271 | ||
277 | ret = fib_rules_register(&fib6_rules_ops); | 272 | err = fib_default_rule_add(net->ipv6.fib6_rules_ops, |
278 | if (ret) | 273 | 0x7FFE, RT6_TABLE_MAIN, 0); |
279 | goto out_default_rules_init; | 274 | if (err) |
275 | goto out_fib6_default_rule_add; | ||
276 | |||
277 | err = fib_rules_register(net->ipv6.fib6_rules_ops); | ||
278 | if (err) | ||
279 | goto out_fib6_default_rule_add; | ||
280 | out: | 280 | out: |
281 | return ret; | 281 | return err; |
282 | 282 | ||
283 | out_default_rules_init: | 283 | out_fib6_default_rule_add: |
284 | fib_rules_cleanup_ops(&fib6_rules_ops); | 284 | fib_rules_cleanup_ops(net->ipv6.fib6_rules_ops); |
285 | out_fib6_rules_ops: | ||
286 | kfree(net->ipv6.fib6_rules_ops); | ||
285 | goto out; | 287 | goto out; |
286 | } | 288 | } |
287 | 289 | ||
290 | static void fib6_rules_net_exit(struct net *net) | ||
291 | { | ||
292 | fib_rules_unregister(net->ipv6.fib6_rules_ops); | ||
293 | kfree(net->ipv6.fib6_rules_ops); | ||
294 | } | ||
295 | |||
296 | static struct pernet_operations fib6_rules_net_ops = { | ||
297 | .init = fib6_rules_net_init, | ||
298 | .exit = fib6_rules_net_exit, | ||
299 | }; | ||
300 | |||
301 | int __init fib6_rules_init(void) | ||
302 | { | ||
303 | return register_pernet_subsys(&fib6_rules_net_ops); | ||
304 | } | ||
305 | |||
306 | |||
288 | void fib6_rules_cleanup(void) | 307 | void fib6_rules_cleanup(void) |
289 | { | 308 | { |
290 | fib_rules_unregister(&fib6_rules_ops); | 309 | return unregister_pernet_subsys(&fib6_rules_net_ops); |
291 | } | 310 | } |
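The conversion above is why the static .rules_list = LIST_HEAD_INIT(...) initializer had to go: a single static ops object can point its list head at itself at compile time, but a per-namespace copy made with kmemdup() only gets a valid list head and fro_net backpointer at run time, after which the two default rules (local at preference 0, main at 0x7FFE) are added and the copy registered. A condensed sketch of the idiom, using only the fields and calls visible in the hunk:

/* Sketch of the per-namespace template duplication done by fib6_rules_net_init(). */
static int example_rules_net_init(struct net *net)
{
	struct fib_rules_ops *ops;

	ops = kmemdup(&fib6_rules_ops_template, sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	ops->fro_net = net;			/* bind this copy to its namespace  */
	INIT_LIST_HEAD(&ops->rules_list);	/* cannot be done statically now    */

	/* fib_default_rule_add() twice, then fib_rules_register(ops);
	 * any failure frees the copy again, as in the hunk above */
	net->ipv6.fib6_rules_ops = ops;
	return 0;
}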
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index 121d517bf91c..6b5391ab8346 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c | |||
@@ -80,8 +80,10 @@ EXPORT_SYMBOL(icmpv6msg_statistics); | |||
80 | * | 80 | * |
81 | * On SMP we have one ICMP socket per-cpu. | 81 | * On SMP we have one ICMP socket per-cpu. |
82 | */ | 82 | */ |
83 | static DEFINE_PER_CPU(struct socket *, __icmpv6_socket) = NULL; | 83 | static inline struct sock *icmpv6_sk(struct net *net) |
84 | #define icmpv6_socket __get_cpu_var(__icmpv6_socket) | 84 | { |
85 | return net->ipv6.icmp_sk[smp_processor_id()]; | ||
86 | } | ||
85 | 87 | ||
86 | static int icmpv6_rcv(struct sk_buff *skb); | 88 | static int icmpv6_rcv(struct sk_buff *skb); |
87 | 89 | ||
@@ -90,11 +92,11 @@ static struct inet6_protocol icmpv6_protocol = { | |||
90 | .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, | 92 | .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, |
91 | }; | 93 | }; |
92 | 94 | ||
93 | static __inline__ int icmpv6_xmit_lock(void) | 95 | static __inline__ int icmpv6_xmit_lock(struct sock *sk) |
94 | { | 96 | { |
95 | local_bh_disable(); | 97 | local_bh_disable(); |
96 | 98 | ||
97 | if (unlikely(!spin_trylock(&icmpv6_socket->sk->sk_lock.slock))) { | 99 | if (unlikely(!spin_trylock(&sk->sk_lock.slock))) { |
98 | /* This can happen if the output path (f.e. SIT or | 100 | /* This can happen if the output path (f.e. SIT or |
99 | * ip6ip6 tunnel) signals dst_link_failure() for an | 101 | * ip6ip6 tunnel) signals dst_link_failure() for an |
100 | * outgoing ICMP6 packet. | 102 | * outgoing ICMP6 packet. |
@@ -105,9 +107,9 @@ static __inline__ int icmpv6_xmit_lock(void) | |||
105 | return 0; | 107 | return 0; |
106 | } | 108 | } |
107 | 109 | ||
108 | static __inline__ void icmpv6_xmit_unlock(void) | 110 | static __inline__ void icmpv6_xmit_unlock(struct sock *sk) |
109 | { | 111 | { |
110 | spin_unlock_bh(&icmpv6_socket->sk->sk_lock.slock); | 112 | spin_unlock_bh(&sk->sk_lock.slock); |
111 | } | 113 | } |
112 | 114 | ||
113 | /* | 115 | /* |
@@ -161,6 +163,7 @@ static inline int icmpv6_xrlim_allow(struct sock *sk, int type, | |||
161 | struct flowi *fl) | 163 | struct flowi *fl) |
162 | { | 164 | { |
163 | struct dst_entry *dst; | 165 | struct dst_entry *dst; |
166 | struct net *net = sk->sk_net; | ||
164 | int res = 0; | 167 | int res = 0; |
165 | 168 | ||
166 | /* Informational messages are not limited. */ | 169 | /* Informational messages are not limited. */ |
@@ -176,7 +179,7 @@ static inline int icmpv6_xrlim_allow(struct sock *sk, int type, | |||
176 | * XXX: perhaps the expire for routing entries cloned by | 179 | * XXX: perhaps the expire for routing entries cloned by |
177 | * this lookup should be more aggressive (not longer than timeout). | 180 | * this lookup should be more aggressive (not longer than timeout). |
178 | */ | 181 | */ |
179 | dst = ip6_route_output(sk, fl); | 182 | dst = ip6_route_output(net, sk, fl); |
180 | if (dst->error) { | 183 | if (dst->error) { |
181 | IP6_INC_STATS(ip6_dst_idev(dst), | 184 | IP6_INC_STATS(ip6_dst_idev(dst), |
182 | IPSTATS_MIB_OUTNOROUTES); | 185 | IPSTATS_MIB_OUTNOROUTES); |
@@ -184,7 +187,7 @@ static inline int icmpv6_xrlim_allow(struct sock *sk, int type, | |||
184 | res = 1; | 187 | res = 1; |
185 | } else { | 188 | } else { |
186 | struct rt6_info *rt = (struct rt6_info *)dst; | 189 | struct rt6_info *rt = (struct rt6_info *)dst; |
187 | int tmo = init_net.ipv6.sysctl.icmpv6_time; | 190 | int tmo = net->ipv6.sysctl.icmpv6_time; |
188 | 191 | ||
189 | /* Give more bandwidth to wider prefixes. */ | 192 | /* Give more bandwidth to wider prefixes. */ |
190 | if (rt->rt6i_dst.plen < 128) | 193 | if (rt->rt6i_dst.plen < 128) |
@@ -303,6 +306,7 @@ static inline void mip6_addr_swap(struct sk_buff *skb) {} | |||
303 | void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info, | 306 | void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info, |
304 | struct net_device *dev) | 307 | struct net_device *dev) |
305 | { | 308 | { |
309 | struct net *net = skb->dev->nd_net; | ||
306 | struct inet6_dev *idev = NULL; | 310 | struct inet6_dev *idev = NULL; |
307 | struct ipv6hdr *hdr = ipv6_hdr(skb); | 311 | struct ipv6hdr *hdr = ipv6_hdr(skb); |
308 | struct sock *sk; | 312 | struct sock *sk; |
@@ -332,7 +336,7 @@ void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info, | |||
332 | */ | 336 | */ |
333 | addr_type = ipv6_addr_type(&hdr->daddr); | 337 | addr_type = ipv6_addr_type(&hdr->daddr); |
334 | 338 | ||
335 | if (ipv6_chk_addr(&init_net, &hdr->daddr, skb->dev, 0)) | 339 | if (ipv6_chk_addr(net, &hdr->daddr, skb->dev, 0)) |
336 | saddr = &hdr->daddr; | 340 | saddr = &hdr->daddr; |
337 | 341 | ||
338 | /* | 342 | /* |
@@ -389,12 +393,12 @@ void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info, | |||
389 | fl.fl_icmp_code = code; | 393 | fl.fl_icmp_code = code; |
390 | security_skb_classify_flow(skb, &fl); | 394 | security_skb_classify_flow(skb, &fl); |
391 | 395 | ||
392 | if (icmpv6_xmit_lock()) | 396 | sk = icmpv6_sk(net); |
393 | return; | ||
394 | |||
395 | sk = icmpv6_socket->sk; | ||
396 | np = inet6_sk(sk); | 397 | np = inet6_sk(sk); |
397 | 398 | ||
399 | if (icmpv6_xmit_lock(sk)) | ||
400 | return; | ||
401 | |||
398 | if (!icmpv6_xrlim_allow(sk, type, &fl)) | 402 | if (!icmpv6_xrlim_allow(sk, type, &fl)) |
399 | goto out; | 403 | goto out; |
400 | 404 | ||
@@ -498,13 +502,14 @@ out_put: | |||
498 | out_dst_release: | 502 | out_dst_release: |
499 | dst_release(dst); | 503 | dst_release(dst); |
500 | out: | 504 | out: |
501 | icmpv6_xmit_unlock(); | 505 | icmpv6_xmit_unlock(sk); |
502 | } | 506 | } |
503 | 507 | ||
504 | EXPORT_SYMBOL(icmpv6_send); | 508 | EXPORT_SYMBOL(icmpv6_send); |
505 | 509 | ||
506 | static void icmpv6_echo_reply(struct sk_buff *skb) | 510 | static void icmpv6_echo_reply(struct sk_buff *skb) |
507 | { | 511 | { |
512 | struct net *net = skb->dev->nd_net; | ||
508 | struct sock *sk; | 513 | struct sock *sk; |
509 | struct inet6_dev *idev; | 514 | struct inet6_dev *idev; |
510 | struct ipv6_pinfo *np; | 515 | struct ipv6_pinfo *np; |
@@ -535,12 +540,12 @@ static void icmpv6_echo_reply(struct sk_buff *skb) | |||
535 | fl.fl_icmp_type = ICMPV6_ECHO_REPLY; | 540 | fl.fl_icmp_type = ICMPV6_ECHO_REPLY; |
536 | security_skb_classify_flow(skb, &fl); | 541 | security_skb_classify_flow(skb, &fl); |
537 | 542 | ||
538 | if (icmpv6_xmit_lock()) | 543 | sk = icmpv6_sk(net); |
539 | return; | ||
540 | |||
541 | sk = icmpv6_socket->sk; | ||
542 | np = inet6_sk(sk); | 544 | np = inet6_sk(sk); |
543 | 545 | ||
546 | if (icmpv6_xmit_lock(sk)) | ||
547 | return; | ||
548 | |||
544 | if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst)) | 549 | if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst)) |
545 | fl.oif = np->mcast_oif; | 550 | fl.oif = np->mcast_oif; |
546 | 551 | ||
@@ -584,7 +589,7 @@ out_put: | |||
584 | in6_dev_put(idev); | 589 | in6_dev_put(idev); |
585 | dst_release(dst); | 590 | dst_release(dst); |
586 | out: | 591 | out: |
587 | icmpv6_xmit_unlock(); | 592 | icmpv6_xmit_unlock(sk); |
588 | } | 593 | } |
589 | 594 | ||
590 | static void icmpv6_notify(struct sk_buff *skb, int type, int code, __be32 info) | 595 | static void icmpv6_notify(struct sk_buff *skb, int type, int code, __be32 info) |
@@ -775,19 +780,41 @@ drop_no_count: | |||
775 | return 0; | 780 | return 0; |
776 | } | 781 | } |
777 | 782 | ||
783 | void icmpv6_flow_init(struct sock *sk, struct flowi *fl, | ||
784 | u8 type, | ||
785 | const struct in6_addr *saddr, | ||
786 | const struct in6_addr *daddr, | ||
787 | int oif) | ||
788 | { | ||
789 | memset(fl, 0, sizeof(*fl)); | ||
790 | ipv6_addr_copy(&fl->fl6_src, saddr); | ||
791 | ipv6_addr_copy(&fl->fl6_dst, daddr); | ||
792 | fl->proto = IPPROTO_ICMPV6; | ||
793 | fl->fl_icmp_type = type; | ||
794 | fl->fl_icmp_code = 0; | ||
795 | fl->oif = oif; | ||
796 | security_sk_classify_flow(sk, fl); | ||
797 | } | ||
798 | |||
778 | /* | 799 | /* |
779 | * Special lock-class for __icmpv6_socket: | 800 | * Special lock-class for __icmpv6_sk: |
780 | */ | 801 | */ |
781 | static struct lock_class_key icmpv6_socket_sk_dst_lock_key; | 802 | static struct lock_class_key icmpv6_socket_sk_dst_lock_key; |
782 | 803 | ||
783 | int __init icmpv6_init(struct net_proto_family *ops) | 804 | static int __net_init icmpv6_sk_init(struct net *net) |
784 | { | 805 | { |
785 | struct sock *sk; | 806 | struct sock *sk; |
786 | int err, i, j; | 807 | int err, i, j; |
787 | 808 | ||
809 | net->ipv6.icmp_sk = | ||
810 | kzalloc(nr_cpu_ids * sizeof(struct sock *), GFP_KERNEL); | ||
811 | if (net->ipv6.icmp_sk == NULL) | ||
812 | return -ENOMEM; | ||
813 | |||
788 | for_each_possible_cpu(i) { | 814 | for_each_possible_cpu(i) { |
815 | struct socket *sock; | ||
789 | err = sock_create_kern(PF_INET6, SOCK_RAW, IPPROTO_ICMPV6, | 816 | err = sock_create_kern(PF_INET6, SOCK_RAW, IPPROTO_ICMPV6, |
790 | &per_cpu(__icmpv6_socket, i)); | 817 | &sock); |
791 | if (err < 0) { | 818 | if (err < 0) { |
792 | printk(KERN_ERR | 819 | printk(KERN_ERR |
793 | "Failed to initialize the ICMP6 control socket " | 820 | "Failed to initialize the ICMP6 control socket " |
@@ -796,12 +823,14 @@ int __init icmpv6_init(struct net_proto_family *ops) | |||
796 | goto fail; | 823 | goto fail; |
797 | } | 824 | } |
798 | 825 | ||
799 | sk = per_cpu(__icmpv6_socket, i)->sk; | 826 | net->ipv6.icmp_sk[i] = sk = sock->sk; |
827 | sk_change_net(sk, net); | ||
828 | |||
800 | sk->sk_allocation = GFP_ATOMIC; | 829 | sk->sk_allocation = GFP_ATOMIC; |
801 | /* | 830 | /* |
802 | * Split off their lock-class, because sk->sk_dst_lock | 831 | * Split off their lock-class, because sk->sk_dst_lock |
803 | * gets used from softirqs, which is safe for | 832 | * gets used from softirqs, which is safe for |
804 | * __icmpv6_socket (because those never get directly used | 833 | * __icmpv6_sk (because those never get directly used |
805 | * via userspace syscalls), but unsafe for normal sockets. | 834 | * via userspace syscalls), but unsafe for normal sockets. |
806 | */ | 835 | */ |
807 | lockdep_set_class(&sk->sk_dst_lock, | 836 | lockdep_set_class(&sk->sk_dst_lock, |
@@ -815,36 +844,56 @@ int __init icmpv6_init(struct net_proto_family *ops) | |||
815 | 844 | ||
816 | sk->sk_prot->unhash(sk); | 845 | sk->sk_prot->unhash(sk); |
817 | } | 846 | } |
818 | |||
819 | |||
820 | if (inet6_add_protocol(&icmpv6_protocol, IPPROTO_ICMPV6) < 0) { | ||
821 | printk(KERN_ERR "Failed to register ICMP6 protocol\n"); | ||
822 | err = -EAGAIN; | ||
823 | goto fail; | ||
824 | } | ||
825 | |||
826 | return 0; | 847 | return 0; |
827 | 848 | ||
828 | fail: | 849 | fail: |
829 | for (j = 0; j < i; j++) { | 850 | for (j = 0; j < i; j++) |
830 | if (!cpu_possible(j)) | 851 | sk_release_kernel(net->ipv6.icmp_sk[j]); |
831 | continue; | 852 | kfree(net->ipv6.icmp_sk); |
832 | sock_release(per_cpu(__icmpv6_socket, j)); | ||
833 | } | ||
834 | |||
835 | return err; | 853 | return err; |
836 | } | 854 | } |
837 | 855 | ||
838 | void icmpv6_cleanup(void) | 856 | static void __net_exit icmpv6_sk_exit(struct net *net) |
839 | { | 857 | { |
840 | int i; | 858 | int i; |
841 | 859 | ||
842 | for_each_possible_cpu(i) { | 860 | for_each_possible_cpu(i) { |
843 | sock_release(per_cpu(__icmpv6_socket, i)); | 861 | sk_release_kernel(net->ipv6.icmp_sk[i]); |
844 | } | 862 | } |
863 | kfree(net->ipv6.icmp_sk); | ||
864 | } | ||
865 | |||
866 | static struct pernet_operations icmpv6_sk_ops = { | ||
867 | .init = icmpv6_sk_init, | ||
868 | .exit = icmpv6_sk_exit, | ||
869 | }; | ||
870 | |||
871 | int __init icmpv6_init(void) | ||
872 | { | ||
873 | int err; | ||
874 | |||
875 | err = register_pernet_subsys(&icmpv6_sk_ops); | ||
876 | if (err < 0) | ||
877 | return err; | ||
878 | |||
879 | err = -EAGAIN; | ||
880 | if (inet6_add_protocol(&icmpv6_protocol, IPPROTO_ICMPV6) < 0) | ||
881 | goto fail; | ||
882 | return 0; | ||
883 | |||
884 | fail: | ||
885 | printk(KERN_ERR "Failed to register ICMP6 protocol\n"); | ||
886 | unregister_pernet_subsys(&icmpv6_sk_ops); | ||
887 | return err; | ||
888 | } | ||
889 | |||
890 | void icmpv6_cleanup(void) | ||
891 | { | ||
892 | unregister_pernet_subsys(&icmpv6_sk_ops); | ||
845 | inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6); | 893 | inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6); |
846 | } | 894 | } |
847 | 895 | ||
896 | |||
848 | static const struct icmp6_err { | 897 | static const struct icmp6_err { |
849 | int err; | 898 | int err; |
850 | int fatal; | 899 | int fatal; |
@@ -925,6 +974,10 @@ struct ctl_table *ipv6_icmp_sysctl_init(struct net *net) | |||
925 | table = kmemdup(ipv6_icmp_table_template, | 974 | table = kmemdup(ipv6_icmp_table_template, |
926 | sizeof(ipv6_icmp_table_template), | 975 | sizeof(ipv6_icmp_table_template), |
927 | GFP_KERNEL); | 976 | GFP_KERNEL); |
977 | |||
978 | if (table) | ||
979 | table[0].data = &net->ipv6.sysctl.icmpv6_time; | ||
980 | |||
928 | return table; | 981 | return table; |
929 | } | 982 | } |
930 | #endif | 983 | #endif |
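Two things fall out of the icmp.c conversion above: the per-CPU control socket now lives in a per-namespace array reached through icmpv6_sk(net), and icmpv6_flow_init() becomes a shared helper so other ICMPv6 senders can fill their flow descriptors consistently. A hedged usage sketch of the helper; sk, saddr, daddr and oif stand for caller-owned values and the echo-request type is only an example:

/* Sketch: building an ICMPv6 flow with the helper added above. */
struct flowi fl;

icmpv6_flow_init(sk, &fl, ICMPV6_ECHO_REQUEST, saddr, daddr, oif);
/* On return fl has proto IPPROTO_ICMPV6, the given icmp type with code 0,
 * the source/destination addresses and oif copied in, and
 * security_sk_classify_flow() already applied. */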
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index bab72b6f1444..b0814b0082e7 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c | |||
@@ -48,8 +48,6 @@ | |||
48 | #define RT6_TRACE(x...) do { ; } while (0) | 48 | #define RT6_TRACE(x...) do { ; } while (0) |
49 | #endif | 49 | #endif |
50 | 50 | ||
51 | struct rt6_statistics rt6_stats; | ||
52 | |||
53 | static struct kmem_cache * fib6_node_kmem __read_mostly; | 51 | static struct kmem_cache * fib6_node_kmem __read_mostly; |
54 | 52 | ||
55 | enum fib_walk_state_t | 53 | enum fib_walk_state_t |
@@ -66,6 +64,7 @@ enum fib_walk_state_t | |||
66 | struct fib6_cleaner_t | 64 | struct fib6_cleaner_t |
67 | { | 65 | { |
68 | struct fib6_walker_t w; | 66 | struct fib6_walker_t w; |
67 | struct net *net; | ||
69 | int (*func)(struct rt6_info *, void *arg); | 68 | int (*func)(struct rt6_info *, void *arg); |
70 | void *arg; | 69 | void *arg; |
71 | }; | 70 | }; |
@@ -78,9 +77,10 @@ static DEFINE_RWLOCK(fib6_walker_lock); | |||
78 | #define FWS_INIT FWS_L | 77 | #define FWS_INIT FWS_L |
79 | #endif | 78 | #endif |
80 | 79 | ||
81 | static void fib6_prune_clones(struct fib6_node *fn, struct rt6_info *rt); | 80 | static void fib6_prune_clones(struct net *net, struct fib6_node *fn, |
82 | static struct rt6_info * fib6_find_prefix(struct fib6_node *fn); | 81 | struct rt6_info *rt); |
83 | static struct fib6_node * fib6_repair_tree(struct fib6_node *fn); | 82 | static struct rt6_info *fib6_find_prefix(struct net *net, struct fib6_node *fn); |
83 | static struct fib6_node *fib6_repair_tree(struct net *net, struct fib6_node *fn); | ||
84 | static int fib6_walk(struct fib6_walker_t *w); | 84 | static int fib6_walk(struct fib6_walker_t *w); |
85 | static int fib6_walk_continue(struct fib6_walker_t *w); | 85 | static int fib6_walk_continue(struct fib6_walker_t *w); |
86 | 86 | ||
@@ -93,7 +93,7 @@ static int fib6_walk_continue(struct fib6_walker_t *w); | |||
93 | 93 | ||
94 | static __u32 rt_sernum; | 94 | static __u32 rt_sernum; |
95 | 95 | ||
96 | static DEFINE_TIMER(ip6_fib_timer, fib6_run_gc, 0, 0); | 96 | static void fib6_gc_timer_cb(unsigned long arg); |
97 | 97 | ||
98 | static struct fib6_walker_t fib6_walker_list = { | 98 | static struct fib6_walker_t fib6_walker_list = { |
99 | .prev = &fib6_walker_list, | 99 | .prev = &fib6_walker_list, |
@@ -166,22 +166,13 @@ static __inline__ void rt6_release(struct rt6_info *rt) | |||
166 | dst_free(&rt->u.dst); | 166 | dst_free(&rt->u.dst); |
167 | } | 167 | } |
168 | 168 | ||
169 | static struct fib6_table fib6_main_tbl = { | ||
170 | .tb6_id = RT6_TABLE_MAIN, | ||
171 | .tb6_root = { | ||
172 | .leaf = &ip6_null_entry, | ||
173 | .fn_flags = RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO, | ||
174 | }, | ||
175 | }; | ||
176 | |||
177 | #ifdef CONFIG_IPV6_MULTIPLE_TABLES | 169 | #ifdef CONFIG_IPV6_MULTIPLE_TABLES |
178 | #define FIB_TABLE_HASHSZ 256 | 170 | #define FIB_TABLE_HASHSZ 256 |
179 | #else | 171 | #else |
180 | #define FIB_TABLE_HASHSZ 1 | 172 | #define FIB_TABLE_HASHSZ 1 |
181 | #endif | 173 | #endif |
182 | static struct hlist_head fib_table_hash[FIB_TABLE_HASHSZ]; | ||
183 | 174 | ||
184 | static void fib6_link_table(struct fib6_table *tb) | 175 | static void fib6_link_table(struct net *net, struct fib6_table *tb) |
185 | { | 176 | { |
186 | unsigned int h; | 177 | unsigned int h; |
187 | 178 | ||
@@ -197,52 +188,46 @@ static void fib6_link_table(struct fib6_table *tb) | |||
197 | * No protection necessary, this is the only list mutatation | 188 | * No protection necessary, this is the only list mutatation |
198 | * operation, tables never disappear once they exist. | 189 | * operation, tables never disappear once they exist. |
199 | */ | 190 | */ |
200 | hlist_add_head_rcu(&tb->tb6_hlist, &fib_table_hash[h]); | 191 | hlist_add_head_rcu(&tb->tb6_hlist, &net->ipv6.fib_table_hash[h]); |
201 | } | 192 | } |
202 | 193 | ||
203 | #ifdef CONFIG_IPV6_MULTIPLE_TABLES | 194 | #ifdef CONFIG_IPV6_MULTIPLE_TABLES |
204 | static struct fib6_table fib6_local_tbl = { | ||
205 | .tb6_id = RT6_TABLE_LOCAL, | ||
206 | .tb6_root = { | ||
207 | .leaf = &ip6_null_entry, | ||
208 | .fn_flags = RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO, | ||
209 | }, | ||
210 | }; | ||
211 | 195 | ||
212 | static struct fib6_table *fib6_alloc_table(u32 id) | 196 | static struct fib6_table *fib6_alloc_table(struct net *net, u32 id) |
213 | { | 197 | { |
214 | struct fib6_table *table; | 198 | struct fib6_table *table; |
215 | 199 | ||
216 | table = kzalloc(sizeof(*table), GFP_ATOMIC); | 200 | table = kzalloc(sizeof(*table), GFP_ATOMIC); |
217 | if (table != NULL) { | 201 | if (table != NULL) { |
218 | table->tb6_id = id; | 202 | table->tb6_id = id; |
219 | table->tb6_root.leaf = &ip6_null_entry; | 203 | table->tb6_root.leaf = net->ipv6.ip6_null_entry; |
220 | table->tb6_root.fn_flags = RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO; | 204 | table->tb6_root.fn_flags = RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO; |
221 | } | 205 | } |
222 | 206 | ||
223 | return table; | 207 | return table; |
224 | } | 208 | } |
225 | 209 | ||
226 | struct fib6_table *fib6_new_table(u32 id) | 210 | struct fib6_table *fib6_new_table(struct net *net, u32 id) |
227 | { | 211 | { |
228 | struct fib6_table *tb; | 212 | struct fib6_table *tb; |
229 | 213 | ||
230 | if (id == 0) | 214 | if (id == 0) |
231 | id = RT6_TABLE_MAIN; | 215 | id = RT6_TABLE_MAIN; |
232 | tb = fib6_get_table(id); | 216 | tb = fib6_get_table(net, id); |
233 | if (tb) | 217 | if (tb) |
234 | return tb; | 218 | return tb; |
235 | 219 | ||
236 | tb = fib6_alloc_table(id); | 220 | tb = fib6_alloc_table(net, id); |
237 | if (tb != NULL) | 221 | if (tb != NULL) |
238 | fib6_link_table(tb); | 222 | fib6_link_table(net, tb); |
239 | 223 | ||
240 | return tb; | 224 | return tb; |
241 | } | 225 | } |
242 | 226 | ||
243 | struct fib6_table *fib6_get_table(u32 id) | 227 | struct fib6_table *fib6_get_table(struct net *net, u32 id) |
244 | { | 228 | { |
245 | struct fib6_table *tb; | 229 | struct fib6_table *tb; |
230 | struct hlist_head *head; | ||
246 | struct hlist_node *node; | 231 | struct hlist_node *node; |
247 | unsigned int h; | 232 | unsigned int h; |
248 | 233 | ||
@@ -250,7 +235,8 @@ struct fib6_table *fib6_get_table(u32 id) | |||
250 | id = RT6_TABLE_MAIN; | 235 | id = RT6_TABLE_MAIN; |
251 | h = id & (FIB_TABLE_HASHSZ - 1); | 236 | h = id & (FIB_TABLE_HASHSZ - 1); |
252 | rcu_read_lock(); | 237 | rcu_read_lock(); |
253 | hlist_for_each_entry_rcu(tb, node, &fib_table_hash[h], tb6_hlist) { | 238 | head = &net->ipv6.fib_table_hash[h]; |
239 | hlist_for_each_entry_rcu(tb, node, head, tb6_hlist) { | ||
254 | if (tb->tb6_id == id) { | 240 | if (tb->tb6_id == id) { |
255 | rcu_read_unlock(); | 241 | rcu_read_unlock(); |
256 | return tb; | 242 | return tb; |
@@ -261,33 +247,32 @@ struct fib6_table *fib6_get_table(u32 id) | |||
261 | return NULL; | 247 | return NULL; |
262 | } | 248 | } |
263 | 249 | ||
264 | static void __init fib6_tables_init(void) | 250 | static void fib6_tables_init(struct net *net) |
265 | { | 251 | { |
266 | fib6_link_table(&fib6_main_tbl); | 252 | fib6_link_table(net, net->ipv6.fib6_main_tbl); |
267 | fib6_link_table(&fib6_local_tbl); | 253 | fib6_link_table(net, net->ipv6.fib6_local_tbl); |
268 | } | 254 | } |
269 | |||
270 | #else | 255 | #else |
271 | 256 | ||
272 | struct fib6_table *fib6_new_table(u32 id) | 257 | struct fib6_table *fib6_new_table(struct net *net, u32 id) |
273 | { | 258 | { |
274 | return fib6_get_table(id); | 259 | return fib6_get_table(net, id); |
275 | } | 260 | } |
276 | 261 | ||
277 | struct fib6_table *fib6_get_table(u32 id) | 262 | struct fib6_table *fib6_get_table(struct net *net, u32 id) |
278 | { | 263 | { |
279 | return &fib6_main_tbl; | 264 | return net->ipv6.fib6_main_tbl; |
280 | } | 265 | } |
281 | 266 | ||
282 | struct dst_entry *fib6_rule_lookup(struct flowi *fl, int flags, | 267 | struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi *fl, |
283 | pol_lookup_t lookup) | 268 | int flags, pol_lookup_t lookup) |
284 | { | 269 | { |
285 | return (struct dst_entry *) lookup(&fib6_main_tbl, fl, flags); | 270 | return (struct dst_entry *) lookup(net, net->ipv6.fib6_main_tbl, fl, flags); |
286 | } | 271 | } |
287 | 272 | ||
288 | static void __init fib6_tables_init(void) | 273 | static void fib6_tables_init(struct net *net) |
289 | { | 274 | { |
290 | fib6_link_table(&fib6_main_tbl); | 275 | fib6_link_table(net, net->ipv6.fib6_main_tbl); |
291 | } | 276 | } |
292 | 277 | ||
293 | #endif | 278 | #endif |
@@ -368,11 +353,9 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb) | |||
368 | struct fib6_walker_t *w; | 353 | struct fib6_walker_t *w; |
369 | struct fib6_table *tb; | 354 | struct fib6_table *tb; |
370 | struct hlist_node *node; | 355 | struct hlist_node *node; |
356 | struct hlist_head *head; | ||
371 | int res = 0; | 357 | int res = 0; |
372 | 358 | ||
373 | if (net != &init_net) | ||
374 | return 0; | ||
375 | |||
376 | s_h = cb->args[0]; | 359 | s_h = cb->args[0]; |
377 | s_e = cb->args[1]; | 360 | s_e = cb->args[1]; |
378 | 361 | ||
@@ -401,7 +384,8 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb) | |||
401 | 384 | ||
402 | for (h = s_h; h < FIB_TABLE_HASHSZ; h++, s_e = 0) { | 385 | for (h = s_h; h < FIB_TABLE_HASHSZ; h++, s_e = 0) { |
403 | e = 0; | 386 | e = 0; |
404 | hlist_for_each_entry(tb, node, &fib_table_hash[h], tb6_hlist) { | 387 | head = &net->ipv6.fib_table_hash[h]; |
388 | hlist_for_each_entry(tb, node, head, tb6_hlist) { | ||
405 | if (e < s_e) | 389 | if (e < s_e) |
406 | goto next; | 390 | goto next; |
407 | res = fib6_dump_table(tb, skb, cb); | 391 | res = fib6_dump_table(tb, skb, cb); |
@@ -667,29 +651,29 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt, | |||
667 | rt->rt6i_node = fn; | 651 | rt->rt6i_node = fn; |
668 | atomic_inc(&rt->rt6i_ref); | 652 | atomic_inc(&rt->rt6i_ref); |
669 | inet6_rt_notify(RTM_NEWROUTE, rt, info); | 653 | inet6_rt_notify(RTM_NEWROUTE, rt, info); |
670 | rt6_stats.fib_rt_entries++; | 654 | info->nl_net->ipv6.rt6_stats->fib_rt_entries++; |
671 | 655 | ||
672 | if ((fn->fn_flags & RTN_RTINFO) == 0) { | 656 | if ((fn->fn_flags & RTN_RTINFO) == 0) { |
673 | rt6_stats.fib_route_nodes++; | 657 | info->nl_net->ipv6.rt6_stats->fib_route_nodes++; |
674 | fn->fn_flags |= RTN_RTINFO; | 658 | fn->fn_flags |= RTN_RTINFO; |
675 | } | 659 | } |
676 | 660 | ||
677 | return 0; | 661 | return 0; |
678 | } | 662 | } |
679 | 663 | ||
680 | static __inline__ void fib6_start_gc(struct rt6_info *rt) | 664 | static __inline__ void fib6_start_gc(struct net *net, struct rt6_info *rt) |
681 | { | 665 | { |
682 | if (ip6_fib_timer.expires == 0 && | 666 | if (net->ipv6.ip6_fib_timer->expires == 0 && |
683 | (rt->rt6i_flags & (RTF_EXPIRES|RTF_CACHE))) | 667 | (rt->rt6i_flags & (RTF_EXPIRES|RTF_CACHE))) |
684 | mod_timer(&ip6_fib_timer, jiffies + | 668 | mod_timer(net->ipv6.ip6_fib_timer, jiffies + |
685 | init_net.ipv6.sysctl.ip6_rt_gc_interval); | 669 | net->ipv6.sysctl.ip6_rt_gc_interval); |
686 | } | 670 | } |
687 | 671 | ||
688 | void fib6_force_start_gc(void) | 672 | void fib6_force_start_gc(struct net *net) |
689 | { | 673 | { |
690 | if (ip6_fib_timer.expires == 0) | 674 | if (net->ipv6.ip6_fib_timer->expires == 0) |
691 | mod_timer(&ip6_fib_timer, jiffies + | 675 | mod_timer(net->ipv6.ip6_fib_timer, jiffies + |
692 | init_net.ipv6.sysctl.ip6_rt_gc_interval); | 676 | net->ipv6.sysctl.ip6_rt_gc_interval); |
693 | } | 677 | } |
694 | 678 | ||
695 | /* | 679 | /* |
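fib6_start_gc() and fib6_force_start_gc() above now arm the namespace's own timer (net->ipv6.ip6_fib_timer) with that namespace's ip6_rt_gc_interval instead of the old static ip6_fib_timer, just as the rt6_stats counters a few lines earlier move into struct net. The expires == 0 test is this code's marker for an idle timer, so an already-pending garbage-collection run is not re-armed (and thereby pushed further out) by another mod_timer() call. A small sketch of that guard; the function name is illustrative:

/* Sketch of the per-namespace deferred-GC arming used above. */
static void example_start_gc(struct net *net)
{
	if (net->ipv6.ip6_fib_timer->expires == 0)	/* idle by convention */
		mod_timer(net->ipv6.ip6_fib_timer,
			  jiffies + net->ipv6.sysctl.ip6_rt_gc_interval);
}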
@@ -733,8 +717,8 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info) | |||
733 | if (sfn == NULL) | 717 | if (sfn == NULL) |
734 | goto st_failure; | 718 | goto st_failure; |
735 | 719 | ||
736 | sfn->leaf = &ip6_null_entry; | 720 | sfn->leaf = info->nl_net->ipv6.ip6_null_entry; |
737 | atomic_inc(&ip6_null_entry.rt6i_ref); | 721 | atomic_inc(&info->nl_net->ipv6.ip6_null_entry->rt6i_ref); |
738 | sfn->fn_flags = RTN_ROOT; | 722 | sfn->fn_flags = RTN_ROOT; |
739 | sfn->fn_sernum = fib6_new_sernum(); | 723 | sfn->fn_sernum = fib6_new_sernum(); |
740 | 724 | ||
@@ -776,9 +760,9 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info) | |||
776 | err = fib6_add_rt2node(fn, rt, info); | 760 | err = fib6_add_rt2node(fn, rt, info); |
777 | 761 | ||
778 | if (err == 0) { | 762 | if (err == 0) { |
779 | fib6_start_gc(rt); | 763 | fib6_start_gc(info->nl_net, rt); |
780 | if (!(rt->rt6i_flags&RTF_CACHE)) | 764 | if (!(rt->rt6i_flags&RTF_CACHE)) |
781 | fib6_prune_clones(pn, rt); | 765 | fib6_prune_clones(info->nl_net, pn, rt); |
782 | } | 766 | } |
783 | 767 | ||
784 | out: | 768 | out: |
@@ -789,11 +773,11 @@ out: | |||
789 | * super-tree leaf node we have to find a new one for it. | 773 | * super-tree leaf node we have to find a new one for it. |
790 | */ | 774 | */ |
791 | if (pn != fn && !pn->leaf && !(pn->fn_flags & RTN_RTINFO)) { | 775 | if (pn != fn && !pn->leaf && !(pn->fn_flags & RTN_RTINFO)) { |
792 | pn->leaf = fib6_find_prefix(pn); | 776 | pn->leaf = fib6_find_prefix(info->nl_net, pn); |
793 | #if RT6_DEBUG >= 2 | 777 | #if RT6_DEBUG >= 2 |
794 | if (!pn->leaf) { | 778 | if (!pn->leaf) { |
795 | BUG_TRAP(pn->leaf != NULL); | 779 | BUG_TRAP(pn->leaf != NULL); |
796 | pn->leaf = &ip6_null_entry; | 780 | pn->leaf = info->nl_net->ipv6.ip6_null_entry; |
797 | } | 781 | } |
798 | #endif | 782 | #endif |
799 | atomic_inc(&pn->leaf->rt6i_ref); | 783 | atomic_inc(&pn->leaf->rt6i_ref); |
@@ -809,7 +793,7 @@ out: | |||
809 | */ | 793 | */ |
810 | st_failure: | 794 | st_failure: |
811 | if (fn && !(fn->fn_flags & (RTN_RTINFO|RTN_ROOT))) | 795 | if (fn && !(fn->fn_flags & (RTN_RTINFO|RTN_ROOT))) |
812 | fib6_repair_tree(fn); | 796 | fib6_repair_tree(info->nl_net, fn); |
813 | dst_free(&rt->u.dst); | 797 | dst_free(&rt->u.dst); |
814 | return err; | 798 | return err; |
815 | #endif | 799 | #endif |
@@ -975,10 +959,10 @@ struct fib6_node * fib6_locate(struct fib6_node *root, | |||
975 | * | 959 | * |
976 | */ | 960 | */ |
977 | 961 | ||
978 | static struct rt6_info * fib6_find_prefix(struct fib6_node *fn) | 962 | static struct rt6_info *fib6_find_prefix(struct net *net, struct fib6_node *fn) |
979 | { | 963 | { |
980 | if (fn->fn_flags&RTN_ROOT) | 964 | if (fn->fn_flags&RTN_ROOT) |
981 | return &ip6_null_entry; | 965 | return net->ipv6.ip6_null_entry; |
982 | 966 | ||
983 | while(fn) { | 967 | while(fn) { |
984 | if(fn->left) | 968 | if(fn->left) |
@@ -997,7 +981,8 @@ static struct rt6_info * fib6_find_prefix(struct fib6_node *fn) | |||
997 | * is the node we want to try and remove. | 981 | * is the node we want to try and remove. |
998 | */ | 982 | */ |
999 | 983 | ||
1000 | static struct fib6_node * fib6_repair_tree(struct fib6_node *fn) | 984 | static struct fib6_node *fib6_repair_tree(struct net *net, |
985 | struct fib6_node *fn) | ||
1001 | { | 986 | { |
1002 | int children; | 987 | int children; |
1003 | int nstate; | 988 | int nstate; |
@@ -1024,11 +1009,11 @@ static struct fib6_node * fib6_repair_tree(struct fib6_node *fn) | |||
1024 | || (children && fn->fn_flags&RTN_ROOT) | 1009 | || (children && fn->fn_flags&RTN_ROOT) |
1025 | #endif | 1010 | #endif |
1026 | ) { | 1011 | ) { |
1027 | fn->leaf = fib6_find_prefix(fn); | 1012 | fn->leaf = fib6_find_prefix(net, fn); |
1028 | #if RT6_DEBUG >= 2 | 1013 | #if RT6_DEBUG >= 2 |
1029 | if (fn->leaf==NULL) { | 1014 | if (fn->leaf==NULL) { |
1030 | BUG_TRAP(fn->leaf); | 1015 | BUG_TRAP(fn->leaf); |
1031 | fn->leaf = &ip6_null_entry; | 1016 | fn->leaf = net->ipv6.ip6_null_entry; |
1032 | } | 1017 | } |
1033 | #endif | 1018 | #endif |
1034 | atomic_inc(&fn->leaf->rt6i_ref); | 1019 | atomic_inc(&fn->leaf->rt6i_ref); |
@@ -1101,14 +1086,15 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp, | |||
1101 | { | 1086 | { |
1102 | struct fib6_walker_t *w; | 1087 | struct fib6_walker_t *w; |
1103 | struct rt6_info *rt = *rtp; | 1088 | struct rt6_info *rt = *rtp; |
1089 | struct net *net = info->nl_net; | ||
1104 | 1090 | ||
1105 | RT6_TRACE("fib6_del_route\n"); | 1091 | RT6_TRACE("fib6_del_route\n"); |
1106 | 1092 | ||
1107 | /* Unlink it */ | 1093 | /* Unlink it */ |
1108 | *rtp = rt->u.dst.rt6_next; | 1094 | *rtp = rt->u.dst.rt6_next; |
1109 | rt->rt6i_node = NULL; | 1095 | rt->rt6i_node = NULL; |
1110 | rt6_stats.fib_rt_entries--; | 1096 | net->ipv6.rt6_stats->fib_rt_entries--; |
1111 | rt6_stats.fib_discarded_routes++; | 1097 | net->ipv6.rt6_stats->fib_discarded_routes++; |
1112 | 1098 | ||
1113 | /* Reset round-robin state, if necessary */ | 1099 | /* Reset round-robin state, if necessary */ |
1114 | if (fn->rr_ptr == rt) | 1100 | if (fn->rr_ptr == rt) |
@@ -1131,8 +1117,8 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp, | |||
1131 | /* If it was last route, expunge its radix tree node */ | 1117 | /* If it was last route, expunge its radix tree node */ |
1132 | if (fn->leaf == NULL) { | 1118 | if (fn->leaf == NULL) { |
1133 | fn->fn_flags &= ~RTN_RTINFO; | 1119 | fn->fn_flags &= ~RTN_RTINFO; |
1134 | rt6_stats.fib_route_nodes--; | 1120 | net->ipv6.rt6_stats->fib_route_nodes--; |
1135 | fn = fib6_repair_tree(fn); | 1121 | fn = fib6_repair_tree(net, fn); |
1136 | } | 1122 | } |
1137 | 1123 | ||
1138 | if (atomic_read(&rt->rt6i_ref) != 1) { | 1124 | if (atomic_read(&rt->rt6i_ref) != 1) { |
@@ -1144,7 +1130,7 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp, | |||
1144 | */ | 1130 | */ |
1145 | while (fn) { | 1131 | while (fn) { |
1146 | if (!(fn->fn_flags&RTN_RTINFO) && fn->leaf == rt) { | 1132 | if (!(fn->fn_flags&RTN_RTINFO) && fn->leaf == rt) { |
1147 | fn->leaf = fib6_find_prefix(fn); | 1133 | fn->leaf = fib6_find_prefix(net, fn); |
1148 | atomic_inc(&fn->leaf->rt6i_ref); | 1134 | atomic_inc(&fn->leaf->rt6i_ref); |
1149 | rt6_release(rt); | 1135 | rt6_release(rt); |
1150 | } | 1136 | } |
@@ -1160,6 +1146,7 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp, | |||
1160 | 1146 | ||
1161 | int fib6_del(struct rt6_info *rt, struct nl_info *info) | 1147 | int fib6_del(struct rt6_info *rt, struct nl_info *info) |
1162 | { | 1148 | { |
1149 | struct net *net = info->nl_net; | ||
1163 | struct fib6_node *fn = rt->rt6i_node; | 1150 | struct fib6_node *fn = rt->rt6i_node; |
1164 | struct rt6_info **rtp; | 1151 | struct rt6_info **rtp; |
1165 | 1152 | ||
@@ -1169,7 +1156,7 @@ int fib6_del(struct rt6_info *rt, struct nl_info *info) | |||
1169 | return -ENOENT; | 1156 | return -ENOENT; |
1170 | } | 1157 | } |
1171 | #endif | 1158 | #endif |
1172 | if (fn == NULL || rt == &ip6_null_entry) | 1159 | if (fn == NULL || rt == net->ipv6.ip6_null_entry) |
1173 | return -ENOENT; | 1160 | return -ENOENT; |
1174 | 1161 | ||
1175 | BUG_TRAP(fn->fn_flags&RTN_RTINFO); | 1162 | BUG_TRAP(fn->fn_flags&RTN_RTINFO); |
@@ -1184,7 +1171,7 @@ int fib6_del(struct rt6_info *rt, struct nl_info *info) | |||
1184 | pn = pn->parent; | 1171 | pn = pn->parent; |
1185 | } | 1172 | } |
1186 | #endif | 1173 | #endif |
1187 | fib6_prune_clones(pn, rt); | 1174 | fib6_prune_clones(info->nl_net, pn, rt); |
1188 | } | 1175 | } |
1189 | 1176 | ||
1190 | /* | 1177 | /* |
@@ -1314,12 +1301,12 @@ static int fib6_walk(struct fib6_walker_t *w) | |||
1314 | 1301 | ||
1315 | static int fib6_clean_node(struct fib6_walker_t *w) | 1302 | static int fib6_clean_node(struct fib6_walker_t *w) |
1316 | { | 1303 | { |
1317 | struct nl_info info = { | ||
1318 | .nl_net = &init_net, | ||
1319 | }; | ||
1320 | int res; | 1304 | int res; |
1321 | struct rt6_info *rt; | 1305 | struct rt6_info *rt; |
1322 | struct fib6_cleaner_t *c = container_of(w, struct fib6_cleaner_t, w); | 1306 | struct fib6_cleaner_t *c = container_of(w, struct fib6_cleaner_t, w); |
1307 | struct nl_info info = { | ||
1308 | .nl_net = c->net, | ||
1309 | }; | ||
1323 | 1310 | ||
1324 | for (rt = w->leaf; rt; rt = rt->u.dst.rt6_next) { | 1311 | for (rt = w->leaf; rt; rt = rt->u.dst.rt6_next) { |
1325 | res = c->func(rt, c->arg); | 1312 | res = c->func(rt, c->arg); |
@@ -1351,7 +1338,7 @@ static int fib6_clean_node(struct fib6_walker_t *w) | |||
1351 | * ignoring pure split nodes) will be scanned. | 1338 | * ignoring pure split nodes) will be scanned. |
1352 | */ | 1339 | */ |
1353 | 1340 | ||
1354 | static void fib6_clean_tree(struct fib6_node *root, | 1341 | static void fib6_clean_tree(struct net *net, struct fib6_node *root, |
1355 | int (*func)(struct rt6_info *, void *arg), | 1342 | int (*func)(struct rt6_info *, void *arg), |
1356 | int prune, void *arg) | 1343 | int prune, void *arg) |
1357 | { | 1344 | { |
@@ -1362,23 +1349,26 @@ static void fib6_clean_tree(struct fib6_node *root, | |||
1362 | c.w.prune = prune; | 1349 | c.w.prune = prune; |
1363 | c.func = func; | 1350 | c.func = func; |
1364 | c.arg = arg; | 1351 | c.arg = arg; |
1352 | c.net = net; | ||
1365 | 1353 | ||
1366 | fib6_walk(&c.w); | 1354 | fib6_walk(&c.w); |
1367 | } | 1355 | } |
1368 | 1356 | ||
1369 | void fib6_clean_all(int (*func)(struct rt6_info *, void *arg), | 1357 | void fib6_clean_all(struct net *net, int (*func)(struct rt6_info *, void *arg), |
1370 | int prune, void *arg) | 1358 | int prune, void *arg) |
1371 | { | 1359 | { |
1372 | struct fib6_table *table; | 1360 | struct fib6_table *table; |
1373 | struct hlist_node *node; | 1361 | struct hlist_node *node; |
1362 | struct hlist_head *head; | ||
1374 | unsigned int h; | 1363 | unsigned int h; |
1375 | 1364 | ||
1376 | rcu_read_lock(); | 1365 | rcu_read_lock(); |
1377 | for (h = 0; h < FIB_TABLE_HASHSZ; h++) { | 1366 | for (h = 0; h < FIB_TABLE_HASHSZ; h++) { |
1378 | hlist_for_each_entry_rcu(table, node, &fib_table_hash[h], | 1367 | head = &net->ipv6.fib_table_hash[h]; |
1379 | tb6_hlist) { | 1368 | hlist_for_each_entry_rcu(table, node, head, tb6_hlist) { |
1380 | write_lock_bh(&table->tb6_lock); | 1369 | write_lock_bh(&table->tb6_lock); |
1381 | fib6_clean_tree(&table->tb6_root, func, prune, arg); | 1370 | fib6_clean_tree(net, &table->tb6_root, |
1371 | func, prune, arg); | ||
1382 | write_unlock_bh(&table->tb6_lock); | 1372 | write_unlock_bh(&table->tb6_lock); |
1383 | } | 1373 | } |
1384 | } | 1374 | } |
@@ -1395,9 +1385,10 @@ static int fib6_prune_clone(struct rt6_info *rt, void *arg) | |||
1395 | return 0; | 1385 | return 0; |
1396 | } | 1386 | } |
1397 | 1387 | ||
1398 | static void fib6_prune_clones(struct fib6_node *fn, struct rt6_info *rt) | 1388 | static void fib6_prune_clones(struct net *net, struct fib6_node *fn, |
1389 | struct rt6_info *rt) | ||
1399 | { | 1390 | { |
1400 | fib6_clean_tree(fn, fib6_prune_clone, 1, rt); | 1391 | fib6_clean_tree(net, fn, fib6_prune_clone, 1, rt); |
1401 | } | 1392 | } |
1402 | 1393 | ||
1403 | /* | 1394 | /* |
@@ -1447,54 +1438,145 @@ static int fib6_age(struct rt6_info *rt, void *arg) | |||
1447 | 1438 | ||
1448 | static DEFINE_SPINLOCK(fib6_gc_lock); | 1439 | static DEFINE_SPINLOCK(fib6_gc_lock); |
1449 | 1440 | ||
1450 | void fib6_run_gc(unsigned long dummy) | 1441 | void fib6_run_gc(unsigned long expires, struct net *net) |
1451 | { | 1442 | { |
1452 | if (dummy != ~0UL) { | 1443 | if (expires != ~0UL) { |
1453 | spin_lock_bh(&fib6_gc_lock); | 1444 | spin_lock_bh(&fib6_gc_lock); |
1454 | gc_args.timeout = dummy ? (int)dummy : | 1445 | gc_args.timeout = expires ? (int)expires : |
1455 | init_net.ipv6.sysctl.ip6_rt_gc_interval; | 1446 | net->ipv6.sysctl.ip6_rt_gc_interval; |
1456 | } else { | 1447 | } else { |
1457 | local_bh_disable(); | 1448 | local_bh_disable(); |
1458 | if (!spin_trylock(&fib6_gc_lock)) { | 1449 | if (!spin_trylock(&fib6_gc_lock)) { |
1459 | mod_timer(&ip6_fib_timer, jiffies + HZ); | 1450 | mod_timer(net->ipv6.ip6_fib_timer, jiffies + HZ); |
1460 | local_bh_enable(); | 1451 | local_bh_enable(); |
1461 | return; | 1452 | return; |
1462 | } | 1453 | } |
1463 | gc_args.timeout = init_net.ipv6.sysctl.ip6_rt_gc_interval; | 1454 | gc_args.timeout = net->ipv6.sysctl.ip6_rt_gc_interval; |
1464 | } | 1455 | } |
1465 | gc_args.more = 0; | 1456 | gc_args.more = 0; |
1466 | 1457 | ||
1467 | ndisc_dst_gc(&gc_args.more); | 1458 | icmp6_dst_gc(&gc_args.more); |
1468 | fib6_clean_all(fib6_age, 0, NULL); | 1459 | |
1460 | fib6_clean_all(net, fib6_age, 0, NULL); | ||
1469 | 1461 | ||
1470 | if (gc_args.more) | 1462 | if (gc_args.more) |
1471 | mod_timer(&ip6_fib_timer, jiffies + | 1463 | mod_timer(net->ipv6.ip6_fib_timer, jiffies + |
1472 | init_net.ipv6.sysctl.ip6_rt_gc_interval); | 1464 | net->ipv6.sysctl.ip6_rt_gc_interval); |
1473 | else { | 1465 | else { |
1474 | del_timer(&ip6_fib_timer); | 1466 | del_timer(net->ipv6.ip6_fib_timer); |
1475 | ip6_fib_timer.expires = 0; | 1467 | net->ipv6.ip6_fib_timer->expires = 0; |
1476 | } | 1468 | } |
1477 | spin_unlock_bh(&fib6_gc_lock); | 1469 | spin_unlock_bh(&fib6_gc_lock); |
1478 | } | 1470 | } |
1479 | 1471 | ||
1480 | int __init fib6_init(void) | 1472 | static void fib6_gc_timer_cb(unsigned long arg) |
1473 | { | ||
1474 | fib6_run_gc(0, (struct net *)arg); | ||
1475 | } | ||
1476 | |||
1477 | static int fib6_net_init(struct net *net) | ||
1481 | { | 1478 | { |
1482 | int ret; | 1479 | int ret; |
1480 | struct timer_list *timer; | ||
1481 | |||
1482 | ret = -ENOMEM; | ||
1483 | timer = kzalloc(sizeof(*timer), GFP_KERNEL); | ||
1484 | if (!timer) | ||
1485 | goto out; | ||
1486 | |||
1487 | setup_timer(timer, fib6_gc_timer_cb, (unsigned long)net); | ||
1488 | net->ipv6.ip6_fib_timer = timer; | ||
1489 | |||
1490 | net->ipv6.rt6_stats = kzalloc(sizeof(*net->ipv6.rt6_stats), GFP_KERNEL); | ||
1491 | if (!net->ipv6.rt6_stats) | ||
1492 | goto out_timer; | ||
1493 | |||
1494 | net->ipv6.fib_table_hash = | ||
1495 | kzalloc(sizeof(*net->ipv6.fib_table_hash)*FIB_TABLE_HASHSZ, | ||
1496 | GFP_KERNEL); | ||
1497 | if (!net->ipv6.fib_table_hash) | ||
1498 | goto out_rt6_stats; | ||
1499 | |||
1500 | net->ipv6.fib6_main_tbl = kzalloc(sizeof(*net->ipv6.fib6_main_tbl), | ||
1501 | GFP_KERNEL); | ||
1502 | if (!net->ipv6.fib6_main_tbl) | ||
1503 | goto out_fib_table_hash; | ||
1504 | |||
1505 | net->ipv6.fib6_main_tbl->tb6_id = RT6_TABLE_MAIN; | ||
1506 | net->ipv6.fib6_main_tbl->tb6_root.leaf = net->ipv6.ip6_null_entry; | ||
1507 | net->ipv6.fib6_main_tbl->tb6_root.fn_flags = | ||
1508 | RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO; | ||
1509 | |||
1510 | #ifdef CONFIG_IPV6_MULTIPLE_TABLES | ||
1511 | net->ipv6.fib6_local_tbl = kzalloc(sizeof(*net->ipv6.fib6_local_tbl), | ||
1512 | GFP_KERNEL); | ||
1513 | if (!net->ipv6.fib6_local_tbl) | ||
1514 | goto out_fib6_main_tbl; | ||
1515 | net->ipv6.fib6_local_tbl->tb6_id = RT6_TABLE_LOCAL; | ||
1516 | net->ipv6.fib6_local_tbl->tb6_root.leaf = net->ipv6.ip6_null_entry; | ||
1517 | net->ipv6.fib6_local_tbl->tb6_root.fn_flags = | ||
1518 | RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO; | ||
1519 | #endif | ||
1520 | fib6_tables_init(net); | ||
1521 | |||
1522 | ret = 0; | ||
1523 | out: | ||
1524 | return ret; | ||
1525 | |||
1526 | #ifdef CONFIG_IPV6_MULTIPLE_TABLES | ||
1527 | out_fib6_main_tbl: | ||
1528 | kfree(net->ipv6.fib6_main_tbl); | ||
1529 | #endif | ||
1530 | out_fib_table_hash: | ||
1531 | kfree(net->ipv6.fib_table_hash); | ||
1532 | out_rt6_stats: | ||
1533 | kfree(net->ipv6.rt6_stats); | ||
1534 | out_timer: | ||
1535 | kfree(timer); | ||
1536 | goto out; | ||
1537 | } | ||
1538 | |||
1539 | static void fib6_net_exit(struct net *net) | ||
1540 | { | ||
1541 | rt6_ifdown(net, NULL); | ||
1542 | del_timer(net->ipv6.ip6_fib_timer); | ||
1543 | kfree(net->ipv6.ip6_fib_timer); | ||
1544 | #ifdef CONFIG_IPV6_MULTIPLE_TABLES | ||
1545 | kfree(net->ipv6.fib6_local_tbl); | ||
1546 | #endif | ||
1547 | kfree(net->ipv6.fib6_main_tbl); | ||
1548 | kfree(net->ipv6.fib_table_hash); | ||
1549 | kfree(net->ipv6.rt6_stats); | ||
1550 | } | ||
1551 | |||
1552 | static struct pernet_operations fib6_net_ops = { | ||
1553 | .init = fib6_net_init, | ||
1554 | .exit = fib6_net_exit, | ||
1555 | }; | ||
1556 | |||
1557 | int __init fib6_init(void) | ||
1558 | { | ||
1559 | int ret = -ENOMEM; | ||
1560 | |||
1483 | fib6_node_kmem = kmem_cache_create("fib6_nodes", | 1561 | fib6_node_kmem = kmem_cache_create("fib6_nodes", |
1484 | sizeof(struct fib6_node), | 1562 | sizeof(struct fib6_node), |
1485 | 0, SLAB_HWCACHE_ALIGN, | 1563 | 0, SLAB_HWCACHE_ALIGN, |
1486 | NULL); | 1564 | NULL); |
1487 | if (!fib6_node_kmem) | 1565 | if (!fib6_node_kmem) |
1488 | return -ENOMEM; | 1566 | goto out; |
1489 | 1567 | ||
1490 | fib6_tables_init(); | 1568 | ret = register_pernet_subsys(&fib6_net_ops); |
1569 | if (ret) | ||
1570 | goto out_kmem_cache_create; | ||
1491 | 1571 | ||
1492 | ret = __rtnl_register(PF_INET6, RTM_GETROUTE, NULL, inet6_dump_fib); | 1572 | ret = __rtnl_register(PF_INET6, RTM_GETROUTE, NULL, inet6_dump_fib); |
1493 | if (ret) | 1573 | if (ret) |
1494 | goto out_kmem_cache_create; | 1574 | goto out_unregister_subsys; |
1495 | out: | 1575 | out: |
1496 | return ret; | 1576 | return ret; |
1497 | 1577 | ||
1578 | out_unregister_subsys: | ||
1579 | unregister_pernet_subsys(&fib6_net_ops); | ||
1498 | out_kmem_cache_create: | 1580 | out_kmem_cache_create: |
1499 | kmem_cache_destroy(fib6_node_kmem); | 1581 | kmem_cache_destroy(fib6_node_kmem); |
1500 | goto out; | 1582 | goto out; |
@@ -1502,6 +1584,6 @@ out_kmem_cache_create: | |||
1502 | 1584 | ||
1503 | void fib6_gc_cleanup(void) | 1585 | void fib6_gc_cleanup(void) |
1504 | { | 1586 | { |
1505 | del_timer(&ip6_fib_timer); | 1587 | unregister_pernet_subsys(&fib6_net_ops); |
1506 | kmem_cache_destroy(fib6_node_kmem); | 1588 | kmem_cache_destroy(fib6_node_kmem); |
1507 | } | 1589 | } |
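
The ip6_fib.c changes above follow one pattern: state that used to be global (the GC timer, rt6_stats, the FIB table hash and the main/local tables) moves into struct net and is set up in a per-namespace init hook with a matching exit hook. A condensed sketch of that pattern, assembled only from calls visible in the hunks above (the example_* names are illustrative, not from the source):

#include <linux/slab.h>
#include <linux/timer.h>
#include <net/net_namespace.h>
#include <net/ip6_fib.h>

static void example_gc_timer_cb(unsigned long arg)
{
	/* same shape as fib6_gc_timer_cb() in the patch */
	fib6_run_gc(0, (struct net *)arg);
}

static int example_net_init(struct net *net)
{
	struct timer_list *timer;

	timer = kzalloc(sizeof(*timer), GFP_KERNEL);
	if (!timer)
		return -ENOMEM;

	/* arm the callback with the owning namespace as its argument */
	setup_timer(timer, example_gc_timer_cb, (unsigned long)net);
	net->ipv6.ip6_fib_timer = timer;
	return 0;
}

static void example_net_exit(struct net *net)
{
	del_timer(net->ipv6.ip6_fib_timer);
	kfree(net->ipv6.ip6_fib_timer);
}

static struct pernet_operations example_net_ops = {
	.init = example_net_init,	/* runs for each namespace created */
	.exit = example_net_exit,	/* runs when a namespace goes away  */
};

static int __init example_init(void)
{
	return register_pernet_subsys(&example_net_ops);
}

The real fib6_net_init() additionally allocates rt6_stats, the table hash and the FIB tables, unwinding with a goto chain on failure, and fib6_gc_cleanup() now simply unregisters the pernet_operations instead of deleting a global timer.
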
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index 178aebc0427a..7e36269826ba 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c | |||
@@ -61,11 +61,6 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt | |||
61 | u32 pkt_len; | 61 | u32 pkt_len; |
62 | struct inet6_dev *idev; | 62 | struct inet6_dev *idev; |
63 | 63 | ||
64 | if (dev->nd_net != &init_net) { | ||
65 | kfree_skb(skb); | ||
66 | return 0; | ||
67 | } | ||
68 | |||
69 | if (skb->pkt_type == PACKET_OTHERHOST) { | 64 | if (skb->pkt_type == PACKET_OTHERHOST) { |
70 | kfree_skb(skb); | 65 | kfree_skb(skb); |
71 | return 0; | 66 | return 0; |
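
The only change to ip6_input.c is the removal of the early bail-out that freed any packet arriving on a device outside the initial namespace; presumably the wider per-namespace conversion in this series makes that filter unnecessary at this point. For reference, a sketch of the removed guard pattern (handler name and trailing processing are placeholders):

static int example_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev)
{
	/* old behaviour: drop traffic from any namespace but init_net */
	if (dev->nd_net != &init_net) {
		kfree_skb(skb);
		return 0;
	}

	/* ... normal IPv6 input processing continued here ... */
	return 0;
}
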
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 8b67ca07467d..98762fde2b65 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
@@ -404,6 +404,7 @@ int ip6_forward(struct sk_buff *skb) | |||
404 | struct dst_entry *dst = skb->dst; | 404 | struct dst_entry *dst = skb->dst; |
405 | struct ipv6hdr *hdr = ipv6_hdr(skb); | 405 | struct ipv6hdr *hdr = ipv6_hdr(skb); |
406 | struct inet6_skb_parm *opt = IP6CB(skb); | 406 | struct inet6_skb_parm *opt = IP6CB(skb); |
407 | struct net *net = dst->dev->nd_net; | ||
407 | 408 | ||
408 | if (ipv6_devconf.forwarding == 0) | 409 | if (ipv6_devconf.forwarding == 0) |
409 | goto error; | 410 | goto error; |
@@ -450,7 +451,7 @@ int ip6_forward(struct sk_buff *skb) | |||
450 | 451 | ||
451 | /* XXX: idev->cnf.proxy_ndp? */ | 452 | /* XXX: idev->cnf.proxy_ndp? */ |
452 | if (ipv6_devconf.proxy_ndp && | 453 | if (ipv6_devconf.proxy_ndp && |
453 | pneigh_lookup(&nd_tbl, &init_net, &hdr->daddr, skb->dev, 0)) { | 454 | pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) { |
454 | int proxied = ip6_forward_proxy_check(skb); | 455 | int proxied = ip6_forward_proxy_check(skb); |
455 | if (proxied > 0) | 456 | if (proxied > 0) |
456 | return ip6_input(skb); | 457 | return ip6_input(skb); |
@@ -596,7 +597,6 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) | |||
596 | 597 | ||
597 | return offset; | 598 | return offset; |
598 | } | 599 | } |
599 | EXPORT_SYMBOL_GPL(ip6_find_1stfragopt); | ||
600 | 600 | ||
601 | static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) | 601 | static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) |
602 | { | 602 | { |
@@ -912,15 +912,17 @@ static int ip6_dst_lookup_tail(struct sock *sk, | |||
912 | struct dst_entry **dst, struct flowi *fl) | 912 | struct dst_entry **dst, struct flowi *fl) |
913 | { | 913 | { |
914 | int err; | 914 | int err; |
915 | struct net *net = sk->sk_net; | ||
915 | 916 | ||
916 | if (*dst == NULL) | 917 | if (*dst == NULL) |
917 | *dst = ip6_route_output(sk, fl); | 918 | *dst = ip6_route_output(net, sk, fl); |
918 | 919 | ||
919 | if ((err = (*dst)->error)) | 920 | if ((err = (*dst)->error)) |
920 | goto out_err_release; | 921 | goto out_err_release; |
921 | 922 | ||
922 | if (ipv6_addr_any(&fl->fl6_src)) { | 923 | if (ipv6_addr_any(&fl->fl6_src)) { |
923 | err = ipv6_get_saddr(*dst, &fl->fl6_dst, &fl->fl6_src); | 924 | err = ipv6_dev_get_saddr(ip6_dst_idev(*dst)->dev, |
925 | &fl->fl6_dst, &fl->fl6_src); | ||
924 | if (err) | 926 | if (err) |
925 | goto out_err_release; | 927 | goto out_err_release; |
926 | } | 928 | } |
@@ -939,7 +941,7 @@ static int ip6_dst_lookup_tail(struct sock *sk, | |||
939 | struct flowi fl_gw; | 941 | struct flowi fl_gw; |
940 | int redirect; | 942 | int redirect; |
941 | 943 | ||
942 | ifp = ipv6_get_ifaddr(&init_net, &fl->fl6_src, | 944 | ifp = ipv6_get_ifaddr(net, &fl->fl6_src, |
943 | (*dst)->dev, 1); | 945 | (*dst)->dev, 1); |
944 | 946 | ||
945 | redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC); | 947 | redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC); |
@@ -954,7 +956,7 @@ static int ip6_dst_lookup_tail(struct sock *sk, | |||
954 | dst_release(*dst); | 956 | dst_release(*dst); |
955 | memcpy(&fl_gw, fl, sizeof(struct flowi)); | 957 | memcpy(&fl_gw, fl, sizeof(struct flowi)); |
956 | memset(&fl_gw.fl6_dst, 0, sizeof(struct in6_addr)); | 958 | memset(&fl_gw.fl6_dst, 0, sizeof(struct in6_addr)); |
957 | *dst = ip6_route_output(sk, &fl_gw); | 959 | *dst = ip6_route_output(net, sk, &fl_gw); |
958 | if ((err = (*dst)->error)) | 960 | if ((err = (*dst)->error)) |
959 | goto out_err_release; | 961 | goto out_err_release; |
960 | } | 962 | } |
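
In ip6_output.c the namespace is derived from whatever object is at hand: sk->sk_net in ip6_dst_lookup_tail(), dst->dev->nd_net in ip6_forward(), and it is then threaded into ip6_route_output(), pneigh_lookup() and ipv6_get_ifaddr(). A minimal sketch of a lookup written against the new ip6_route_output() signature (the helper name is illustrative):

#include <net/ip6_route.h>
#include <net/sock.h>

/* Resolve an IPv6 route in the namespace that owns the socket,
 * as ip6_dst_lookup_tail() does after this patch. */
static struct dst_entry *example_route_for_sock(struct sock *sk,
						struct flowi *fl)
{
	struct net *net = sk->sk_net;

	return ip6_route_output(net, sk, fl);
}
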
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 78f438880923..61517fe0c57c 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c | |||
@@ -60,7 +60,7 @@ MODULE_LICENSE("GPL"); | |||
60 | #define IPV6_TLV_TEL_DST_SIZE 8 | 60 | #define IPV6_TLV_TEL_DST_SIZE 8 |
61 | 61 | ||
62 | #ifdef IP6_TNL_DEBUG | 62 | #ifdef IP6_TNL_DEBUG |
63 | #define IP6_TNL_TRACE(x...) printk(KERN_DEBUG "%s:" x "\n", __FUNCTION__) | 63 | #define IP6_TNL_TRACE(x...) printk(KERN_DEBUG "%s:" x "\n", __func__) |
64 | #else | 64 | #else |
65 | #define IP6_TNL_TRACE(x...) do {;} while(0) | 65 | #define IP6_TNL_TRACE(x...) do {;} while(0) |
66 | #endif | 66 | #endif |
@@ -602,7 +602,7 @@ ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
602 | skb_reset_network_header(skb2); | 602 | skb_reset_network_header(skb2); |
603 | 603 | ||
604 | /* Try to guess incoming interface */ | 604 | /* Try to guess incoming interface */ |
605 | rt = rt6_lookup(&ipv6_hdr(skb2)->saddr, NULL, 0, 0); | 605 | rt = rt6_lookup(&init_net, &ipv6_hdr(skb2)->saddr, NULL, 0, 0); |
606 | 606 | ||
607 | if (rt && rt->rt6i_dev) | 607 | if (rt && rt->rt6i_dev) |
608 | skb2->dev = rt->rt6i_dev; | 608 | skb2->dev = rt->rt6i_dev; |
@@ -847,7 +847,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb, | |||
847 | if ((dst = ip6_tnl_dst_check(t)) != NULL) | 847 | if ((dst = ip6_tnl_dst_check(t)) != NULL) |
848 | dst_hold(dst); | 848 | dst_hold(dst); |
849 | else { | 849 | else { |
850 | dst = ip6_route_output(NULL, fl); | 850 | dst = ip6_route_output(&init_net, NULL, fl); |
851 | 851 | ||
852 | if (dst->error || xfrm_lookup(&dst, fl, NULL, 0) < 0) | 852 | if (dst->error || xfrm_lookup(&dst, fl, NULL, 0) < 0) |
853 | goto tx_err_link_failure; | 853 | goto tx_err_link_failure; |
@@ -1112,7 +1112,7 @@ static void ip6_tnl_link_config(struct ip6_tnl *t) | |||
1112 | int strict = (ipv6_addr_type(&p->raddr) & | 1112 | int strict = (ipv6_addr_type(&p->raddr) & |
1113 | (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL)); | 1113 | (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL)); |
1114 | 1114 | ||
1115 | struct rt6_info *rt = rt6_lookup(&p->raddr, &p->laddr, | 1115 | struct rt6_info *rt = rt6_lookup(&init_net, &p->raddr, &p->laddr, |
1116 | p->link, strict); | 1116 | p->link, strict); |
1117 | 1117 | ||
1118 | if (rt == NULL) | 1118 | if (rt == NULL) |
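
ip6_tunnel.c is touched only for the new signatures: rt6_lookup() and ip6_route_output() now take an explicit struct net, and with no socket or per-net tunnel state to derive one from, the tunnel code keeps passing &init_net for now. The transmit-side call, as in ip6_tnl_xmit2() above:

/* No namespace context available yet, so the initial namespace is
 * passed explicitly; error handling as in the surrounding code. */
dst = ip6_route_output(&init_net, NULL, fl);
if (dst->error || xfrm_lookup(&dst, fl, NULL, 0) < 0)
	goto tx_err_link_failure;
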
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index bf2a686aa13d..c11c76cab371 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c | |||
@@ -57,118 +57,6 @@ | |||
57 | 57 | ||
58 | DEFINE_SNMP_STAT(struct ipstats_mib, ipv6_statistics) __read_mostly; | 58 | DEFINE_SNMP_STAT(struct ipstats_mib, ipv6_statistics) __read_mostly; |
59 | 59 | ||
60 | static struct inet6_protocol *ipv6_gso_pull_exthdrs(struct sk_buff *skb, | ||
61 | int proto) | ||
62 | { | ||
63 | struct inet6_protocol *ops = NULL; | ||
64 | |||
65 | for (;;) { | ||
66 | struct ipv6_opt_hdr *opth; | ||
67 | int len; | ||
68 | |||
69 | if (proto != NEXTHDR_HOP) { | ||
70 | ops = rcu_dereference(inet6_protos[proto]); | ||
71 | |||
72 | if (unlikely(!ops)) | ||
73 | break; | ||
74 | |||
75 | if (!(ops->flags & INET6_PROTO_GSO_EXTHDR)) | ||
76 | break; | ||
77 | } | ||
78 | |||
79 | if (unlikely(!pskb_may_pull(skb, 8))) | ||
80 | break; | ||
81 | |||
82 | opth = (void *)skb->data; | ||
83 | len = opth->hdrlen * 8 + 8; | ||
84 | |||
85 | if (unlikely(!pskb_may_pull(skb, len))) | ||
86 | break; | ||
87 | |||
88 | proto = opth->nexthdr; | ||
89 | __skb_pull(skb, len); | ||
90 | } | ||
91 | |||
92 | return ops; | ||
93 | } | ||
94 | |||
95 | static int ipv6_gso_send_check(struct sk_buff *skb) | ||
96 | { | ||
97 | struct ipv6hdr *ipv6h; | ||
98 | struct inet6_protocol *ops; | ||
99 | int err = -EINVAL; | ||
100 | |||
101 | if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h)))) | ||
102 | goto out; | ||
103 | |||
104 | ipv6h = ipv6_hdr(skb); | ||
105 | __skb_pull(skb, sizeof(*ipv6h)); | ||
106 | err = -EPROTONOSUPPORT; | ||
107 | |||
108 | rcu_read_lock(); | ||
109 | ops = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr); | ||
110 | if (likely(ops && ops->gso_send_check)) { | ||
111 | skb_reset_transport_header(skb); | ||
112 | err = ops->gso_send_check(skb); | ||
113 | } | ||
114 | rcu_read_unlock(); | ||
115 | |||
116 | out: | ||
117 | return err; | ||
118 | } | ||
119 | |||
120 | static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features) | ||
121 | { | ||
122 | struct sk_buff *segs = ERR_PTR(-EINVAL); | ||
123 | struct ipv6hdr *ipv6h; | ||
124 | struct inet6_protocol *ops; | ||
125 | |||
126 | if (!(features & NETIF_F_V6_CSUM)) | ||
127 | features &= ~NETIF_F_SG; | ||
128 | |||
129 | if (unlikely(skb_shinfo(skb)->gso_type & | ||
130 | ~(SKB_GSO_UDP | | ||
131 | SKB_GSO_DODGY | | ||
132 | SKB_GSO_TCP_ECN | | ||
133 | SKB_GSO_TCPV6 | | ||
134 | 0))) | ||
135 | goto out; | ||
136 | |||
137 | if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h)))) | ||
138 | goto out; | ||
139 | |||
140 | ipv6h = ipv6_hdr(skb); | ||
141 | __skb_pull(skb, sizeof(*ipv6h)); | ||
142 | segs = ERR_PTR(-EPROTONOSUPPORT); | ||
143 | |||
144 | rcu_read_lock(); | ||
145 | ops = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr); | ||
146 | if (likely(ops && ops->gso_segment)) { | ||
147 | skb_reset_transport_header(skb); | ||
148 | segs = ops->gso_segment(skb, features); | ||
149 | } | ||
150 | rcu_read_unlock(); | ||
151 | |||
152 | if (unlikely(IS_ERR(segs))) | ||
153 | goto out; | ||
154 | |||
155 | for (skb = segs; skb; skb = skb->next) { | ||
156 | ipv6h = ipv6_hdr(skb); | ||
157 | ipv6h->payload_len = htons(skb->len - skb->mac_len - | ||
158 | sizeof(*ipv6h)); | ||
159 | } | ||
160 | |||
161 | out: | ||
162 | return segs; | ||
163 | } | ||
164 | |||
165 | static struct packet_type ipv6_packet_type = { | ||
166 | .type = __constant_htons(ETH_P_IPV6), | ||
167 | .func = ipv6_rcv, | ||
168 | .gso_send_check = ipv6_gso_send_check, | ||
169 | .gso_segment = ipv6_gso_segment, | ||
170 | }; | ||
171 | |||
172 | struct ip6_ra_chain *ip6_ra_chain; | 60 | struct ip6_ra_chain *ip6_ra_chain; |
173 | DEFINE_RWLOCK(ip6_ra_lock); | 61 | DEFINE_RWLOCK(ip6_ra_lock); |
174 | 62 | ||
@@ -219,6 +107,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, | |||
219 | char __user *optval, int optlen) | 107 | char __user *optval, int optlen) |
220 | { | 108 | { |
221 | struct ipv6_pinfo *np = inet6_sk(sk); | 109 | struct ipv6_pinfo *np = inet6_sk(sk); |
110 | struct net *net = sk->sk_net; | ||
222 | int val, valbool; | 111 | int val, valbool; |
223 | int retv = -ENOPROTOOPT; | 112 | int retv = -ENOPROTOOPT; |
224 | 113 | ||
@@ -544,7 +433,7 @@ done: | |||
544 | if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != val) | 433 | if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != val) |
545 | goto e_inval; | 434 | goto e_inval; |
546 | 435 | ||
547 | if (__dev_get_by_index(&init_net, val) == NULL) { | 436 | if (__dev_get_by_index(net, val) == NULL) { |
548 | retv = -ENODEV; | 437 | retv = -ENODEV; |
549 | break; | 438 | break; |
550 | } | 439 | } |
@@ -1128,13 +1017,3 @@ int compat_ipv6_getsockopt(struct sock *sk, int level, int optname, | |||
1128 | EXPORT_SYMBOL(compat_ipv6_getsockopt); | 1017 | EXPORT_SYMBOL(compat_ipv6_getsockopt); |
1129 | #endif | 1018 | #endif |
1130 | 1019 | ||
1131 | int __init ipv6_packet_init(void) | ||
1132 | { | ||
1133 | dev_add_pack(&ipv6_packet_type); | ||
1134 | return 0; | ||
1135 | } | ||
1136 | |||
1137 | void ipv6_packet_cleanup(void) | ||
1138 | { | ||
1139 | dev_remove_pack(&ipv6_packet_type); | ||
1140 | } | ||
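
Besides dropping the GSO helpers and the ipv6_packet_type registration (which leave this file entirely), ipv6_sockglue.c now resolves interface indices in the socket's own namespace. A sketch of the interface-index check shown above, pulled into an illustrative helper:

#include <linux/netdevice.h>
#include <net/sock.h>

/* Validate an interface index against the namespace owning the socket;
 * mirrors the check in do_ipv6_setsockopt() after this patch. */
static int example_check_ifindex(struct sock *sk, int val)
{
	struct net *net = sk->sk_net;

	if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != val)
		return -EINVAL;
	if (__dev_get_by_index(net, val) == NULL)
		return -ENODEV;
	return 0;
}
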
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index ab228d1ea114..f2879056fab0 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c | |||
@@ -126,8 +126,6 @@ static struct in6_addr mld2_all_mcr = MLD2_ALL_MCR_INIT; | |||
126 | /* Big mc list lock for all the sockets */ | 126 | /* Big mc list lock for all the sockets */ |
127 | static DEFINE_RWLOCK(ipv6_sk_mc_lock); | 127 | static DEFINE_RWLOCK(ipv6_sk_mc_lock); |
128 | 128 | ||
129 | static struct socket *igmp6_socket; | ||
130 | |||
131 | int __ipv6_dev_mc_dec(struct inet6_dev *idev, struct in6_addr *addr); | 129 | int __ipv6_dev_mc_dec(struct inet6_dev *idev, struct in6_addr *addr); |
132 | 130 | ||
133 | static void igmp6_join_group(struct ifmcaddr6 *ma); | 131 | static void igmp6_join_group(struct ifmcaddr6 *ma); |
@@ -183,6 +181,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, struct in6_addr *addr) | |||
183 | struct net_device *dev = NULL; | 181 | struct net_device *dev = NULL; |
184 | struct ipv6_mc_socklist *mc_lst; | 182 | struct ipv6_mc_socklist *mc_lst; |
185 | struct ipv6_pinfo *np = inet6_sk(sk); | 183 | struct ipv6_pinfo *np = inet6_sk(sk); |
184 | struct net *net = sk->sk_net; | ||
186 | int err; | 185 | int err; |
187 | 186 | ||
188 | if (!ipv6_addr_is_multicast(addr)) | 187 | if (!ipv6_addr_is_multicast(addr)) |
@@ -208,14 +207,14 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, struct in6_addr *addr) | |||
208 | 207 | ||
209 | if (ifindex == 0) { | 208 | if (ifindex == 0) { |
210 | struct rt6_info *rt; | 209 | struct rt6_info *rt; |
211 | rt = rt6_lookup(addr, NULL, 0, 0); | 210 | rt = rt6_lookup(net, addr, NULL, 0, 0); |
212 | if (rt) { | 211 | if (rt) { |
213 | dev = rt->rt6i_dev; | 212 | dev = rt->rt6i_dev; |
214 | dev_hold(dev); | 213 | dev_hold(dev); |
215 | dst_release(&rt->u.dst); | 214 | dst_release(&rt->u.dst); |
216 | } | 215 | } |
217 | } else | 216 | } else |
218 | dev = dev_get_by_index(&init_net, ifindex); | 217 | dev = dev_get_by_index(net, ifindex); |
219 | 218 | ||
220 | if (dev == NULL) { | 219 | if (dev == NULL) { |
221 | sock_kfree_s(sk, mc_lst, sizeof(*mc_lst)); | 220 | sock_kfree_s(sk, mc_lst, sizeof(*mc_lst)); |
@@ -256,6 +255,7 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, struct in6_addr *addr) | |||
256 | { | 255 | { |
257 | struct ipv6_pinfo *np = inet6_sk(sk); | 256 | struct ipv6_pinfo *np = inet6_sk(sk); |
258 | struct ipv6_mc_socklist *mc_lst, **lnk; | 257 | struct ipv6_mc_socklist *mc_lst, **lnk; |
258 | struct net *net = sk->sk_net; | ||
259 | 259 | ||
260 | write_lock_bh(&ipv6_sk_mc_lock); | 260 | write_lock_bh(&ipv6_sk_mc_lock); |
261 | for (lnk = &np->ipv6_mc_list; (mc_lst = *lnk) !=NULL ; lnk = &mc_lst->next) { | 261 | for (lnk = &np->ipv6_mc_list; (mc_lst = *lnk) !=NULL ; lnk = &mc_lst->next) { |
@@ -266,7 +266,8 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, struct in6_addr *addr) | |||
266 | *lnk = mc_lst->next; | 266 | *lnk = mc_lst->next; |
267 | write_unlock_bh(&ipv6_sk_mc_lock); | 267 | write_unlock_bh(&ipv6_sk_mc_lock); |
268 | 268 | ||
269 | if ((dev = dev_get_by_index(&init_net, mc_lst->ifindex)) != NULL) { | 269 | dev = dev_get_by_index(net, mc_lst->ifindex); |
270 | if (dev != NULL) { | ||
270 | struct inet6_dev *idev = in6_dev_get(dev); | 271 | struct inet6_dev *idev = in6_dev_get(dev); |
271 | 272 | ||
272 | (void) ip6_mc_leave_src(sk, mc_lst, idev); | 273 | (void) ip6_mc_leave_src(sk, mc_lst, idev); |
@@ -286,7 +287,9 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, struct in6_addr *addr) | |||
286 | return -EADDRNOTAVAIL; | 287 | return -EADDRNOTAVAIL; |
287 | } | 288 | } |
288 | 289 | ||
289 | static struct inet6_dev *ip6_mc_find_dev(struct in6_addr *group, int ifindex) | 290 | static struct inet6_dev *ip6_mc_find_dev(struct net *net, |
291 | struct in6_addr *group, | ||
292 | int ifindex) | ||
290 | { | 293 | { |
291 | struct net_device *dev = NULL; | 294 | struct net_device *dev = NULL; |
292 | struct inet6_dev *idev = NULL; | 295 | struct inet6_dev *idev = NULL; |
@@ -294,14 +297,14 @@ static struct inet6_dev *ip6_mc_find_dev(struct in6_addr *group, int ifindex) | |||
294 | if (ifindex == 0) { | 297 | if (ifindex == 0) { |
295 | struct rt6_info *rt; | 298 | struct rt6_info *rt; |
296 | 299 | ||
297 | rt = rt6_lookup(group, NULL, 0, 0); | 300 | rt = rt6_lookup(net, group, NULL, 0, 0); |
298 | if (rt) { | 301 | if (rt) { |
299 | dev = rt->rt6i_dev; | 302 | dev = rt->rt6i_dev; |
300 | dev_hold(dev); | 303 | dev_hold(dev); |
301 | dst_release(&rt->u.dst); | 304 | dst_release(&rt->u.dst); |
302 | } | 305 | } |
303 | } else | 306 | } else |
304 | dev = dev_get_by_index(&init_net, ifindex); | 307 | dev = dev_get_by_index(net, ifindex); |
305 | 308 | ||
306 | if (!dev) | 309 | if (!dev) |
307 | return NULL; | 310 | return NULL; |
@@ -324,6 +327,7 @@ void ipv6_sock_mc_close(struct sock *sk) | |||
324 | { | 327 | { |
325 | struct ipv6_pinfo *np = inet6_sk(sk); | 328 | struct ipv6_pinfo *np = inet6_sk(sk); |
326 | struct ipv6_mc_socklist *mc_lst; | 329 | struct ipv6_mc_socklist *mc_lst; |
330 | struct net *net = sk->sk_net; | ||
327 | 331 | ||
328 | write_lock_bh(&ipv6_sk_mc_lock); | 332 | write_lock_bh(&ipv6_sk_mc_lock); |
329 | while ((mc_lst = np->ipv6_mc_list) != NULL) { | 333 | while ((mc_lst = np->ipv6_mc_list) != NULL) { |
@@ -332,7 +336,7 @@ void ipv6_sock_mc_close(struct sock *sk) | |||
332 | np->ipv6_mc_list = mc_lst->next; | 336 | np->ipv6_mc_list = mc_lst->next; |
333 | write_unlock_bh(&ipv6_sk_mc_lock); | 337 | write_unlock_bh(&ipv6_sk_mc_lock); |
334 | 338 | ||
335 | dev = dev_get_by_index(&init_net, mc_lst->ifindex); | 339 | dev = dev_get_by_index(net, mc_lst->ifindex); |
336 | if (dev) { | 340 | if (dev) { |
337 | struct inet6_dev *idev = in6_dev_get(dev); | 341 | struct inet6_dev *idev = in6_dev_get(dev); |
338 | 342 | ||
@@ -361,6 +365,7 @@ int ip6_mc_source(int add, int omode, struct sock *sk, | |||
361 | struct inet6_dev *idev; | 365 | struct inet6_dev *idev; |
362 | struct ipv6_pinfo *inet6 = inet6_sk(sk); | 366 | struct ipv6_pinfo *inet6 = inet6_sk(sk); |
363 | struct ip6_sf_socklist *psl; | 367 | struct ip6_sf_socklist *psl; |
368 | struct net *net = sk->sk_net; | ||
364 | int i, j, rv; | 369 | int i, j, rv; |
365 | int leavegroup = 0; | 370 | int leavegroup = 0; |
366 | int pmclocked = 0; | 371 | int pmclocked = 0; |
@@ -376,7 +381,7 @@ int ip6_mc_source(int add, int omode, struct sock *sk, | |||
376 | if (!ipv6_addr_is_multicast(group)) | 381 | if (!ipv6_addr_is_multicast(group)) |
377 | return -EINVAL; | 382 | return -EINVAL; |
378 | 383 | ||
379 | idev = ip6_mc_find_dev(group, pgsr->gsr_interface); | 384 | idev = ip6_mc_find_dev(net, group, pgsr->gsr_interface); |
380 | if (!idev) | 385 | if (!idev) |
381 | return -ENODEV; | 386 | return -ENODEV; |
382 | dev = idev->dev; | 387 | dev = idev->dev; |
@@ -500,6 +505,7 @@ int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf) | |||
500 | struct inet6_dev *idev; | 505 | struct inet6_dev *idev; |
501 | struct ipv6_pinfo *inet6 = inet6_sk(sk); | 506 | struct ipv6_pinfo *inet6 = inet6_sk(sk); |
502 | struct ip6_sf_socklist *newpsl, *psl; | 507 | struct ip6_sf_socklist *newpsl, *psl; |
508 | struct net *net = sk->sk_net; | ||
503 | int leavegroup = 0; | 509 | int leavegroup = 0; |
504 | int i, err; | 510 | int i, err; |
505 | 511 | ||
@@ -511,7 +517,7 @@ int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf) | |||
511 | gsf->gf_fmode != MCAST_EXCLUDE) | 517 | gsf->gf_fmode != MCAST_EXCLUDE) |
512 | return -EINVAL; | 518 | return -EINVAL; |
513 | 519 | ||
514 | idev = ip6_mc_find_dev(group, gsf->gf_interface); | 520 | idev = ip6_mc_find_dev(net, group, gsf->gf_interface); |
515 | 521 | ||
516 | if (!idev) | 522 | if (!idev) |
517 | return -ENODEV; | 523 | return -ENODEV; |
@@ -592,13 +598,14 @@ int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf, | |||
592 | struct net_device *dev; | 598 | struct net_device *dev; |
593 | struct ipv6_pinfo *inet6 = inet6_sk(sk); | 599 | struct ipv6_pinfo *inet6 = inet6_sk(sk); |
594 | struct ip6_sf_socklist *psl; | 600 | struct ip6_sf_socklist *psl; |
601 | struct net *net = sk->sk_net; | ||
595 | 602 | ||
596 | group = &((struct sockaddr_in6 *)&gsf->gf_group)->sin6_addr; | 603 | group = &((struct sockaddr_in6 *)&gsf->gf_group)->sin6_addr; |
597 | 604 | ||
598 | if (!ipv6_addr_is_multicast(group)) | 605 | if (!ipv6_addr_is_multicast(group)) |
599 | return -EINVAL; | 606 | return -EINVAL; |
600 | 607 | ||
601 | idev = ip6_mc_find_dev(group, gsf->gf_interface); | 608 | idev = ip6_mc_find_dev(net, group, gsf->gf_interface); |
602 | 609 | ||
603 | if (!idev) | 610 | if (!idev) |
604 | return -ENODEV; | 611 | return -ENODEV; |
@@ -1393,7 +1400,8 @@ mld_scount(struct ifmcaddr6 *pmc, int type, int gdeleted, int sdeleted) | |||
1393 | 1400 | ||
1394 | static struct sk_buff *mld_newpack(struct net_device *dev, int size) | 1401 | static struct sk_buff *mld_newpack(struct net_device *dev, int size) |
1395 | { | 1402 | { |
1396 | struct sock *sk = igmp6_socket->sk; | 1403 | struct net *net = dev->nd_net; |
1404 | struct sock *sk = net->ipv6.igmp_sk; | ||
1397 | struct sk_buff *skb; | 1405 | struct sk_buff *skb; |
1398 | struct mld2_report *pmr; | 1406 | struct mld2_report *pmr; |
1399 | struct in6_addr addr_buf; | 1407 | struct in6_addr addr_buf; |
@@ -1433,25 +1441,6 @@ static struct sk_buff *mld_newpack(struct net_device *dev, int size) | |||
1433 | return skb; | 1441 | return skb; |
1434 | } | 1442 | } |
1435 | 1443 | ||
1436 | static inline int mld_dev_queue_xmit2(struct sk_buff *skb) | ||
1437 | { | ||
1438 | struct net_device *dev = skb->dev; | ||
1439 | unsigned char ha[MAX_ADDR_LEN]; | ||
1440 | |||
1441 | ndisc_mc_map(&ipv6_hdr(skb)->daddr, ha, dev, 1); | ||
1442 | if (dev_hard_header(skb, dev, ETH_P_IPV6, ha, NULL, skb->len) < 0) { | ||
1443 | kfree_skb(skb); | ||
1444 | return -EINVAL; | ||
1445 | } | ||
1446 | return dev_queue_xmit(skb); | ||
1447 | } | ||
1448 | |||
1449 | static inline int mld_dev_queue_xmit(struct sk_buff *skb) | ||
1450 | { | ||
1451 | return NF_HOOK(PF_INET6, NF_INET_POST_ROUTING, skb, NULL, skb->dev, | ||
1452 | mld_dev_queue_xmit2); | ||
1453 | } | ||
1454 | |||
1455 | static void mld_sendpack(struct sk_buff *skb) | 1444 | static void mld_sendpack(struct sk_buff *skb) |
1456 | { | 1445 | { |
1457 | struct ipv6hdr *pip6 = ipv6_hdr(skb); | 1446 | struct ipv6hdr *pip6 = ipv6_hdr(skb); |
@@ -1459,7 +1448,9 @@ static void mld_sendpack(struct sk_buff *skb) | |||
1459 | (struct mld2_report *)skb_transport_header(skb); | 1448 | (struct mld2_report *)skb_transport_header(skb); |
1460 | int payload_len, mldlen; | 1449 | int payload_len, mldlen; |
1461 | struct inet6_dev *idev = in6_dev_get(skb->dev); | 1450 | struct inet6_dev *idev = in6_dev_get(skb->dev); |
1451 | struct net *net = skb->dev->nd_net; | ||
1462 | int err; | 1452 | int err; |
1453 | struct flowi fl; | ||
1463 | 1454 | ||
1464 | IP6_INC_STATS(idev, IPSTATS_MIB_OUTREQUESTS); | 1455 | IP6_INC_STATS(idev, IPSTATS_MIB_OUTREQUESTS); |
1465 | payload_len = (skb->tail - skb->network_header) - sizeof(*pip6); | 1456 | payload_len = (skb->tail - skb->network_header) - sizeof(*pip6); |
@@ -1469,8 +1460,25 @@ static void mld_sendpack(struct sk_buff *skb) | |||
1469 | pmr->csum = csum_ipv6_magic(&pip6->saddr, &pip6->daddr, mldlen, | 1460 | pmr->csum = csum_ipv6_magic(&pip6->saddr, &pip6->daddr, mldlen, |
1470 | IPPROTO_ICMPV6, csum_partial(skb_transport_header(skb), | 1461 | IPPROTO_ICMPV6, csum_partial(skb_transport_header(skb), |
1471 | mldlen, 0)); | 1462 | mldlen, 0)); |
1463 | |||
1464 | skb->dst = icmp6_dst_alloc(skb->dev, NULL, &ipv6_hdr(skb)->daddr); | ||
1465 | |||
1466 | if (!skb->dst) { | ||
1467 | err = -ENOMEM; | ||
1468 | goto err_out; | ||
1469 | } | ||
1470 | |||
1471 | icmpv6_flow_init(net->ipv6.igmp_sk, &fl, ICMPV6_MLD2_REPORT, | ||
1472 | &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, | ||
1473 | skb->dev->ifindex); | ||
1474 | |||
1475 | err = xfrm_lookup(&skb->dst, &fl, NULL, 0); | ||
1476 | if (err) | ||
1477 | goto err_out; | ||
1478 | |||
1472 | err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev, | 1479 | err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev, |
1473 | mld_dev_queue_xmit); | 1480 | dst_output); |
1481 | out: | ||
1474 | if (!err) { | 1482 | if (!err) { |
1475 | ICMP6MSGOUT_INC_STATS_BH(idev, ICMPV6_MLD2_REPORT); | 1483 | ICMP6MSGOUT_INC_STATS_BH(idev, ICMPV6_MLD2_REPORT); |
1476 | ICMP6_INC_STATS_BH(idev, ICMP6_MIB_OUTMSGS); | 1484 | ICMP6_INC_STATS_BH(idev, ICMP6_MIB_OUTMSGS); |
@@ -1480,6 +1488,11 @@ static void mld_sendpack(struct sk_buff *skb) | |||
1480 | 1488 | ||
1481 | if (likely(idev != NULL)) | 1489 | if (likely(idev != NULL)) |
1482 | in6_dev_put(idev); | 1490 | in6_dev_put(idev); |
1491 | return; | ||
1492 | |||
1493 | err_out: | ||
1494 | kfree_skb(skb); | ||
1495 | goto out; | ||
1483 | } | 1496 | } |
1484 | 1497 | ||
1485 | static int grec_size(struct ifmcaddr6 *pmc, int type, int gdel, int sdel) | 1498 | static int grec_size(struct ifmcaddr6 *pmc, int type, int gdel, int sdel) |
@@ -1749,7 +1762,8 @@ static void mld_send_cr(struct inet6_dev *idev) | |||
1749 | 1762 | ||
1750 | static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type) | 1763 | static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type) |
1751 | { | 1764 | { |
1752 | struct sock *sk = igmp6_socket->sk; | 1765 | struct net *net = dev->nd_net; |
1766 | struct sock *sk = net->ipv6.igmp_sk; | ||
1753 | struct inet6_dev *idev; | 1767 | struct inet6_dev *idev; |
1754 | struct sk_buff *skb; | 1768 | struct sk_buff *skb; |
1755 | struct icmp6hdr *hdr; | 1769 | struct icmp6hdr *hdr; |
@@ -1761,6 +1775,7 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type) | |||
1761 | u8 ra[8] = { IPPROTO_ICMPV6, 0, | 1775 | u8 ra[8] = { IPPROTO_ICMPV6, 0, |
1762 | IPV6_TLV_ROUTERALERT, 2, 0, 0, | 1776 | IPV6_TLV_ROUTERALERT, 2, 0, 0, |
1763 | IPV6_TLV_PADN, 0 }; | 1777 | IPV6_TLV_PADN, 0 }; |
1778 | struct flowi fl; | ||
1764 | 1779 | ||
1765 | rcu_read_lock(); | 1780 | rcu_read_lock(); |
1766 | IP6_INC_STATS(__in6_dev_get(dev), | 1781 | IP6_INC_STATS(__in6_dev_get(dev), |
@@ -1813,8 +1828,23 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type) | |||
1813 | 1828 | ||
1814 | idev = in6_dev_get(skb->dev); | 1829 | idev = in6_dev_get(skb->dev); |
1815 | 1830 | ||
1831 | skb->dst = icmp6_dst_alloc(skb->dev, NULL, &ipv6_hdr(skb)->daddr); | ||
1832 | if (!skb->dst) { | ||
1833 | err = -ENOMEM; | ||
1834 | goto err_out; | ||
1835 | } | ||
1836 | |||
1837 | icmpv6_flow_init(sk, &fl, type, | ||
1838 | &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, | ||
1839 | skb->dev->ifindex); | ||
1840 | |||
1841 | err = xfrm_lookup(&skb->dst, &fl, NULL, 0); | ||
1842 | if (err) | ||
1843 | goto err_out; | ||
1844 | |||
1816 | err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev, | 1845 | err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev, |
1817 | mld_dev_queue_xmit); | 1846 | dst_output); |
1847 | out: | ||
1818 | if (!err) { | 1848 | if (!err) { |
1819 | ICMP6MSGOUT_INC_STATS(idev, type); | 1849 | ICMP6MSGOUT_INC_STATS(idev, type); |
1820 | ICMP6_INC_STATS(idev, ICMP6_MIB_OUTMSGS); | 1850 | ICMP6_INC_STATS(idev, ICMP6_MIB_OUTMSGS); |
@@ -1825,6 +1855,10 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type) | |||
1825 | if (likely(idev != NULL)) | 1855 | if (likely(idev != NULL)) |
1826 | in6_dev_put(idev); | 1856 | in6_dev_put(idev); |
1827 | return; | 1857 | return; |
1858 | |||
1859 | err_out: | ||
1860 | kfree_skb(skb); | ||
1861 | goto out; | ||
1828 | } | 1862 | } |
1829 | 1863 | ||
1830 | static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode, | 1864 | static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode, |
@@ -2310,6 +2344,7 @@ void ipv6_mc_destroy_dev(struct inet6_dev *idev) | |||
2310 | 2344 | ||
2311 | #ifdef CONFIG_PROC_FS | 2345 | #ifdef CONFIG_PROC_FS |
2312 | struct igmp6_mc_iter_state { | 2346 | struct igmp6_mc_iter_state { |
2347 | struct seq_net_private p; | ||
2313 | struct net_device *dev; | 2348 | struct net_device *dev; |
2314 | struct inet6_dev *idev; | 2349 | struct inet6_dev *idev; |
2315 | }; | 2350 | }; |
@@ -2320,9 +2355,10 @@ static inline struct ifmcaddr6 *igmp6_mc_get_first(struct seq_file *seq) | |||
2320 | { | 2355 | { |
2321 | struct ifmcaddr6 *im = NULL; | 2356 | struct ifmcaddr6 *im = NULL; |
2322 | struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq); | 2357 | struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq); |
2358 | struct net *net = state->p.net; | ||
2323 | 2359 | ||
2324 | state->idev = NULL; | 2360 | state->idev = NULL; |
2325 | for_each_netdev(&init_net, state->dev) { | 2361 | for_each_netdev(net, state->dev) { |
2326 | struct inet6_dev *idev; | 2362 | struct inet6_dev *idev; |
2327 | idev = in6_dev_get(state->dev); | 2363 | idev = in6_dev_get(state->dev); |
2328 | if (!idev) | 2364 | if (!idev) |
@@ -2424,8 +2460,8 @@ static const struct seq_operations igmp6_mc_seq_ops = { | |||
2424 | 2460 | ||
2425 | static int igmp6_mc_seq_open(struct inode *inode, struct file *file) | 2461 | static int igmp6_mc_seq_open(struct inode *inode, struct file *file) |
2426 | { | 2462 | { |
2427 | return seq_open_private(file, &igmp6_mc_seq_ops, | 2463 | return seq_open_net(inode, file, &igmp6_mc_seq_ops, |
2428 | sizeof(struct igmp6_mc_iter_state)); | 2464 | sizeof(struct igmp6_mc_iter_state)); |
2429 | } | 2465 | } |
2430 | 2466 | ||
2431 | static const struct file_operations igmp6_mc_seq_fops = { | 2467 | static const struct file_operations igmp6_mc_seq_fops = { |
@@ -2433,10 +2469,11 @@ static const struct file_operations igmp6_mc_seq_fops = { | |||
2433 | .open = igmp6_mc_seq_open, | 2469 | .open = igmp6_mc_seq_open, |
2434 | .read = seq_read, | 2470 | .read = seq_read, |
2435 | .llseek = seq_lseek, | 2471 | .llseek = seq_lseek, |
2436 | .release = seq_release_private, | 2472 | .release = seq_release_net, |
2437 | }; | 2473 | }; |
2438 | 2474 | ||
2439 | struct igmp6_mcf_iter_state { | 2475 | struct igmp6_mcf_iter_state { |
2476 | struct seq_net_private p; | ||
2440 | struct net_device *dev; | 2477 | struct net_device *dev; |
2441 | struct inet6_dev *idev; | 2478 | struct inet6_dev *idev; |
2442 | struct ifmcaddr6 *im; | 2479 | struct ifmcaddr6 *im; |
@@ -2449,10 +2486,11 @@ static inline struct ip6_sf_list *igmp6_mcf_get_first(struct seq_file *seq) | |||
2449 | struct ip6_sf_list *psf = NULL; | 2486 | struct ip6_sf_list *psf = NULL; |
2450 | struct ifmcaddr6 *im = NULL; | 2487 | struct ifmcaddr6 *im = NULL; |
2451 | struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq); | 2488 | struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq); |
2489 | struct net *net = state->p.net; | ||
2452 | 2490 | ||
2453 | state->idev = NULL; | 2491 | state->idev = NULL; |
2454 | state->im = NULL; | 2492 | state->im = NULL; |
2455 | for_each_netdev(&init_net, state->dev) { | 2493 | for_each_netdev(net, state->dev) { |
2456 | struct inet6_dev *idev; | 2494 | struct inet6_dev *idev; |
2457 | idev = in6_dev_get(state->dev); | 2495 | idev = in6_dev_get(state->dev); |
2458 | if (unlikely(idev == NULL)) | 2496 | if (unlikely(idev == NULL)) |
@@ -2584,8 +2622,8 @@ static const struct seq_operations igmp6_mcf_seq_ops = { | |||
2584 | 2622 | ||
2585 | static int igmp6_mcf_seq_open(struct inode *inode, struct file *file) | 2623 | static int igmp6_mcf_seq_open(struct inode *inode, struct file *file) |
2586 | { | 2624 | { |
2587 | return seq_open_private(file, &igmp6_mcf_seq_ops, | 2625 | return seq_open_net(inode, file, &igmp6_mcf_seq_ops, |
2588 | sizeof(struct igmp6_mcf_iter_state)); | 2626 | sizeof(struct igmp6_mcf_iter_state)); |
2589 | } | 2627 | } |
2590 | 2628 | ||
2591 | static const struct file_operations igmp6_mcf_seq_fops = { | 2629 | static const struct file_operations igmp6_mcf_seq_fops = { |
@@ -2593,26 +2631,27 @@ static const struct file_operations igmp6_mcf_seq_fops = { | |||
2593 | .open = igmp6_mcf_seq_open, | 2631 | .open = igmp6_mcf_seq_open, |
2594 | .read = seq_read, | 2632 | .read = seq_read, |
2595 | .llseek = seq_lseek, | 2633 | .llseek = seq_lseek, |
2596 | .release = seq_release_private, | 2634 | .release = seq_release_net, |
2597 | }; | 2635 | }; |
2598 | #endif | 2636 | #endif |
2599 | 2637 | ||
2600 | int __init igmp6_init(struct net_proto_family *ops) | 2638 | static int igmp6_net_init(struct net *net) |
2601 | { | 2639 | { |
2602 | struct ipv6_pinfo *np; | 2640 | struct ipv6_pinfo *np; |
2641 | struct socket *sock; | ||
2603 | struct sock *sk; | 2642 | struct sock *sk; |
2604 | int err; | 2643 | int err; |
2605 | 2644 | ||
2606 | err = sock_create_kern(PF_INET6, SOCK_RAW, IPPROTO_ICMPV6, &igmp6_socket); | 2645 | err = sock_create_kern(PF_INET6, SOCK_RAW, IPPROTO_ICMPV6, &sock); |
2607 | if (err < 0) { | 2646 | if (err < 0) { |
2608 | printk(KERN_ERR | 2647 | printk(KERN_ERR |
2609 | "Failed to initialize the IGMP6 control socket (err %d).\n", | 2648 | "Failed to initialize the IGMP6 control socket (err %d).\n", |
2610 | err); | 2649 | err); |
2611 | igmp6_socket = NULL; /* For safety. */ | 2650 | goto out; |
2612 | return err; | ||
2613 | } | 2651 | } |
2614 | 2652 | ||
2615 | sk = igmp6_socket->sk; | 2653 | net->ipv6.igmp_sk = sk = sock->sk; |
2654 | sk_change_net(sk, net); | ||
2616 | sk->sk_allocation = GFP_ATOMIC; | 2655 | sk->sk_allocation = GFP_ATOMIC; |
2617 | sk->sk_prot->unhash(sk); | 2656 | sk->sk_prot->unhash(sk); |
2618 | 2657 | ||
@@ -2620,20 +2659,45 @@ int __init igmp6_init(struct net_proto_family *ops) | |||
2620 | np->hop_limit = 1; | 2659 | np->hop_limit = 1; |
2621 | 2660 | ||
2622 | #ifdef CONFIG_PROC_FS | 2661 | #ifdef CONFIG_PROC_FS |
2623 | proc_net_fops_create(&init_net, "igmp6", S_IRUGO, &igmp6_mc_seq_fops); | 2662 | err = -ENOMEM; |
2624 | proc_net_fops_create(&init_net, "mcfilter6", S_IRUGO, &igmp6_mcf_seq_fops); | 2663 | if (!proc_net_fops_create(net, "igmp6", S_IRUGO, &igmp6_mc_seq_fops)) |
2664 | goto out_sock_create; | ||
2665 | if (!proc_net_fops_create(net, "mcfilter6", S_IRUGO, | ||
2666 | &igmp6_mcf_seq_fops)) { | ||
2667 | proc_net_remove(net, "igmp6"); | ||
2668 | goto out_sock_create; | ||
2669 | } | ||
2625 | #endif | 2670 | #endif |
2626 | 2671 | ||
2627 | return 0; | 2672 | err = 0; |
2673 | out: | ||
2674 | return err; | ||
2675 | |||
2676 | out_sock_create: | ||
2677 | sk_release_kernel(net->ipv6.igmp_sk); | ||
2678 | goto out; | ||
2628 | } | 2679 | } |
2629 | 2680 | ||
2630 | void igmp6_cleanup(void) | 2681 | static void igmp6_net_exit(struct net *net) |
2631 | { | 2682 | { |
2632 | sock_release(igmp6_socket); | 2683 | sk_release_kernel(net->ipv6.igmp_sk); |
2633 | igmp6_socket = NULL; /* for safety */ | ||
2634 | |||
2635 | #ifdef CONFIG_PROC_FS | 2684 | #ifdef CONFIG_PROC_FS |
2636 | proc_net_remove(&init_net, "mcfilter6"); | 2685 | proc_net_remove(net, "mcfilter6"); |
2637 | proc_net_remove(&init_net, "igmp6"); | 2686 | proc_net_remove(net, "igmp6"); |
2638 | #endif | 2687 | #endif |
2639 | } | 2688 | } |
2689 | |||
2690 | static struct pernet_operations igmp6_net_ops = { | ||
2691 | .init = igmp6_net_init, | ||
2692 | .exit = igmp6_net_exit, | ||
2693 | }; | ||
2694 | |||
2695 | int __init igmp6_init(void) | ||
2696 | { | ||
2697 | return register_pernet_subsys(&igmp6_net_ops); | ||
2698 | } | ||
2699 | |||
2700 | void igmp6_cleanup(void) | ||
2701 | { | ||
2702 | unregister_pernet_subsys(&igmp6_net_ops); | ||
2703 | } | ||
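
mcast.c gets the same treatment as ip6_fib.c: the static igmp6_socket becomes a per-namespace kernel socket stored in net->ipv6.igmp_sk, the /proc entries are registered per namespace, and the hand-rolled mld_dev_queue_xmit() path is replaced by a routed dst (icmp6_dst_alloc() plus icmpv6_flow_init() plus xfrm_lookup()) handed to dst_output(). A condensed sketch of the per-net control-socket setup, using only calls that appear in the hunks above (error unwinding and /proc registration trimmed; example_* names are illustrative):

static int example_net_init(struct net *net)
{
	struct socket *sock;
	struct sock *sk;
	int err;

	/* raw ICMPv6 socket the stack uses internally to emit MLD */
	err = sock_create_kern(PF_INET6, SOCK_RAW, IPPROTO_ICMPV6, &sock);
	if (err < 0)
		return err;

	sk = sock->sk;
	sk_change_net(sk, net);		/* re-home the socket into this net */
	net->ipv6.igmp_sk = sk;

	sk->sk_allocation = GFP_ATOMIC;	/* no sleeping allocations here */
	sk->sk_prot->unhash(sk);	/* keep it out of the protocol hash */
	return 0;
}

static void example_net_exit(struct net *net)
{
	sk_release_kernel(net->ipv6.igmp_sk);
}
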
diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c index cd8a5bda13cd..42403c626c27 100644 --- a/net/ipv6/mip6.c +++ b/net/ipv6/mip6.c | |||
@@ -304,13 +304,13 @@ static int mip6_destopt_offset(struct xfrm_state *x, struct sk_buff *skb, | |||
304 | static int mip6_destopt_init_state(struct xfrm_state *x) | 304 | static int mip6_destopt_init_state(struct xfrm_state *x) |
305 | { | 305 | { |
306 | if (x->id.spi) { | 306 | if (x->id.spi) { |
307 | printk(KERN_INFO "%s: spi is not 0: %u\n", __FUNCTION__, | 307 | printk(KERN_INFO "%s: spi is not 0: %u\n", __func__, |
308 | x->id.spi); | 308 | x->id.spi); |
309 | return -EINVAL; | 309 | return -EINVAL; |
310 | } | 310 | } |
311 | if (x->props.mode != XFRM_MODE_ROUTEOPTIMIZATION) { | 311 | if (x->props.mode != XFRM_MODE_ROUTEOPTIMIZATION) { |
312 | printk(KERN_INFO "%s: state's mode is not %u: %u\n", | 312 | printk(KERN_INFO "%s: state's mode is not %u: %u\n", |
313 | __FUNCTION__, XFRM_MODE_ROUTEOPTIMIZATION, x->props.mode); | 313 | __func__, XFRM_MODE_ROUTEOPTIMIZATION, x->props.mode); |
314 | return -EINVAL; | 314 | return -EINVAL; |
315 | } | 315 | } |
316 | 316 | ||
@@ -439,13 +439,13 @@ static int mip6_rthdr_offset(struct xfrm_state *x, struct sk_buff *skb, | |||
439 | static int mip6_rthdr_init_state(struct xfrm_state *x) | 439 | static int mip6_rthdr_init_state(struct xfrm_state *x) |
440 | { | 440 | { |
441 | if (x->id.spi) { | 441 | if (x->id.spi) { |
442 | printk(KERN_INFO "%s: spi is not 0: %u\n", __FUNCTION__, | 442 | printk(KERN_INFO "%s: spi is not 0: %u\n", __func__, |
443 | x->id.spi); | 443 | x->id.spi); |
444 | return -EINVAL; | 444 | return -EINVAL; |
445 | } | 445 | } |
446 | if (x->props.mode != XFRM_MODE_ROUTEOPTIMIZATION) { | 446 | if (x->props.mode != XFRM_MODE_ROUTEOPTIMIZATION) { |
447 | printk(KERN_INFO "%s: state's mode is not %u: %u\n", | 447 | printk(KERN_INFO "%s: state's mode is not %u: %u\n", |
448 | __FUNCTION__, XFRM_MODE_ROUTEOPTIMIZATION, x->props.mode); | 448 | __func__, XFRM_MODE_ROUTEOPTIMIZATION, x->props.mode); |
449 | return -EINVAL; | 449 | return -EINVAL; |
450 | } | 450 | } |
451 | 451 | ||
@@ -480,15 +480,15 @@ static int __init mip6_init(void) | |||
480 | printk(KERN_INFO "Mobile IPv6\n"); | 480 | printk(KERN_INFO "Mobile IPv6\n"); |
481 | 481 | ||
482 | if (xfrm_register_type(&mip6_destopt_type, AF_INET6) < 0) { | 482 | if (xfrm_register_type(&mip6_destopt_type, AF_INET6) < 0) { |
483 | printk(KERN_INFO "%s: can't add xfrm type(destopt)\n", __FUNCTION__); | 483 | printk(KERN_INFO "%s: can't add xfrm type(destopt)\n", __func__); |
484 | goto mip6_destopt_xfrm_fail; | 484 | goto mip6_destopt_xfrm_fail; |
485 | } | 485 | } |
486 | if (xfrm_register_type(&mip6_rthdr_type, AF_INET6) < 0) { | 486 | if (xfrm_register_type(&mip6_rthdr_type, AF_INET6) < 0) { |
487 | printk(KERN_INFO "%s: can't add xfrm type(rthdr)\n", __FUNCTION__); | 487 | printk(KERN_INFO "%s: can't add xfrm type(rthdr)\n", __func__); |
488 | goto mip6_rthdr_xfrm_fail; | 488 | goto mip6_rthdr_xfrm_fail; |
489 | } | 489 | } |
490 | if (rawv6_mh_filter_register(mip6_mh_filter) < 0) { | 490 | if (rawv6_mh_filter_register(mip6_mh_filter) < 0) { |
491 | printk(KERN_INFO "%s: can't add rawv6 mh filter\n", __FUNCTION__); | 491 | printk(KERN_INFO "%s: can't add rawv6 mh filter\n", __func__); |
492 | goto mip6_rawv6_mh_fail; | 492 | goto mip6_rawv6_mh_fail; |
493 | } | 493 | } |
494 | 494 | ||
@@ -506,11 +506,11 @@ static int __init mip6_init(void) | |||
506 | static void __exit mip6_fini(void) | 506 | static void __exit mip6_fini(void) |
507 | { | 507 | { |
508 | if (rawv6_mh_filter_unregister(mip6_mh_filter) < 0) | 508 | if (rawv6_mh_filter_unregister(mip6_mh_filter) < 0) |
509 | printk(KERN_INFO "%s: can't remove rawv6 mh filter\n", __FUNCTION__); | 509 | printk(KERN_INFO "%s: can't remove rawv6 mh filter\n", __func__); |
510 | if (xfrm_unregister_type(&mip6_rthdr_type, AF_INET6) < 0) | 510 | if (xfrm_unregister_type(&mip6_rthdr_type, AF_INET6) < 0) |
511 | printk(KERN_INFO "%s: can't remove xfrm type(rthdr)\n", __FUNCTION__); | 511 | printk(KERN_INFO "%s: can't remove xfrm type(rthdr)\n", __func__); |
512 | if (xfrm_unregister_type(&mip6_destopt_type, AF_INET6) < 0) | 512 | if (xfrm_unregister_type(&mip6_destopt_type, AF_INET6) < 0) |
513 | printk(KERN_INFO "%s: can't remove xfrm type(destopt)\n", __FUNCTION__); | 513 | printk(KERN_INFO "%s: can't remove xfrm type(destopt)\n", __func__); |
514 | } | 514 | } |
515 | 515 | ||
516 | module_init(mip6_init); | 516 | module_init(mip6_init); |
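
The mip6.c hunks are purely mechanical: every __FUNCTION__ becomes __func__. Both expand to the name of the enclosing function, but __FUNCTION__ is a compiler extension predating C99, whereas __func__ is the standard C99 predefined identifier (C99 6.4.2.2), so the switch makes the logging portable without changing its output. A trivial illustration:

static void example(void)
{
	/* prints "example: entered" either way */
	printk(KERN_DEBUG "%s: entered\n", __func__);
}
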
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 0d33a7d32125..24e76ed98884 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c | |||
@@ -89,8 +89,6 @@ | |||
89 | #include <linux/netfilter.h> | 89 | #include <linux/netfilter.h> |
90 | #include <linux/netfilter_ipv6.h> | 90 | #include <linux/netfilter_ipv6.h> |
91 | 91 | ||
92 | static struct socket *ndisc_socket; | ||
93 | |||
94 | static u32 ndisc_hash(const void *pkey, const struct net_device *dev); | 92 | static u32 ndisc_hash(const void *pkey, const struct net_device *dev); |
95 | static int ndisc_constructor(struct neighbour *neigh); | 93 | static int ndisc_constructor(struct neighbour *neigh); |
96 | static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb); | 94 | static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb); |
@@ -270,7 +268,7 @@ static struct ndisc_options *ndisc_parse_options(u8 *opt, int opt_len, | |||
270 | if (ndopts->nd_opt_array[nd_opt->nd_opt_type]) { | 268 | if (ndopts->nd_opt_array[nd_opt->nd_opt_type]) { |
271 | ND_PRINTK2(KERN_WARNING | 269 | ND_PRINTK2(KERN_WARNING |
272 | "%s(): duplicated ND6 option found: type=%d\n", | 270 | "%s(): duplicated ND6 option found: type=%d\n", |
273 | __FUNCTION__, | 271 | __func__, |
274 | nd_opt->nd_opt_type); | 272 | nd_opt->nd_opt_type); |
275 | } else { | 273 | } else { |
276 | ndopts->nd_opt_array[nd_opt->nd_opt_type] = nd_opt; | 274 | ndopts->nd_opt_array[nd_opt->nd_opt_type] = nd_opt; |
@@ -301,7 +299,7 @@ static struct ndisc_options *ndisc_parse_options(u8 *opt, int opt_len, | |||
301 | */ | 299 | */ |
302 | ND_PRINTK2(KERN_NOTICE | 300 | ND_PRINTK2(KERN_NOTICE |
303 | "%s(): ignored unsupported option; type=%d, len=%d\n", | 301 | "%s(): ignored unsupported option; type=%d, len=%d\n", |
304 | __FUNCTION__, | 302 | __func__, |
305 | nd_opt->nd_opt_type, nd_opt->nd_opt_len); | 303 | nd_opt->nd_opt_type, nd_opt->nd_opt_len); |
306 | } | 304 | } |
307 | } | 305 | } |
@@ -441,21 +439,6 @@ static void pndisc_destructor(struct pneigh_entry *n) | |||
441 | /* | 439 | /* |
442 | * Send a Neighbour Advertisement | 440 | * Send a Neighbour Advertisement |
443 | */ | 441 | */ |
444 | |||
445 | static inline void ndisc_flow_init(struct flowi *fl, u8 type, | ||
446 | struct in6_addr *saddr, struct in6_addr *daddr, | ||
447 | int oif) | ||
448 | { | ||
449 | memset(fl, 0, sizeof(*fl)); | ||
450 | ipv6_addr_copy(&fl->fl6_src, saddr); | ||
451 | ipv6_addr_copy(&fl->fl6_dst, daddr); | ||
452 | fl->proto = IPPROTO_ICMPV6; | ||
453 | fl->fl_icmp_type = type; | ||
454 | fl->fl_icmp_code = 0; | ||
455 | fl->oif = oif; | ||
456 | security_sk_classify_flow(ndisc_socket->sk, fl); | ||
457 | } | ||
458 | |||
459 | static void __ndisc_send(struct net_device *dev, | 442 | static void __ndisc_send(struct net_device *dev, |
460 | struct neighbour *neigh, | 443 | struct neighbour *neigh, |
461 | struct in6_addr *daddr, struct in6_addr *saddr, | 444 | struct in6_addr *daddr, struct in6_addr *saddr, |
@@ -464,7 +447,8 @@ static void __ndisc_send(struct net_device *dev, | |||
464 | { | 447 | { |
465 | struct flowi fl; | 448 | struct flowi fl; |
466 | struct dst_entry *dst; | 449 | struct dst_entry *dst; |
467 | struct sock *sk = ndisc_socket->sk; | 450 | struct net *net = dev->nd_net; |
451 | struct sock *sk = net->ipv6.ndisc_sk; | ||
468 | struct sk_buff *skb; | 452 | struct sk_buff *skb; |
469 | struct icmp6hdr *hdr; | 453 | struct icmp6hdr *hdr; |
470 | struct inet6_dev *idev; | 454 | struct inet6_dev *idev; |
@@ -474,10 +458,9 @@ static void __ndisc_send(struct net_device *dev, | |||
474 | 458 | ||
475 | type = icmp6h->icmp6_type; | 459 | type = icmp6h->icmp6_type; |
476 | 460 | ||
477 | ndisc_flow_init(&fl, type, saddr, daddr, | 461 | icmpv6_flow_init(sk, &fl, type, saddr, daddr, dev->ifindex); |
478 | dev->ifindex); | ||
479 | 462 | ||
480 | dst = ndisc_dst_alloc(dev, neigh, daddr, ip6_output); | 463 | dst = icmp6_dst_alloc(dev, neigh, daddr); |
481 | if (!dst) | 464 | if (!dst) |
482 | return; | 465 | return; |
483 | 466 | ||
@@ -499,7 +482,7 @@ static void __ndisc_send(struct net_device *dev, | |||
499 | if (!skb) { | 482 | if (!skb) { |
500 | ND_PRINTK0(KERN_ERR | 483 | ND_PRINTK0(KERN_ERR |
501 | "ICMPv6 ND: %s() failed to allocate an skb.\n", | 484 | "ICMPv6 ND: %s() failed to allocate an skb.\n", |
502 | __FUNCTION__); | 485 | __func__); |
503 | dst_release(dst); | 486 | dst_release(dst); |
504 | return; | 487 | return; |
505 | } | 488 | } |
@@ -556,7 +539,7 @@ static void ndisc_send_na(struct net_device *dev, struct neighbour *neigh, | |||
556 | }; | 539 | }; |
557 | 540 | ||
558 | /* for anycast or proxy, solicited_addr != src_addr */ | 541 | /* for anycast or proxy, solicited_addr != src_addr */ |
559 | ifp = ipv6_get_ifaddr(&init_net, solicited_addr, dev, 1); | 542 | ifp = ipv6_get_ifaddr(dev->nd_net, solicited_addr, dev, 1); |
560 | if (ifp) { | 543 | if (ifp) { |
561 | src_addr = solicited_addr; | 544 | src_addr = solicited_addr; |
562 | if (ifp->flags & IFA_F_OPTIMISTIC) | 545 | if (ifp->flags & IFA_F_OPTIMISTIC) |
@@ -616,7 +599,7 @@ void ndisc_send_rs(struct net_device *dev, struct in6_addr *saddr, | |||
616 | * suppress the inclusion of the sllao. | 599 | * suppress the inclusion of the sllao. |
617 | */ | 600 | */ |
618 | if (send_sllao) { | 601 | if (send_sllao) { |
619 | struct inet6_ifaddr *ifp = ipv6_get_ifaddr(&init_net, saddr, | 602 | struct inet6_ifaddr *ifp = ipv6_get_ifaddr(dev->nd_net, saddr, |
620 | dev, 1); | 603 | dev, 1); |
621 | if (ifp) { | 604 | if (ifp) { |
622 | if (ifp->flags & IFA_F_OPTIMISTIC) { | 605 | if (ifp->flags & IFA_F_OPTIMISTIC) { |
@@ -654,7 +637,7 @@ static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb) | |||
654 | struct in6_addr *target = (struct in6_addr *)&neigh->primary_key; | 637 | struct in6_addr *target = (struct in6_addr *)&neigh->primary_key; |
655 | int probes = atomic_read(&neigh->probes); | 638 | int probes = atomic_read(&neigh->probes); |
656 | 639 | ||
657 | if (skb && ipv6_chk_addr(&init_net, &ipv6_hdr(skb)->saddr, dev, 1)) | 640 | if (skb && ipv6_chk_addr(dev->nd_net, &ipv6_hdr(skb)->saddr, dev, 1)) |
658 | saddr = &ipv6_hdr(skb)->saddr; | 641 | saddr = &ipv6_hdr(skb)->saddr; |
659 | 642 | ||
660 | if ((probes -= neigh->parms->ucast_probes) < 0) { | 643 | if ((probes -= neigh->parms->ucast_probes) < 0) { |
@@ -662,7 +645,7 @@ static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb) | |||
662 | ND_PRINTK1(KERN_DEBUG | 645 | ND_PRINTK1(KERN_DEBUG |
663 | "%s(): trying to ucast probe in NUD_INVALID: " | 646 | "%s(): trying to ucast probe in NUD_INVALID: " |
664 | NIP6_FMT "\n", | 647 | NIP6_FMT "\n", |
665 | __FUNCTION__, | 648 | __func__, |
666 | NIP6(*target)); | 649 | NIP6(*target)); |
667 | } | 650 | } |
668 | ndisc_send_ns(dev, neigh, target, target, saddr); | 651 | ndisc_send_ns(dev, neigh, target, target, saddr); |
@@ -742,7 +725,8 @@ static void ndisc_recv_ns(struct sk_buff *skb) | |||
742 | 725 | ||
743 | inc = ipv6_addr_is_multicast(daddr); | 726 | inc = ipv6_addr_is_multicast(daddr); |
744 | 727 | ||
745 | if ((ifp = ipv6_get_ifaddr(&init_net, &msg->target, dev, 1)) != NULL) { | 728 | ifp = ipv6_get_ifaddr(dev->nd_net, &msg->target, dev, 1); |
729 | if (ifp) { | ||
746 | 730 | ||
747 | if (ifp->flags & (IFA_F_TENTATIVE|IFA_F_OPTIMISTIC)) { | 731 | if (ifp->flags & (IFA_F_TENTATIVE|IFA_F_OPTIMISTIC)) { |
748 | if (dad) { | 732 | if (dad) { |
@@ -790,7 +774,7 @@ static void ndisc_recv_ns(struct sk_buff *skb) | |||
790 | if (ipv6_chk_acast_addr(dev, &msg->target) || | 774 | if (ipv6_chk_acast_addr(dev, &msg->target) || |
791 | (idev->cnf.forwarding && | 775 | (idev->cnf.forwarding && |
792 | (ipv6_devconf.proxy_ndp || idev->cnf.proxy_ndp) && | 776 | (ipv6_devconf.proxy_ndp || idev->cnf.proxy_ndp) && |
793 | (pneigh = pneigh_lookup(&nd_tbl, &init_net, | 777 | (pneigh = pneigh_lookup(&nd_tbl, dev->nd_net, |
794 | &msg->target, dev, 0)) != NULL)) { | 778 | &msg->target, dev, 0)) != NULL)) { |
795 | if (!(NEIGH_CB(skb)->flags & LOCALLY_ENQUEUED) && | 779 | if (!(NEIGH_CB(skb)->flags & LOCALLY_ENQUEUED) && |
796 | skb->pkt_type != PACKET_HOST && | 780 | skb->pkt_type != PACKET_HOST && |
@@ -900,7 +884,8 @@ static void ndisc_recv_na(struct sk_buff *skb) | |||
900 | return; | 884 | return; |
901 | } | 885 | } |
902 | } | 886 | } |
903 | if ((ifp = ipv6_get_ifaddr(&init_net, &msg->target, dev, 1))) { | 887 | ifp = ipv6_get_ifaddr(dev->nd_net, &msg->target, dev, 1); |
888 | if (ifp) { | ||
904 | if (ifp->flags & IFA_F_TENTATIVE) { | 889 | if (ifp->flags & IFA_F_TENTATIVE) { |
905 | addrconf_dad_failure(ifp); | 890 | addrconf_dad_failure(ifp); |
906 | return; | 891 | return; |
@@ -931,7 +916,7 @@ static void ndisc_recv_na(struct sk_buff *skb) | |||
931 | */ | 916 | */ |
932 | if (lladdr && !memcmp(lladdr, dev->dev_addr, dev->addr_len) && | 917 | if (lladdr && !memcmp(lladdr, dev->dev_addr, dev->addr_len) && |
933 | ipv6_devconf.forwarding && ipv6_devconf.proxy_ndp && | 918 | ipv6_devconf.forwarding && ipv6_devconf.proxy_ndp && |
934 | pneigh_lookup(&nd_tbl, &init_net, &msg->target, dev, 0)) { | 919 | pneigh_lookup(&nd_tbl, dev->nd_net, &msg->target, dev, 0)) { |
935 | /* XXX: idev->cnf.prixy_ndp */ | 920 | /* XXX: idev->cnf.prixy_ndp */ |
936 | goto out; | 921 | goto out; |
937 | } | 922 | } |
@@ -1021,6 +1006,7 @@ static void ndisc_ra_useropt(struct sk_buff *ra, struct nd_opt_hdr *opt) | |||
1021 | struct sk_buff *skb; | 1006 | struct sk_buff *skb; |
1022 | struct nlmsghdr *nlh; | 1007 | struct nlmsghdr *nlh; |
1023 | struct nduseroptmsg *ndmsg; | 1008 | struct nduseroptmsg *ndmsg; |
1009 | struct net *net = ra->dev->nd_net; | ||
1024 | int err; | 1010 | int err; |
1025 | int base_size = NLMSG_ALIGN(sizeof(struct nduseroptmsg) | 1011 | int base_size = NLMSG_ALIGN(sizeof(struct nduseroptmsg) |
1026 | + (opt->nd_opt_len << 3)); | 1012 | + (opt->nd_opt_len << 3)); |
@@ -1050,7 +1036,7 @@ static void ndisc_ra_useropt(struct sk_buff *ra, struct nd_opt_hdr *opt) | |||
1050 | &ipv6_hdr(ra)->saddr); | 1036 | &ipv6_hdr(ra)->saddr); |
1051 | nlmsg_end(skb, nlh); | 1037 | nlmsg_end(skb, nlh); |
1052 | 1038 | ||
1053 | err = rtnl_notify(skb, &init_net, 0, RTNLGRP_ND_USEROPT, NULL, | 1039 | err = rtnl_notify(skb, net, 0, RTNLGRP_ND_USEROPT, NULL, |
1054 | GFP_ATOMIC); | 1040 | GFP_ATOMIC); |
1055 | if (err < 0) | 1041 | if (err < 0) |
1056 | goto errout; | 1042 | goto errout; |
@@ -1061,7 +1047,7 @@ nla_put_failure: | |||
1061 | nlmsg_free(skb); | 1047 | nlmsg_free(skb); |
1062 | err = -EMSGSIZE; | 1048 | err = -EMSGSIZE; |
1063 | errout: | 1049 | errout: |
1064 | rtnl_set_sk_err(&init_net, RTNLGRP_ND_USEROPT, err); | 1050 | rtnl_set_sk_err(net, RTNLGRP_ND_USEROPT, err); |
1065 | } | 1051 | } |
1066 | 1052 | ||
1067 | static void ndisc_router_discovery(struct sk_buff *skb) | 1053 | static void ndisc_router_discovery(struct sk_buff *skb) |
@@ -1164,7 +1150,7 @@ static void ndisc_router_discovery(struct sk_buff *skb) | |||
1164 | if (rt == NULL) { | 1150 | if (rt == NULL) { |
1165 | ND_PRINTK0(KERN_ERR | 1151 | ND_PRINTK0(KERN_ERR |
1166 | "ICMPv6 RA: %s() failed to add default route.\n", | 1152 | "ICMPv6 RA: %s() failed to add default route.\n", |
1167 | __FUNCTION__); | 1153 | __func__); |
1168 | in6_dev_put(in6_dev); | 1154 | in6_dev_put(in6_dev); |
1169 | return; | 1155 | return; |
1170 | } | 1156 | } |
@@ -1173,7 +1159,7 @@ static void ndisc_router_discovery(struct sk_buff *skb) | |||
1173 | if (neigh == NULL) { | 1159 | if (neigh == NULL) { |
1174 | ND_PRINTK0(KERN_ERR | 1160 | ND_PRINTK0(KERN_ERR |
1175 | "ICMPv6 RA: %s() got default router without neighbour.\n", | 1161 | "ICMPv6 RA: %s() got default router without neighbour.\n", |
1176 | __FUNCTION__); | 1162 | __func__); |
1177 | dst_release(&rt->u.dst); | 1163 | dst_release(&rt->u.dst); |
1178 | in6_dev_put(in6_dev); | 1164 | in6_dev_put(in6_dev); |
1179 | return; | 1165 | return; |
@@ -1406,13 +1392,14 @@ static void ndisc_redirect_rcv(struct sk_buff *skb) | |||
1406 | void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh, | 1392 | void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh, |
1407 | struct in6_addr *target) | 1393 | struct in6_addr *target) |
1408 | { | 1394 | { |
1409 | struct sock *sk = ndisc_socket->sk; | 1395 | struct net_device *dev = skb->dev; |
1396 | struct net *net = dev->nd_net; | ||
1397 | struct sock *sk = net->ipv6.ndisc_sk; | ||
1410 | int len = sizeof(struct icmp6hdr) + 2 * sizeof(struct in6_addr); | 1398 | int len = sizeof(struct icmp6hdr) + 2 * sizeof(struct in6_addr); |
1411 | struct sk_buff *buff; | 1399 | struct sk_buff *buff; |
1412 | struct icmp6hdr *icmph; | 1400 | struct icmp6hdr *icmph; |
1413 | struct in6_addr saddr_buf; | 1401 | struct in6_addr saddr_buf; |
1414 | struct in6_addr *addrp; | 1402 | struct in6_addr *addrp; |
1415 | struct net_device *dev; | ||
1416 | struct rt6_info *rt; | 1403 | struct rt6_info *rt; |
1417 | struct dst_entry *dst; | 1404 | struct dst_entry *dst; |
1418 | struct inet6_dev *idev; | 1405 | struct inet6_dev *idev; |
@@ -1423,8 +1410,6 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh, | |||
1423 | int hlen; | 1410 | int hlen; |
1424 | u8 ha_buf[MAX_ADDR_LEN], *ha = NULL; | 1411 | u8 ha_buf[MAX_ADDR_LEN], *ha = NULL; |
1425 | 1412 | ||
1426 | dev = skb->dev; | ||
1427 | |||
1428 | if (ipv6_get_lladdr(dev, &saddr_buf, IFA_F_TENTATIVE)) { | 1413 | if (ipv6_get_lladdr(dev, &saddr_buf, IFA_F_TENTATIVE)) { |
1429 | ND_PRINTK2(KERN_WARNING | 1414 | ND_PRINTK2(KERN_WARNING |
1430 | "ICMPv6 Redirect: no link-local address on %s\n", | 1415 | "ICMPv6 Redirect: no link-local address on %s\n", |
@@ -1439,10 +1424,10 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh, | |||
1439 | return; | 1424 | return; |
1440 | } | 1425 | } |
1441 | 1426 | ||
1442 | ndisc_flow_init(&fl, NDISC_REDIRECT, &saddr_buf, &ipv6_hdr(skb)->saddr, | 1427 | icmpv6_flow_init(sk, &fl, NDISC_REDIRECT, |
1443 | dev->ifindex); | 1428 | &saddr_buf, &ipv6_hdr(skb)->saddr, dev->ifindex); |
1444 | 1429 | ||
1445 | dst = ip6_route_output(NULL, &fl); | 1430 | dst = ip6_route_output(net, NULL, &fl); |
1446 | if (dst == NULL) | 1431 | if (dst == NULL) |
1447 | return; | 1432 | return; |
1448 | 1433 | ||
@@ -1486,7 +1471,7 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh, | |||
1486 | if (buff == NULL) { | 1471 | if (buff == NULL) { |
1487 | ND_PRINTK0(KERN_ERR | 1472 | ND_PRINTK0(KERN_ERR |
1488 | "ICMPv6 Redirect: %s() failed to allocate an skb.\n", | 1473 | "ICMPv6 Redirect: %s() failed to allocate an skb.\n", |
1489 | __FUNCTION__); | 1474 | __func__); |
1490 | dst_release(dst); | 1475 | dst_release(dst); |
1491 | return; | 1476 | return; |
1492 | } | 1477 | } |
@@ -1613,18 +1598,16 @@ int ndisc_rcv(struct sk_buff *skb) | |||
1613 | static int ndisc_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) | 1598 | static int ndisc_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) |
1614 | { | 1599 | { |
1615 | struct net_device *dev = ptr; | 1600 | struct net_device *dev = ptr; |
1616 | 1601 | struct net *net = dev->nd_net; | |
1617 | if (dev->nd_net != &init_net) | ||
1618 | return NOTIFY_DONE; | ||
1619 | 1602 | ||
1620 | switch (event) { | 1603 | switch (event) { |
1621 | case NETDEV_CHANGEADDR: | 1604 | case NETDEV_CHANGEADDR: |
1622 | neigh_changeaddr(&nd_tbl, dev); | 1605 | neigh_changeaddr(&nd_tbl, dev); |
1623 | fib6_run_gc(~0UL); | 1606 | fib6_run_gc(~0UL, net); |
1624 | break; | 1607 | break; |
1625 | case NETDEV_DOWN: | 1608 | case NETDEV_DOWN: |
1626 | neigh_ifdown(&nd_tbl, dev); | 1609 | neigh_ifdown(&nd_tbl, dev); |
1627 | fib6_run_gc(~0UL); | 1610 | fib6_run_gc(~0UL, net); |
1628 | break; | 1611 | break; |
1629 | default: | 1612 | default: |
1630 | break; | 1613 | break; |
@@ -1733,22 +1716,24 @@ static int ndisc_ifinfo_sysctl_strategy(ctl_table *ctl, int __user *name, | |||
1733 | 1716 | ||
1734 | #endif | 1717 | #endif |
1735 | 1718 | ||
1736 | int __init ndisc_init(struct net_proto_family *ops) | 1719 | static int ndisc_net_init(struct net *net) |
1737 | { | 1720 | { |
1721 | struct socket *sock; | ||
1738 | struct ipv6_pinfo *np; | 1722 | struct ipv6_pinfo *np; |
1739 | struct sock *sk; | 1723 | struct sock *sk; |
1740 | int err; | 1724 | int err; |
1741 | 1725 | ||
1742 | err = sock_create_kern(PF_INET6, SOCK_RAW, IPPROTO_ICMPV6, &ndisc_socket); | 1726 | err = sock_create_kern(PF_INET6, SOCK_RAW, IPPROTO_ICMPV6, &sock); |
1743 | if (err < 0) { | 1727 | if (err < 0) { |
1744 | ND_PRINTK0(KERN_ERR | 1728 | ND_PRINTK0(KERN_ERR |
1745 | "ICMPv6 NDISC: Failed to initialize the control socket (err %d).\n", | 1729 | "ICMPv6 NDISC: Failed to initialize the control socket (err %d).\n", |
1746 | err); | 1730 | err); |
1747 | ndisc_socket = NULL; /* For safety. */ | ||
1748 | return err; | 1731 | return err; |
1749 | } | 1732 | } |
1750 | 1733 | ||
1751 | sk = ndisc_socket->sk; | 1734 | net->ipv6.ndisc_sk = sk = sock->sk; |
1735 | sk_change_net(sk, net); | ||
1736 | |||
1752 | np = inet6_sk(sk); | 1737 | np = inet6_sk(sk); |
1753 | sk->sk_allocation = GFP_ATOMIC; | 1738 | sk->sk_allocation = GFP_ATOMIC; |
1754 | np->hop_limit = 255; | 1739 | np->hop_limit = 255; |
@@ -1756,21 +1741,52 @@ int __init ndisc_init(struct net_proto_family *ops) | |||
1756 | np->mc_loop = 0; | 1741 | np->mc_loop = 0; |
1757 | sk->sk_prot->unhash(sk); | 1742 | sk->sk_prot->unhash(sk); |
1758 | 1743 | ||
1744 | return 0; | ||
1745 | } | ||
1746 | |||
1747 | static void ndisc_net_exit(struct net *net) | ||
1748 | { | ||
1749 | sk_release_kernel(net->ipv6.ndisc_sk); | ||
1750 | } | ||
1751 | |||
1752 | static struct pernet_operations ndisc_net_ops = { | ||
1753 | .init = ndisc_net_init, | ||
1754 | .exit = ndisc_net_exit, | ||
1755 | }; | ||
1756 | |||
1757 | int __init ndisc_init(void) | ||
1758 | { | ||
1759 | int err; | ||
1760 | |||
1761 | err = register_pernet_subsys(&ndisc_net_ops); | ||
1762 | if (err) | ||
1763 | return err; | ||
1759 | /* | 1764 | /* |
1760 | * Initialize the neighbour table | 1765 | * Initialize the neighbour table |
1761 | */ | 1766 | */ |
1762 | |||
1763 | neigh_table_init(&nd_tbl); | 1767 | neigh_table_init(&nd_tbl); |
1764 | 1768 | ||
1765 | #ifdef CONFIG_SYSCTL | 1769 | #ifdef CONFIG_SYSCTL |
1766 | neigh_sysctl_register(NULL, &nd_tbl.parms, NET_IPV6, NET_IPV6_NEIGH, | 1770 | err = neigh_sysctl_register(NULL, &nd_tbl.parms, NET_IPV6, |
1767 | "ipv6", | 1771 | NET_IPV6_NEIGH, "ipv6", |
1768 | &ndisc_ifinfo_sysctl_change, | 1772 | &ndisc_ifinfo_sysctl_change, |
1769 | &ndisc_ifinfo_sysctl_strategy); | 1773 | &ndisc_ifinfo_sysctl_strategy); |
1774 | if (err) | ||
1775 | goto out_unregister_pernet; | ||
1770 | #endif | 1776 | #endif |
1777 | err = register_netdevice_notifier(&ndisc_netdev_notifier); | ||
1778 | if (err) | ||
1779 | goto out_unregister_sysctl; | ||
1780 | out: | ||
1781 | return err; | ||
1771 | 1782 | ||
1772 | register_netdevice_notifier(&ndisc_netdev_notifier); | 1783 | out_unregister_sysctl: |
1773 | return 0; | 1784 | #ifdef CONFIG_SYSCTL |
1785 | neigh_sysctl_unregister(&nd_tbl.parms); | ||
1786 | out_unregister_pernet: | ||
1787 | #endif | ||
1788 | unregister_pernet_subsys(&ndisc_net_ops); | ||
1789 | goto out; | ||
1774 | } | 1790 | } |
1775 | 1791 | ||
1776 | void ndisc_cleanup(void) | 1792 | void ndisc_cleanup(void) |
@@ -1780,6 +1796,5 @@ void ndisc_cleanup(void) | |||
1780 | neigh_sysctl_unregister(&nd_tbl.parms); | 1796 | neigh_sysctl_unregister(&nd_tbl.parms); |
1781 | #endif | 1797 | #endif |
1782 | neigh_table_clear(&nd_tbl); | 1798 | neigh_table_clear(&nd_tbl); |
1783 | sock_release(ndisc_socket); | 1799 | unregister_pernet_subsys(&ndisc_net_ops); |
1784 | ndisc_socket = NULL; /* For safety. */ | ||
1785 | } | 1800 | } |
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c index 2e06724dc348..aed51bcc66b4 100644 --- a/net/ipv6/netfilter.c +++ b/net/ipv6/netfilter.c | |||
@@ -23,7 +23,7 @@ int ip6_route_me_harder(struct sk_buff *skb) | |||
23 | .saddr = iph->saddr, } }, | 23 | .saddr = iph->saddr, } }, |
24 | }; | 24 | }; |
25 | 25 | ||
26 | dst = ip6_route_output(skb->sk, &fl); | 26 | dst = ip6_route_output(&init_net, skb->sk, &fl); |
27 | 27 | ||
28 | #ifdef CONFIG_XFRM | 28 | #ifdef CONFIG_XFRM |
29 | if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) && | 29 | if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) && |
@@ -86,7 +86,7 @@ static int nf_ip6_reroute(struct sk_buff *skb, | |||
86 | 86 | ||
87 | static int nf_ip6_route(struct dst_entry **dst, struct flowi *fl) | 87 | static int nf_ip6_route(struct dst_entry **dst, struct flowi *fl) |
88 | { | 88 | { |
89 | *dst = ip6_route_output(NULL, fl); | 89 | *dst = ip6_route_output(&init_net, NULL, fl); |
90 | return (*dst)->error; | 90 | return (*dst)->error; |
91 | } | 91 | } |
92 | 92 | ||
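
ip6_route_output() now takes the namespace explicitly, and the netfilter callers above still pass &init_net for the time being. A hedged sketch of a fully namespace-aware caller, deriving the namespace from the device as the rest of this series does; the wrapper name is an assumption.

	/* hypothetical wrapper around the widened ip6_route_output() */
	static struct dst_entry *example_output_route(struct sk_buff *skb,
						      struct flowi *fl)
	{
		/* pick the routing tables of the packet's own namespace
		 * instead of hard-coding &init_net */
		struct net *net = skb->dev->nd_net;

		return ip6_route_output(net, skb->sk, fl);
	}
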
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index bf9bb6e55bb5..af1ec7ba757c 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c | |||
@@ -55,7 +55,7 @@ MODULE_DESCRIPTION("IPv6 packet filter"); | |||
55 | do { \ | 55 | do { \ |
56 | if (!(x)) \ | 56 | if (!(x)) \ |
57 | printk("IP_NF_ASSERT: %s:%s:%u\n", \ | 57 | printk("IP_NF_ASSERT: %s:%s:%u\n", \ |
58 | __FUNCTION__, __FILE__, __LINE__); \ | 58 | __func__, __FILE__, __LINE__); \ |
59 | } while(0) | 59 | } while(0) |
60 | #else | 60 | #else |
61 | #define IP_NF_ASSERT(x) | 61 | #define IP_NF_ASSERT(x) |
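
The only change to ip6_tables.c is the switch from the GCC-specific __FUNCTION__ to the standard C99 __func__ identifier; both expand to the name of the enclosing function. A trivial, hypothetical usage:

	static void example_debug(void)
	{
		/* __func__ expands to "example_debug" here */
		pr_debug("%s: entered\n", __func__);
	}
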
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c index b23baa635fe0..baf829075f6f 100644 --- a/net/ipv6/netfilter/ip6t_REJECT.c +++ b/net/ipv6/netfilter/ip6t_REJECT.c | |||
@@ -93,7 +93,7 @@ static void send_reset(struct sk_buff *oldskb) | |||
93 | fl.fl_ip_sport = otcph.dest; | 93 | fl.fl_ip_sport = otcph.dest; |
94 | fl.fl_ip_dport = otcph.source; | 94 | fl.fl_ip_dport = otcph.source; |
95 | security_skb_classify_flow(oldskb, &fl); | 95 | security_skb_classify_flow(oldskb, &fl); |
96 | dst = ip6_route_output(NULL, &fl); | 96 | dst = ip6_route_output(&init_net, NULL, &fl); |
97 | if (dst == NULL) | 97 | if (dst == NULL) |
98 | return; | 98 | return; |
99 | if (dst->error || xfrm_lookup(&dst, &fl, NULL, 0)) | 99 | if (dst->error || xfrm_lookup(&dst, &fl, NULL, 0)) |
@@ -177,7 +177,7 @@ reject_tg6(struct sk_buff *skb, const struct net_device *in, | |||
177 | { | 177 | { |
178 | const struct ip6t_reject_info *reject = targinfo; | 178 | const struct ip6t_reject_info *reject = targinfo; |
179 | 179 | ||
180 | pr_debug("%s: medium point\n", __FUNCTION__); | 180 | pr_debug("%s: medium point\n", __func__); |
181 | /* WARNING: This code causes reentry within ip6tables. | 181 | /* WARNING: This code causes reentry within ip6tables. |
182 | This means that the ip6tables jump stack is now crap. We | 182 | This means that the ip6tables jump stack is now crap. We |
183 | must return an absolute verdict. --RR */ | 183 | must return an absolute verdict. --RR */ |
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c index 199ef379e501..8a5be290c710 100644 --- a/net/ipv6/proc.c +++ b/net/ipv6/proc.c | |||
@@ -214,6 +214,9 @@ int snmp6_register_dev(struct inet6_dev *idev) | |||
214 | if (!idev || !idev->dev) | 214 | if (!idev || !idev->dev) |
215 | return -EINVAL; | 215 | return -EINVAL; |
216 | 216 | ||
217 | if (idev->dev->nd_net != &init_net) | ||
218 | return 0; | ||
219 | |||
217 | if (!proc_net_devsnmp6) | 220 | if (!proc_net_devsnmp6) |
218 | return -ENOENT; | 221 | return -ENOENT; |
219 | 222 | ||
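
The proc.c hunk gates per-device snmp6 registration to the initial namespace, since those proc entries are not yet namespace-aware. A hedged sketch of the same guard in a hypothetical registration helper:

	static int example_snmp6_register(struct inet6_dev *idev)
	{
		/* quietly skip devices that live in other namespaces */
		if (idev->dev->nd_net != &init_net)
			return 0;

		/* ... proceed with the init_net-only proc registration ... */
		return 0;
	}
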
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index e8b241cb60bc..a4b5aee0f68a 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -40,6 +40,7 @@ | |||
40 | #include <linux/if_arp.h> | 40 | #include <linux/if_arp.h> |
41 | #include <linux/proc_fs.h> | 41 | #include <linux/proc_fs.h> |
42 | #include <linux/seq_file.h> | 42 | #include <linux/seq_file.h> |
43 | #include <linux/nsproxy.h> | ||
43 | #include <net/net_namespace.h> | 44 | #include <net/net_namespace.h> |
44 | #include <net/snmp.h> | 45 | #include <net/snmp.h> |
45 | #include <net/ipv6.h> | 46 | #include <net/ipv6.h> |
@@ -87,14 +88,16 @@ static void ip6_link_failure(struct sk_buff *skb); | |||
87 | static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu); | 88 | static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu); |
88 | 89 | ||
89 | #ifdef CONFIG_IPV6_ROUTE_INFO | 90 | #ifdef CONFIG_IPV6_ROUTE_INFO |
90 | static struct rt6_info *rt6_add_route_info(struct in6_addr *prefix, int prefixlen, | 91 | static struct rt6_info *rt6_add_route_info(struct net *net, |
92 | struct in6_addr *prefix, int prefixlen, | ||
91 | struct in6_addr *gwaddr, int ifindex, | 93 | struct in6_addr *gwaddr, int ifindex, |
92 | unsigned pref); | 94 | unsigned pref); |
93 | static struct rt6_info *rt6_get_route_info(struct in6_addr *prefix, int prefixlen, | 95 | static struct rt6_info *rt6_get_route_info(struct net *net, |
96 | struct in6_addr *prefix, int prefixlen, | ||
94 | struct in6_addr *gwaddr, int ifindex); | 97 | struct in6_addr *gwaddr, int ifindex); |
95 | #endif | 98 | #endif |
96 | 99 | ||
97 | static struct dst_ops ip6_dst_ops = { | 100 | static struct dst_ops ip6_dst_ops_template = { |
98 | .family = AF_INET6, | 101 | .family = AF_INET6, |
99 | .protocol = __constant_htons(ETH_P_IPV6), | 102 | .protocol = __constant_htons(ETH_P_IPV6), |
100 | .gc = ip6_dst_gc, | 103 | .gc = ip6_dst_gc, |
@@ -124,7 +127,7 @@ static struct dst_ops ip6_dst_blackhole_ops = { | |||
124 | .entries = ATOMIC_INIT(0), | 127 | .entries = ATOMIC_INIT(0), |
125 | }; | 128 | }; |
126 | 129 | ||
127 | struct rt6_info ip6_null_entry = { | 130 | static struct rt6_info ip6_null_entry_template = { |
128 | .u = { | 131 | .u = { |
129 | .dst = { | 132 | .dst = { |
130 | .__refcnt = ATOMIC_INIT(1), | 133 | .__refcnt = ATOMIC_INIT(1), |
@@ -134,8 +137,6 @@ struct rt6_info ip6_null_entry = { | |||
134 | .metrics = { [RTAX_HOPLIMIT - 1] = 255, }, | 137 | .metrics = { [RTAX_HOPLIMIT - 1] = 255, }, |
135 | .input = ip6_pkt_discard, | 138 | .input = ip6_pkt_discard, |
136 | .output = ip6_pkt_discard_out, | 139 | .output = ip6_pkt_discard_out, |
137 | .ops = &ip6_dst_ops, | ||
138 | .path = (struct dst_entry*)&ip6_null_entry, | ||
139 | } | 140 | } |
140 | }, | 141 | }, |
141 | .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP), | 142 | .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP), |
@@ -148,7 +149,7 @@ struct rt6_info ip6_null_entry = { | |||
148 | static int ip6_pkt_prohibit(struct sk_buff *skb); | 149 | static int ip6_pkt_prohibit(struct sk_buff *skb); |
149 | static int ip6_pkt_prohibit_out(struct sk_buff *skb); | 150 | static int ip6_pkt_prohibit_out(struct sk_buff *skb); |
150 | 151 | ||
151 | struct rt6_info ip6_prohibit_entry = { | 152 | struct rt6_info ip6_prohibit_entry_template = { |
152 | .u = { | 153 | .u = { |
153 | .dst = { | 154 | .dst = { |
154 | .__refcnt = ATOMIC_INIT(1), | 155 | .__refcnt = ATOMIC_INIT(1), |
@@ -158,8 +159,6 @@ struct rt6_info ip6_prohibit_entry = { | |||
158 | .metrics = { [RTAX_HOPLIMIT - 1] = 255, }, | 159 | .metrics = { [RTAX_HOPLIMIT - 1] = 255, }, |
159 | .input = ip6_pkt_prohibit, | 160 | .input = ip6_pkt_prohibit, |
160 | .output = ip6_pkt_prohibit_out, | 161 | .output = ip6_pkt_prohibit_out, |
161 | .ops = &ip6_dst_ops, | ||
162 | .path = (struct dst_entry*)&ip6_prohibit_entry, | ||
163 | } | 162 | } |
164 | }, | 163 | }, |
165 | .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP), | 164 | .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP), |
@@ -167,7 +166,7 @@ struct rt6_info ip6_prohibit_entry = { | |||
167 | .rt6i_ref = ATOMIC_INIT(1), | 166 | .rt6i_ref = ATOMIC_INIT(1), |
168 | }; | 167 | }; |
169 | 168 | ||
170 | struct rt6_info ip6_blk_hole_entry = { | 169 | static struct rt6_info ip6_blk_hole_entry_template = { |
171 | .u = { | 170 | .u = { |
172 | .dst = { | 171 | .dst = { |
173 | .__refcnt = ATOMIC_INIT(1), | 172 | .__refcnt = ATOMIC_INIT(1), |
@@ -177,8 +176,6 @@ struct rt6_info ip6_blk_hole_entry = { | |||
177 | .metrics = { [RTAX_HOPLIMIT - 1] = 255, }, | 176 | .metrics = { [RTAX_HOPLIMIT - 1] = 255, }, |
178 | .input = dst_discard, | 177 | .input = dst_discard, |
179 | .output = dst_discard, | 178 | .output = dst_discard, |
180 | .ops = &ip6_dst_ops, | ||
181 | .path = (struct dst_entry*)&ip6_blk_hole_entry, | ||
182 | } | 179 | } |
183 | }, | 180 | }, |
184 | .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP), | 181 | .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP), |
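
The *_template renames above suggest that the static null, prohibit and blackhole routes now serve as patterns from which each namespace builds its own copies; the per-net copy itself is outside these hunks. A speculative sketch of how such a copy is typically made (the function name and the kmemdup-based approach are assumptions):

	static int example_route_net_init(struct net *net)
	{
		/* duplicate the template so this namespace gets its own entry */
		net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
						   sizeof(*net->ipv6.ip6_null_entry),
						   GFP_KERNEL);
		if (!net->ipv6.ip6_null_entry)
			return -ENOMEM;

		/* re-establish the self-referencing path and per-net dst_ops
		 * that the template no longer carries */
		net->ipv6.ip6_null_entry->u.dst.path =
			(struct dst_entry *)net->ipv6.ip6_null_entry;
		net->ipv6.ip6_null_entry->u.dst.ops = net->ipv6.ip6_dst_ops;
		return 0;
	}
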
@@ -189,9 +186,9 @@ struct rt6_info ip6_blk_hole_entry = { | |||
189 | #endif | 186 | #endif |
190 | 187 | ||
191 | /* allocate dst with ip6_dst_ops */ | 188 | /* allocate dst with ip6_dst_ops */ |
192 | static __inline__ struct rt6_info *ip6_dst_alloc(void) | 189 | static inline struct rt6_info *ip6_dst_alloc(struct dst_ops *ops) |
193 | { | 190 | { |
194 | return (struct rt6_info *)dst_alloc(&ip6_dst_ops); | 191 | return (struct rt6_info *)dst_alloc(ops); |
195 | } | 192 | } |
196 | 193 | ||
197 | static void ip6_dst_destroy(struct dst_entry *dst) | 194 | static void ip6_dst_destroy(struct dst_entry *dst) |
@@ -239,7 +236,8 @@ static inline int rt6_need_strict(struct in6_addr *daddr) | |||
239 | * Route lookup. Any table->tb6_lock is implied. | 236 | * Route lookup. Any table->tb6_lock is implied. |
240 | */ | 237 | */ |
241 | 238 | ||
242 | static __inline__ struct rt6_info *rt6_device_match(struct rt6_info *rt, | 239 | static inline struct rt6_info *rt6_device_match(struct net *net, |
240 | struct rt6_info *rt, | ||
243 | int oif, | 241 | int oif, |
244 | int strict) | 242 | int strict) |
245 | { | 243 | { |
@@ -268,7 +266,7 @@ static __inline__ struct rt6_info *rt6_device_match(struct rt6_info *rt, | |||
268 | return local; | 266 | return local; |
269 | 267 | ||
270 | if (strict) | 268 | if (strict) |
271 | return &ip6_null_entry; | 269 | return net->ipv6.ip6_null_entry; |
272 | } | 270 | } |
273 | return rt; | 271 | return rt; |
274 | } | 272 | } |
@@ -409,9 +407,10 @@ static struct rt6_info *find_rr_leaf(struct fib6_node *fn, | |||
409 | static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict) | 407 | static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict) |
410 | { | 408 | { |
411 | struct rt6_info *match, *rt0; | 409 | struct rt6_info *match, *rt0; |
410 | struct net *net; | ||
412 | 411 | ||
413 | RT6_TRACE("%s(fn->leaf=%p, oif=%d)\n", | 412 | RT6_TRACE("%s(fn->leaf=%p, oif=%d)\n", |
414 | __FUNCTION__, fn->leaf, oif); | 413 | __func__, fn->leaf, oif); |
415 | 414 | ||
416 | rt0 = fn->rr_ptr; | 415 | rt0 = fn->rr_ptr; |
417 | if (!rt0) | 416 | if (!rt0) |
@@ -432,15 +431,17 @@ static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict) | |||
432 | } | 431 | } |
433 | 432 | ||
434 | RT6_TRACE("%s() => %p\n", | 433 | RT6_TRACE("%s() => %p\n", |
435 | __FUNCTION__, match); | 434 | __func__, match); |
436 | 435 | ||
437 | return (match ? match : &ip6_null_entry); | 436 | net = rt0->rt6i_dev->nd_net; |
437 | return (match ? match : net->ipv6.ip6_null_entry); | ||
438 | } | 438 | } |
439 | 439 | ||
440 | #ifdef CONFIG_IPV6_ROUTE_INFO | 440 | #ifdef CONFIG_IPV6_ROUTE_INFO |
441 | int rt6_route_rcv(struct net_device *dev, u8 *opt, int len, | 441 | int rt6_route_rcv(struct net_device *dev, u8 *opt, int len, |
442 | struct in6_addr *gwaddr) | 442 | struct in6_addr *gwaddr) |
443 | { | 443 | { |
444 | struct net *net = dev->nd_net; | ||
444 | struct route_info *rinfo = (struct route_info *) opt; | 445 | struct route_info *rinfo = (struct route_info *) opt; |
445 | struct in6_addr prefix_buf, *prefix; | 446 | struct in6_addr prefix_buf, *prefix; |
446 | unsigned int pref; | 447 | unsigned int pref; |
@@ -488,7 +489,8 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len, | |||
488 | prefix = &prefix_buf; | 489 | prefix = &prefix_buf; |
489 | } | 490 | } |
490 | 491 | ||
491 | rt = rt6_get_route_info(prefix, rinfo->prefix_len, gwaddr, dev->ifindex); | 492 | rt = rt6_get_route_info(net, prefix, rinfo->prefix_len, gwaddr, |
493 | dev->ifindex); | ||
492 | 494 | ||
493 | if (rt && !lifetime) { | 495 | if (rt && !lifetime) { |
494 | ip6_del_rt(rt); | 496 | ip6_del_rt(rt); |
@@ -496,7 +498,7 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len, | |||
496 | } | 498 | } |
497 | 499 | ||
498 | if (!rt && lifetime) | 500 | if (!rt && lifetime) |
499 | rt = rt6_add_route_info(prefix, rinfo->prefix_len, gwaddr, dev->ifindex, | 501 | rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr, dev->ifindex, |
500 | pref); | 502 | pref); |
501 | else if (rt) | 503 | else if (rt) |
502 | rt->rt6i_flags = RTF_ROUTEINFO | | 504 | rt->rt6i_flags = RTF_ROUTEINFO | |
@@ -515,9 +517,9 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len, | |||
515 | } | 517 | } |
516 | #endif | 518 | #endif |
517 | 519 | ||
518 | #define BACKTRACK(saddr) \ | 520 | #define BACKTRACK(__net, saddr) \ |
519 | do { \ | 521 | do { \ |
520 | if (rt == &ip6_null_entry) { \ | 522 | if (rt == __net->ipv6.ip6_null_entry) { \ |
521 | struct fib6_node *pn; \ | 523 | struct fib6_node *pn; \ |
522 | while (1) { \ | 524 | while (1) { \ |
523 | if (fn->fn_flags & RTN_TL_ROOT) \ | 525 | if (fn->fn_flags & RTN_TL_ROOT) \ |
@@ -533,7 +535,8 @@ do { \ | |||
533 | } \ | 535 | } \ |
534 | } while(0) | 536 | } while(0) |
535 | 537 | ||
536 | static struct rt6_info *ip6_pol_route_lookup(struct fib6_table *table, | 538 | static struct rt6_info *ip6_pol_route_lookup(struct net *net, |
539 | struct fib6_table *table, | ||
537 | struct flowi *fl, int flags) | 540 | struct flowi *fl, int flags) |
538 | { | 541 | { |
539 | struct fib6_node *fn; | 542 | struct fib6_node *fn; |
@@ -543,8 +546,8 @@ static struct rt6_info *ip6_pol_route_lookup(struct fib6_table *table, | |||
543 | fn = fib6_lookup(&table->tb6_root, &fl->fl6_dst, &fl->fl6_src); | 546 | fn = fib6_lookup(&table->tb6_root, &fl->fl6_dst, &fl->fl6_src); |
544 | restart: | 547 | restart: |
545 | rt = fn->leaf; | 548 | rt = fn->leaf; |
546 | rt = rt6_device_match(rt, fl->oif, flags); | 549 | rt = rt6_device_match(net, rt, fl->oif, flags); |
547 | BACKTRACK(&fl->fl6_src); | 550 | BACKTRACK(net, &fl->fl6_src); |
548 | out: | 551 | out: |
549 | dst_use(&rt->u.dst, jiffies); | 552 | dst_use(&rt->u.dst, jiffies); |
550 | read_unlock_bh(&table->tb6_lock); | 553 | read_unlock_bh(&table->tb6_lock); |
@@ -552,8 +555,8 @@ out: | |||
552 | 555 | ||
553 | } | 556 | } |
554 | 557 | ||
555 | struct rt6_info *rt6_lookup(struct in6_addr *daddr, struct in6_addr *saddr, | 558 | struct rt6_info *rt6_lookup(struct net *net, struct in6_addr *daddr, |
556 | int oif, int strict) | 559 | struct in6_addr *saddr, int oif, int strict) |
557 | { | 560 | { |
558 | struct flowi fl = { | 561 | struct flowi fl = { |
559 | .oif = oif, | 562 | .oif = oif, |
@@ -571,7 +574,7 @@ struct rt6_info *rt6_lookup(struct in6_addr *daddr, struct in6_addr *saddr, | |||
571 | flags |= RT6_LOOKUP_F_HAS_SADDR; | 574 | flags |= RT6_LOOKUP_F_HAS_SADDR; |
572 | } | 575 | } |
573 | 576 | ||
574 | dst = fib6_rule_lookup(&fl, flags, ip6_pol_route_lookup); | 577 | dst = fib6_rule_lookup(net, &fl, flags, ip6_pol_route_lookup); |
575 | if (dst->error == 0) | 578 | if (dst->error == 0) |
576 | return (struct rt6_info *) dst; | 579 | return (struct rt6_info *) dst; |
577 | 580 | ||
@@ -604,7 +607,7 @@ static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info) | |||
604 | int ip6_ins_rt(struct rt6_info *rt) | 607 | int ip6_ins_rt(struct rt6_info *rt) |
605 | { | 608 | { |
606 | struct nl_info info = { | 609 | struct nl_info info = { |
607 | .nl_net = &init_net, | 610 | .nl_net = rt->rt6i_dev->nd_net, |
608 | }; | 611 | }; |
609 | return __ip6_ins_rt(rt, &info); | 612 | return __ip6_ins_rt(rt, &info); |
610 | } | 613 | } |
@@ -660,8 +663,8 @@ static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort, struct in6_addr *d | |||
660 | return rt; | 663 | return rt; |
661 | } | 664 | } |
662 | 665 | ||
663 | static struct rt6_info *ip6_pol_route(struct fib6_table *table, int oif, | 666 | static struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, int oif, |
664 | struct flowi *fl, int flags) | 667 | struct flowi *fl, int flags) |
665 | { | 668 | { |
666 | struct fib6_node *fn; | 669 | struct fib6_node *fn; |
667 | struct rt6_info *rt, *nrt; | 670 | struct rt6_info *rt, *nrt; |
@@ -680,8 +683,9 @@ restart_2: | |||
680 | 683 | ||
681 | restart: | 684 | restart: |
682 | rt = rt6_select(fn, oif, strict | reachable); | 685 | rt = rt6_select(fn, oif, strict | reachable); |
683 | BACKTRACK(&fl->fl6_src); | 686 | |
684 | if (rt == &ip6_null_entry || | 687 | BACKTRACK(net, &fl->fl6_src); |
688 | if (rt == net->ipv6.ip6_null_entry || | ||
685 | rt->rt6i_flags & RTF_CACHE) | 689 | rt->rt6i_flags & RTF_CACHE) |
686 | goto out; | 690 | goto out; |
687 | 691 | ||
@@ -699,7 +703,7 @@ restart: | |||
699 | } | 703 | } |
700 | 704 | ||
701 | dst_release(&rt->u.dst); | 705 | dst_release(&rt->u.dst); |
702 | rt = nrt ? : &ip6_null_entry; | 706 | rt = nrt ? : net->ipv6.ip6_null_entry; |
703 | 707 | ||
704 | dst_hold(&rt->u.dst); | 708 | dst_hold(&rt->u.dst); |
705 | if (nrt) { | 709 | if (nrt) { |
@@ -732,15 +736,16 @@ out2: | |||
732 | return rt; | 736 | return rt; |
733 | } | 737 | } |
734 | 738 | ||
735 | static struct rt6_info *ip6_pol_route_input(struct fib6_table *table, | 739 | static struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table *table, |
736 | struct flowi *fl, int flags) | 740 | struct flowi *fl, int flags) |
737 | { | 741 | { |
738 | return ip6_pol_route(table, fl->iif, fl, flags); | 742 | return ip6_pol_route(net, table, fl->iif, fl, flags); |
739 | } | 743 | } |
740 | 744 | ||
741 | void ip6_route_input(struct sk_buff *skb) | 745 | void ip6_route_input(struct sk_buff *skb) |
742 | { | 746 | { |
743 | struct ipv6hdr *iph = ipv6_hdr(skb); | 747 | struct ipv6hdr *iph = ipv6_hdr(skb); |
748 | struct net *net = skb->dev->nd_net; | ||
744 | int flags = RT6_LOOKUP_F_HAS_SADDR; | 749 | int flags = RT6_LOOKUP_F_HAS_SADDR; |
745 | struct flowi fl = { | 750 | struct flowi fl = { |
746 | .iif = skb->dev->ifindex, | 751 | .iif = skb->dev->ifindex, |
@@ -758,16 +763,17 @@ void ip6_route_input(struct sk_buff *skb) | |||
758 | if (rt6_need_strict(&iph->daddr)) | 763 | if (rt6_need_strict(&iph->daddr)) |
759 | flags |= RT6_LOOKUP_F_IFACE; | 764 | flags |= RT6_LOOKUP_F_IFACE; |
760 | 765 | ||
761 | skb->dst = fib6_rule_lookup(&fl, flags, ip6_pol_route_input); | 766 | skb->dst = fib6_rule_lookup(net, &fl, flags, ip6_pol_route_input); |
762 | } | 767 | } |
763 | 768 | ||
764 | static struct rt6_info *ip6_pol_route_output(struct fib6_table *table, | 769 | static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table *table, |
765 | struct flowi *fl, int flags) | 770 | struct flowi *fl, int flags) |
766 | { | 771 | { |
767 | return ip6_pol_route(table, fl->oif, fl, flags); | 772 | return ip6_pol_route(net, table, fl->oif, fl, flags); |
768 | } | 773 | } |
769 | 774 | ||
770 | struct dst_entry * ip6_route_output(struct sock *sk, struct flowi *fl) | 775 | struct dst_entry * ip6_route_output(struct net *net, struct sock *sk, |
776 | struct flowi *fl) | ||
771 | { | 777 | { |
772 | int flags = 0; | 778 | int flags = 0; |
773 | 779 | ||
@@ -777,7 +783,7 @@ struct dst_entry * ip6_route_output(struct sock *sk, struct flowi *fl) | |||
777 | if (!ipv6_addr_any(&fl->fl6_src)) | 783 | if (!ipv6_addr_any(&fl->fl6_src)) |
778 | flags |= RT6_LOOKUP_F_HAS_SADDR; | 784 | flags |= RT6_LOOKUP_F_HAS_SADDR; |
779 | 785 | ||
780 | return fib6_rule_lookup(fl, flags, ip6_pol_route_output); | 786 | return fib6_rule_lookup(net, fl, flags, ip6_pol_route_output); |
781 | } | 787 | } |
782 | 788 | ||
783 | EXPORT_SYMBOL(ip6_route_output); | 789 | EXPORT_SYMBOL(ip6_route_output); |
@@ -886,12 +892,12 @@ static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu) | |||
886 | 892 | ||
887 | static int ipv6_get_mtu(struct net_device *dev); | 893 | static int ipv6_get_mtu(struct net_device *dev); |
888 | 894 | ||
889 | static inline unsigned int ipv6_advmss(unsigned int mtu) | 895 | static inline unsigned int ipv6_advmss(struct net *net, unsigned int mtu) |
890 | { | 896 | { |
891 | mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr); | 897 | mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr); |
892 | 898 | ||
893 | if (mtu < init_net.ipv6.sysctl.ip6_rt_min_advmss) | 899 | if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss) |
894 | mtu = init_net.ipv6.sysctl.ip6_rt_min_advmss; | 900 | mtu = net->ipv6.sysctl.ip6_rt_min_advmss; |
895 | 901 | ||
896 | /* | 902 | /* |
897 | * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and | 903 | * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and |
@@ -904,21 +910,21 @@ static inline unsigned int ipv6_advmss(unsigned int mtu) | |||
904 | return mtu; | 910 | return mtu; |
905 | } | 911 | } |
906 | 912 | ||
907 | static struct dst_entry *ndisc_dst_gc_list; | 913 | static struct dst_entry *icmp6_dst_gc_list; |
908 | static DEFINE_SPINLOCK(ndisc_lock); | 914 | static DEFINE_SPINLOCK(icmp6_dst_lock); |
909 | 915 | ||
910 | struct dst_entry *ndisc_dst_alloc(struct net_device *dev, | 916 | struct dst_entry *icmp6_dst_alloc(struct net_device *dev, |
911 | struct neighbour *neigh, | 917 | struct neighbour *neigh, |
912 | struct in6_addr *addr, | 918 | struct in6_addr *addr) |
913 | int (*output)(struct sk_buff *)) | ||
914 | { | 919 | { |
915 | struct rt6_info *rt; | 920 | struct rt6_info *rt; |
916 | struct inet6_dev *idev = in6_dev_get(dev); | 921 | struct inet6_dev *idev = in6_dev_get(dev); |
922 | struct net *net = dev->nd_net; | ||
917 | 923 | ||
918 | if (unlikely(idev == NULL)) | 924 | if (unlikely(idev == NULL)) |
919 | return NULL; | 925 | return NULL; |
920 | 926 | ||
921 | rt = ip6_dst_alloc(); | 927 | rt = ip6_dst_alloc(net->ipv6.ip6_dst_ops); |
922 | if (unlikely(rt == NULL)) { | 928 | if (unlikely(rt == NULL)) { |
923 | in6_dev_put(idev); | 929 | in6_dev_put(idev); |
924 | goto out; | 930 | goto out; |
@@ -936,8 +942,8 @@ struct dst_entry *ndisc_dst_alloc(struct net_device *dev, | |||
936 | atomic_set(&rt->u.dst.__refcnt, 1); | 942 | atomic_set(&rt->u.dst.__refcnt, 1); |
937 | rt->u.dst.metrics[RTAX_HOPLIMIT-1] = 255; | 943 | rt->u.dst.metrics[RTAX_HOPLIMIT-1] = 255; |
938 | rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev); | 944 | rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev); |
939 | rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dst_mtu(&rt->u.dst)); | 945 | rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, dst_mtu(&rt->u.dst)); |
940 | rt->u.dst.output = output; | 946 | rt->u.dst.output = ip6_output; |
941 | 947 | ||
942 | #if 0 /* there's no chance to use these for ndisc */ | 948 | #if 0 /* there's no chance to use these for ndisc */ |
943 | rt->u.dst.flags = ipv6_addr_type(addr) & IPV6_ADDR_UNICAST | 949 | rt->u.dst.flags = ipv6_addr_type(addr) & IPV6_ADDR_UNICAST |
@@ -947,18 +953,18 @@ struct dst_entry *ndisc_dst_alloc(struct net_device *dev, | |||
947 | rt->rt6i_dst.plen = 128; | 953 | rt->rt6i_dst.plen = 128; |
948 | #endif | 954 | #endif |
949 | 955 | ||
950 | spin_lock_bh(&ndisc_lock); | 956 | spin_lock_bh(&icmp6_dst_lock); |
951 | rt->u.dst.next = ndisc_dst_gc_list; | 957 | rt->u.dst.next = icmp6_dst_gc_list; |
952 | ndisc_dst_gc_list = &rt->u.dst; | 958 | icmp6_dst_gc_list = &rt->u.dst; |
953 | spin_unlock_bh(&ndisc_lock); | 959 | spin_unlock_bh(&icmp6_dst_lock); |
954 | 960 | ||
955 | fib6_force_start_gc(); | 961 | fib6_force_start_gc(net); |
956 | 962 | ||
957 | out: | 963 | out: |
958 | return &rt->u.dst; | 964 | return &rt->u.dst; |
959 | } | 965 | } |
960 | 966 | ||
961 | int ndisc_dst_gc(int *more) | 967 | int icmp6_dst_gc(int *more) |
962 | { | 968 | { |
963 | struct dst_entry *dst, *next, **pprev; | 969 | struct dst_entry *dst, *next, **pprev; |
964 | int freed; | 970 | int freed; |
@@ -966,8 +972,8 @@ int ndisc_dst_gc(int *more) | |||
966 | next = NULL; | 972 | next = NULL; |
967 | freed = 0; | 973 | freed = 0; |
968 | 974 | ||
969 | spin_lock_bh(&ndisc_lock); | 975 | spin_lock_bh(&icmp6_dst_lock); |
970 | pprev = &ndisc_dst_gc_list; | 976 | pprev = &icmp6_dst_gc_list; |
971 | 977 | ||
972 | while ((dst = *pprev) != NULL) { | 978 | while ((dst = *pprev) != NULL) { |
973 | if (!atomic_read(&dst->__refcnt)) { | 979 | if (!atomic_read(&dst->__refcnt)) { |
@@ -980,30 +986,33 @@ int ndisc_dst_gc(int *more) | |||
980 | } | 986 | } |
981 | } | 987 | } |
982 | 988 | ||
983 | spin_unlock_bh(&ndisc_lock); | 989 | spin_unlock_bh(&icmp6_dst_lock); |
984 | 990 | ||
985 | return freed; | 991 | return freed; |
986 | } | 992 | } |
987 | 993 | ||
988 | static int ip6_dst_gc(struct dst_ops *ops) | 994 | static int ip6_dst_gc(struct dst_ops *ops) |
989 | { | 995 | { |
990 | static unsigned expire = 30*HZ; | ||
991 | static unsigned long last_gc; | ||
992 | unsigned long now = jiffies; | 996 | unsigned long now = jiffies; |
993 | 997 | struct net *net = ops->dst_net; | |
994 | if (time_after(last_gc + init_net.ipv6.sysctl.ip6_rt_gc_min_interval, now) && | 998 | int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval; |
995 | atomic_read(&ip6_dst_ops.entries) <= init_net.ipv6.sysctl.ip6_rt_max_size) | 999 | int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size; |
1000 | int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity; | ||
1001 | int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout; | ||
1002 | unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc; | ||
1003 | |||
1004 | if (time_after(rt_last_gc + rt_min_interval, now) && | ||
1005 | atomic_read(&ops->entries) <= rt_max_size) | ||
996 | goto out; | 1006 | goto out; |
997 | 1007 | ||
998 | expire++; | 1008 | net->ipv6.ip6_rt_gc_expire++; |
999 | fib6_run_gc(expire); | 1009 | fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net); |
1000 | last_gc = now; | 1010 | net->ipv6.ip6_rt_last_gc = now; |
1001 | if (atomic_read(&ip6_dst_ops.entries) < ip6_dst_ops.gc_thresh) | 1011 | if (atomic_read(&ops->entries) < ops->gc_thresh) |
1002 | expire = init_net.ipv6.sysctl.ip6_rt_gc_timeout>>1; | 1012 | net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1; |
1003 | |||
1004 | out: | 1013 | out: |
1005 | expire -= expire>>init_net.ipv6.sysctl.ip6_rt_gc_elasticity; | 1014 | net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity; |
1006 | return (atomic_read(&ip6_dst_ops.entries) > init_net.ipv6.sysctl.ip6_rt_max_size); | 1015 | return (atomic_read(&ops->entries) > rt_max_size); |
1007 | } | 1016 | } |
1008 | 1017 | ||
1009 | /* Clean host part of a prefix. Not necessary in radix tree, | 1018 | /* Clean host part of a prefix. Not necessary in radix tree, |
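
The ip6_dst_gc() rewrite above moves the function-static expire/last_gc bookkeeping into struct net, so each namespace ages its dst cache on its own clock and against its own sysctls. A hedged sketch of that static-to-per-instance conversion; the struct and field names are invented.

	struct example_gc_state {
		unsigned long	last_gc;	/* jiffies of the previous GC pass */
		unsigned int	expire;		/* current adaptive expiry, in jiffies */
	};

	static int example_gc(struct example_gc_state *st, unsigned long min_interval,
			      int entries, int max_size)
	{
		unsigned long now = jiffies;

		/* skip this pass if the last one was recent and we are under the cap */
		if (time_after(st->last_gc + min_interval, now) && entries <= max_size)
			return 0;

		st->expire++;		/* grow more aggressive on back-to-back passes */
		st->last_gc = now;
		return 1;		/* caller would run fib6_run_gc(st->expire, net) */
	}
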
@@ -1045,6 +1054,7 @@ int ipv6_get_hoplimit(struct net_device *dev) | |||
1045 | int ip6_route_add(struct fib6_config *cfg) | 1054 | int ip6_route_add(struct fib6_config *cfg) |
1046 | { | 1055 | { |
1047 | int err; | 1056 | int err; |
1057 | struct net *net = cfg->fc_nlinfo.nl_net; | ||
1048 | struct rt6_info *rt = NULL; | 1058 | struct rt6_info *rt = NULL; |
1049 | struct net_device *dev = NULL; | 1059 | struct net_device *dev = NULL; |
1050 | struct inet6_dev *idev = NULL; | 1060 | struct inet6_dev *idev = NULL; |
@@ -1059,7 +1069,7 @@ int ip6_route_add(struct fib6_config *cfg) | |||
1059 | #endif | 1069 | #endif |
1060 | if (cfg->fc_ifindex) { | 1070 | if (cfg->fc_ifindex) { |
1061 | err = -ENODEV; | 1071 | err = -ENODEV; |
1062 | dev = dev_get_by_index(&init_net, cfg->fc_ifindex); | 1072 | dev = dev_get_by_index(net, cfg->fc_ifindex); |
1063 | if (!dev) | 1073 | if (!dev) |
1064 | goto out; | 1074 | goto out; |
1065 | idev = in6_dev_get(dev); | 1075 | idev = in6_dev_get(dev); |
@@ -1070,13 +1080,13 @@ int ip6_route_add(struct fib6_config *cfg) | |||
1070 | if (cfg->fc_metric == 0) | 1080 | if (cfg->fc_metric == 0) |
1071 | cfg->fc_metric = IP6_RT_PRIO_USER; | 1081 | cfg->fc_metric = IP6_RT_PRIO_USER; |
1072 | 1082 | ||
1073 | table = fib6_new_table(cfg->fc_table); | 1083 | table = fib6_new_table(net, cfg->fc_table); |
1074 | if (table == NULL) { | 1084 | if (table == NULL) { |
1075 | err = -ENOBUFS; | 1085 | err = -ENOBUFS; |
1076 | goto out; | 1086 | goto out; |
1077 | } | 1087 | } |
1078 | 1088 | ||
1079 | rt = ip6_dst_alloc(); | 1089 | rt = ip6_dst_alloc(net->ipv6.ip6_dst_ops); |
1080 | 1090 | ||
1081 | if (rt == NULL) { | 1091 | if (rt == NULL) { |
1082 | err = -ENOMEM; | 1092 | err = -ENOMEM; |
@@ -1117,12 +1127,12 @@ int ip6_route_add(struct fib6_config *cfg) | |||
1117 | if ((cfg->fc_flags & RTF_REJECT) || | 1127 | if ((cfg->fc_flags & RTF_REJECT) || |
1118 | (dev && (dev->flags&IFF_LOOPBACK) && !(addr_type&IPV6_ADDR_LOOPBACK))) { | 1128 | (dev && (dev->flags&IFF_LOOPBACK) && !(addr_type&IPV6_ADDR_LOOPBACK))) { |
1119 | /* hold loopback dev/idev if we haven't done so. */ | 1129 | /* hold loopback dev/idev if we haven't done so. */ |
1120 | if (dev != init_net.loopback_dev) { | 1130 | if (dev != net->loopback_dev) { |
1121 | if (dev) { | 1131 | if (dev) { |
1122 | dev_put(dev); | 1132 | dev_put(dev); |
1123 | in6_dev_put(idev); | 1133 | in6_dev_put(idev); |
1124 | } | 1134 | } |
1125 | dev = init_net.loopback_dev; | 1135 | dev = net->loopback_dev; |
1126 | dev_hold(dev); | 1136 | dev_hold(dev); |
1127 | idev = in6_dev_get(dev); | 1137 | idev = in6_dev_get(dev); |
1128 | if (!idev) { | 1138 | if (!idev) { |
@@ -1159,7 +1169,7 @@ int ip6_route_add(struct fib6_config *cfg) | |||
1159 | if (!(gwa_type&IPV6_ADDR_UNICAST)) | 1169 | if (!(gwa_type&IPV6_ADDR_UNICAST)) |
1160 | goto out; | 1170 | goto out; |
1161 | 1171 | ||
1162 | grt = rt6_lookup(gw_addr, NULL, cfg->fc_ifindex, 1); | 1172 | grt = rt6_lookup(net, gw_addr, NULL, cfg->fc_ifindex, 1); |
1163 | 1173 | ||
1164 | err = -EHOSTUNREACH; | 1174 | err = -EHOSTUNREACH; |
1165 | if (grt == NULL) | 1175 | if (grt == NULL) |
@@ -1226,10 +1236,13 @@ install_route: | |||
1226 | if (!rt->u.dst.metrics[RTAX_MTU-1]) | 1236 | if (!rt->u.dst.metrics[RTAX_MTU-1]) |
1227 | rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(dev); | 1237 | rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(dev); |
1228 | if (!rt->u.dst.metrics[RTAX_ADVMSS-1]) | 1238 | if (!rt->u.dst.metrics[RTAX_ADVMSS-1]) |
1229 | rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dst_mtu(&rt->u.dst)); | 1239 | rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, dst_mtu(&rt->u.dst)); |
1230 | rt->u.dst.dev = dev; | 1240 | rt->u.dst.dev = dev; |
1231 | rt->rt6i_idev = idev; | 1241 | rt->rt6i_idev = idev; |
1232 | rt->rt6i_table = table; | 1242 | rt->rt6i_table = table; |
1243 | |||
1244 | cfg->fc_nlinfo.nl_net = dev->nd_net; | ||
1245 | |||
1233 | return __ip6_ins_rt(rt, &cfg->fc_nlinfo); | 1246 | return __ip6_ins_rt(rt, &cfg->fc_nlinfo); |
1234 | 1247 | ||
1235 | out: | 1248 | out: |
@@ -1246,8 +1259,9 @@ static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info) | |||
1246 | { | 1259 | { |
1247 | int err; | 1260 | int err; |
1248 | struct fib6_table *table; | 1261 | struct fib6_table *table; |
1262 | struct net *net = rt->rt6i_dev->nd_net; | ||
1249 | 1263 | ||
1250 | if (rt == &ip6_null_entry) | 1264 | if (rt == net->ipv6.ip6_null_entry) |
1251 | return -ENOENT; | 1265 | return -ENOENT; |
1252 | 1266 | ||
1253 | table = rt->rt6i_table; | 1267 | table = rt->rt6i_table; |
@@ -1264,7 +1278,7 @@ static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info) | |||
1264 | int ip6_del_rt(struct rt6_info *rt) | 1278 | int ip6_del_rt(struct rt6_info *rt) |
1265 | { | 1279 | { |
1266 | struct nl_info info = { | 1280 | struct nl_info info = { |
1267 | .nl_net = &init_net, | 1281 | .nl_net = rt->rt6i_dev->nd_net, |
1268 | }; | 1282 | }; |
1269 | return __ip6_del_rt(rt, &info); | 1283 | return __ip6_del_rt(rt, &info); |
1270 | } | 1284 | } |
@@ -1276,7 +1290,7 @@ static int ip6_route_del(struct fib6_config *cfg) | |||
1276 | struct rt6_info *rt; | 1290 | struct rt6_info *rt; |
1277 | int err = -ESRCH; | 1291 | int err = -ESRCH; |
1278 | 1292 | ||
1279 | table = fib6_get_table(cfg->fc_table); | 1293 | table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table); |
1280 | if (table == NULL) | 1294 | if (table == NULL) |
1281 | return err; | 1295 | return err; |
1282 | 1296 | ||
@@ -1316,7 +1330,8 @@ struct ip6rd_flowi { | |||
1316 | struct in6_addr gateway; | 1330 | struct in6_addr gateway; |
1317 | }; | 1331 | }; |
1318 | 1332 | ||
1319 | static struct rt6_info *__ip6_route_redirect(struct fib6_table *table, | 1333 | static struct rt6_info *__ip6_route_redirect(struct net *net, |
1334 | struct fib6_table *table, | ||
1320 | struct flowi *fl, | 1335 | struct flowi *fl, |
1321 | int flags) | 1336 | int flags) |
1322 | { | 1337 | { |
@@ -1359,8 +1374,8 @@ restart: | |||
1359 | } | 1374 | } |
1360 | 1375 | ||
1361 | if (!rt) | 1376 | if (!rt) |
1362 | rt = &ip6_null_entry; | 1377 | rt = net->ipv6.ip6_null_entry; |
1363 | BACKTRACK(&fl->fl6_src); | 1378 | BACKTRACK(net, &fl->fl6_src); |
1364 | out: | 1379 | out: |
1365 | dst_hold(&rt->u.dst); | 1380 | dst_hold(&rt->u.dst); |
1366 | 1381 | ||
@@ -1375,6 +1390,7 @@ static struct rt6_info *ip6_route_redirect(struct in6_addr *dest, | |||
1375 | struct net_device *dev) | 1390 | struct net_device *dev) |
1376 | { | 1391 | { |
1377 | int flags = RT6_LOOKUP_F_HAS_SADDR; | 1392 | int flags = RT6_LOOKUP_F_HAS_SADDR; |
1393 | struct net *net = dev->nd_net; | ||
1378 | struct ip6rd_flowi rdfl = { | 1394 | struct ip6rd_flowi rdfl = { |
1379 | .fl = { | 1395 | .fl = { |
1380 | .oif = dev->ifindex, | 1396 | .oif = dev->ifindex, |
@@ -1391,7 +1407,8 @@ static struct rt6_info *ip6_route_redirect(struct in6_addr *dest, | |||
1391 | if (rt6_need_strict(dest)) | 1407 | if (rt6_need_strict(dest)) |
1392 | flags |= RT6_LOOKUP_F_IFACE; | 1408 | flags |= RT6_LOOKUP_F_IFACE; |
1393 | 1409 | ||
1394 | return (struct rt6_info *)fib6_rule_lookup((struct flowi *)&rdfl, flags, __ip6_route_redirect); | 1410 | return (struct rt6_info *)fib6_rule_lookup(net, (struct flowi *)&rdfl, |
1411 | flags, __ip6_route_redirect); | ||
1395 | } | 1412 | } |
1396 | 1413 | ||
1397 | void rt6_redirect(struct in6_addr *dest, struct in6_addr *src, | 1414 | void rt6_redirect(struct in6_addr *dest, struct in6_addr *src, |
@@ -1400,10 +1417,11 @@ void rt6_redirect(struct in6_addr *dest, struct in6_addr *src, | |||
1400 | { | 1417 | { |
1401 | struct rt6_info *rt, *nrt = NULL; | 1418 | struct rt6_info *rt, *nrt = NULL; |
1402 | struct netevent_redirect netevent; | 1419 | struct netevent_redirect netevent; |
1420 | struct net *net = neigh->dev->nd_net; | ||
1403 | 1421 | ||
1404 | rt = ip6_route_redirect(dest, src, saddr, neigh->dev); | 1422 | rt = ip6_route_redirect(dest, src, saddr, neigh->dev); |
1405 | 1423 | ||
1406 | if (rt == &ip6_null_entry) { | 1424 | if (rt == net->ipv6.ip6_null_entry) { |
1407 | if (net_ratelimit()) | 1425 | if (net_ratelimit()) |
1408 | printk(KERN_DEBUG "rt6_redirect: source isn't a valid nexthop " | 1426 | printk(KERN_DEBUG "rt6_redirect: source isn't a valid nexthop " |
1409 | "for redirect target\n"); | 1427 | "for redirect target\n"); |
@@ -1448,7 +1466,8 @@ void rt6_redirect(struct in6_addr *dest, struct in6_addr *src, | |||
1448 | nrt->rt6i_nexthop = neigh_clone(neigh); | 1466 | nrt->rt6i_nexthop = neigh_clone(neigh); |
1449 | /* Reset pmtu, it may be better */ | 1467 | /* Reset pmtu, it may be better */ |
1450 | nrt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(neigh->dev); | 1468 | nrt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(neigh->dev); |
1451 | nrt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dst_mtu(&nrt->u.dst)); | 1469 | nrt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(neigh->dev->nd_net, |
1470 | dst_mtu(&nrt->u.dst)); | ||
1452 | 1471 | ||
1453 | if (ip6_ins_rt(nrt)) | 1472 | if (ip6_ins_rt(nrt)) |
1454 | goto out; | 1473 | goto out; |
@@ -1476,9 +1495,10 @@ void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr, | |||
1476 | struct net_device *dev, u32 pmtu) | 1495 | struct net_device *dev, u32 pmtu) |
1477 | { | 1496 | { |
1478 | struct rt6_info *rt, *nrt; | 1497 | struct rt6_info *rt, *nrt; |
1498 | struct net *net = dev->nd_net; | ||
1479 | int allfrag = 0; | 1499 | int allfrag = 0; |
1480 | 1500 | ||
1481 | rt = rt6_lookup(daddr, saddr, dev->ifindex, 0); | 1501 | rt = rt6_lookup(net, daddr, saddr, dev->ifindex, 0); |
1482 | if (rt == NULL) | 1502 | if (rt == NULL) |
1483 | return; | 1503 | return; |
1484 | 1504 | ||
@@ -1511,7 +1531,7 @@ void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr, | |||
1511 | rt->u.dst.metrics[RTAX_MTU-1] = pmtu; | 1531 | rt->u.dst.metrics[RTAX_MTU-1] = pmtu; |
1512 | if (allfrag) | 1532 | if (allfrag) |
1513 | rt->u.dst.metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG; | 1533 | rt->u.dst.metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG; |
1514 | dst_set_expires(&rt->u.dst, init_net.ipv6.sysctl.ip6_rt_mtu_expires); | 1534 | dst_set_expires(&rt->u.dst, net->ipv6.sysctl.ip6_rt_mtu_expires); |
1515 | rt->rt6i_flags |= RTF_MODIFIED|RTF_EXPIRES; | 1535 | rt->rt6i_flags |= RTF_MODIFIED|RTF_EXPIRES; |
1516 | goto out; | 1536 | goto out; |
1517 | } | 1537 | } |
@@ -1537,7 +1557,7 @@ void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr, | |||
1537 | * which is 10 mins. After 10 mins the decreased pmtu is expired | 1557 | * which is 10 mins. After 10 mins the decreased pmtu is expired |
1538 | * and detecting PMTU increase will be automatically happened. | 1558 | * and detecting PMTU increase will be automatically happened. |
1539 | */ | 1559 | */ |
1540 | dst_set_expires(&nrt->u.dst, init_net.ipv6.sysctl.ip6_rt_mtu_expires); | 1560 | dst_set_expires(&nrt->u.dst, net->ipv6.sysctl.ip6_rt_mtu_expires); |
1541 | nrt->rt6i_flags |= RTF_DYNAMIC|RTF_EXPIRES; | 1561 | nrt->rt6i_flags |= RTF_DYNAMIC|RTF_EXPIRES; |
1542 | 1562 | ||
1543 | ip6_ins_rt(nrt); | 1563 | ip6_ins_rt(nrt); |
@@ -1552,7 +1572,8 @@ out: | |||
1552 | 1572 | ||
1553 | static struct rt6_info * ip6_rt_copy(struct rt6_info *ort) | 1573 | static struct rt6_info * ip6_rt_copy(struct rt6_info *ort) |
1554 | { | 1574 | { |
1555 | struct rt6_info *rt = ip6_dst_alloc(); | 1575 | struct net *net = ort->rt6i_dev->nd_net; |
1576 | struct rt6_info *rt = ip6_dst_alloc(net->ipv6.ip6_dst_ops); | ||
1556 | 1577 | ||
1557 | if (rt) { | 1578 | if (rt) { |
1558 | rt->u.dst.input = ort->u.dst.input; | 1579 | rt->u.dst.input = ort->u.dst.input; |
@@ -1583,14 +1604,15 @@ static struct rt6_info * ip6_rt_copy(struct rt6_info *ort) | |||
1583 | } | 1604 | } |
1584 | 1605 | ||
1585 | #ifdef CONFIG_IPV6_ROUTE_INFO | 1606 | #ifdef CONFIG_IPV6_ROUTE_INFO |
1586 | static struct rt6_info *rt6_get_route_info(struct in6_addr *prefix, int prefixlen, | 1607 | static struct rt6_info *rt6_get_route_info(struct net *net, |
1608 | struct in6_addr *prefix, int prefixlen, | ||
1587 | struct in6_addr *gwaddr, int ifindex) | 1609 | struct in6_addr *gwaddr, int ifindex) |
1588 | { | 1610 | { |
1589 | struct fib6_node *fn; | 1611 | struct fib6_node *fn; |
1590 | struct rt6_info *rt = NULL; | 1612 | struct rt6_info *rt = NULL; |
1591 | struct fib6_table *table; | 1613 | struct fib6_table *table; |
1592 | 1614 | ||
1593 | table = fib6_get_table(RT6_TABLE_INFO); | 1615 | table = fib6_get_table(net, RT6_TABLE_INFO); |
1594 | if (table == NULL) | 1616 | if (table == NULL) |
1595 | return NULL; | 1617 | return NULL; |
1596 | 1618 | ||
@@ -1614,7 +1636,8 @@ out: | |||
1614 | return rt; | 1636 | return rt; |
1615 | } | 1637 | } |
1616 | 1638 | ||
1617 | static struct rt6_info *rt6_add_route_info(struct in6_addr *prefix, int prefixlen, | 1639 | static struct rt6_info *rt6_add_route_info(struct net *net, |
1640 | struct in6_addr *prefix, int prefixlen, | ||
1618 | struct in6_addr *gwaddr, int ifindex, | 1641 | struct in6_addr *gwaddr, int ifindex, |
1619 | unsigned pref) | 1642 | unsigned pref) |
1620 | { | 1643 | { |
@@ -1625,6 +1648,9 @@ static struct rt6_info *rt6_add_route_info(struct in6_addr *prefix, int prefixle | |||
1625 | .fc_dst_len = prefixlen, | 1648 | .fc_dst_len = prefixlen, |
1626 | .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO | | 1649 | .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO | |
1627 | RTF_UP | RTF_PREF(pref), | 1650 | RTF_UP | RTF_PREF(pref), |
1651 | .fc_nlinfo.pid = 0, | ||
1652 | .fc_nlinfo.nlh = NULL, | ||
1653 | .fc_nlinfo.nl_net = net, | ||
1628 | }; | 1654 | }; |
1629 | 1655 | ||
1630 | ipv6_addr_copy(&cfg.fc_dst, prefix); | 1656 | ipv6_addr_copy(&cfg.fc_dst, prefix); |
@@ -1636,7 +1662,7 @@ static struct rt6_info *rt6_add_route_info(struct in6_addr *prefix, int prefixle | |||
1636 | 1662 | ||
1637 | ip6_route_add(&cfg); | 1663 | ip6_route_add(&cfg); |
1638 | 1664 | ||
1639 | return rt6_get_route_info(prefix, prefixlen, gwaddr, ifindex); | 1665 | return rt6_get_route_info(net, prefix, prefixlen, gwaddr, ifindex); |
1640 | } | 1666 | } |
1641 | #endif | 1667 | #endif |
1642 | 1668 | ||
@@ -1645,7 +1671,7 @@ struct rt6_info *rt6_get_dflt_router(struct in6_addr *addr, struct net_device *d | |||
1645 | struct rt6_info *rt; | 1671 | struct rt6_info *rt; |
1646 | struct fib6_table *table; | 1672 | struct fib6_table *table; |
1647 | 1673 | ||
1648 | table = fib6_get_table(RT6_TABLE_DFLT); | 1674 | table = fib6_get_table(dev->nd_net, RT6_TABLE_DFLT); |
1649 | if (table == NULL) | 1675 | if (table == NULL) |
1650 | return NULL; | 1676 | return NULL; |
1651 | 1677 | ||
@@ -1674,6 +1700,9 @@ struct rt6_info *rt6_add_dflt_router(struct in6_addr *gwaddr, | |||
1674 | .fc_ifindex = dev->ifindex, | 1700 | .fc_ifindex = dev->ifindex, |
1675 | .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT | | 1701 | .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT | |
1676 | RTF_UP | RTF_EXPIRES | RTF_PREF(pref), | 1702 | RTF_UP | RTF_EXPIRES | RTF_PREF(pref), |
1703 | .fc_nlinfo.pid = 0, | ||
1704 | .fc_nlinfo.nlh = NULL, | ||
1705 | .fc_nlinfo.nl_net = dev->nd_net, | ||
1677 | }; | 1706 | }; |
1678 | 1707 | ||
1679 | ipv6_addr_copy(&cfg.fc_gateway, gwaddr); | 1708 | ipv6_addr_copy(&cfg.fc_gateway, gwaddr); |
@@ -1683,13 +1712,13 @@ struct rt6_info *rt6_add_dflt_router(struct in6_addr *gwaddr, | |||
1683 | return rt6_get_dflt_router(gwaddr, dev); | 1712 | return rt6_get_dflt_router(gwaddr, dev); |
1684 | } | 1713 | } |
1685 | 1714 | ||
1686 | void rt6_purge_dflt_routers(void) | 1715 | void rt6_purge_dflt_routers(struct net *net) |
1687 | { | 1716 | { |
1688 | struct rt6_info *rt; | 1717 | struct rt6_info *rt; |
1689 | struct fib6_table *table; | 1718 | struct fib6_table *table; |
1690 | 1719 | ||
1691 | /* NOTE: Keep consistent with rt6_get_dflt_router */ | 1720 | /* NOTE: Keep consistent with rt6_get_dflt_router */ |
1692 | table = fib6_get_table(RT6_TABLE_DFLT); | 1721 | table = fib6_get_table(net, RT6_TABLE_DFLT); |
1693 | if (table == NULL) | 1722 | if (table == NULL) |
1694 | return; | 1723 | return; |
1695 | 1724 | ||
@@ -1706,7 +1735,8 @@ restart: | |||
1706 | read_unlock_bh(&table->tb6_lock); | 1735 | read_unlock_bh(&table->tb6_lock); |
1707 | } | 1736 | } |
1708 | 1737 | ||
1709 | static void rtmsg_to_fib6_config(struct in6_rtmsg *rtmsg, | 1738 | static void rtmsg_to_fib6_config(struct net *net, |
1739 | struct in6_rtmsg *rtmsg, | ||
1710 | struct fib6_config *cfg) | 1740 | struct fib6_config *cfg) |
1711 | { | 1741 | { |
1712 | memset(cfg, 0, sizeof(*cfg)); | 1742 | memset(cfg, 0, sizeof(*cfg)); |
@@ -1719,14 +1749,14 @@ static void rtmsg_to_fib6_config(struct in6_rtmsg *rtmsg, | |||
1719 | cfg->fc_src_len = rtmsg->rtmsg_src_len; | 1749 | cfg->fc_src_len = rtmsg->rtmsg_src_len; |
1720 | cfg->fc_flags = rtmsg->rtmsg_flags; | 1750 | cfg->fc_flags = rtmsg->rtmsg_flags; |
1721 | 1751 | ||
1722 | cfg->fc_nlinfo.nl_net = &init_net; | 1752 | cfg->fc_nlinfo.nl_net = net; |
1723 | 1753 | ||
1724 | ipv6_addr_copy(&cfg->fc_dst, &rtmsg->rtmsg_dst); | 1754 | ipv6_addr_copy(&cfg->fc_dst, &rtmsg->rtmsg_dst); |
1725 | ipv6_addr_copy(&cfg->fc_src, &rtmsg->rtmsg_src); | 1755 | ipv6_addr_copy(&cfg->fc_src, &rtmsg->rtmsg_src); |
1726 | ipv6_addr_copy(&cfg->fc_gateway, &rtmsg->rtmsg_gateway); | 1756 | ipv6_addr_copy(&cfg->fc_gateway, &rtmsg->rtmsg_gateway); |
1727 | } | 1757 | } |
1728 | 1758 | ||
1729 | int ipv6_route_ioctl(unsigned int cmd, void __user *arg) | 1759 | int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg) |
1730 | { | 1760 | { |
1731 | struct fib6_config cfg; | 1761 | struct fib6_config cfg; |
1732 | struct in6_rtmsg rtmsg; | 1762 | struct in6_rtmsg rtmsg; |
@@ -1742,7 +1772,7 @@ int ipv6_route_ioctl(unsigned int cmd, void __user *arg) | |||
1742 | if (err) | 1772 | if (err) |
1743 | return -EFAULT; | 1773 | return -EFAULT; |
1744 | 1774 | ||
1745 | rtmsg_to_fib6_config(&rtmsg, &cfg); | 1775 | rtmsg_to_fib6_config(net, &rtmsg, &cfg); |
1746 | 1776 | ||
1747 | rtnl_lock(); | 1777 | rtnl_lock(); |
1748 | switch (cmd) { | 1778 | switch (cmd) { |
@@ -1821,21 +1851,22 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev, | |||
1821 | const struct in6_addr *addr, | 1851 | const struct in6_addr *addr, |
1822 | int anycast) | 1852 | int anycast) |
1823 | { | 1853 | { |
1824 | struct rt6_info *rt = ip6_dst_alloc(); | 1854 | struct net *net = idev->dev->nd_net; |
1855 | struct rt6_info *rt = ip6_dst_alloc(net->ipv6.ip6_dst_ops); | ||
1825 | 1856 | ||
1826 | if (rt == NULL) | 1857 | if (rt == NULL) |
1827 | return ERR_PTR(-ENOMEM); | 1858 | return ERR_PTR(-ENOMEM); |
1828 | 1859 | ||
1829 | dev_hold(init_net.loopback_dev); | 1860 | dev_hold(net->loopback_dev); |
1830 | in6_dev_hold(idev); | 1861 | in6_dev_hold(idev); |
1831 | 1862 | ||
1832 | rt->u.dst.flags = DST_HOST; | 1863 | rt->u.dst.flags = DST_HOST; |
1833 | rt->u.dst.input = ip6_input; | 1864 | rt->u.dst.input = ip6_input; |
1834 | rt->u.dst.output = ip6_output; | 1865 | rt->u.dst.output = ip6_output; |
1835 | rt->rt6i_dev = init_net.loopback_dev; | 1866 | rt->rt6i_dev = net->loopback_dev; |
1836 | rt->rt6i_idev = idev; | 1867 | rt->rt6i_idev = idev; |
1837 | rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev); | 1868 | rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev); |
1838 | rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dst_mtu(&rt->u.dst)); | 1869 | rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, dst_mtu(&rt->u.dst)); |
1839 | rt->u.dst.metrics[RTAX_HOPLIMIT-1] = -1; | 1870 | rt->u.dst.metrics[RTAX_HOPLIMIT-1] = -1; |
1840 | rt->u.dst.obsolete = -1; | 1871 | rt->u.dst.obsolete = -1; |
1841 | 1872 | ||
@@ -1852,26 +1883,39 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev, | |||
1852 | 1883 | ||
1853 | ipv6_addr_copy(&rt->rt6i_dst.addr, addr); | 1884 | ipv6_addr_copy(&rt->rt6i_dst.addr, addr); |
1854 | rt->rt6i_dst.plen = 128; | 1885 | rt->rt6i_dst.plen = 128; |
1855 | rt->rt6i_table = fib6_get_table(RT6_TABLE_LOCAL); | 1886 | rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL); |
1856 | 1887 | ||
1857 | atomic_set(&rt->u.dst.__refcnt, 1); | 1888 | atomic_set(&rt->u.dst.__refcnt, 1); |
1858 | 1889 | ||
1859 | return rt; | 1890 | return rt; |
1860 | } | 1891 | } |
1861 | 1892 | ||
1893 | struct arg_dev_net { | ||
1894 | struct net_device *dev; | ||
1895 | struct net *net; | ||
1896 | }; | ||
1897 | |||
1862 | static int fib6_ifdown(struct rt6_info *rt, void *arg) | 1898 | static int fib6_ifdown(struct rt6_info *rt, void *arg) |
1863 | { | 1899 | { |
1864 | if (((void*)rt->rt6i_dev == arg || arg == NULL) && | 1900 | struct net_device *dev = ((struct arg_dev_net *)arg)->dev; |
1865 | rt != &ip6_null_entry) { | 1901 | struct net *net = ((struct arg_dev_net *)arg)->net; |
1902 | |||
1903 | if (((void *)rt->rt6i_dev == dev || dev == NULL) && | ||
1904 | rt != net->ipv6.ip6_null_entry) { | ||
1866 | RT6_TRACE("deleted by ifdown %p\n", rt); | 1905 | RT6_TRACE("deleted by ifdown %p\n", rt); |
1867 | return -1; | 1906 | return -1; |
1868 | } | 1907 | } |
1869 | return 0; | 1908 | return 0; |
1870 | } | 1909 | } |
1871 | 1910 | ||
1872 | void rt6_ifdown(struct net_device *dev) | 1911 | void rt6_ifdown(struct net *net, struct net_device *dev) |
1873 | { | 1912 | { |
1874 | fib6_clean_all(fib6_ifdown, 0, dev); | 1913 | struct arg_dev_net adn = { |
1914 | .dev = dev, | ||
1915 | .net = net, | ||
1916 | }; | ||
1917 | |||
1918 | fib6_clean_all(net, fib6_ifdown, 0, &adn); | ||
1875 | } | 1919 | } |
1876 | 1920 | ||
1877 | struct rt6_mtu_change_arg | 1921 | struct rt6_mtu_change_arg |
@@ -1884,6 +1928,7 @@ static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg) | |||
1884 | { | 1928 | { |
1885 | struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg; | 1929 | struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg; |
1886 | struct inet6_dev *idev; | 1930 | struct inet6_dev *idev; |
1931 | struct net *net = arg->dev->nd_net; | ||
1887 | 1932 | ||
1888 | /* In IPv6 pmtu discovery is not optional, | 1933 | /* In IPv6 pmtu discovery is not optional, |
1889 | so that RTAX_MTU lock cannot disable it. | 1934 | so that RTAX_MTU lock cannot disable it. |
@@ -1915,7 +1960,7 @@ static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg) | |||
1915 | (dst_mtu(&rt->u.dst) < arg->mtu && | 1960 | (dst_mtu(&rt->u.dst) < arg->mtu && |
1916 | dst_mtu(&rt->u.dst) == idev->cnf.mtu6))) { | 1961 | dst_mtu(&rt->u.dst) == idev->cnf.mtu6))) { |
1917 | rt->u.dst.metrics[RTAX_MTU-1] = arg->mtu; | 1962 | rt->u.dst.metrics[RTAX_MTU-1] = arg->mtu; |
1918 | rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(arg->mtu); | 1963 | rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, arg->mtu); |
1919 | } | 1964 | } |
1920 | return 0; | 1965 | return 0; |
1921 | } | 1966 | } |
@@ -1927,7 +1972,7 @@ void rt6_mtu_change(struct net_device *dev, unsigned mtu) | |||
1927 | .mtu = mtu, | 1972 | .mtu = mtu, |
1928 | }; | 1973 | }; |
1929 | 1974 | ||
1930 | fib6_clean_all(rt6_mtu_change_route, 0, &arg); | 1975 | fib6_clean_all(dev->nd_net, rt6_mtu_change_route, 0, &arg); |
1931 | } | 1976 | } |
1932 | 1977 | ||
1933 | static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = { | 1978 | static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = { |
@@ -2010,13 +2055,9 @@ errout: | |||
2010 | 2055 | ||
2011 | static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) | 2056 | static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) |
2012 | { | 2057 | { |
2013 | struct net *net = skb->sk->sk_net; | ||
2014 | struct fib6_config cfg; | 2058 | struct fib6_config cfg; |
2015 | int err; | 2059 | int err; |
2016 | 2060 | ||
2017 | if (net != &init_net) | ||
2018 | return -EINVAL; | ||
2019 | |||
2020 | err = rtm_to_fib6_config(skb, nlh, &cfg); | 2061 | err = rtm_to_fib6_config(skb, nlh, &cfg); |
2021 | if (err < 0) | 2062 | if (err < 0) |
2022 | return err; | 2063 | return err; |
@@ -2026,13 +2067,9 @@ static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *a | |||
2026 | 2067 | ||
2027 | static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) | 2068 | static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) |
2028 | { | 2069 | { |
2029 | struct net *net = skb->sk->sk_net; | ||
2030 | struct fib6_config cfg; | 2070 | struct fib6_config cfg; |
2031 | int err; | 2071 | int err; |
2032 | 2072 | ||
2033 | if (net != &init_net) | ||
2034 | return -EINVAL; | ||
2035 | |||
2036 | err = rtm_to_fib6_config(skb, nlh, &cfg); | 2073 | err = rtm_to_fib6_config(skb, nlh, &cfg); |
2037 | if (err < 0) | 2074 | if (err < 0) |
2038 | return err; | 2075 | return err; |
@@ -2122,7 +2159,8 @@ static int rt6_fill_node(struct sk_buff *skb, struct rt6_info *rt, | |||
2122 | NLA_PUT_U32(skb, RTA_IIF, iif); | 2159 | NLA_PUT_U32(skb, RTA_IIF, iif); |
2123 | else if (dst) { | 2160 | else if (dst) { |
2124 | struct in6_addr saddr_buf; | 2161 | struct in6_addr saddr_buf; |
2125 | if (ipv6_get_saddr(&rt->u.dst, dst, &saddr_buf) == 0) | 2162 | if (ipv6_dev_get_saddr(ip6_dst_idev(&rt->u.dst)->dev, |
2163 | dst, &saddr_buf) == 0) | ||
2126 | NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf); | 2164 | NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf); |
2127 | } | 2165 | } |
2128 | 2166 | ||
@@ -2175,9 +2213,6 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void | |||
2175 | struct flowi fl; | 2213 | struct flowi fl; |
2176 | int err, iif = 0; | 2214 | int err, iif = 0; |
2177 | 2215 | ||
2178 | if (net != &init_net) | ||
2179 | return -EINVAL; | ||
2180 | |||
2181 | err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy); | 2216 | err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy); |
2182 | if (err < 0) | 2217 | if (err < 0) |
2183 | goto errout; | 2218 | goto errout; |
@@ -2207,7 +2242,7 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void | |||
2207 | 2242 | ||
2208 | if (iif) { | 2243 | if (iif) { |
2209 | struct net_device *dev; | 2244 | struct net_device *dev; |
2210 | dev = __dev_get_by_index(&init_net, iif); | 2245 | dev = __dev_get_by_index(net, iif); |
2211 | if (!dev) { | 2246 | if (!dev) { |
2212 | err = -ENODEV; | 2247 | err = -ENODEV; |
2213 | goto errout; | 2248 | goto errout; |
@@ -2226,7 +2261,7 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void | |||
2226 | skb_reset_mac_header(skb); | 2261 | skb_reset_mac_header(skb); |
2227 | skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr)); | 2262 | skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr)); |
2228 | 2263 | ||
2229 | rt = (struct rt6_info*) ip6_route_output(NULL, &fl); | 2264 | rt = (struct rt6_info*) ip6_route_output(net, NULL, &fl); |
2230 | skb->dst = &rt->u.dst; | 2265 | skb->dst = &rt->u.dst; |
2231 | 2266 | ||
2232 | err = rt6_fill_node(skb, rt, &fl.fl6_dst, &fl.fl6_src, iif, | 2267 | err = rt6_fill_node(skb, rt, &fl.fl6_dst, &fl.fl6_src, iif, |
@@ -2237,7 +2272,7 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void | |||
2237 | goto errout; | 2272 | goto errout; |
2238 | } | 2273 | } |
2239 | 2274 | ||
2240 | err = rtnl_unicast(skb, &init_net, NETLINK_CB(in_skb).pid); | 2275 | err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid); |
2241 | errout: | 2276 | errout: |
2242 | return err; | 2277 | return err; |
2243 | } | 2278 | } |
@@ -2245,6 +2280,7 @@ errout: | |||
2245 | void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info) | 2280 | void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info) |
2246 | { | 2281 | { |
2247 | struct sk_buff *skb; | 2282 | struct sk_buff *skb; |
2283 | struct net *net = info->nl_net; | ||
2248 | u32 seq; | 2284 | u32 seq; |
2249 | int err; | 2285 | int err; |
2250 | 2286 | ||
@@ -2263,11 +2299,31 @@ void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info) | |||
2263 | kfree_skb(skb); | 2299 | kfree_skb(skb); |
2264 | goto errout; | 2300 | goto errout; |
2265 | } | 2301 | } |
2266 | err = rtnl_notify(skb, &init_net, info->pid, | 2302 | err = rtnl_notify(skb, net, info->pid, RTNLGRP_IPV6_ROUTE, |
2267 | RTNLGRP_IPV6_ROUTE, info->nlh, gfp_any()); | 2303 | info->nlh, gfp_any()); |
2268 | errout: | 2304 | errout: |
2269 | if (err < 0) | 2305 | if (err < 0) |
2270 | rtnl_set_sk_err(&init_net, RTNLGRP_IPV6_ROUTE, err); | 2306 | rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err); |
2307 | } | ||
2308 | |||
2309 | static int ip6_route_dev_notify(struct notifier_block *this, | ||
2310 | unsigned long event, void *data) | ||
2311 | { | ||
2312 | struct net_device *dev = (struct net_device *)data; | ||
2313 | struct net *net = dev->nd_net; | ||
2314 | |||
2315 | if (event == NETDEV_REGISTER && (dev->flags & IFF_LOOPBACK)) { | ||
2316 | net->ipv6.ip6_null_entry->u.dst.dev = dev; | ||
2317 | net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev); | ||
2318 | #ifdef CONFIG_IPV6_MULTIPLE_TABLES | ||
2319 | net->ipv6.ip6_prohibit_entry->u.dst.dev = dev; | ||
2320 | net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev); | ||
2321 | net->ipv6.ip6_blk_hole_entry->u.dst.dev = dev; | ||
2322 | net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev); | ||
2323 | #endif | ||
2324 | } | ||
2325 | |||
2326 | return NOTIFY_OK; | ||
2271 | } | 2327 | } |
2272 | 2328 | ||
2273 | /* | 2329 | /* |
@@ -2316,13 +2372,25 @@ static int rt6_info_route(struct rt6_info *rt, void *p_arg) | |||
2316 | 2372 | ||
2317 | static int ipv6_route_show(struct seq_file *m, void *v) | 2373 | static int ipv6_route_show(struct seq_file *m, void *v) |
2318 | { | 2374 | { |
2319 | fib6_clean_all(rt6_info_route, 0, m); | 2375 | struct net *net = (struct net *)m->private; |
2376 | fib6_clean_all(net, rt6_info_route, 0, m); | ||
2320 | return 0; | 2377 | return 0; |
2321 | } | 2378 | } |
2322 | 2379 | ||
2323 | static int ipv6_route_open(struct inode *inode, struct file *file) | 2380 | static int ipv6_route_open(struct inode *inode, struct file *file) |
2324 | { | 2381 | { |
2325 | return single_open(file, ipv6_route_show, NULL); | 2382 | struct net *net = get_proc_net(inode); |
2383 | if (!net) | ||
2384 | return -ENXIO; | ||
2385 | return single_open(file, ipv6_route_show, net); | ||
2386 | } | ||
2387 | |||
2388 | static int ipv6_route_release(struct inode *inode, struct file *file) | ||
2389 | { | ||
2390 | struct seq_file *seq = file->private_data; | ||
2391 | struct net *net = seq->private; | ||
2392 | put_net(net); | ||
2393 | return single_release(inode, file); | ||
2326 | } | 2394 | } |
2327 | 2395 | ||
2328 | static const struct file_operations ipv6_route_proc_fops = { | 2396 | static const struct file_operations ipv6_route_proc_fops = { |
@@ -2330,24 +2398,36 @@ static const struct file_operations ipv6_route_proc_fops = { | |||
2330 | .open = ipv6_route_open, | 2398 | .open = ipv6_route_open, |
2331 | .read = seq_read, | 2399 | .read = seq_read, |
2332 | .llseek = seq_lseek, | 2400 | .llseek = seq_lseek, |
2333 | .release = single_release, | 2401 | .release = ipv6_route_release, |
2334 | }; | 2402 | }; |
2335 | 2403 | ||
2336 | static int rt6_stats_seq_show(struct seq_file *seq, void *v) | 2404 | static int rt6_stats_seq_show(struct seq_file *seq, void *v) |
2337 | { | 2405 | { |
2406 | struct net *net = (struct net *)seq->private; | ||
2338 | seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n", | 2407 | seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n", |
2339 | rt6_stats.fib_nodes, rt6_stats.fib_route_nodes, | 2408 | net->ipv6.rt6_stats->fib_nodes, |
2340 | rt6_stats.fib_rt_alloc, rt6_stats.fib_rt_entries, | 2409 | net->ipv6.rt6_stats->fib_route_nodes, |
2341 | rt6_stats.fib_rt_cache, | 2410 | net->ipv6.rt6_stats->fib_rt_alloc, |
2342 | atomic_read(&ip6_dst_ops.entries), | 2411 | net->ipv6.rt6_stats->fib_rt_entries, |
2343 | rt6_stats.fib_discarded_routes); | 2412 | net->ipv6.rt6_stats->fib_rt_cache, |
2413 | atomic_read(&net->ipv6.ip6_dst_ops->entries), | ||
2414 | net->ipv6.rt6_stats->fib_discarded_routes); | ||
2344 | 2415 | ||
2345 | return 0; | 2416 | return 0; |
2346 | } | 2417 | } |
2347 | 2418 | ||
2348 | static int rt6_stats_seq_open(struct inode *inode, struct file *file) | 2419 | static int rt6_stats_seq_open(struct inode *inode, struct file *file) |
2349 | { | 2420 | { |
2350 | return single_open(file, rt6_stats_seq_show, NULL); | 2421 | struct net *net = get_proc_net(inode); |
2422 | return single_open(file, rt6_stats_seq_show, net); | ||
2423 | } | ||
2424 | |||
2425 | static int rt6_stats_seq_release(struct inode *inode, struct file *file) | ||
2426 | { | ||
2427 | struct seq_file *seq = file->private_data; | ||
2428 | struct net *net = (struct net *)seq->private; | ||
2429 | put_net(net); | ||
2430 | return single_release(inode, file); | ||
2351 | } | 2431 | } |
2352 | 2432 | ||
2353 | static const struct file_operations rt6_stats_seq_fops = { | 2433 | static const struct file_operations rt6_stats_seq_fops = { |
@@ -2355,42 +2435,8 @@ static const struct file_operations rt6_stats_seq_fops = { | |||
2355 | .open = rt6_stats_seq_open, | 2435 | .open = rt6_stats_seq_open, |
2356 | .read = seq_read, | 2436 | .read = seq_read, |
2357 | .llseek = seq_lseek, | 2437 | .llseek = seq_lseek, |
2358 | .release = single_release, | 2438 | .release = rt6_stats_seq_release, |
2359 | }; | 2439 | }; |
2360 | |||
2361 | static int ipv6_route_proc_init(struct net *net) | ||
2362 | { | ||
2363 | int ret = -ENOMEM; | ||
2364 | if (!proc_net_fops_create(net, "ipv6_route", | ||
2365 | 0, &ipv6_route_proc_fops)) | ||
2366 | goto out; | ||
2367 | |||
2368 | if (!proc_net_fops_create(net, "rt6_stats", | ||
2369 | S_IRUGO, &rt6_stats_seq_fops)) | ||
2370 | goto out_ipv6_route; | ||
2371 | |||
2372 | ret = 0; | ||
2373 | out: | ||
2374 | return ret; | ||
2375 | out_ipv6_route: | ||
2376 | proc_net_remove(net, "ipv6_route"); | ||
2377 | goto out; | ||
2378 | } | ||
2379 | |||
2380 | static void ipv6_route_proc_fini(struct net *net) | ||
2381 | { | ||
2382 | proc_net_remove(net, "ipv6_route"); | ||
2383 | proc_net_remove(net, "rt6_stats"); | ||
2384 | } | ||
2385 | #else | ||
2386 | static inline int ipv6_route_proc_init(struct net *net) | ||
2387 | { | ||
2388 | return 0; | ||
2389 | } | ||
2390 | static inline void ipv6_route_proc_fini(struct net *net) | ||
2391 | { | ||
2392 | return ; | ||
2393 | } | ||
2394 | #endif /* CONFIG_PROC_FS */ | 2440 | #endif /* CONFIG_PROC_FS */ |
2395 | 2441 | ||
2396 | #ifdef CONFIG_SYSCTL | 2442 | #ifdef CONFIG_SYSCTL |
@@ -2399,10 +2445,11 @@ static | |||
2399 | int ipv6_sysctl_rtcache_flush(ctl_table *ctl, int write, struct file * filp, | 2445 | int ipv6_sysctl_rtcache_flush(ctl_table *ctl, int write, struct file * filp, |
2400 | void __user *buffer, size_t *lenp, loff_t *ppos) | 2446 | void __user *buffer, size_t *lenp, loff_t *ppos) |
2401 | { | 2447 | { |
2402 | int delay = init_net.ipv6.sysctl.flush_delay; | 2448 | struct net *net = current->nsproxy->net_ns; |
2449 | int delay = net->ipv6.sysctl.flush_delay; | ||
2403 | if (write) { | 2450 | if (write) { |
2404 | proc_dointvec(ctl, write, filp, buffer, lenp, ppos); | 2451 | proc_dointvec(ctl, write, filp, buffer, lenp, ppos); |
2405 | fib6_run_gc(delay <= 0 ? ~0UL : (unsigned long)delay); | 2452 | fib6_run_gc(delay <= 0 ? ~0UL : (unsigned long)delay, net); |
2406 | return 0; | 2453 | return 0; |
2407 | } else | 2454 | } else |
2408 | return -EINVAL; | 2455 | return -EINVAL; |
@@ -2419,7 +2466,7 @@ ctl_table ipv6_route_table_template[] = { | |||
2419 | { | 2466 | { |
2420 | .ctl_name = NET_IPV6_ROUTE_GC_THRESH, | 2467 | .ctl_name = NET_IPV6_ROUTE_GC_THRESH, |
2421 | .procname = "gc_thresh", | 2468 | .procname = "gc_thresh", |
2422 | .data = &ip6_dst_ops.gc_thresh, | 2469 | .data = &ip6_dst_ops_template.gc_thresh, |
2423 | .maxlen = sizeof(int), | 2470 | .maxlen = sizeof(int), |
2424 | .mode = 0644, | 2471 | .mode = 0644, |
2425 | .proc_handler = &proc_dointvec, | 2472 | .proc_handler = &proc_dointvec, |
@@ -2505,33 +2552,141 @@ struct ctl_table *ipv6_route_sysctl_init(struct net *net) | |||
2505 | table = kmemdup(ipv6_route_table_template, | 2552 | table = kmemdup(ipv6_route_table_template, |
2506 | sizeof(ipv6_route_table_template), | 2553 | sizeof(ipv6_route_table_template), |
2507 | GFP_KERNEL); | 2554 | GFP_KERNEL); |
2555 | |||
2556 | if (table) { | ||
2557 | table[0].data = &net->ipv6.sysctl.flush_delay; | ||
2558 | table[1].data = &net->ipv6.ip6_dst_ops->gc_thresh; | ||
2559 | table[2].data = &net->ipv6.sysctl.ip6_rt_max_size; | ||
2560 | table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval; | ||
2561 | table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout; | ||
2562 | table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval; | ||
2563 | table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity; | ||
2564 | table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires; | ||
2565 | table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss; | ||
2566 | } | ||
2567 | |||
2508 | return table; | 2568 | return table; |
2509 | } | 2569 | } |
2510 | #endif | 2570 | #endif |
2511 | 2571 | ||
2572 | static int ip6_route_net_init(struct net *net) | ||
2573 | { | ||
2574 | int ret = 0; | ||
2575 | |||
2576 | ret = -ENOMEM; | ||
2577 | net->ipv6.ip6_dst_ops = kmemdup(&ip6_dst_ops_template, | ||
2578 | sizeof(*net->ipv6.ip6_dst_ops), | ||
2579 | GFP_KERNEL); | ||
2580 | if (!net->ipv6.ip6_dst_ops) | ||
2581 | goto out; | ||
2582 | net->ipv6.ip6_dst_ops->dst_net = net; | ||
2583 | |||
2584 | net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template, | ||
2585 | sizeof(*net->ipv6.ip6_null_entry), | ||
2586 | GFP_KERNEL); | ||
2587 | if (!net->ipv6.ip6_null_entry) | ||
2588 | goto out_ip6_dst_ops; | ||
2589 | net->ipv6.ip6_null_entry->u.dst.path = | ||
2590 | (struct dst_entry *)net->ipv6.ip6_null_entry; | ||
2591 | net->ipv6.ip6_null_entry->u.dst.ops = net->ipv6.ip6_dst_ops; | ||
2592 | |||
2593 | #ifdef CONFIG_IPV6_MULTIPLE_TABLES | ||
2594 | net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template, | ||
2595 | sizeof(*net->ipv6.ip6_prohibit_entry), | ||
2596 | GFP_KERNEL); | ||
2597 | if (!net->ipv6.ip6_prohibit_entry) { | ||
2598 | kfree(net->ipv6.ip6_null_entry); | ||
2599 | goto out; | ||
2600 | } | ||
2601 | net->ipv6.ip6_prohibit_entry->u.dst.path = | ||
2602 | (struct dst_entry *)net->ipv6.ip6_prohibit_entry; | ||
2603 | net->ipv6.ip6_prohibit_entry->u.dst.ops = net->ipv6.ip6_dst_ops; | ||
2604 | |||
2605 | net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template, | ||
2606 | sizeof(*net->ipv6.ip6_blk_hole_entry), | ||
2607 | GFP_KERNEL); | ||
2608 | if (!net->ipv6.ip6_blk_hole_entry) { | ||
2609 | kfree(net->ipv6.ip6_null_entry); | ||
2610 | kfree(net->ipv6.ip6_prohibit_entry); | ||
2611 | goto out; | ||
2612 | } | ||
2613 | net->ipv6.ip6_blk_hole_entry->u.dst.path = | ||
2614 | (struct dst_entry *)net->ipv6.ip6_blk_hole_entry; | ||
2615 | net->ipv6.ip6_blk_hole_entry->u.dst.ops = net->ipv6.ip6_dst_ops; | ||
2616 | #endif | ||
2617 | |||
2618 | #ifdef CONFIG_PROC_FS | ||
2619 | proc_net_fops_create(net, "ipv6_route", 0, &ipv6_route_proc_fops); | ||
2620 | proc_net_fops_create(net, "rt6_stats", S_IRUGO, &rt6_stats_seq_fops); | ||
2621 | #endif | ||
2622 | net->ipv6.ip6_rt_gc_expire = 30*HZ; | ||
2623 | |||
2624 | ret = 0; | ||
2625 | out: | ||
2626 | return ret; | ||
2627 | |||
2628 | out_ip6_dst_ops: | ||
2629 | kfree(net->ipv6.ip6_dst_ops); | ||
2630 | goto out; | ||
2631 | } | ||
2632 | |||
2633 | static void ip6_route_net_exit(struct net *net) | ||
2634 | { | ||
2635 | #ifdef CONFIG_PROC_FS | ||
2636 | proc_net_remove(net, "ipv6_route"); | ||
2637 | proc_net_remove(net, "rt6_stats"); | ||
2638 | #endif | ||
2639 | kfree(net->ipv6.ip6_null_entry); | ||
2640 | #ifdef CONFIG_IPV6_MULTIPLE_TABLES | ||
2641 | kfree(net->ipv6.ip6_prohibit_entry); | ||
2642 | kfree(net->ipv6.ip6_blk_hole_entry); | ||
2643 | #endif | ||
2644 | kfree(net->ipv6.ip6_dst_ops); | ||
2645 | } | ||
2646 | |||
2647 | static struct pernet_operations ip6_route_net_ops = { | ||
2648 | .init = ip6_route_net_init, | ||
2649 | .exit = ip6_route_net_exit, | ||
2650 | }; | ||
2651 | |||
2652 | static struct notifier_block ip6_route_dev_notifier = { | ||
2653 | .notifier_call = ip6_route_dev_notify, | ||
2654 | .priority = 0, | ||
2655 | }; | ||
2656 | |||
2512 | int __init ip6_route_init(void) | 2657 | int __init ip6_route_init(void) |
2513 | { | 2658 | { |
2514 | int ret; | 2659 | int ret; |
2515 | 2660 | ||
2516 | ip6_dst_ops.kmem_cachep = | 2661 | ret = -ENOMEM; |
2662 | ip6_dst_ops_template.kmem_cachep = | ||
2517 | kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0, | 2663 | kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0, |
2518 | SLAB_HWCACHE_ALIGN, NULL); | 2664 | SLAB_HWCACHE_ALIGN, NULL); |
2519 | if (!ip6_dst_ops.kmem_cachep) | 2665 | if (!ip6_dst_ops_template.kmem_cachep) |
2520 | return -ENOMEM; | 2666 | goto out;; |
2521 | 2667 | ||
2522 | ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops.kmem_cachep; | 2668 | ret = register_pernet_subsys(&ip6_route_net_ops); |
2523 | |||
2524 | ret = fib6_init(); | ||
2525 | if (ret) | 2669 | if (ret) |
2526 | goto out_kmem_cache; | 2670 | goto out_kmem_cache; |
2527 | 2671 | ||
2528 | ret = ipv6_route_proc_init(&init_net); | 2672 | /* The loopback device is registered before this code runs, so the |
2673 | * loopback reference in rt6_info is not taken automatically; do it | ||
2674 | * manually for init_net. */ | ||
2675 | init_net.ipv6.ip6_null_entry->u.dst.dev = init_net.loopback_dev; | ||
2676 | init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev); | ||
2677 | #ifdef CONFIG_IPV6_MULTIPLE_TABLES | ||
2678 | init_net.ipv6.ip6_prohibit_entry->u.dst.dev = init_net.loopback_dev; | ||
2679 | init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev); | ||
2680 | init_net.ipv6.ip6_blk_hole_entry->u.dst.dev = init_net.loopback_dev; | ||
2681 | init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev); | ||
2682 | #endif | ||
2683 | ret = fib6_init(); | ||
2529 | if (ret) | 2684 | if (ret) |
2530 | goto out_fib6_init; | 2685 | goto out_register_subsys; |
2531 | 2686 | ||
2532 | ret = xfrm6_init(); | 2687 | ret = xfrm6_init(); |
2533 | if (ret) | 2688 | if (ret) |
2534 | goto out_proc_init; | 2689 | goto out_fib6_init; |
2535 | 2690 | ||
2536 | ret = fib6_rules_init(); | 2691 | ret = fib6_rules_init(); |
2537 | if (ret) | 2692 | if (ret) |
@@ -2543,7 +2698,10 @@ int __init ip6_route_init(void) | |||
2543 | __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL)) | 2698 | __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL)) |
2544 | goto fib6_rules_init; | 2699 | goto fib6_rules_init; |
2545 | 2700 | ||
2546 | ret = 0; | 2701 | ret = register_netdevice_notifier(&ip6_route_dev_notifier); |
2702 | if (ret) | ||
2703 | goto fib6_rules_init; | ||
2704 | |||
2547 | out: | 2705 | out: |
2548 | return ret; | 2706 | return ret; |
2549 | 2707 | ||
@@ -2551,22 +2709,21 @@ fib6_rules_init: | |||
2551 | fib6_rules_cleanup(); | 2709 | fib6_rules_cleanup(); |
2552 | xfrm6_init: | 2710 | xfrm6_init: |
2553 | xfrm6_fini(); | 2711 | xfrm6_fini(); |
2554 | out_proc_init: | ||
2555 | ipv6_route_proc_fini(&init_net); | ||
2556 | out_fib6_init: | 2712 | out_fib6_init: |
2557 | rt6_ifdown(NULL); | ||
2558 | fib6_gc_cleanup(); | 2713 | fib6_gc_cleanup(); |
2714 | out_register_subsys: | ||
2715 | unregister_pernet_subsys(&ip6_route_net_ops); | ||
2559 | out_kmem_cache: | 2716 | out_kmem_cache: |
2560 | kmem_cache_destroy(ip6_dst_ops.kmem_cachep); | 2717 | kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep); |
2561 | goto out; | 2718 | goto out; |
2562 | } | 2719 | } |
2563 | 2720 | ||
2564 | void ip6_route_cleanup(void) | 2721 | void ip6_route_cleanup(void) |
2565 | { | 2722 | { |
2723 | unregister_netdevice_notifier(&ip6_route_dev_notifier); | ||
2566 | fib6_rules_cleanup(); | 2724 | fib6_rules_cleanup(); |
2567 | ipv6_route_proc_fini(&init_net); | ||
2568 | xfrm6_fini(); | 2725 | xfrm6_fini(); |
2569 | rt6_ifdown(NULL); | ||
2570 | fib6_gc_cleanup(); | 2726 | fib6_gc_cleanup(); |
2571 | kmem_cache_destroy(ip6_dst_ops.kmem_cachep); | 2727 | unregister_pernet_subsys(&ip6_route_net_ops); |
2728 | kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep); | ||
2572 | } | 2729 | } |
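
The route.c changes above all follow one pattern: state that used to be a single global (ip6_dst_ops, the null/prohibit/blackhole routes, the proc entries) is now duplicated from a boot-time template into every struct net by a pernet init/exit pair. As a rough sketch of that pattern only, the fragment below does the same for a hypothetical "foo" subsystem; struct foo_state, its field and the static pointer are invented for illustration, and a real user would store the pointer in struct net the way route.c stores net->ipv6.ip6_dst_ops.

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <net/net_namespace.h>

struct foo_state {
	int gc_thresh;				/* example tunable */
};

static const struct foo_state foo_state_template = {
	.gc_thresh = 1024,
};

/* A real subsystem hangs this off struct net; a single static pointer
 * is only enough while one namespace is using this sketch. */
static struct foo_state *foo_state;

static int foo_net_init(struct net *net)
{
	/* duplicate the boot-time template for this namespace */
	foo_state = kmemdup(&foo_state_template, sizeof(*foo_state),
			    GFP_KERNEL);
	if (!foo_state)
		return -ENOMEM;
	return 0;
}

static void foo_net_exit(struct net *net)
{
	kfree(foo_state);
}

static struct pernet_operations foo_net_ops = {
	.init = foo_net_init,
	.exit = foo_net_exit,
};

static int __init foo_init(void)
{
	/* .init runs for every existing and every future namespace */
	return register_pernet_subsys(&foo_net_ops);
}
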
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 1656c003b989..1b8196c8d145 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c | |||
@@ -339,11 +339,11 @@ out: | |||
339 | skb_reset_network_header(skb2); | 339 | skb_reset_network_header(skb2); |
340 | 340 | ||
341 | /* Try to guess incoming interface */ | 341 | /* Try to guess incoming interface */ |
342 | rt6i = rt6_lookup(&iph6->saddr, NULL, NULL, 0); | 342 | rt6i = rt6_lookup(&init_net, &iph6->saddr, NULL, NULL, 0); |
343 | if (rt6i && rt6i->rt6i_dev) { | 343 | if (rt6i && rt6i->rt6i_dev) { |
344 | skb2->dev = rt6i->rt6i_dev; | 344 | skb2->dev = rt6i->rt6i_dev; |
345 | 345 | ||
346 | rt6i = rt6_lookup(&iph6->daddr, &iph6->saddr, NULL, 0); | 346 | rt6i = rt6_lookup(&init_net, &iph6->daddr, &iph6->saddr, NULL, 0); |
347 | 347 | ||
348 | if (rt6i && rt6i->rt6i_dev && rt6i->rt6i_dev->type == ARPHRD_SIT) { | 348 | if (rt6i && rt6i->rt6i_dev && rt6i->rt6i_dev->type == ARPHRD_SIT) { |
349 | struct ip_tunnel *t = netdev_priv(rt6i->rt6i_dev); | 349 | struct ip_tunnel *t = netdev_priv(rt6i->rt6i_dev); |
@@ -393,7 +393,7 @@ isatap_srcok(struct sk_buff *skb, struct iphdr *iph, struct net_device *dev) | |||
393 | fl.oif = dev->ifindex; | 393 | fl.oif = dev->ifindex; |
394 | security_skb_classify_flow(skb, &fl); | 394 | security_skb_classify_flow(skb, &fl); |
395 | 395 | ||
396 | dst = ip6_route_output(NULL, &fl); | 396 | dst = ip6_route_output(&init_net, NULL, &fl); |
397 | if (!dst->error && (dst->dev == dev) && (neigh = dst->neighbour)) { | 397 | if (!dst->error && (dst->dev == dev) && (neigh = dst->neighbour)) { |
398 | 398 | ||
399 | addr6 = (struct in6_addr*)&neigh->primary_key; | 399 | addr6 = (struct in6_addr*)&neigh->primary_key; |
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c new file mode 100644 index 000000000000..827c5aa7524c --- /dev/null +++ b/net/ipv6/syncookies.c | |||
@@ -0,0 +1,267 @@ | |||
1 | /* | ||
2 | * IPv6 Syncookies implementation for the Linux kernel | ||
3 | * | ||
4 | * Authors: | ||
5 | * Glenn Griffin <ggriffin.kernel@gmail.com> | ||
6 | * | ||
7 | * Based on IPv4 implementation by Andi Kleen | ||
8 | * linux/net/ipv4/syncookies.c | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | ||
11 | * modify it under the terms of the GNU General Public License | ||
12 | * as published by the Free Software Foundation; either version | ||
13 | * 2 of the License, or (at your option) any later version. | ||
14 | * | ||
15 | */ | ||
16 | |||
17 | #include <linux/tcp.h> | ||
18 | #include <linux/random.h> | ||
19 | #include <linux/cryptohash.h> | ||
20 | #include <linux/kernel.h> | ||
21 | #include <net/ipv6.h> | ||
22 | #include <net/tcp.h> | ||
23 | |||
24 | extern int sysctl_tcp_syncookies; | ||
25 | extern __u32 syncookie_secret[2][16-3+SHA_DIGEST_WORDS]; | ||
26 | |||
27 | #define COOKIEBITS 24 /* Upper bits store count */ | ||
28 | #define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1) | ||
29 | |||
30 | /* | ||
31 | * This table has to be sorted and terminated with (__u16)-1. | ||
32 | * XXX generate a better table. | ||
33 | * Unresolved Issues: HIPPI with a 64k MSS is not well supported. | ||
34 | * | ||
35 | * Taken directly from ipv4 implementation. | ||
36 | * Should this list be modified for ipv6 use or is it close enough? | ||
37 | * rfc 2460 8.3 suggests mss values 20 bytes less than ipv4 counterpart | ||
38 | */ | ||
39 | static __u16 const msstab[] = { | ||
40 | 64 - 1, | ||
41 | 256 - 1, | ||
42 | 512 - 1, | ||
43 | 536 - 1, | ||
44 | 1024 - 1, | ||
45 | 1440 - 1, | ||
46 | 1460 - 1, | ||
47 | 4312 - 1, | ||
48 | (__u16)-1 | ||
49 | }; | ||
50 | /* The number doesn't include the -1 terminator */ | ||
51 | #define NUM_MSS (ARRAY_SIZE(msstab) - 1) | ||
52 | |||
53 | /* | ||
54 | * This (misnamed) value is the age of syncookie which is permitted. | ||
55 | * Its ideal value should be dependent on TCP_TIMEOUT_INIT and | ||
56 | * sysctl_tcp_retries1. It's a rather complicated formula (exponential | ||
57 | * backoff) to compute at runtime so it's currently hardcoded here. | ||
58 | */ | ||
59 | #define COUNTER_TRIES 4 | ||
60 | |||
61 | static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb, | ||
62 | struct request_sock *req, | ||
63 | struct dst_entry *dst) | ||
64 | { | ||
65 | struct inet_connection_sock *icsk = inet_csk(sk); | ||
66 | struct sock *child; | ||
67 | |||
68 | child = icsk->icsk_af_ops->syn_recv_sock(sk, skb, req, dst); | ||
69 | if (child) | ||
70 | inet_csk_reqsk_queue_add(sk, req, child); | ||
71 | else | ||
72 | reqsk_free(req); | ||
73 | |||
74 | return child; | ||
75 | } | ||
76 | |||
77 | static DEFINE_PER_CPU(__u32, cookie_scratch)[16 + 5 + SHA_WORKSPACE_WORDS]; | ||
78 | |||
79 | static u32 cookie_hash(struct in6_addr *saddr, struct in6_addr *daddr, | ||
80 | __be16 sport, __be16 dport, u32 count, int c) | ||
81 | { | ||
82 | __u32 *tmp = __get_cpu_var(cookie_scratch); | ||
83 | |||
84 | /* | ||
85 | * we have 320 bits of information to hash, copy in the remaining | ||
86 | * 192 bits required for sha_transform, from the syncookie_secret | ||
87 | * and overwrite the digest with the secret | ||
88 | */ | ||
89 | memcpy(tmp + 10, syncookie_secret[c], 44); | ||
90 | memcpy(tmp, saddr, 16); | ||
91 | memcpy(tmp + 4, daddr, 16); | ||
92 | tmp[8] = ((__force u32)sport << 16) + (__force u32)dport; | ||
93 | tmp[9] = count; | ||
94 | sha_transform(tmp + 16, (__u8 *)tmp, tmp + 16 + 5); | ||
95 | |||
96 | return tmp[17]; | ||
97 | } | ||
98 | |||
99 | static __u32 secure_tcp_syn_cookie(struct in6_addr *saddr, struct in6_addr *daddr, | ||
100 | __be16 sport, __be16 dport, __u32 sseq, | ||
101 | __u32 count, __u32 data) | ||
102 | { | ||
103 | return (cookie_hash(saddr, daddr, sport, dport, 0, 0) + | ||
104 | sseq + (count << COOKIEBITS) + | ||
105 | ((cookie_hash(saddr, daddr, sport, dport, count, 1) + data) | ||
106 | & COOKIEMASK)); | ||
107 | } | ||
108 | |||
109 | static __u32 check_tcp_syn_cookie(__u32 cookie, struct in6_addr *saddr, | ||
110 | struct in6_addr *daddr, __be16 sport, | ||
111 | __be16 dport, __u32 sseq, __u32 count, | ||
112 | __u32 maxdiff) | ||
113 | { | ||
114 | __u32 diff; | ||
115 | |||
116 | cookie -= cookie_hash(saddr, daddr, sport, dport, 0, 0) + sseq; | ||
117 | |||
118 | diff = (count - (cookie >> COOKIEBITS)) & ((__u32) -1 >> COOKIEBITS); | ||
119 | if (diff >= maxdiff) | ||
120 | return (__u32)-1; | ||
121 | |||
122 | return (cookie - | ||
123 | cookie_hash(saddr, daddr, sport, dport, count - diff, 1)) | ||
124 | & COOKIEMASK; | ||
125 | } | ||
126 | |||
127 | __u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp) | ||
128 | { | ||
129 | struct ipv6hdr *iph = ipv6_hdr(skb); | ||
130 | const struct tcphdr *th = tcp_hdr(skb); | ||
131 | int mssind; | ||
132 | const __u16 mss = *mssp; | ||
133 | |||
134 | tcp_sk(sk)->last_synq_overflow = jiffies; | ||
135 | |||
136 | for (mssind = 0; mss > msstab[mssind + 1]; mssind++) | ||
137 | ; | ||
138 | *mssp = msstab[mssind] + 1; | ||
139 | |||
140 | NET_INC_STATS_BH(LINUX_MIB_SYNCOOKIESSENT); | ||
141 | |||
142 | return secure_tcp_syn_cookie(&iph->saddr, &iph->daddr, th->source, | ||
143 | th->dest, ntohl(th->seq), | ||
144 | jiffies / (HZ * 60), mssind); | ||
145 | } | ||
146 | |||
147 | static inline int cookie_check(struct sk_buff *skb, __u32 cookie) | ||
148 | { | ||
149 | struct ipv6hdr *iph = ipv6_hdr(skb); | ||
150 | const struct tcphdr *th = tcp_hdr(skb); | ||
151 | __u32 seq = ntohl(th->seq) - 1; | ||
152 | __u32 mssind = check_tcp_syn_cookie(cookie, &iph->saddr, &iph->daddr, | ||
153 | th->source, th->dest, seq, | ||
154 | jiffies / (HZ * 60), COUNTER_TRIES); | ||
155 | |||
156 | return mssind < NUM_MSS ? msstab[mssind] + 1 : 0; | ||
157 | } | ||
158 | |||
159 | struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) | ||
160 | { | ||
161 | struct inet_request_sock *ireq; | ||
162 | struct inet6_request_sock *ireq6; | ||
163 | struct tcp_request_sock *treq; | ||
164 | struct ipv6_pinfo *np = inet6_sk(sk); | ||
165 | struct tcp_sock *tp = tcp_sk(sk); | ||
166 | const struct tcphdr *th = tcp_hdr(skb); | ||
167 | __u32 cookie = ntohl(th->ack_seq) - 1; | ||
168 | struct sock *ret = sk; | ||
169 | struct request_sock *req; | ||
170 | int mss; | ||
171 | struct dst_entry *dst; | ||
172 | __u8 rcv_wscale; | ||
173 | |||
174 | if (!sysctl_tcp_syncookies || !th->ack) | ||
175 | goto out; | ||
176 | |||
177 | if (time_after(jiffies, tp->last_synq_overflow + TCP_TIMEOUT_INIT) || | ||
178 | (mss = cookie_check(skb, cookie)) == 0) { | ||
179 | NET_INC_STATS_BH(LINUX_MIB_SYNCOOKIESFAILED); | ||
180 | goto out; | ||
181 | } | ||
182 | |||
183 | NET_INC_STATS_BH(LINUX_MIB_SYNCOOKIESRECV); | ||
184 | |||
185 | ret = NULL; | ||
186 | req = inet6_reqsk_alloc(&tcp6_request_sock_ops); | ||
187 | if (!req) | ||
188 | goto out; | ||
189 | |||
190 | ireq = inet_rsk(req); | ||
191 | ireq6 = inet6_rsk(req); | ||
192 | treq = tcp_rsk(req); | ||
193 | ireq6->pktopts = NULL; | ||
194 | |||
195 | if (security_inet_conn_request(sk, skb, req)) { | ||
196 | reqsk_free(req); | ||
197 | goto out; | ||
198 | } | ||
199 | |||
200 | req->mss = mss; | ||
201 | ireq->rmt_port = th->source; | ||
202 | ipv6_addr_copy(&ireq6->rmt_addr, &ipv6_hdr(skb)->saddr); | ||
203 | ipv6_addr_copy(&ireq6->loc_addr, &ipv6_hdr(skb)->daddr); | ||
204 | if (ipv6_opt_accepted(sk, skb) || | ||
205 | np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo || | ||
206 | np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) { | ||
207 | atomic_inc(&skb->users); | ||
208 | ireq6->pktopts = skb; | ||
209 | } | ||
210 | |||
211 | ireq6->iif = sk->sk_bound_dev_if; | ||
212 | /* So that link locals have meaning */ | ||
213 | if (!sk->sk_bound_dev_if && | ||
214 | ipv6_addr_type(&ireq6->rmt_addr) & IPV6_ADDR_LINKLOCAL) | ||
215 | ireq6->iif = inet6_iif(skb); | ||
216 | |||
217 | req->expires = 0UL; | ||
218 | req->retrans = 0; | ||
219 | ireq->snd_wscale = ireq->rcv_wscale = ireq->tstamp_ok = 0; | ||
220 | ireq->wscale_ok = ireq->sack_ok = 0; | ||
221 | treq->rcv_isn = ntohl(th->seq) - 1; | ||
222 | treq->snt_isn = cookie; | ||
223 | |||
224 | /* | ||
225 | * We need to lookup the dst_entry to get the correct window size. | ||
226 | * This is taken from tcp_v6_syn_recv_sock. Somebody please enlighten | ||
227 | * me if there is a preferred way. | ||
228 | */ | ||
229 | { | ||
230 | struct in6_addr *final_p = NULL, final; | ||
231 | struct flowi fl; | ||
232 | memset(&fl, 0, sizeof(fl)); | ||
233 | fl.proto = IPPROTO_TCP; | ||
234 | ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr); | ||
235 | if (np->opt && np->opt->srcrt) { | ||
236 | struct rt0_hdr *rt0 = (struct rt0_hdr *) np->opt->srcrt; | ||
237 | ipv6_addr_copy(&final, &fl.fl6_dst); | ||
238 | ipv6_addr_copy(&fl.fl6_dst, rt0->addr); | ||
239 | final_p = &final; | ||
240 | } | ||
241 | ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr); | ||
242 | fl.oif = sk->sk_bound_dev_if; | ||
243 | fl.fl_ip_dport = inet_rsk(req)->rmt_port; | ||
244 | fl.fl_ip_sport = inet_sk(sk)->sport; | ||
245 | security_req_classify_flow(req, &fl); | ||
246 | if (ip6_dst_lookup(sk, &dst, &fl)) { | ||
247 | reqsk_free(req); | ||
248 | goto out; | ||
249 | } | ||
250 | if (final_p) | ||
251 | ipv6_addr_copy(&fl.fl6_dst, final_p); | ||
252 | if ((xfrm_lookup(&dst, &fl, sk, 0)) < 0) | ||
253 | goto out; | ||
254 | } | ||
255 | |||
256 | req->window_clamp = dst_metric(dst, RTAX_WINDOW); | ||
257 | tcp_select_initial_window(tcp_full_space(sk), req->mss, | ||
258 | &req->rcv_wnd, &req->window_clamp, | ||
259 | 0, &rcv_wscale); | ||
260 | |||
261 | ireq->rcv_wscale = rcv_wscale; | ||
262 | |||
263 | ret = get_cookie_sock(sk, skb, req, dst); | ||
264 | |||
265 | out: return ret; | ||
266 | } | ||
267 | |||
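
The arithmetic in secure_tcp_syn_cookie() and check_tcp_syn_cookie() is easier to follow in isolation. The standalone program below round-trips the same scheme: the minute counter is carried in the top 32 - COOKIEBITS bits, the MSS-table index is hidden in the low COOKIEBITS bits under a count-dependent hash, and the check rejects cookies older than maxdiff counter ticks. cookie_hash32() here is only a stand-in for the SHA-1 based cookie_hash() above, which also mixes in the addresses, ports and a per-boot secret.

#include <stdint.h>
#include <stdio.h>

#define COOKIEBITS 24
#define COOKIEMASK (((uint32_t)1 << COOKIEBITS) - 1)

/* Hypothetical stand-in for cookie_hash(); the kernel hashes the
 * addresses, ports, count and syncookie_secret with sha_transform(). */
static uint32_t cookie_hash32(uint32_t count, int c)
{
	return (count * 2654435761u) ^ (c ? 0x9e3779b9u : 0x85ebca6bu);
}

/* Mirrors secure_tcp_syn_cookie(): count in the top bits, data (the
 * msstab index) hidden in the low COOKIEBITS bits. */
static uint32_t encode(uint32_t sseq, uint32_t count, uint32_t data)
{
	return cookie_hash32(0, 0) + sseq + (count << COOKIEBITS) +
	       ((cookie_hash32(count, 1) + data) & COOKIEMASK);
}

/* Mirrors check_tcp_syn_cookie(): recover the age, reject if too old,
 * then strip the second hash to get the data back. */
static uint32_t decode(uint32_t cookie, uint32_t sseq, uint32_t count,
		       uint32_t maxdiff)
{
	uint32_t diff;

	cookie -= cookie_hash32(0, 0) + sseq;
	diff = (count - (cookie >> COOKIEBITS)) & ((uint32_t)-1 >> COOKIEBITS);
	if (diff >= maxdiff)
		return (uint32_t)-1;
	return (cookie - cookie_hash32(count - diff, 1)) & COOKIEMASK;
}

int main(void)
{
	uint32_t sseq = 0x1234abcd;
	uint32_t cookie = encode(sseq, 7, 5);	/* minted at "minute" 7 */

	/* checked one minute later, allowing COUNTER_TRIES == 4 minutes */
	printf("recovered mss index: %u\n",
	       (unsigned)decode(cookie, sseq, 8, 4));
	return 0;
}

Built with any C compiler, this should print "recovered mss index: 5", i.e. the data value passed to encode().
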
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c index d6d3e68086f8..3804dcbbfab0 100644 --- a/net/ipv6/sysctl_net_ipv6.c +++ b/net/ipv6/sysctl_net_ipv6.c | |||
@@ -71,24 +71,11 @@ static int ipv6_sysctl_net_init(struct net *net) | |||
71 | ipv6_route_table = ipv6_route_sysctl_init(net); | 71 | ipv6_route_table = ipv6_route_sysctl_init(net); |
72 | if (!ipv6_route_table) | 72 | if (!ipv6_route_table) |
73 | goto out_ipv6_table; | 73 | goto out_ipv6_table; |
74 | ipv6_table[0].child = ipv6_route_table; | ||
74 | 75 | ||
75 | ipv6_icmp_table = ipv6_icmp_sysctl_init(net); | 76 | ipv6_icmp_table = ipv6_icmp_sysctl_init(net); |
76 | if (!ipv6_icmp_table) | 77 | if (!ipv6_icmp_table) |
77 | goto out_ipv6_route_table; | 78 | goto out_ipv6_route_table; |
78 | |||
79 | ipv6_route_table[0].data = &net->ipv6.sysctl.flush_delay; | ||
80 | /* ipv6_route_table[1].data will be handled when we have | ||
81 | routes per namespace */ | ||
82 | ipv6_route_table[2].data = &net->ipv6.sysctl.ip6_rt_max_size; | ||
83 | ipv6_route_table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval; | ||
84 | ipv6_route_table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout; | ||
85 | ipv6_route_table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval; | ||
86 | ipv6_route_table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity; | ||
87 | ipv6_route_table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires; | ||
88 | ipv6_route_table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss; | ||
89 | ipv6_table[0].child = ipv6_route_table; | ||
90 | |||
91 | ipv6_icmp_table[0].data = &net->ipv6.sysctl.icmpv6_time; | ||
92 | ipv6_table[1].child = ipv6_icmp_table; | 79 | ipv6_table[1].child = ipv6_icmp_table; |
93 | 80 | ||
94 | ipv6_table[2].data = &net->ipv6.sysctl.bindv6only; | 81 | ipv6_table[2].data = &net->ipv6.sysctl.bindv6only; |
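
The deletions here pair with the route.c hunk that taught ipv6_route_sysctl_init() to kmemdup() the template table and repoint each .data at the namespace's own fields, so the generic sysctl setup no longer needs to know the table layout. A minimal sketch of that idiom follows, with hypothetical names and assuming the 2.6.2x-era ctl_table layout used elsewhere in this patch:

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <net/net_namespace.h>

/* Hypothetical per-namespace tunables. */
struct foo_sysctl {
	int flush_delay;
	int max_size;
};

static ctl_table foo_table_template[] = {
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "flush_delay",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "max_size",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{ }
};

static ctl_table *foo_sysctl_init(struct net *net, struct foo_sysctl *cfg)
{
	ctl_table *table;

	table = kmemdup(foo_table_template, sizeof(foo_table_template),
			GFP_KERNEL);
	if (table) {
		/* the template leaves .data unset; repoint every entry at
		 * this namespace's storage before registering the table */
		table[0].data = &cfg->flush_delay;
		table[1].data = &cfg->max_size;
	}
	return table;	/* caller registers it under its sysctl path */
}
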
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 12750f2b05ab..caf0cc1c00e1 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -69,9 +69,6 @@ | |||
69 | #include <linux/crypto.h> | 69 | #include <linux/crypto.h> |
70 | #include <linux/scatterlist.h> | 70 | #include <linux/scatterlist.h> |
71 | 71 | ||
72 | /* Socket used for sending RSTs and ACKs */ | ||
73 | static struct socket *tcp6_socket; | ||
74 | |||
75 | static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb); | 72 | static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb); |
76 | static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req); | 73 | static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req); |
77 | static void tcp_v6_send_check(struct sock *sk, int len, | 74 | static void tcp_v6_send_check(struct sock *sk, int len, |
@@ -455,8 +452,7 @@ out: | |||
455 | } | 452 | } |
456 | 453 | ||
457 | 454 | ||
458 | static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req, | 455 | static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req) |
459 | struct dst_entry *dst) | ||
460 | { | 456 | { |
461 | struct inet6_request_sock *treq = inet6_rsk(req); | 457 | struct inet6_request_sock *treq = inet6_rsk(req); |
462 | struct ipv6_pinfo *np = inet6_sk(sk); | 458 | struct ipv6_pinfo *np = inet6_sk(sk); |
@@ -464,6 +460,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req, | |||
464 | struct ipv6_txoptions *opt = NULL; | 460 | struct ipv6_txoptions *opt = NULL; |
465 | struct in6_addr * final_p = NULL, final; | 461 | struct in6_addr * final_p = NULL, final; |
466 | struct flowi fl; | 462 | struct flowi fl; |
463 | struct dst_entry *dst; | ||
467 | int err = -1; | 464 | int err = -1; |
468 | 465 | ||
469 | memset(&fl, 0, sizeof(fl)); | 466 | memset(&fl, 0, sizeof(fl)); |
@@ -476,24 +473,22 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req, | |||
476 | fl.fl_ip_sport = inet_sk(sk)->sport; | 473 | fl.fl_ip_sport = inet_sk(sk)->sport; |
477 | security_req_classify_flow(req, &fl); | 474 | security_req_classify_flow(req, &fl); |
478 | 475 | ||
479 | if (dst == NULL) { | 476 | opt = np->opt; |
480 | opt = np->opt; | 477 | if (opt && opt->srcrt) { |
481 | if (opt && opt->srcrt) { | 478 | struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt; |
482 | struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt; | 479 | ipv6_addr_copy(&final, &fl.fl6_dst); |
483 | ipv6_addr_copy(&final, &fl.fl6_dst); | 480 | ipv6_addr_copy(&fl.fl6_dst, rt0->addr); |
484 | ipv6_addr_copy(&fl.fl6_dst, rt0->addr); | 481 | final_p = &final; |
485 | final_p = &final; | ||
486 | } | ||
487 | |||
488 | err = ip6_dst_lookup(sk, &dst, &fl); | ||
489 | if (err) | ||
490 | goto done; | ||
491 | if (final_p) | ||
492 | ipv6_addr_copy(&fl.fl6_dst, final_p); | ||
493 | if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) | ||
494 | goto done; | ||
495 | } | 482 | } |
496 | 483 | ||
484 | err = ip6_dst_lookup(sk, &dst, &fl); | ||
485 | if (err) | ||
486 | goto done; | ||
487 | if (final_p) | ||
488 | ipv6_addr_copy(&fl.fl6_dst, final_p); | ||
489 | if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) | ||
490 | goto done; | ||
491 | |||
497 | skb = tcp_make_synack(sk, dst, req); | 492 | skb = tcp_make_synack(sk, dst, req); |
498 | if (skb) { | 493 | if (skb) { |
499 | struct tcphdr *th = tcp_hdr(skb); | 494 | struct tcphdr *th = tcp_hdr(skb); |
@@ -514,6 +509,20 @@ done: | |||
514 | return err; | 509 | return err; |
515 | } | 510 | } |
516 | 511 | ||
512 | static inline void syn_flood_warning(struct sk_buff *skb) | ||
513 | { | ||
514 | #ifdef CONFIG_SYN_COOKIES | ||
515 | if (sysctl_tcp_syncookies) | ||
516 | printk(KERN_INFO | ||
517 | "TCPv6: Possible SYN flooding on port %d. " | ||
518 | "Sending cookies.\n", ntohs(tcp_hdr(skb)->dest)); | ||
519 | else | ||
520 | #endif | ||
521 | printk(KERN_INFO | ||
522 | "TCPv6: Possible SYN flooding on port %d. " | ||
523 | "Dropping request.\n", ntohs(tcp_hdr(skb)->dest)); | ||
524 | } | ||
525 | |||
517 | static void tcp_v6_reqsk_destructor(struct request_sock *req) | 526 | static void tcp_v6_reqsk_destructor(struct request_sock *req) |
518 | { | 527 | { |
519 | if (inet6_rsk(req)->pktopts) | 528 | if (inet6_rsk(req)->pktopts) |
@@ -741,7 +750,7 @@ static int tcp_v6_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key, | |||
741 | 750 | ||
742 | hp = tcp_get_md5sig_pool(); | 751 | hp = tcp_get_md5sig_pool(); |
743 | if (!hp) { | 752 | if (!hp) { |
744 | printk(KERN_WARNING "%s(): hash pool not found...\n", __FUNCTION__); | 753 | printk(KERN_WARNING "%s(): hash pool not found...\n", __func__); |
745 | goto clear_hash_noput; | 754 | goto clear_hash_noput; |
746 | } | 755 | } |
747 | bp = &hp->md5_blk.ip6; | 756 | bp = &hp->md5_blk.ip6; |
@@ -781,17 +790,17 @@ static int tcp_v6_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key, | |||
781 | /* Now store the hash into the packet */ | 790 | /* Now store the hash into the packet */ |
782 | err = crypto_hash_init(desc); | 791 | err = crypto_hash_init(desc); |
783 | if (err) { | 792 | if (err) { |
784 | printk(KERN_WARNING "%s(): hash_init failed\n", __FUNCTION__); | 793 | printk(KERN_WARNING "%s(): hash_init failed\n", __func__); |
785 | goto clear_hash; | 794 | goto clear_hash; |
786 | } | 795 | } |
787 | err = crypto_hash_update(desc, sg, nbytes); | 796 | err = crypto_hash_update(desc, sg, nbytes); |
788 | if (err) { | 797 | if (err) { |
789 | printk(KERN_WARNING "%s(): hash_update failed\n", __FUNCTION__); | 798 | printk(KERN_WARNING "%s(): hash_update failed\n", __func__); |
790 | goto clear_hash; | 799 | goto clear_hash; |
791 | } | 800 | } |
792 | err = crypto_hash_final(desc, md5_hash); | 801 | err = crypto_hash_final(desc, md5_hash); |
793 | if (err) { | 802 | if (err) { |
794 | printk(KERN_WARNING "%s(): hash_final failed\n", __FUNCTION__); | 803 | printk(KERN_WARNING "%s(): hash_final failed\n", __func__); |
795 | goto clear_hash; | 804 | goto clear_hash; |
796 | } | 805 | } |
797 | 806 | ||
@@ -917,7 +926,7 @@ done_opts: | |||
917 | } | 926 | } |
918 | #endif | 927 | #endif |
919 | 928 | ||
920 | static struct request_sock_ops tcp6_request_sock_ops __read_mostly = { | 929 | struct request_sock_ops tcp6_request_sock_ops __read_mostly = { |
921 | .family = AF_INET6, | 930 | .family = AF_INET6, |
922 | .obj_size = sizeof(struct tcp6_request_sock), | 931 | .obj_size = sizeof(struct tcp6_request_sock), |
923 | .rtx_syn_ack = tcp_v6_send_synack, | 932 | .rtx_syn_ack = tcp_v6_send_synack, |
@@ -979,6 +988,8 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb) | |||
979 | struct tcphdr *th = tcp_hdr(skb), *t1; | 988 | struct tcphdr *th = tcp_hdr(skb), *t1; |
980 | struct sk_buff *buff; | 989 | struct sk_buff *buff; |
981 | struct flowi fl; | 990 | struct flowi fl; |
991 | struct net *net = skb->dst->dev->nd_net; | ||
992 | struct sock *ctl_sk = net->ipv6.tcp_sk; | ||
982 | unsigned int tot_len = sizeof(*th); | 993 | unsigned int tot_len = sizeof(*th); |
983 | #ifdef CONFIG_TCP_MD5SIG | 994 | #ifdef CONFIG_TCP_MD5SIG |
984 | struct tcp_md5sig_key *key; | 995 | struct tcp_md5sig_key *key; |
@@ -1059,11 +1070,14 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb) | |||
1059 | fl.fl_ip_sport = t1->source; | 1070 | fl.fl_ip_sport = t1->source; |
1060 | security_skb_classify_flow(skb, &fl); | 1071 | security_skb_classify_flow(skb, &fl); |
1061 | 1072 | ||
1062 | /* sk = NULL, but it is safe for now. RST socket required. */ | 1073 | /* Pass a socket to ip6_dst_lookup even if it is only for a RST; |
1063 | if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) { | 1074 | * the underlying function will use it to retrieve the network |
1075 | * namespace. | ||
1076 | */ | ||
1077 | if (!ip6_dst_lookup(ctl_sk, &buff->dst, &fl)) { | ||
1064 | 1078 | ||
1065 | if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) { | 1079 | if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) { |
1066 | ip6_xmit(tcp6_socket->sk, buff, &fl, NULL, 0); | 1080 | ip6_xmit(ctl_sk, buff, &fl, NULL, 0); |
1067 | TCP_INC_STATS_BH(TCP_MIB_OUTSEGS); | 1081 | TCP_INC_STATS_BH(TCP_MIB_OUTSEGS); |
1068 | TCP_INC_STATS_BH(TCP_MIB_OUTRSTS); | 1082 | TCP_INC_STATS_BH(TCP_MIB_OUTRSTS); |
1069 | return; | 1083 | return; |
@@ -1079,6 +1093,8 @@ static void tcp_v6_send_ack(struct tcp_timewait_sock *tw, | |||
1079 | struct tcphdr *th = tcp_hdr(skb), *t1; | 1093 | struct tcphdr *th = tcp_hdr(skb), *t1; |
1080 | struct sk_buff *buff; | 1094 | struct sk_buff *buff; |
1081 | struct flowi fl; | 1095 | struct flowi fl; |
1096 | struct net *net = skb->dev->nd_net; | ||
1097 | struct sock *ctl_sk = net->ipv6.tcp_sk; | ||
1082 | unsigned int tot_len = sizeof(struct tcphdr); | 1098 | unsigned int tot_len = sizeof(struct tcphdr); |
1083 | __be32 *topt; | 1099 | __be32 *topt; |
1084 | #ifdef CONFIG_TCP_MD5SIG | 1100 | #ifdef CONFIG_TCP_MD5SIG |
@@ -1160,9 +1176,9 @@ static void tcp_v6_send_ack(struct tcp_timewait_sock *tw, | |||
1160 | fl.fl_ip_sport = t1->source; | 1176 | fl.fl_ip_sport = t1->source; |
1161 | security_skb_classify_flow(skb, &fl); | 1177 | security_skb_classify_flow(skb, &fl); |
1162 | 1178 | ||
1163 | if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) { | 1179 | if (!ip6_dst_lookup(ctl_sk, &buff->dst, &fl)) { |
1164 | if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) { | 1180 | if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) { |
1165 | ip6_xmit(tcp6_socket->sk, buff, &fl, NULL, 0); | 1181 | ip6_xmit(ctl_sk, buff, &fl, NULL, 0); |
1166 | TCP_INC_STATS_BH(TCP_MIB_OUTSEGS); | 1182 | TCP_INC_STATS_BH(TCP_MIB_OUTSEGS); |
1167 | return; | 1183 | return; |
1168 | } | 1184 | } |
@@ -1215,9 +1231,9 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb) | |||
1215 | return NULL; | 1231 | return NULL; |
1216 | } | 1232 | } |
1217 | 1233 | ||
1218 | #if 0 /*def CONFIG_SYN_COOKIES*/ | 1234 | #ifdef CONFIG_SYN_COOKIES |
1219 | if (!th->rst && !th->syn && th->ack) | 1235 | if (!th->rst && !th->syn && th->ack) |
1220 | sk = cookie_v6_check(sk, skb, &(IPCB(skb)->opt)); | 1236 | sk = cookie_v6_check(sk, skb); |
1221 | #endif | 1237 | #endif |
1222 | return sk; | 1238 | return sk; |
1223 | } | 1239 | } |
@@ -1233,6 +1249,11 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) | |||
1233 | struct tcp_sock *tp = tcp_sk(sk); | 1249 | struct tcp_sock *tp = tcp_sk(sk); |
1234 | struct request_sock *req = NULL; | 1250 | struct request_sock *req = NULL; |
1235 | __u32 isn = TCP_SKB_CB(skb)->when; | 1251 | __u32 isn = TCP_SKB_CB(skb)->when; |
1252 | #ifdef CONFIG_SYN_COOKIES | ||
1253 | int want_cookie = 0; | ||
1254 | #else | ||
1255 | #define want_cookie 0 | ||
1256 | #endif | ||
1236 | 1257 | ||
1237 | if (skb->protocol == htons(ETH_P_IP)) | 1258 | if (skb->protocol == htons(ETH_P_IP)) |
1238 | return tcp_v4_conn_request(sk, skb); | 1259 | return tcp_v4_conn_request(sk, skb); |
@@ -1240,12 +1261,14 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) | |||
1240 | if (!ipv6_unicast_destination(skb)) | 1261 | if (!ipv6_unicast_destination(skb)) |
1241 | goto drop; | 1262 | goto drop; |
1242 | 1263 | ||
1243 | /* | ||
1244 | * There are no SYN attacks on IPv6, yet... | ||
1245 | */ | ||
1246 | if (inet_csk_reqsk_queue_is_full(sk) && !isn) { | 1264 | if (inet_csk_reqsk_queue_is_full(sk) && !isn) { |
1247 | if (net_ratelimit()) | 1265 | if (net_ratelimit()) |
1248 | printk(KERN_INFO "TCPv6: dropping request, synflood is possible\n"); | 1266 | syn_flood_warning(skb); |
1267 | #ifdef CONFIG_SYN_COOKIES | ||
1268 | if (sysctl_tcp_syncookies) | ||
1269 | want_cookie = 1; | ||
1270 | else | ||
1271 | #endif | ||
1249 | goto drop; | 1272 | goto drop; |
1250 | } | 1273 | } |
1251 | 1274 | ||
@@ -1266,39 +1289,51 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) | |||
1266 | 1289 | ||
1267 | tcp_parse_options(skb, &tmp_opt, 0); | 1290 | tcp_parse_options(skb, &tmp_opt, 0); |
1268 | 1291 | ||
1292 | if (want_cookie) { | ||
1293 | tcp_clear_options(&tmp_opt); | ||
1294 | tmp_opt.saw_tstamp = 0; | ||
1295 | } | ||
1296 | |||
1269 | tmp_opt.tstamp_ok = tmp_opt.saw_tstamp; | 1297 | tmp_opt.tstamp_ok = tmp_opt.saw_tstamp; |
1270 | tcp_openreq_init(req, &tmp_opt, skb); | 1298 | tcp_openreq_init(req, &tmp_opt, skb); |
1271 | 1299 | ||
1272 | treq = inet6_rsk(req); | 1300 | treq = inet6_rsk(req); |
1273 | ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr); | 1301 | ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr); |
1274 | ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr); | 1302 | ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr); |
1275 | TCP_ECN_create_request(req, tcp_hdr(skb)); | ||
1276 | treq->pktopts = NULL; | 1303 | treq->pktopts = NULL; |
1277 | if (ipv6_opt_accepted(sk, skb) || | 1304 | if (!want_cookie) |
1278 | np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo || | 1305 | TCP_ECN_create_request(req, tcp_hdr(skb)); |
1279 | np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) { | 1306 | |
1280 | atomic_inc(&skb->users); | 1307 | if (want_cookie) { |
1281 | treq->pktopts = skb; | 1308 | isn = cookie_v6_init_sequence(sk, skb, &req->mss); |
1282 | } | 1309 | } else if (!isn) { |
1283 | treq->iif = sk->sk_bound_dev_if; | 1310 | if (ipv6_opt_accepted(sk, skb) || |
1311 | np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo || | ||
1312 | np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) { | ||
1313 | atomic_inc(&skb->users); | ||
1314 | treq->pktopts = skb; | ||
1315 | } | ||
1316 | treq->iif = sk->sk_bound_dev_if; | ||
1284 | 1317 | ||
1285 | /* So that link locals have meaning */ | 1318 | /* So that link locals have meaning */ |
1286 | if (!sk->sk_bound_dev_if && | 1319 | if (!sk->sk_bound_dev_if && |
1287 | ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL) | 1320 | ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL) |
1288 | treq->iif = inet6_iif(skb); | 1321 | treq->iif = inet6_iif(skb); |
1289 | 1322 | ||
1290 | if (isn == 0) | ||
1291 | isn = tcp_v6_init_sequence(skb); | 1323 | isn = tcp_v6_init_sequence(skb); |
1324 | } | ||
1292 | 1325 | ||
1293 | tcp_rsk(req)->snt_isn = isn; | 1326 | tcp_rsk(req)->snt_isn = isn; |
1294 | 1327 | ||
1295 | security_inet_conn_request(sk, skb, req); | 1328 | security_inet_conn_request(sk, skb, req); |
1296 | 1329 | ||
1297 | if (tcp_v6_send_synack(sk, req, NULL)) | 1330 | if (tcp_v6_send_synack(sk, req)) |
1298 | goto drop; | 1331 | goto drop; |
1299 | 1332 | ||
1300 | inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT); | 1333 | if (!want_cookie) { |
1301 | return 0; | 1334 | inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT); |
1335 | return 0; | ||
1336 | } | ||
1302 | 1337 | ||
1303 | drop: | 1338 | drop: |
1304 | if (req) | 1339 | if (req) |
@@ -2164,6 +2199,31 @@ static struct inet_protosw tcpv6_protosw = { | |||
2164 | INET_PROTOSW_ICSK, | 2199 | INET_PROTOSW_ICSK, |
2165 | }; | 2200 | }; |
2166 | 2201 | ||
2202 | static int tcpv6_net_init(struct net *net) | ||
2203 | { | ||
2204 | int err; | ||
2205 | struct socket *sock; | ||
2206 | struct sock *sk; | ||
2207 | |||
2208 | err = inet_csk_ctl_sock_create(&sock, PF_INET6, SOCK_RAW, IPPROTO_TCP); | ||
2209 | if (err) | ||
2210 | return err; | ||
2211 | |||
2212 | net->ipv6.tcp_sk = sk = sock->sk; | ||
2213 | sk_change_net(sk, net); | ||
2214 | return err; | ||
2215 | } | ||
2216 | |||
2217 | static void tcpv6_net_exit(struct net *net) | ||
2218 | { | ||
2219 | sk_release_kernel(net->ipv6.tcp_sk); | ||
2220 | } | ||
2221 | |||
2222 | static struct pernet_operations tcpv6_net_ops = { | ||
2223 | .init = tcpv6_net_init, | ||
2224 | .exit = tcpv6_net_exit, | ||
2225 | }; | ||
2226 | |||
2167 | int __init tcpv6_init(void) | 2227 | int __init tcpv6_init(void) |
2168 | { | 2228 | { |
2169 | int ret; | 2229 | int ret; |
@@ -2177,8 +2237,7 @@ int __init tcpv6_init(void) | |||
2177 | if (ret) | 2237 | if (ret) |
2178 | goto out_tcpv6_protocol; | 2238 | goto out_tcpv6_protocol; |
2179 | 2239 | ||
2180 | ret = inet_csk_ctl_sock_create(&tcp6_socket, PF_INET6, | 2240 | ret = register_pernet_subsys(&tcpv6_net_ops); |
2181 | SOCK_RAW, IPPROTO_TCP); | ||
2182 | if (ret) | 2241 | if (ret) |
2183 | goto out_tcpv6_protosw; | 2242 | goto out_tcpv6_protosw; |
2184 | out: | 2243 | out: |
@@ -2193,7 +2252,7 @@ out_tcpv6_protosw: | |||
2193 | 2252 | ||
2194 | void tcpv6_exit(void) | 2253 | void tcpv6_exit(void) |
2195 | { | 2254 | { |
2196 | sock_release(tcp6_socket); | 2255 | unregister_pernet_subsys(&tcpv6_net_ops); |
2197 | inet6_unregister_protosw(&tcpv6_protosw); | 2256 | inet6_unregister_protosw(&tcpv6_protosw); |
2198 | inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP); | 2257 | inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP); |
2199 | } | 2258 | } |
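
The tcp_ipv6.c change above replaces the single global tcp6_socket with one control socket per network namespace: tcpv6_net_init() creates it and stores it in net->ipv6.tcp_sk, tcpv6_net_exit() releases it, and register_pernet_subsys() arranges for the pair to run for every namespace. A compact userspace sketch of that init/exit pattern (struct net, pernet_ops and the malloc stand-in here are illustrative only, not the kernel API):

#include <stdio.h>
#include <stdlib.h>

struct net { void *tcp_sk; };

struct pernet_ops {
	int  (*init)(struct net *net);
	void (*exit)(struct net *net);
};

static int demo_init(struct net *net)
{
	net->tcp_sk = malloc(64);	/* stands in for the per-namespace control socket */
	return net->tcp_sk ? 0 : -1;
}

static void demo_exit(struct net *net)
{
	free(net->tcp_sk);		/* released exactly once per namespace */
	net->tcp_sk = NULL;
}

static struct pernet_ops demo_ops = { .init = demo_init, .exit = demo_exit };

int main(void)
{
	struct net ns_a = { 0 }, ns_b = { 0 };

	/* register_pernet_subsys() would run .init for every namespace as it
	 * appears; here the two calls are made by hand. */
	if (demo_ops.init(&ns_a) || demo_ops.init(&ns_b))
		return 1;
	printf("per-namespace state: %p and %p\n", ns_a.tcp_sk, ns_b.tcp_sk);
	demo_ops.exit(&ns_a);
	demo_ops.exit(&ns_b);
	return 0;
}

The practical effect, presumably, is that packets sent on behalf of no socket (resets and the like) originate from a socket that already belongs to the right namespace instead of a single init-namespace global.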
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 53739de829db..d6e311f6c8eb 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
@@ -323,6 +323,9 @@ static struct sock *udp_v6_mcast_next(struct sock *sk, | |||
323 | sk_for_each_from(s, node) { | 323 | sk_for_each_from(s, node) { |
324 | struct inet_sock *inet = inet_sk(s); | 324 | struct inet_sock *inet = inet_sk(s); |
325 | 325 | ||
326 | if (s->sk_net != sk->sk_net) | ||
327 | continue; | ||
328 | |||
326 | if (s->sk_hash == num && s->sk_family == PF_INET6) { | 329 | if (s->sk_hash == num && s->sk_family == PF_INET6) { |
327 | struct ipv6_pinfo *np = inet6_sk(s); | 330 | struct ipv6_pinfo *np = inet6_sk(s); |
328 | if (inet->dport) { | 331 | if (inet->dport) { |
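
The udp.c hunk above adds a namespace check to the multicast delivery walk: sockets from all namespaces share the same hash chain, so entries belonging to another namespace have to be skipped before the port and address checks run. A small userspace sketch of that filter (struct net, struct sock and the list layout are stand-ins, not the kernel structures):

#include <stddef.h>
#include <stdio.h>

struct net  { int id; };
struct sock { struct net *net; int port; struct sock *next; };

static struct sock *next_in_net(struct sock *s, struct net *net, int port)
{
	for (; s; s = s->next) {
		if (s->net != net)
			continue;	/* sockets of other namespaces share the chain */
		if (s->port == port)
			return s;
	}
	return NULL;
}

int main(void)
{
	struct net a = { 1 }, b = { 2 };
	struct sock s2 = { &b, 53, NULL };
	struct sock s1 = { &a, 53, &s2 };

	printf("matched namespace id: %d\n", next_in_net(&s1, &b, 53)->net->id);
	return 0;
}

Once the walk is namespace-aware, the rest of the matching (port, bound address, multicast membership) can stay exactly as it was.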
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index 7d20199ee1f3..e96dafdc7032 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c | |||
@@ -38,7 +38,7 @@ static struct dst_entry *xfrm6_dst_lookup(int tos, xfrm_address_t *saddr, | |||
38 | if (saddr) | 38 | if (saddr) |
39 | memcpy(&fl.fl6_src, saddr, sizeof(fl.fl6_src)); | 39 | memcpy(&fl.fl6_src, saddr, sizeof(fl.fl6_src)); |
40 | 40 | ||
41 | dst = ip6_route_output(NULL, &fl); | 41 | dst = ip6_route_output(&init_net, NULL, &fl); |
42 | 42 | ||
43 | err = dst->error; | 43 | err = dst->error; |
44 | if (dst->error) { | 44 | if (dst->error) { |
@@ -57,8 +57,9 @@ static int xfrm6_get_saddr(xfrm_address_t *saddr, xfrm_address_t *daddr) | |||
57 | if (IS_ERR(dst)) | 57 | if (IS_ERR(dst)) |
58 | return -EHOSTUNREACH; | 58 | return -EHOSTUNREACH; |
59 | 59 | ||
60 | ipv6_get_saddr(dst, (struct in6_addr *)&daddr->a6, | 60 | ipv6_dev_get_saddr(ip6_dst_idev(dst)->dev, |
61 | (struct in6_addr *)&saddr->a6); | 61 | (struct in6_addr *)&daddr->a6, |
62 | (struct in6_addr *)&saddr->a6); | ||
62 | dst_release(dst); | 63 | dst_release(dst); |
63 | return 0; | 64 | return 0; |
64 | } | 65 | } |
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c index 240b0cbfb532..6f21a53cb3e7 100644 --- a/net/irda/af_irda.c +++ b/net/irda/af_irda.c | |||
@@ -85,14 +85,14 @@ static int irda_data_indication(void *instance, void *sap, struct sk_buff *skb) | |||
85 | struct sock *sk; | 85 | struct sock *sk; |
86 | int err; | 86 | int err; |
87 | 87 | ||
88 | IRDA_DEBUG(3, "%s()\n", __FUNCTION__); | 88 | IRDA_DEBUG(3, "%s()\n", __func__); |
89 | 89 | ||
90 | self = instance; | 90 | self = instance; |
91 | sk = instance; | 91 | sk = instance; |
92 | 92 | ||
93 | err = sock_queue_rcv_skb(sk, skb); | 93 | err = sock_queue_rcv_skb(sk, skb); |
94 | if (err) { | 94 | if (err) { |
95 | IRDA_DEBUG(1, "%s(), error: no more mem!\n", __FUNCTION__); | 95 | IRDA_DEBUG(1, "%s(), error: no more mem!\n", __func__); |
96 | self->rx_flow = FLOW_STOP; | 96 | self->rx_flow = FLOW_STOP; |
97 | 97 | ||
98 | /* When we return error, TTP will need to requeue the skb */ | 98 | /* When we return error, TTP will need to requeue the skb */ |
@@ -116,7 +116,7 @@ static void irda_disconnect_indication(void *instance, void *sap, | |||
116 | 116 | ||
117 | self = instance; | 117 | self = instance; |
118 | 118 | ||
119 | IRDA_DEBUG(2, "%s(%p)\n", __FUNCTION__, self); | 119 | IRDA_DEBUG(2, "%s(%p)\n", __func__, self); |
120 | 120 | ||
121 | /* Don't care about it, but let's not leak it */ | 121 | /* Don't care about it, but let's not leak it */ |
122 | if(skb) | 122 | if(skb) |
@@ -125,7 +125,7 @@ static void irda_disconnect_indication(void *instance, void *sap, | |||
125 | sk = instance; | 125 | sk = instance; |
126 | if (sk == NULL) { | 126 | if (sk == NULL) { |
127 | IRDA_DEBUG(0, "%s(%p) : BUG : sk is NULL\n", | 127 | IRDA_DEBUG(0, "%s(%p) : BUG : sk is NULL\n", |
128 | __FUNCTION__, self); | 128 | __func__, self); |
129 | return; | 129 | return; |
130 | } | 130 | } |
131 | 131 | ||
@@ -181,7 +181,7 @@ static void irda_connect_confirm(void *instance, void *sap, | |||
181 | 181 | ||
182 | self = instance; | 182 | self = instance; |
183 | 183 | ||
184 | IRDA_DEBUG(2, "%s(%p)\n", __FUNCTION__, self); | 184 | IRDA_DEBUG(2, "%s(%p)\n", __func__, self); |
185 | 185 | ||
186 | sk = instance; | 186 | sk = instance; |
187 | if (sk == NULL) { | 187 | if (sk == NULL) { |
@@ -203,7 +203,7 @@ static void irda_connect_confirm(void *instance, void *sap, | |||
203 | case SOCK_STREAM: | 203 | case SOCK_STREAM: |
204 | if (max_sdu_size != 0) { | 204 | if (max_sdu_size != 0) { |
205 | IRDA_ERROR("%s: max_sdu_size must be 0\n", | 205 | IRDA_ERROR("%s: max_sdu_size must be 0\n", |
206 | __FUNCTION__); | 206 | __func__); |
207 | return; | 207 | return; |
208 | } | 208 | } |
209 | self->max_data_size = irttp_get_max_seg_size(self->tsap); | 209 | self->max_data_size = irttp_get_max_seg_size(self->tsap); |
@@ -211,7 +211,7 @@ static void irda_connect_confirm(void *instance, void *sap, | |||
211 | case SOCK_SEQPACKET: | 211 | case SOCK_SEQPACKET: |
212 | if (max_sdu_size == 0) { | 212 | if (max_sdu_size == 0) { |
213 | IRDA_ERROR("%s: max_sdu_size cannot be 0\n", | 213 | IRDA_ERROR("%s: max_sdu_size cannot be 0\n", |
214 | __FUNCTION__); | 214 | __func__); |
215 | return; | 215 | return; |
216 | } | 216 | } |
217 | self->max_data_size = max_sdu_size; | 217 | self->max_data_size = max_sdu_size; |
@@ -220,7 +220,7 @@ static void irda_connect_confirm(void *instance, void *sap, | |||
220 | self->max_data_size = irttp_get_max_seg_size(self->tsap); | 220 | self->max_data_size = irttp_get_max_seg_size(self->tsap); |
221 | } | 221 | } |
222 | 222 | ||
223 | IRDA_DEBUG(2, "%s(), max_data_size=%d\n", __FUNCTION__, | 223 | IRDA_DEBUG(2, "%s(), max_data_size=%d\n", __func__, |
224 | self->max_data_size); | 224 | self->max_data_size); |
225 | 225 | ||
226 | memcpy(&self->qos_tx, qos, sizeof(struct qos_info)); | 226 | memcpy(&self->qos_tx, qos, sizeof(struct qos_info)); |
@@ -245,7 +245,7 @@ static void irda_connect_indication(void *instance, void *sap, | |||
245 | 245 | ||
246 | self = instance; | 246 | self = instance; |
247 | 247 | ||
248 | IRDA_DEBUG(2, "%s(%p)\n", __FUNCTION__, self); | 248 | IRDA_DEBUG(2, "%s(%p)\n", __func__, self); |
249 | 249 | ||
250 | sk = instance; | 250 | sk = instance; |
251 | if (sk == NULL) { | 251 | if (sk == NULL) { |
@@ -264,7 +264,7 @@ static void irda_connect_indication(void *instance, void *sap, | |||
264 | case SOCK_STREAM: | 264 | case SOCK_STREAM: |
265 | if (max_sdu_size != 0) { | 265 | if (max_sdu_size != 0) { |
266 | IRDA_ERROR("%s: max_sdu_size must be 0\n", | 266 | IRDA_ERROR("%s: max_sdu_size must be 0\n", |
267 | __FUNCTION__); | 267 | __func__); |
268 | kfree_skb(skb); | 268 | kfree_skb(skb); |
269 | return; | 269 | return; |
270 | } | 270 | } |
@@ -273,7 +273,7 @@ static void irda_connect_indication(void *instance, void *sap, | |||
273 | case SOCK_SEQPACKET: | 273 | case SOCK_SEQPACKET: |
274 | if (max_sdu_size == 0) { | 274 | if (max_sdu_size == 0) { |
275 | IRDA_ERROR("%s: max_sdu_size cannot be 0\n", | 275 | IRDA_ERROR("%s: max_sdu_size cannot be 0\n", |
276 | __FUNCTION__); | 276 | __func__); |
277 | kfree_skb(skb); | 277 | kfree_skb(skb); |
278 | return; | 278 | return; |
279 | } | 279 | } |
@@ -283,7 +283,7 @@ static void irda_connect_indication(void *instance, void *sap, | |||
283 | self->max_data_size = irttp_get_max_seg_size(self->tsap); | 283 | self->max_data_size = irttp_get_max_seg_size(self->tsap); |
284 | } | 284 | } |
285 | 285 | ||
286 | IRDA_DEBUG(2, "%s(), max_data_size=%d\n", __FUNCTION__, | 286 | IRDA_DEBUG(2, "%s(), max_data_size=%d\n", __func__, |
287 | self->max_data_size); | 287 | self->max_data_size); |
288 | 288 | ||
289 | memcpy(&self->qos_tx, qos, sizeof(struct qos_info)); | 289 | memcpy(&self->qos_tx, qos, sizeof(struct qos_info)); |
@@ -302,13 +302,13 @@ static void irda_connect_response(struct irda_sock *self) | |||
302 | { | 302 | { |
303 | struct sk_buff *skb; | 303 | struct sk_buff *skb; |
304 | 304 | ||
305 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__); | 305 | IRDA_DEBUG(2, "%s()\n", __func__); |
306 | 306 | ||
307 | skb = alloc_skb(TTP_MAX_HEADER + TTP_SAR_HEADER, | 307 | skb = alloc_skb(TTP_MAX_HEADER + TTP_SAR_HEADER, |
308 | GFP_ATOMIC); | 308 | GFP_ATOMIC); |
309 | if (skb == NULL) { | 309 | if (skb == NULL) { |
310 | IRDA_DEBUG(0, "%s() Unable to allocate sk_buff!\n", | 310 | IRDA_DEBUG(0, "%s() Unable to allocate sk_buff!\n", |
311 | __FUNCTION__); | 311 | __func__); |
312 | return; | 312 | return; |
313 | } | 313 | } |
314 | 314 | ||
@@ -329,7 +329,7 @@ static void irda_flow_indication(void *instance, void *sap, LOCAL_FLOW flow) | |||
329 | struct irda_sock *self; | 329 | struct irda_sock *self; |
330 | struct sock *sk; | 330 | struct sock *sk; |
331 | 331 | ||
332 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__); | 332 | IRDA_DEBUG(2, "%s()\n", __func__); |
333 | 333 | ||
334 | self = instance; | 334 | self = instance; |
335 | sk = instance; | 335 | sk = instance; |
@@ -338,17 +338,17 @@ static void irda_flow_indication(void *instance, void *sap, LOCAL_FLOW flow) | |||
338 | switch (flow) { | 338 | switch (flow) { |
339 | case FLOW_STOP: | 339 | case FLOW_STOP: |
340 | IRDA_DEBUG(1, "%s(), IrTTP wants us to slow down\n", | 340 | IRDA_DEBUG(1, "%s(), IrTTP wants us to slow down\n", |
341 | __FUNCTION__); | 341 | __func__); |
342 | self->tx_flow = flow; | 342 | self->tx_flow = flow; |
343 | break; | 343 | break; |
344 | case FLOW_START: | 344 | case FLOW_START: |
345 | self->tx_flow = flow; | 345 | self->tx_flow = flow; |
346 | IRDA_DEBUG(1, "%s(), IrTTP wants us to start again\n", | 346 | IRDA_DEBUG(1, "%s(), IrTTP wants us to start again\n", |
347 | __FUNCTION__); | 347 | __func__); |
348 | wake_up_interruptible(sk->sk_sleep); | 348 | wake_up_interruptible(sk->sk_sleep); |
349 | break; | 349 | break; |
350 | default: | 350 | default: |
351 | IRDA_DEBUG(0, "%s(), Unknown flow command!\n", __FUNCTION__); | 351 | IRDA_DEBUG(0, "%s(), Unknown flow command!\n", __func__); |
352 | /* Unknown flow command, better stop */ | 352 | /* Unknown flow command, better stop */ |
353 | self->tx_flow = flow; | 353 | self->tx_flow = flow; |
354 | break; | 354 | break; |
@@ -370,11 +370,11 @@ static void irda_getvalue_confirm(int result, __u16 obj_id, | |||
370 | 370 | ||
371 | self = (struct irda_sock *) priv; | 371 | self = (struct irda_sock *) priv; |
372 | if (!self) { | 372 | if (!self) { |
373 | IRDA_WARNING("%s: lost myself!\n", __FUNCTION__); | 373 | IRDA_WARNING("%s: lost myself!\n", __func__); |
374 | return; | 374 | return; |
375 | } | 375 | } |
376 | 376 | ||
377 | IRDA_DEBUG(2, "%s(%p)\n", __FUNCTION__, self); | 377 | IRDA_DEBUG(2, "%s(%p)\n", __func__, self); |
378 | 378 | ||
379 | /* We probably don't need to make any more queries */ | 379 | /* We probably don't need to make any more queries */ |
380 | iriap_close(self->iriap); | 380 | iriap_close(self->iriap); |
@@ -382,7 +382,7 @@ static void irda_getvalue_confirm(int result, __u16 obj_id, | |||
382 | 382 | ||
383 | /* Check if request succeeded */ | 383 | /* Check if request succeeded */ |
384 | if (result != IAS_SUCCESS) { | 384 | if (result != IAS_SUCCESS) { |
385 | IRDA_DEBUG(1, "%s(), IAS query failed! (%d)\n", __FUNCTION__, | 385 | IRDA_DEBUG(1, "%s(), IAS query failed! (%d)\n", __func__, |
386 | result); | 386 | result); |
387 | 387 | ||
388 | self->errno = result; /* We really need it later */ | 388 | self->errno = result; /* We really need it later */ |
@@ -415,11 +415,11 @@ static void irda_selective_discovery_indication(discinfo_t *discovery, | |||
415 | { | 415 | { |
416 | struct irda_sock *self; | 416 | struct irda_sock *self; |
417 | 417 | ||
418 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__); | 418 | IRDA_DEBUG(2, "%s()\n", __func__); |
419 | 419 | ||
420 | self = (struct irda_sock *) priv; | 420 | self = (struct irda_sock *) priv; |
421 | if (!self) { | 421 | if (!self) { |
422 | IRDA_WARNING("%s: lost myself!\n", __FUNCTION__); | 422 | IRDA_WARNING("%s: lost myself!\n", __func__); |
423 | return; | 423 | return; |
424 | } | 424 | } |
425 | 425 | ||
@@ -442,7 +442,7 @@ static void irda_discovery_timeout(u_long priv) | |||
442 | { | 442 | { |
443 | struct irda_sock *self; | 443 | struct irda_sock *self; |
444 | 444 | ||
445 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__); | 445 | IRDA_DEBUG(2, "%s()\n", __func__); |
446 | 446 | ||
447 | self = (struct irda_sock *) priv; | 447 | self = (struct irda_sock *) priv; |
448 | BUG_ON(self == NULL); | 448 | BUG_ON(self == NULL); |
@@ -467,7 +467,7 @@ static int irda_open_tsap(struct irda_sock *self, __u8 tsap_sel, char *name) | |||
467 | notify_t notify; | 467 | notify_t notify; |
468 | 468 | ||
469 | if (self->tsap) { | 469 | if (self->tsap) { |
470 | IRDA_WARNING("%s: busy!\n", __FUNCTION__); | 470 | IRDA_WARNING("%s: busy!\n", __func__); |
471 | return -EBUSY; | 471 | return -EBUSY; |
472 | } | 472 | } |
473 | 473 | ||
@@ -486,7 +486,7 @@ static int irda_open_tsap(struct irda_sock *self, __u8 tsap_sel, char *name) | |||
486 | &notify); | 486 | &notify); |
487 | if (self->tsap == NULL) { | 487 | if (self->tsap == NULL) { |
488 | IRDA_DEBUG(0, "%s(), Unable to allocate TSAP!\n", | 488 | IRDA_DEBUG(0, "%s(), Unable to allocate TSAP!\n", |
489 | __FUNCTION__); | 489 | __func__); |
490 | return -ENOMEM; | 490 | return -ENOMEM; |
491 | } | 491 | } |
492 | /* Remember which TSAP selector we actually got */ | 492 | /* Remember which TSAP selector we actually got */ |
@@ -507,7 +507,7 @@ static int irda_open_lsap(struct irda_sock *self, int pid) | |||
507 | notify_t notify; | 507 | notify_t notify; |
508 | 508 | ||
509 | if (self->lsap) { | 509 | if (self->lsap) { |
510 | IRDA_WARNING("%s(), busy!\n", __FUNCTION__); | 510 | IRDA_WARNING("%s(), busy!\n", __func__); |
511 | return -EBUSY; | 511 | return -EBUSY; |
512 | } | 512 | } |
513 | 513 | ||
@@ -519,7 +519,7 @@ static int irda_open_lsap(struct irda_sock *self, int pid) | |||
519 | 519 | ||
520 | self->lsap = irlmp_open_lsap(LSAP_CONNLESS, &notify, pid); | 520 | self->lsap = irlmp_open_lsap(LSAP_CONNLESS, &notify, pid); |
521 | if (self->lsap == NULL) { | 521 | if (self->lsap == NULL) { |
522 | IRDA_DEBUG( 0, "%s(), Unable to allocate LSAP!\n", __FUNCTION__); | 522 | IRDA_DEBUG( 0, "%s(), Unable to allocate LSAP!\n", __func__); |
523 | return -ENOMEM; | 523 | return -ENOMEM; |
524 | } | 524 | } |
525 | 525 | ||
@@ -540,11 +540,11 @@ static int irda_open_lsap(struct irda_sock *self, int pid) | |||
540 | */ | 540 | */ |
541 | static int irda_find_lsap_sel(struct irda_sock *self, char *name) | 541 | static int irda_find_lsap_sel(struct irda_sock *self, char *name) |
542 | { | 542 | { |
543 | IRDA_DEBUG(2, "%s(%p, %s)\n", __FUNCTION__, self, name); | 543 | IRDA_DEBUG(2, "%s(%p, %s)\n", __func__, self, name); |
544 | 544 | ||
545 | if (self->iriap) { | 545 | if (self->iriap) { |
546 | IRDA_WARNING("%s(): busy with a previous query\n", | 546 | IRDA_WARNING("%s(): busy with a previous query\n", |
547 | __FUNCTION__); | 547 | __func__); |
548 | return -EBUSY; | 548 | return -EBUSY; |
549 | } | 549 | } |
550 | 550 | ||
@@ -580,7 +580,7 @@ static int irda_find_lsap_sel(struct irda_sock *self, char *name) | |||
580 | switch (self->ias_result->type) { | 580 | switch (self->ias_result->type) { |
581 | case IAS_INTEGER: | 581 | case IAS_INTEGER: |
582 | IRDA_DEBUG(4, "%s() int=%d\n", | 582 | IRDA_DEBUG(4, "%s() int=%d\n", |
583 | __FUNCTION__, self->ias_result->t.integer); | 583 | __func__, self->ias_result->t.integer); |
584 | 584 | ||
585 | if (self->ias_result->t.integer != -1) | 585 | if (self->ias_result->t.integer != -1) |
586 | self->dtsap_sel = self->ias_result->t.integer; | 586 | self->dtsap_sel = self->ias_result->t.integer; |
@@ -589,7 +589,7 @@ static int irda_find_lsap_sel(struct irda_sock *self, char *name) | |||
589 | break; | 589 | break; |
590 | default: | 590 | default: |
591 | self->dtsap_sel = 0; | 591 | self->dtsap_sel = 0; |
592 | IRDA_DEBUG(0, "%s(), bad type!\n", __FUNCTION__); | 592 | IRDA_DEBUG(0, "%s(), bad type!\n", __func__); |
593 | break; | 593 | break; |
594 | } | 594 | } |
595 | if (self->ias_result) | 595 | if (self->ias_result) |
@@ -627,7 +627,7 @@ static int irda_discover_daddr_and_lsap_sel(struct irda_sock *self, char *name) | |||
627 | __u32 daddr = DEV_ADDR_ANY; /* Address we found the service on */ | 627 | __u32 daddr = DEV_ADDR_ANY; /* Address we found the service on */ |
628 | __u8 dtsap_sel = 0x0; /* TSAP associated with it */ | 628 | __u8 dtsap_sel = 0x0; /* TSAP associated with it */ |
629 | 629 | ||
630 | IRDA_DEBUG(2, "%s(), name=%s\n", __FUNCTION__, name); | 630 | IRDA_DEBUG(2, "%s(), name=%s\n", __func__, name); |
631 | 631 | ||
632 | /* Ask lmp for the current discovery log | 632 | /* Ask lmp for the current discovery log |
633 | * Note : we have to use irlmp_get_discoveries(), as opposed | 633 | * Note : we have to use irlmp_get_discoveries(), as opposed |
@@ -649,7 +649,7 @@ static int irda_discover_daddr_and_lsap_sel(struct irda_sock *self, char *name) | |||
649 | self->daddr = discoveries[i].daddr; | 649 | self->daddr = discoveries[i].daddr; |
650 | self->saddr = 0x0; | 650 | self->saddr = 0x0; |
651 | IRDA_DEBUG(1, "%s(), trying daddr = %08x\n", | 651 | IRDA_DEBUG(1, "%s(), trying daddr = %08x\n", |
652 | __FUNCTION__, self->daddr); | 652 | __func__, self->daddr); |
653 | 653 | ||
654 | /* Query remote LM-IAS for this service */ | 654 | /* Query remote LM-IAS for this service */ |
655 | err = irda_find_lsap_sel(self, name); | 655 | err = irda_find_lsap_sel(self, name); |
@@ -658,7 +658,7 @@ static int irda_discover_daddr_and_lsap_sel(struct irda_sock *self, char *name) | |||
658 | /* We found the requested service */ | 658 | /* We found the requested service */ |
659 | if(daddr != DEV_ADDR_ANY) { | 659 | if(daddr != DEV_ADDR_ANY) { |
660 | IRDA_DEBUG(1, "%s(), discovered service ''%s'' in two different devices !!!\n", | 660 | IRDA_DEBUG(1, "%s(), discovered service ''%s'' in two different devices !!!\n", |
661 | __FUNCTION__, name); | 661 | __func__, name); |
662 | self->daddr = DEV_ADDR_ANY; | 662 | self->daddr = DEV_ADDR_ANY; |
663 | kfree(discoveries); | 663 | kfree(discoveries); |
664 | return(-ENOTUNIQ); | 664 | return(-ENOTUNIQ); |
@@ -672,7 +672,7 @@ static int irda_discover_daddr_and_lsap_sel(struct irda_sock *self, char *name) | |||
672 | break; | 672 | break; |
673 | default: | 673 | default: |
674 | /* Something bad did happen :-( */ | 674 | /* Something bad did happen :-( */ |
675 | IRDA_DEBUG(0, "%s(), unexpected IAS query failure\n", __FUNCTION__); | 675 | IRDA_DEBUG(0, "%s(), unexpected IAS query failure\n", __func__); |
676 | self->daddr = DEV_ADDR_ANY; | 676 | self->daddr = DEV_ADDR_ANY; |
677 | kfree(discoveries); | 677 | kfree(discoveries); |
678 | return(-EHOSTUNREACH); | 678 | return(-EHOSTUNREACH); |
@@ -685,7 +685,7 @@ static int irda_discover_daddr_and_lsap_sel(struct irda_sock *self, char *name) | |||
685 | /* Check out what we found */ | 685 | /* Check out what we found */ |
686 | if(daddr == DEV_ADDR_ANY) { | 686 | if(daddr == DEV_ADDR_ANY) { |
687 | IRDA_DEBUG(1, "%s(), cannot discover service ''%s'' in any device !!!\n", | 687 | IRDA_DEBUG(1, "%s(), cannot discover service ''%s'' in any device !!!\n", |
688 | __FUNCTION__, name); | 688 | __func__, name); |
689 | self->daddr = DEV_ADDR_ANY; | 689 | self->daddr = DEV_ADDR_ANY; |
690 | return(-EADDRNOTAVAIL); | 690 | return(-EADDRNOTAVAIL); |
691 | } | 691 | } |
@@ -696,7 +696,7 @@ static int irda_discover_daddr_and_lsap_sel(struct irda_sock *self, char *name) | |||
696 | self->dtsap_sel = dtsap_sel; | 696 | self->dtsap_sel = dtsap_sel; |
697 | 697 | ||
698 | IRDA_DEBUG(1, "%s(), discovered requested service ''%s'' at address %08x\n", | 698 | IRDA_DEBUG(1, "%s(), discovered requested service ''%s'' at address %08x\n", |
699 | __FUNCTION__, name, self->daddr); | 699 | __func__, name, self->daddr); |
700 | 700 | ||
701 | return 0; | 701 | return 0; |
702 | } | 702 | } |
@@ -727,8 +727,8 @@ static int irda_getname(struct socket *sock, struct sockaddr *uaddr, | |||
727 | saddr.sir_addr = self->saddr; | 727 | saddr.sir_addr = self->saddr; |
728 | } | 728 | } |
729 | 729 | ||
730 | IRDA_DEBUG(1, "%s(), tsap_sel = %#x\n", __FUNCTION__, saddr.sir_lsap_sel); | 730 | IRDA_DEBUG(1, "%s(), tsap_sel = %#x\n", __func__, saddr.sir_lsap_sel); |
731 | IRDA_DEBUG(1, "%s(), addr = %08x\n", __FUNCTION__, saddr.sir_addr); | 731 | IRDA_DEBUG(1, "%s(), addr = %08x\n", __func__, saddr.sir_addr); |
732 | 732 | ||
733 | /* uaddr_len come to us uninitialised */ | 733 | /* uaddr_len come to us uninitialised */ |
734 | *uaddr_len = sizeof (struct sockaddr_irda); | 734 | *uaddr_len = sizeof (struct sockaddr_irda); |
@@ -747,7 +747,7 @@ static int irda_listen(struct socket *sock, int backlog) | |||
747 | { | 747 | { |
748 | struct sock *sk = sock->sk; | 748 | struct sock *sk = sock->sk; |
749 | 749 | ||
750 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__); | 750 | IRDA_DEBUG(2, "%s()\n", __func__); |
751 | 751 | ||
752 | if ((sk->sk_type != SOCK_STREAM) && (sk->sk_type != SOCK_SEQPACKET) && | 752 | if ((sk->sk_type != SOCK_STREAM) && (sk->sk_type != SOCK_SEQPACKET) && |
753 | (sk->sk_type != SOCK_DGRAM)) | 753 | (sk->sk_type != SOCK_DGRAM)) |
@@ -776,7 +776,7 @@ static int irda_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |||
776 | struct irda_sock *self = irda_sk(sk); | 776 | struct irda_sock *self = irda_sk(sk); |
777 | int err; | 777 | int err; |
778 | 778 | ||
779 | IRDA_DEBUG(2, "%s(%p)\n", __FUNCTION__, self); | 779 | IRDA_DEBUG(2, "%s(%p)\n", __func__, self); |
780 | 780 | ||
781 | if (addr_len != sizeof(struct sockaddr_irda)) | 781 | if (addr_len != sizeof(struct sockaddr_irda)) |
782 | return -EINVAL; | 782 | return -EINVAL; |
@@ -787,7 +787,7 @@ static int irda_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |||
787 | (sk->sk_protocol == IRDAPROTO_ULTRA)) { | 787 | (sk->sk_protocol == IRDAPROTO_ULTRA)) { |
788 | self->pid = addr->sir_lsap_sel; | 788 | self->pid = addr->sir_lsap_sel; |
789 | if (self->pid & 0x80) { | 789 | if (self->pid & 0x80) { |
790 | IRDA_DEBUG(0, "%s(), extension in PID not supp!\n", __FUNCTION__); | 790 | IRDA_DEBUG(0, "%s(), extension in PID not supp!\n", __func__); |
791 | return -EOPNOTSUPP; | 791 | return -EOPNOTSUPP; |
792 | } | 792 | } |
793 | err = irda_open_lsap(self, self->pid); | 793 | err = irda_open_lsap(self, self->pid); |
@@ -835,7 +835,7 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags) | |||
835 | struct sk_buff *skb; | 835 | struct sk_buff *skb; |
836 | int err; | 836 | int err; |
837 | 837 | ||
838 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__); | 838 | IRDA_DEBUG(2, "%s()\n", __func__); |
839 | 839 | ||
840 | err = irda_create(sk->sk_net, newsock, sk->sk_protocol); | 840 | err = irda_create(sk->sk_net, newsock, sk->sk_protocol); |
841 | if (err) | 841 | if (err) |
@@ -893,7 +893,7 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags) | |||
893 | /* Now attach up the new socket */ | 893 | /* Now attach up the new socket */ |
894 | new->tsap = irttp_dup(self->tsap, new); | 894 | new->tsap = irttp_dup(self->tsap, new); |
895 | if (!new->tsap) { | 895 | if (!new->tsap) { |
896 | IRDA_DEBUG(0, "%s(), dup failed!\n", __FUNCTION__); | 896 | IRDA_DEBUG(0, "%s(), dup failed!\n", __func__); |
897 | kfree_skb(skb); | 897 | kfree_skb(skb); |
898 | return -1; | 898 | return -1; |
899 | } | 899 | } |
@@ -954,7 +954,7 @@ static int irda_connect(struct socket *sock, struct sockaddr *uaddr, | |||
954 | struct irda_sock *self = irda_sk(sk); | 954 | struct irda_sock *self = irda_sk(sk); |
955 | int err; | 955 | int err; |
956 | 956 | ||
957 | IRDA_DEBUG(2, "%s(%p)\n", __FUNCTION__, self); | 957 | IRDA_DEBUG(2, "%s(%p)\n", __func__, self); |
958 | 958 | ||
959 | /* Don't allow connect for Ultra sockets */ | 959 | /* Don't allow connect for Ultra sockets */ |
960 | if ((sk->sk_type == SOCK_DGRAM) && (sk->sk_protocol == IRDAPROTO_ULTRA)) | 960 | if ((sk->sk_type == SOCK_DGRAM) && (sk->sk_protocol == IRDAPROTO_ULTRA)) |
@@ -984,13 +984,13 @@ static int irda_connect(struct socket *sock, struct sockaddr *uaddr, | |||
984 | /* Try to find one suitable */ | 984 | /* Try to find one suitable */ |
985 | err = irda_discover_daddr_and_lsap_sel(self, addr->sir_name); | 985 | err = irda_discover_daddr_and_lsap_sel(self, addr->sir_name); |
986 | if (err) { | 986 | if (err) { |
987 | IRDA_DEBUG(0, "%s(), auto-connect failed!\n", __FUNCTION__); | 987 | IRDA_DEBUG(0, "%s(), auto-connect failed!\n", __func__); |
988 | return err; | 988 | return err; |
989 | } | 989 | } |
990 | } else { | 990 | } else { |
991 | /* Use the one provided by the user */ | 991 | /* Use the one provided by the user */ |
992 | self->daddr = addr->sir_addr; | 992 | self->daddr = addr->sir_addr; |
993 | IRDA_DEBUG(1, "%s(), daddr = %08x\n", __FUNCTION__, self->daddr); | 993 | IRDA_DEBUG(1, "%s(), daddr = %08x\n", __func__, self->daddr); |
994 | 994 | ||
995 | /* If we don't have a valid service name, we assume the | 995 | /* If we don't have a valid service name, we assume the |
996 | * user want to connect on a specific LSAP. Prevent | 996 | * user want to connect on a specific LSAP. Prevent |
@@ -1000,7 +1000,7 @@ static int irda_connect(struct socket *sock, struct sockaddr *uaddr, | |||
1000 | /* Query remote LM-IAS using service name */ | 1000 | /* Query remote LM-IAS using service name */ |
1001 | err = irda_find_lsap_sel(self, addr->sir_name); | 1001 | err = irda_find_lsap_sel(self, addr->sir_name); |
1002 | if (err) { | 1002 | if (err) { |
1003 | IRDA_DEBUG(0, "%s(), connect failed!\n", __FUNCTION__); | 1003 | IRDA_DEBUG(0, "%s(), connect failed!\n", __func__); |
1004 | return err; | 1004 | return err; |
1005 | } | 1005 | } |
1006 | } else { | 1006 | } else { |
@@ -1025,7 +1025,7 @@ static int irda_connect(struct socket *sock, struct sockaddr *uaddr, | |||
1025 | self->saddr, self->daddr, NULL, | 1025 | self->saddr, self->daddr, NULL, |
1026 | self->max_sdu_size_rx, NULL); | 1026 | self->max_sdu_size_rx, NULL); |
1027 | if (err) { | 1027 | if (err) { |
1028 | IRDA_DEBUG(0, "%s(), connect failed!\n", __FUNCTION__); | 1028 | IRDA_DEBUG(0, "%s(), connect failed!\n", __func__); |
1029 | return err; | 1029 | return err; |
1030 | } | 1030 | } |
1031 | 1031 | ||
@@ -1068,7 +1068,7 @@ static int irda_create(struct net *net, struct socket *sock, int protocol) | |||
1068 | struct sock *sk; | 1068 | struct sock *sk; |
1069 | struct irda_sock *self; | 1069 | struct irda_sock *self; |
1070 | 1070 | ||
1071 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__); | 1071 | IRDA_DEBUG(2, "%s()\n", __func__); |
1072 | 1072 | ||
1073 | if (net != &init_net) | 1073 | if (net != &init_net) |
1074 | return -EAFNOSUPPORT; | 1074 | return -EAFNOSUPPORT; |
@@ -1089,7 +1089,7 @@ static int irda_create(struct net *net, struct socket *sock, int protocol) | |||
1089 | return -ENOMEM; | 1089 | return -ENOMEM; |
1090 | 1090 | ||
1091 | self = irda_sk(sk); | 1091 | self = irda_sk(sk); |
1092 | IRDA_DEBUG(2, "%s() : self is %p\n", __FUNCTION__, self); | 1092 | IRDA_DEBUG(2, "%s() : self is %p\n", __func__, self); |
1093 | 1093 | ||
1094 | init_waitqueue_head(&self->query_wait); | 1094 | init_waitqueue_head(&self->query_wait); |
1095 | 1095 | ||
@@ -1149,7 +1149,7 @@ static int irda_create(struct net *net, struct socket *sock, int protocol) | |||
1149 | */ | 1149 | */ |
1150 | static void irda_destroy_socket(struct irda_sock *self) | 1150 | static void irda_destroy_socket(struct irda_sock *self) |
1151 | { | 1151 | { |
1152 | IRDA_DEBUG(2, "%s(%p)\n", __FUNCTION__, self); | 1152 | IRDA_DEBUG(2, "%s(%p)\n", __func__, self); |
1153 | 1153 | ||
1154 | /* Unregister with IrLMP */ | 1154 | /* Unregister with IrLMP */ |
1155 | irlmp_unregister_client(self->ckey); | 1155 | irlmp_unregister_client(self->ckey); |
@@ -1186,7 +1186,7 @@ static int irda_release(struct socket *sock) | |||
1186 | { | 1186 | { |
1187 | struct sock *sk = sock->sk; | 1187 | struct sock *sk = sock->sk; |
1188 | 1188 | ||
1189 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__); | 1189 | IRDA_DEBUG(2, "%s()\n", __func__); |
1190 | 1190 | ||
1191 | if (sk == NULL) | 1191 | if (sk == NULL) |
1192 | return 0; | 1192 | return 0; |
@@ -1254,7 +1254,7 @@ static int irda_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
1254 | struct sk_buff *skb; | 1254 | struct sk_buff *skb; |
1255 | int err = -EPIPE; | 1255 | int err = -EPIPE; |
1256 | 1256 | ||
1257 | IRDA_DEBUG(4, "%s(), len=%zd\n", __FUNCTION__, len); | 1257 | IRDA_DEBUG(4, "%s(), len=%zd\n", __func__, len); |
1258 | 1258 | ||
1259 | /* Note : socket.c set MSG_EOR on SEQPACKET sockets */ | 1259 | /* Note : socket.c set MSG_EOR on SEQPACKET sockets */ |
1260 | if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_EOR | MSG_CMSG_COMPAT | | 1260 | if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_EOR | MSG_CMSG_COMPAT | |
@@ -1282,7 +1282,7 @@ static int irda_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
1282 | /* Check that we don't send out too big frames */ | 1282 | /* Check that we don't send out too big frames */ |
1283 | if (len > self->max_data_size) { | 1283 | if (len > self->max_data_size) { |
1284 | IRDA_DEBUG(2, "%s(), Chopping frame from %zd to %d bytes!\n", | 1284 | IRDA_DEBUG(2, "%s(), Chopping frame from %zd to %d bytes!\n", |
1285 | __FUNCTION__, len, self->max_data_size); | 1285 | __func__, len, self->max_data_size); |
1286 | len = self->max_data_size; | 1286 | len = self->max_data_size; |
1287 | } | 1287 | } |
1288 | 1288 | ||
@@ -1306,7 +1306,7 @@ static int irda_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
1306 | */ | 1306 | */ |
1307 | err = irttp_data_request(self->tsap, skb); | 1307 | err = irttp_data_request(self->tsap, skb); |
1308 | if (err) { | 1308 | if (err) { |
1309 | IRDA_DEBUG(0, "%s(), err=%d\n", __FUNCTION__, err); | 1309 | IRDA_DEBUG(0, "%s(), err=%d\n", __func__, err); |
1310 | goto out_err; | 1310 | goto out_err; |
1311 | } | 1311 | } |
1312 | /* Tell client how much data we actually sent */ | 1312 | /* Tell client how much data we actually sent */ |
@@ -1332,7 +1332,7 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock, | |||
1332 | size_t copied; | 1332 | size_t copied; |
1333 | int err; | 1333 | int err; |
1334 | 1334 | ||
1335 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__); | 1335 | IRDA_DEBUG(4, "%s()\n", __func__); |
1336 | 1336 | ||
1337 | if ((err = sock_error(sk)) < 0) | 1337 | if ((err = sock_error(sk)) < 0) |
1338 | return err; | 1338 | return err; |
@@ -1347,7 +1347,7 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock, | |||
1347 | 1347 | ||
1348 | if (copied > size) { | 1348 | if (copied > size) { |
1349 | IRDA_DEBUG(2, "%s(), Received truncated frame (%zd < %zd)!\n", | 1349 | IRDA_DEBUG(2, "%s(), Received truncated frame (%zd < %zd)!\n", |
1350 | __FUNCTION__, copied, size); | 1350 | __func__, copied, size); |
1351 | copied = size; | 1351 | copied = size; |
1352 | msg->msg_flags |= MSG_TRUNC; | 1352 | msg->msg_flags |= MSG_TRUNC; |
1353 | } | 1353 | } |
@@ -1363,7 +1363,7 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock, | |||
1363 | */ | 1363 | */ |
1364 | if (self->rx_flow == FLOW_STOP) { | 1364 | if (self->rx_flow == FLOW_STOP) { |
1365 | if ((atomic_read(&sk->sk_rmem_alloc) << 2) <= sk->sk_rcvbuf) { | 1365 | if ((atomic_read(&sk->sk_rmem_alloc) << 2) <= sk->sk_rcvbuf) { |
1366 | IRDA_DEBUG(2, "%s(), Starting IrTTP\n", __FUNCTION__); | 1366 | IRDA_DEBUG(2, "%s(), Starting IrTTP\n", __func__); |
1367 | self->rx_flow = FLOW_START; | 1367 | self->rx_flow = FLOW_START; |
1368 | irttp_flow_request(self->tsap, FLOW_START); | 1368 | irttp_flow_request(self->tsap, FLOW_START); |
1369 | } | 1369 | } |
@@ -1385,7 +1385,7 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock, | |||
1385 | int target, err; | 1385 | int target, err; |
1386 | long timeo; | 1386 | long timeo; |
1387 | 1387 | ||
1388 | IRDA_DEBUG(3, "%s()\n", __FUNCTION__); | 1388 | IRDA_DEBUG(3, "%s()\n", __func__); |
1389 | 1389 | ||
1390 | if ((err = sock_error(sk)) < 0) | 1390 | if ((err = sock_error(sk)) < 0) |
1391 | return err; | 1391 | return err; |
@@ -1459,14 +1459,14 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock, | |||
1459 | /* put the skb back if we didn't use it up.. */ | 1459 | /* put the skb back if we didn't use it up.. */ |
1460 | if (skb->len) { | 1460 | if (skb->len) { |
1461 | IRDA_DEBUG(1, "%s(), back on q!\n", | 1461 | IRDA_DEBUG(1, "%s(), back on q!\n", |
1462 | __FUNCTION__); | 1462 | __func__); |
1463 | skb_queue_head(&sk->sk_receive_queue, skb); | 1463 | skb_queue_head(&sk->sk_receive_queue, skb); |
1464 | break; | 1464 | break; |
1465 | } | 1465 | } |
1466 | 1466 | ||
1467 | kfree_skb(skb); | 1467 | kfree_skb(skb); |
1468 | } else { | 1468 | } else { |
1469 | IRDA_DEBUG(0, "%s() questionable!?\n", __FUNCTION__); | 1469 | IRDA_DEBUG(0, "%s() questionable!?\n", __func__); |
1470 | 1470 | ||
1471 | /* put message back and return */ | 1471 | /* put message back and return */ |
1472 | skb_queue_head(&sk->sk_receive_queue, skb); | 1472 | skb_queue_head(&sk->sk_receive_queue, skb); |
@@ -1482,7 +1482,7 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock, | |||
1482 | */ | 1482 | */ |
1483 | if (self->rx_flow == FLOW_STOP) { | 1483 | if (self->rx_flow == FLOW_STOP) { |
1484 | if ((atomic_read(&sk->sk_rmem_alloc) << 2) <= sk->sk_rcvbuf) { | 1484 | if ((atomic_read(&sk->sk_rmem_alloc) << 2) <= sk->sk_rcvbuf) { |
1485 | IRDA_DEBUG(2, "%s(), Starting IrTTP\n", __FUNCTION__); | 1485 | IRDA_DEBUG(2, "%s(), Starting IrTTP\n", __func__); |
1486 | self->rx_flow = FLOW_START; | 1486 | self->rx_flow = FLOW_START; |
1487 | irttp_flow_request(self->tsap, FLOW_START); | 1487 | irttp_flow_request(self->tsap, FLOW_START); |
1488 | } | 1488 | } |
@@ -1506,7 +1506,7 @@ static int irda_sendmsg_dgram(struct kiocb *iocb, struct socket *sock, | |||
1506 | struct sk_buff *skb; | 1506 | struct sk_buff *skb; |
1507 | int err; | 1507 | int err; |
1508 | 1508 | ||
1509 | IRDA_DEBUG(4, "%s(), len=%zd\n", __FUNCTION__, len); | 1509 | IRDA_DEBUG(4, "%s(), len=%zd\n", __func__, len); |
1510 | 1510 | ||
1511 | if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT)) | 1511 | if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT)) |
1512 | return -EINVAL; | 1512 | return -EINVAL; |
@@ -1528,7 +1528,7 @@ static int irda_sendmsg_dgram(struct kiocb *iocb, struct socket *sock, | |||
1528 | if (len > self->max_data_size) { | 1528 | if (len > self->max_data_size) { |
1529 | IRDA_DEBUG(0, "%s(), Warning to much data! " | 1529 | IRDA_DEBUG(0, "%s(), Warning to much data! " |
1530 | "Chopping frame from %zd to %d bytes!\n", | 1530 | "Chopping frame from %zd to %d bytes!\n", |
1531 | __FUNCTION__, len, self->max_data_size); | 1531 | __func__, len, self->max_data_size); |
1532 | len = self->max_data_size; | 1532 | len = self->max_data_size; |
1533 | } | 1533 | } |
1534 | 1534 | ||
@@ -1540,7 +1540,7 @@ static int irda_sendmsg_dgram(struct kiocb *iocb, struct socket *sock, | |||
1540 | skb_reserve(skb, self->max_header_size); | 1540 | skb_reserve(skb, self->max_header_size); |
1541 | skb_reset_transport_header(skb); | 1541 | skb_reset_transport_header(skb); |
1542 | 1542 | ||
1543 | IRDA_DEBUG(4, "%s(), appending user data\n", __FUNCTION__); | 1543 | IRDA_DEBUG(4, "%s(), appending user data\n", __func__); |
1544 | skb_put(skb, len); | 1544 | skb_put(skb, len); |
1545 | err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len); | 1545 | err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len); |
1546 | if (err) { | 1546 | if (err) { |
@@ -1554,7 +1554,7 @@ static int irda_sendmsg_dgram(struct kiocb *iocb, struct socket *sock, | |||
1554 | */ | 1554 | */ |
1555 | err = irttp_udata_request(self->tsap, skb); | 1555 | err = irttp_udata_request(self->tsap, skb); |
1556 | if (err) { | 1556 | if (err) { |
1557 | IRDA_DEBUG(0, "%s(), err=%d\n", __FUNCTION__, err); | 1557 | IRDA_DEBUG(0, "%s(), err=%d\n", __func__, err); |
1558 | return err; | 1558 | return err; |
1559 | } | 1559 | } |
1560 | return len; | 1560 | return len; |
@@ -1577,7 +1577,7 @@ static int irda_sendmsg_ultra(struct kiocb *iocb, struct socket *sock, | |||
1577 | struct sk_buff *skb; | 1577 | struct sk_buff *skb; |
1578 | int err; | 1578 | int err; |
1579 | 1579 | ||
1580 | IRDA_DEBUG(4, "%s(), len=%zd\n", __FUNCTION__, len); | 1580 | IRDA_DEBUG(4, "%s(), len=%zd\n", __func__, len); |
1581 | 1581 | ||
1582 | if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT)) | 1582 | if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT)) |
1583 | return -EINVAL; | 1583 | return -EINVAL; |
@@ -1600,7 +1600,7 @@ static int irda_sendmsg_ultra(struct kiocb *iocb, struct socket *sock, | |||
1600 | 1600 | ||
1601 | pid = addr->sir_lsap_sel; | 1601 | pid = addr->sir_lsap_sel; |
1602 | if (pid & 0x80) { | 1602 | if (pid & 0x80) { |
1603 | IRDA_DEBUG(0, "%s(), extension in PID not supp!\n", __FUNCTION__); | 1603 | IRDA_DEBUG(0, "%s(), extension in PID not supp!\n", __func__); |
1604 | return -EOPNOTSUPP; | 1604 | return -EOPNOTSUPP; |
1605 | } | 1605 | } |
1606 | } else { | 1606 | } else { |
@@ -1609,7 +1609,7 @@ static int irda_sendmsg_ultra(struct kiocb *iocb, struct socket *sock, | |||
1609 | if ((self->lsap == NULL) || | 1609 | if ((self->lsap == NULL) || |
1610 | (sk->sk_state != TCP_ESTABLISHED)) { | 1610 | (sk->sk_state != TCP_ESTABLISHED)) { |
1611 | IRDA_DEBUG(0, "%s(), socket not bound to Ultra PID.\n", | 1611 | IRDA_DEBUG(0, "%s(), socket not bound to Ultra PID.\n", |
1612 | __FUNCTION__); | 1612 | __func__); |
1613 | return -ENOTCONN; | 1613 | return -ENOTCONN; |
1614 | } | 1614 | } |
1615 | /* Use PID from socket */ | 1615 | /* Use PID from socket */ |
@@ -1623,7 +1623,7 @@ static int irda_sendmsg_ultra(struct kiocb *iocb, struct socket *sock, | |||
1623 | if (len > self->max_data_size) { | 1623 | if (len > self->max_data_size) { |
1624 | IRDA_DEBUG(0, "%s(), Warning to much data! " | 1624 | IRDA_DEBUG(0, "%s(), Warning to much data! " |
1625 | "Chopping frame from %zd to %d bytes!\n", | 1625 | "Chopping frame from %zd to %d bytes!\n", |
1626 | __FUNCTION__, len, self->max_data_size); | 1626 | __func__, len, self->max_data_size); |
1627 | len = self->max_data_size; | 1627 | len = self->max_data_size; |
1628 | } | 1628 | } |
1629 | 1629 | ||
@@ -1635,7 +1635,7 @@ static int irda_sendmsg_ultra(struct kiocb *iocb, struct socket *sock, | |||
1635 | skb_reserve(skb, self->max_header_size); | 1635 | skb_reserve(skb, self->max_header_size); |
1636 | skb_reset_transport_header(skb); | 1636 | skb_reset_transport_header(skb); |
1637 | 1637 | ||
1638 | IRDA_DEBUG(4, "%s(), appending user data\n", __FUNCTION__); | 1638 | IRDA_DEBUG(4, "%s(), appending user data\n", __func__); |
1639 | skb_put(skb, len); | 1639 | skb_put(skb, len); |
1640 | err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len); | 1640 | err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len); |
1641 | if (err) { | 1641 | if (err) { |
@@ -1646,7 +1646,7 @@ static int irda_sendmsg_ultra(struct kiocb *iocb, struct socket *sock, | |||
1646 | err = irlmp_connless_data_request((bound ? self->lsap : NULL), | 1646 | err = irlmp_connless_data_request((bound ? self->lsap : NULL), |
1647 | skb, pid); | 1647 | skb, pid); |
1648 | if (err) { | 1648 | if (err) { |
1649 | IRDA_DEBUG(0, "%s(), err=%d\n", __FUNCTION__, err); | 1649 | IRDA_DEBUG(0, "%s(), err=%d\n", __func__, err); |
1650 | return err; | 1650 | return err; |
1651 | } | 1651 | } |
1652 | return len; | 1652 | return len; |
@@ -1661,7 +1661,7 @@ static int irda_shutdown(struct socket *sock, int how) | |||
1661 | struct sock *sk = sock->sk; | 1661 | struct sock *sk = sock->sk; |
1662 | struct irda_sock *self = irda_sk(sk); | 1662 | struct irda_sock *self = irda_sk(sk); |
1663 | 1663 | ||
1664 | IRDA_DEBUG(1, "%s(%p)\n", __FUNCTION__, self); | 1664 | IRDA_DEBUG(1, "%s(%p)\n", __func__, self); |
1665 | 1665 | ||
1666 | sk->sk_state = TCP_CLOSE; | 1666 | sk->sk_state = TCP_CLOSE; |
1667 | sk->sk_shutdown |= SEND_SHUTDOWN; | 1667 | sk->sk_shutdown |= SEND_SHUTDOWN; |
@@ -1696,7 +1696,7 @@ static unsigned int irda_poll(struct file * file, struct socket *sock, | |||
1696 | struct irda_sock *self = irda_sk(sk); | 1696 | struct irda_sock *self = irda_sk(sk); |
1697 | unsigned int mask; | 1697 | unsigned int mask; |
1698 | 1698 | ||
1699 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__); | 1699 | IRDA_DEBUG(4, "%s()\n", __func__); |
1700 | 1700 | ||
1701 | poll_wait(file, sk->sk_sleep, wait); | 1701 | poll_wait(file, sk->sk_sleep, wait); |
1702 | mask = 0; | 1702 | mask = 0; |
@@ -1705,7 +1705,7 @@ static unsigned int irda_poll(struct file * file, struct socket *sock, | |||
1705 | if (sk->sk_err) | 1705 | if (sk->sk_err) |
1706 | mask |= POLLERR; | 1706 | mask |= POLLERR; |
1707 | if (sk->sk_shutdown & RCV_SHUTDOWN) { | 1707 | if (sk->sk_shutdown & RCV_SHUTDOWN) { |
1708 | IRDA_DEBUG(0, "%s(), POLLHUP\n", __FUNCTION__); | 1708 | IRDA_DEBUG(0, "%s(), POLLHUP\n", __func__); |
1709 | mask |= POLLHUP; | 1709 | mask |= POLLHUP; |
1710 | } | 1710 | } |
1711 | 1711 | ||
@@ -1719,7 +1719,7 @@ static unsigned int irda_poll(struct file * file, struct socket *sock, | |||
1719 | switch (sk->sk_type) { | 1719 | switch (sk->sk_type) { |
1720 | case SOCK_STREAM: | 1720 | case SOCK_STREAM: |
1721 | if (sk->sk_state == TCP_CLOSE) { | 1721 | if (sk->sk_state == TCP_CLOSE) { |
1722 | IRDA_DEBUG(0, "%s(), POLLHUP\n", __FUNCTION__); | 1722 | IRDA_DEBUG(0, "%s(), POLLHUP\n", __func__); |
1723 | mask |= POLLHUP; | 1723 | mask |= POLLHUP; |
1724 | } | 1724 | } |
1725 | 1725 | ||
@@ -1755,7 +1755,7 @@ static int irda_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) | |||
1755 | { | 1755 | { |
1756 | struct sock *sk = sock->sk; | 1756 | struct sock *sk = sock->sk; |
1757 | 1757 | ||
1758 | IRDA_DEBUG(4, "%s(), cmd=%#x\n", __FUNCTION__, cmd); | 1758 | IRDA_DEBUG(4, "%s(), cmd=%#x\n", __func__, cmd); |
1759 | 1759 | ||
1760 | switch (cmd) { | 1760 | switch (cmd) { |
1761 | case TIOCOUTQ: { | 1761 | case TIOCOUTQ: { |
@@ -1796,7 +1796,7 @@ static int irda_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) | |||
1796 | case SIOCSIFMETRIC: | 1796 | case SIOCSIFMETRIC: |
1797 | return -EINVAL; | 1797 | return -EINVAL; |
1798 | default: | 1798 | default: |
1799 | IRDA_DEBUG(1, "%s(), doing device ioctl!\n", __FUNCTION__); | 1799 | IRDA_DEBUG(1, "%s(), doing device ioctl!\n", __func__); |
1800 | return -ENOIOCTLCMD; | 1800 | return -ENOIOCTLCMD; |
1801 | } | 1801 | } |
1802 | 1802 | ||
@@ -1833,7 +1833,7 @@ static int irda_setsockopt(struct socket *sock, int level, int optname, | |||
1833 | struct ias_attrib * ias_attr; /* Attribute in IAS object */ | 1833 | struct ias_attrib * ias_attr; /* Attribute in IAS object */ |
1834 | int opt, free_ias = 0; | 1834 | int opt, free_ias = 0; |
1835 | 1835 | ||
1836 | IRDA_DEBUG(2, "%s(%p)\n", __FUNCTION__, self); | 1836 | IRDA_DEBUG(2, "%s(%p)\n", __func__, self); |
1837 | 1837 | ||
1838 | if (level != SOL_IRLMP) | 1838 | if (level != SOL_IRLMP) |
1839 | return -ENOPROTOOPT; | 1839 | return -ENOPROTOOPT; |
@@ -2012,7 +2012,7 @@ static int irda_setsockopt(struct socket *sock, int level, int optname, | |||
2012 | 2012 | ||
2013 | /* Check is the user space own the object */ | 2013 | /* Check is the user space own the object */ |
2014 | if(ias_attr->value->owner != IAS_USER_ATTR) { | 2014 | if(ias_attr->value->owner != IAS_USER_ATTR) { |
2015 | IRDA_DEBUG(1, "%s(), attempting to delete a kernel attribute\n", __FUNCTION__); | 2015 | IRDA_DEBUG(1, "%s(), attempting to delete a kernel attribute\n", __func__); |
2016 | kfree(ias_opt); | 2016 | kfree(ias_opt); |
2017 | return -EPERM; | 2017 | return -EPERM; |
2018 | } | 2018 | } |
@@ -2031,11 +2031,11 @@ static int irda_setsockopt(struct socket *sock, int level, int optname, | |||
2031 | /* Only possible for a seqpacket service (TTP with SAR) */ | 2031 | /* Only possible for a seqpacket service (TTP with SAR) */ |
2032 | if (sk->sk_type != SOCK_SEQPACKET) { | 2032 | if (sk->sk_type != SOCK_SEQPACKET) { |
2033 | IRDA_DEBUG(2, "%s(), setting max_sdu_size = %d\n", | 2033 | IRDA_DEBUG(2, "%s(), setting max_sdu_size = %d\n", |
2034 | __FUNCTION__, opt); | 2034 | __func__, opt); |
2035 | self->max_sdu_size_rx = opt; | 2035 | self->max_sdu_size_rx = opt; |
2036 | } else { | 2036 | } else { |
2037 | IRDA_WARNING("%s: not allowed to set MAXSDUSIZE for this socket type!\n", | 2037 | IRDA_WARNING("%s: not allowed to set MAXSDUSIZE for this socket type!\n", |
2038 | __FUNCTION__); | 2038 | __func__); |
2039 | return -ENOPROTOOPT; | 2039 | return -ENOPROTOOPT; |
2040 | } | 2040 | } |
2041 | break; | 2041 | break; |
@@ -2149,7 +2149,7 @@ static int irda_getsockopt(struct socket *sock, int level, int optname, | |||
2149 | int err; | 2149 | int err; |
2150 | int offset, total; | 2150 | int offset, total; |
2151 | 2151 | ||
2152 | IRDA_DEBUG(2, "%s(%p)\n", __FUNCTION__, self); | 2152 | IRDA_DEBUG(2, "%s(%p)\n", __func__, self); |
2153 | 2153 | ||
2154 | if (level != SOL_IRLMP) | 2154 | if (level != SOL_IRLMP) |
2155 | return -ENOPROTOOPT; | 2155 | return -ENOPROTOOPT; |
@@ -2310,7 +2310,7 @@ bed: | |||
2310 | /* Check that we can proceed with IAP */ | 2310 | /* Check that we can proceed with IAP */ |
2311 | if (self->iriap) { | 2311 | if (self->iriap) { |
2312 | IRDA_WARNING("%s: busy with a previous query\n", | 2312 | IRDA_WARNING("%s: busy with a previous query\n", |
2313 | __FUNCTION__); | 2313 | __func__); |
2314 | kfree(ias_opt); | 2314 | kfree(ias_opt); |
2315 | return -EBUSY; | 2315 | return -EBUSY; |
2316 | } | 2316 | } |
@@ -2406,7 +2406,7 @@ bed: | |||
2406 | if (!self->cachedaddr) { | 2406 | if (!self->cachedaddr) { |
2407 | int ret = 0; | 2407 | int ret = 0; |
2408 | 2408 | ||
2409 | IRDA_DEBUG(1, "%s(), nothing discovered yet, going to sleep...\n", __FUNCTION__); | 2409 | IRDA_DEBUG(1, "%s(), nothing discovered yet, going to sleep...\n", __func__); |
2410 | 2410 | ||
2411 | /* Set watchdog timer to expire in <val> ms. */ | 2411 | /* Set watchdog timer to expire in <val> ms. */ |
2412 | self->errno = 0; | 2412 | self->errno = 0; |
@@ -2424,14 +2424,14 @@ bed: | |||
2424 | if(timer_pending(&(self->watchdog))) | 2424 | if(timer_pending(&(self->watchdog))) |
2425 | del_timer(&(self->watchdog)); | 2425 | del_timer(&(self->watchdog)); |
2426 | 2426 | ||
2427 | IRDA_DEBUG(1, "%s(), ...waking up !\n", __FUNCTION__); | 2427 | IRDA_DEBUG(1, "%s(), ...waking up !\n", __func__); |
2428 | 2428 | ||
2429 | if (ret != 0) | 2429 | if (ret != 0) |
2430 | return ret; | 2430 | return ret; |
2431 | } | 2431 | } |
2432 | else | 2432 | else |
2433 | IRDA_DEBUG(1, "%s(), found immediately !\n", | 2433 | IRDA_DEBUG(1, "%s(), found immediately !\n", |
2434 | __FUNCTION__); | 2434 | __func__); |
2435 | 2435 | ||
2436 | /* Tell IrLMP that we have been notified */ | 2436 | /* Tell IrLMP that we have been notified */ |
2437 | irlmp_update_client(self->ckey, self->mask.word, | 2437 | irlmp_update_client(self->ckey, self->mask.word, |
diff --git a/net/irda/discovery.c b/net/irda/discovery.c index 80c33f408e3f..bfacef8b76f4 100644 --- a/net/irda/discovery.c +++ b/net/irda/discovery.c | |||
@@ -110,7 +110,7 @@ void irlmp_add_discovery_log(hashbin_t *cachelog, hashbin_t *log) | |||
110 | { | 110 | { |
111 | discovery_t *discovery; | 111 | discovery_t *discovery; |
112 | 112 | ||
113 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__); | 113 | IRDA_DEBUG(4, "%s()\n", __func__); |
114 | 114 | ||
115 | /* | 115 | /* |
116 | * If log is missing this means that IrLAP was unable to perform the | 116 | * If log is missing this means that IrLAP was unable to perform the |
@@ -157,7 +157,7 @@ void irlmp_expire_discoveries(hashbin_t *log, __u32 saddr, int force) | |||
157 | int i = 0; /* How many we expired */ | 157 | int i = 0; /* How many we expired */ |
158 | 158 | ||
159 | IRDA_ASSERT(log != NULL, return;); | 159 | IRDA_ASSERT(log != NULL, return;); |
160 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__); | 160 | IRDA_DEBUG(4, "%s()\n", __func__); |
161 | 161 | ||
162 | spin_lock_irqsave(&log->hb_spinlock, flags); | 162 | spin_lock_irqsave(&log->hb_spinlock, flags); |
163 | 163 | ||
diff --git a/net/irda/ircomm/ircomm_core.c b/net/irda/ircomm/ircomm_core.c index 6eef1f2a7553..018c92941aba 100644 --- a/net/irda/ircomm/ircomm_core.c +++ b/net/irda/ircomm/ircomm_core.c | |||
@@ -70,7 +70,7 @@ static int __init ircomm_init(void) | |||
70 | { | 70 | { |
71 | ircomm = hashbin_new(HB_LOCK); | 71 | ircomm = hashbin_new(HB_LOCK); |
72 | if (ircomm == NULL) { | 72 | if (ircomm == NULL) { |
73 | IRDA_ERROR("%s(), can't allocate hashbin!\n", __FUNCTION__); | 73 | IRDA_ERROR("%s(), can't allocate hashbin!\n", __func__); |
74 | return -ENOMEM; | 74 | return -ENOMEM; |
75 | } | 75 | } |
76 | 76 | ||
@@ -91,7 +91,7 @@ static int __init ircomm_init(void) | |||
91 | 91 | ||
92 | static void __exit ircomm_cleanup(void) | 92 | static void __exit ircomm_cleanup(void) |
93 | { | 93 | { |
94 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); | 94 | IRDA_DEBUG(2, "%s()\n", __func__ ); |
95 | 95 | ||
96 | hashbin_delete(ircomm, (FREE_FUNC) __ircomm_close); | 96 | hashbin_delete(ircomm, (FREE_FUNC) __ircomm_close); |
97 | 97 | ||
@@ -111,7 +111,7 @@ struct ircomm_cb *ircomm_open(notify_t *notify, __u8 service_type, int line) | |||
111 | struct ircomm_cb *self = NULL; | 111 | struct ircomm_cb *self = NULL; |
112 | int ret; | 112 | int ret; |
113 | 113 | ||
114 | IRDA_DEBUG(2, "%s(), service_type=0x%02x\n", __FUNCTION__ , | 114 | IRDA_DEBUG(2, "%s(), service_type=0x%02x\n", __func__ , |
115 | service_type); | 115 | service_type); |
116 | 116 | ||
117 | IRDA_ASSERT(ircomm != NULL, return NULL;); | 117 | IRDA_ASSERT(ircomm != NULL, return NULL;); |
@@ -155,7 +155,7 @@ EXPORT_SYMBOL(ircomm_open); | |||
155 | */ | 155 | */ |
156 | static int __ircomm_close(struct ircomm_cb *self) | 156 | static int __ircomm_close(struct ircomm_cb *self) |
157 | { | 157 | { |
158 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); | 158 | IRDA_DEBUG(2, "%s()\n", __func__ ); |
159 | 159 | ||
160 | /* Disconnect link if any */ | 160 | /* Disconnect link if any */ |
161 | ircomm_do_event(self, IRCOMM_DISCONNECT_REQUEST, NULL, NULL); | 161 | ircomm_do_event(self, IRCOMM_DISCONNECT_REQUEST, NULL, NULL); |
@@ -191,7 +191,7 @@ int ircomm_close(struct ircomm_cb *self) | |||
191 | IRDA_ASSERT(self != NULL, return -EIO;); | 191 | IRDA_ASSERT(self != NULL, return -EIO;); |
192 | IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -EIO;); | 192 | IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -EIO;); |
193 | 193 | ||
194 | IRDA_DEBUG(0, "%s()\n", __FUNCTION__ ); | 194 | IRDA_DEBUG(0, "%s()\n", __func__ ); |
195 | 195 | ||
196 | entry = hashbin_remove(ircomm, self->line, NULL); | 196 | entry = hashbin_remove(ircomm, self->line, NULL); |
197 | 197 | ||
@@ -216,7 +216,7 @@ int ircomm_connect_request(struct ircomm_cb *self, __u8 dlsap_sel, | |||
216 | struct ircomm_info info; | 216 | struct ircomm_info info; |
217 | int ret; | 217 | int ret; |
218 | 218 | ||
219 | IRDA_DEBUG(2 , "%s()\n", __FUNCTION__ ); | 219 | IRDA_DEBUG(2 , "%s()\n", __func__ ); |
220 | 220 | ||
221 | IRDA_ASSERT(self != NULL, return -1;); | 221 | IRDA_ASSERT(self != NULL, return -1;); |
222 | IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -1;); | 222 | IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -1;); |
@@ -245,7 +245,7 @@ void ircomm_connect_indication(struct ircomm_cb *self, struct sk_buff *skb, | |||
245 | { | 245 | { |
246 | int clen = 0; | 246 | int clen = 0; |
247 | 247 | ||
248 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); | 248 | IRDA_DEBUG(2, "%s()\n", __func__ ); |
249 | 249 | ||
250 | /* Check if the packet contains data on the control channel */ | 250 | /* Check if the packet contains data on the control channel */ |
251 | if (skb->len > 0) | 251 | if (skb->len > 0) |
@@ -261,7 +261,7 @@ void ircomm_connect_indication(struct ircomm_cb *self, struct sk_buff *skb, | |||
261 | info->qos, info->max_data_size, | 261 | info->qos, info->max_data_size, |
262 | info->max_header_size, skb); | 262 | info->max_header_size, skb); |
263 | else { | 263 | else { |
264 | IRDA_DEBUG(0, "%s(), missing handler\n", __FUNCTION__ ); | 264 | IRDA_DEBUG(0, "%s(), missing handler\n", __func__ ); |
265 | } | 265 | } |
266 | } | 266 | } |
267 | 267 | ||
@@ -278,7 +278,7 @@ int ircomm_connect_response(struct ircomm_cb *self, struct sk_buff *userdata) | |||
278 | IRDA_ASSERT(self != NULL, return -1;); | 278 | IRDA_ASSERT(self != NULL, return -1;); |
279 | IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -1;); | 279 | IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -1;); |
280 | 280 | ||
281 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); | 281 | IRDA_DEBUG(4, "%s()\n", __func__ ); |
282 | 282 | ||
283 | ret = ircomm_do_event(self, IRCOMM_CONNECT_RESPONSE, userdata, NULL); | 283 | ret = ircomm_do_event(self, IRCOMM_CONNECT_RESPONSE, userdata, NULL); |
284 | 284 | ||
@@ -296,7 +296,7 @@ EXPORT_SYMBOL(ircomm_connect_response); | |||
296 | void ircomm_connect_confirm(struct ircomm_cb *self, struct sk_buff *skb, | 296 | void ircomm_connect_confirm(struct ircomm_cb *self, struct sk_buff *skb, |
297 | struct ircomm_info *info) | 297 | struct ircomm_info *info) |
298 | { | 298 | { |
299 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); | 299 | IRDA_DEBUG(4, "%s()\n", __func__ ); |
300 | 300 | ||
301 | if (self->notify.connect_confirm ) | 301 | if (self->notify.connect_confirm ) |
302 | self->notify.connect_confirm(self->notify.instance, | 302 | self->notify.connect_confirm(self->notify.instance, |
@@ -304,7 +304,7 @@ void ircomm_connect_confirm(struct ircomm_cb *self, struct sk_buff *skb, | |||
304 | info->max_data_size, | 304 | info->max_data_size, |
305 | info->max_header_size, skb); | 305 | info->max_header_size, skb); |
306 | else { | 306 | else { |
307 | IRDA_DEBUG(0, "%s(), missing handler\n", __FUNCTION__ ); | 307 | IRDA_DEBUG(0, "%s(), missing handler\n", __func__ ); |
308 | } | 308 | } |
309 | } | 309 | } |
310 | 310 | ||
@@ -318,7 +318,7 @@ int ircomm_data_request(struct ircomm_cb *self, struct sk_buff *skb) | |||
318 | { | 318 | { |
319 | int ret; | 319 | int ret; |
320 | 320 | ||
321 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); | 321 | IRDA_DEBUG(4, "%s()\n", __func__ ); |
322 | 322 | ||
323 | IRDA_ASSERT(self != NULL, return -EFAULT;); | 323 | IRDA_ASSERT(self != NULL, return -EFAULT;); |
324 | IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -EFAULT;); | 324 | IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -EFAULT;); |
@@ -339,14 +339,14 @@ EXPORT_SYMBOL(ircomm_data_request); | |||
339 | */ | 339 | */ |
340 | void ircomm_data_indication(struct ircomm_cb *self, struct sk_buff *skb) | 340 | void ircomm_data_indication(struct ircomm_cb *self, struct sk_buff *skb) |
341 | { | 341 | { |
342 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); | 342 | IRDA_DEBUG(4, "%s()\n", __func__ ); |
343 | 343 | ||
344 | IRDA_ASSERT(skb->len > 0, return;); | 344 | IRDA_ASSERT(skb->len > 0, return;); |
345 | 345 | ||
346 | if (self->notify.data_indication) | 346 | if (self->notify.data_indication) |
347 | self->notify.data_indication(self->notify.instance, self, skb); | 347 | self->notify.data_indication(self->notify.instance, self, skb); |
348 | else { | 348 | else { |
349 | IRDA_DEBUG(0, "%s(), missing handler\n", __FUNCTION__ ); | 349 | IRDA_DEBUG(0, "%s(), missing handler\n", __func__ ); |
350 | } | 350 | } |
351 | } | 351 | } |
352 | 352 | ||
@@ -372,7 +372,7 @@ void ircomm_process_data(struct ircomm_cb *self, struct sk_buff *skb) | |||
372 | */ | 372 | */ |
373 | if (unlikely(skb->len < (clen + 1))) { | 373 | if (unlikely(skb->len < (clen + 1))) { |
374 | IRDA_DEBUG(2, "%s() throwing away illegal frame\n", | 374 | IRDA_DEBUG(2, "%s() throwing away illegal frame\n", |
375 | __FUNCTION__ ); | 375 | __func__ ); |
376 | return; | 376 | return; |
377 | } | 377 | } |
378 | 378 | ||
@@ -391,7 +391,7 @@ void ircomm_process_data(struct ircomm_cb *self, struct sk_buff *skb) | |||
391 | ircomm_data_indication(self, skb); | 391 | ircomm_data_indication(self, skb); |
392 | else { | 392 | else { |
393 | IRDA_DEBUG(4, "%s(), data was control info only!\n", | 393 | IRDA_DEBUG(4, "%s(), data was control info only!\n", |
394 | __FUNCTION__ ); | 394 | __func__ ); |
395 | } | 395 | } |
396 | } | 396 | } |
397 | 397 | ||
@@ -405,7 +405,7 @@ int ircomm_control_request(struct ircomm_cb *self, struct sk_buff *skb) | |||
405 | { | 405 | { |
406 | int ret; | 406 | int ret; |
407 | 407 | ||
408 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); | 408 | IRDA_DEBUG(2, "%s()\n", __func__ ); |
409 | 409 | ||
410 | IRDA_ASSERT(self != NULL, return -EFAULT;); | 410 | IRDA_ASSERT(self != NULL, return -EFAULT;); |
411 | IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -EFAULT;); | 411 | IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -EFAULT;); |
@@ -427,7 +427,7 @@ EXPORT_SYMBOL(ircomm_control_request); | |||
427 | static void ircomm_control_indication(struct ircomm_cb *self, | 427 | static void ircomm_control_indication(struct ircomm_cb *self, |
428 | struct sk_buff *skb, int clen) | 428 | struct sk_buff *skb, int clen) |
429 | { | 429 | { |
430 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); | 430 | IRDA_DEBUG(2, "%s()\n", __func__ ); |
431 | 431 | ||
432 | /* Use udata for delivering data on the control channel */ | 432 | /* Use udata for delivering data on the control channel */ |
433 | if (self->notify.udata_indication) { | 433 | if (self->notify.udata_indication) { |
@@ -448,7 +448,7 @@ static void ircomm_control_indication(struct ircomm_cb *self, | |||
448 | * see ircomm_tty_control_indication(). */ | 448 | * see ircomm_tty_control_indication(). */ |
449 | dev_kfree_skb(ctrl_skb); | 449 | dev_kfree_skb(ctrl_skb); |
450 | } else { | 450 | } else { |
451 | IRDA_DEBUG(0, "%s(), missing handler\n", __FUNCTION__ ); | 451 | IRDA_DEBUG(0, "%s(), missing handler\n", __func__ ); |
452 | } | 452 | } |
453 | } | 453 | } |
454 | 454 | ||
@@ -463,7 +463,7 @@ int ircomm_disconnect_request(struct ircomm_cb *self, struct sk_buff *userdata) | |||
463 | struct ircomm_info info; | 463 | struct ircomm_info info; |
464 | int ret; | 464 | int ret; |
465 | 465 | ||
466 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); | 466 | IRDA_DEBUG(2, "%s()\n", __func__ ); |
467 | 467 | ||
468 | IRDA_ASSERT(self != NULL, return -1;); | 468 | IRDA_ASSERT(self != NULL, return -1;); |
469 | IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -1;); | 469 | IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -1;); |
@@ -484,7 +484,7 @@ EXPORT_SYMBOL(ircomm_disconnect_request); | |||
484 | void ircomm_disconnect_indication(struct ircomm_cb *self, struct sk_buff *skb, | 484 | void ircomm_disconnect_indication(struct ircomm_cb *self, struct sk_buff *skb, |
485 | struct ircomm_info *info) | 485 | struct ircomm_info *info) |
486 | { | 486 | { |
487 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); | 487 | IRDA_DEBUG(2, "%s()\n", __func__ ); |
488 | 488 | ||
489 | IRDA_ASSERT(info != NULL, return;); | 489 | IRDA_ASSERT(info != NULL, return;); |
490 | 490 | ||
@@ -492,7 +492,7 @@ void ircomm_disconnect_indication(struct ircomm_cb *self, struct sk_buff *skb, | |||
492 | self->notify.disconnect_indication(self->notify.instance, self, | 492 | self->notify.disconnect_indication(self->notify.instance, self, |
493 | info->reason, skb); | 493 | info->reason, skb); |
494 | } else { | 494 | } else { |
495 | IRDA_DEBUG(0, "%s(), missing handler\n", __FUNCTION__ ); | 495 | IRDA_DEBUG(0, "%s(), missing handler\n", __func__ ); |
496 | } | 496 | } |
497 | } | 497 | } |
498 | 498 | ||
@@ -504,7 +504,7 @@ void ircomm_disconnect_indication(struct ircomm_cb *self, struct sk_buff *skb, | |||
504 | */ | 504 | */ |
505 | void ircomm_flow_request(struct ircomm_cb *self, LOCAL_FLOW flow) | 505 | void ircomm_flow_request(struct ircomm_cb *self, LOCAL_FLOW flow) |
506 | { | 506 | { |
507 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); | 507 | IRDA_DEBUG(2, "%s()\n", __func__ ); |
508 | 508 | ||
509 | IRDA_ASSERT(self != NULL, return;); | 509 | IRDA_ASSERT(self != NULL, return;); |
510 | IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;); | 510 | IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;); |
diff --git a/net/irda/ircomm/ircomm_event.c b/net/irda/ircomm/ircomm_event.c
index 8ba4e59ece16..c35b3ef5c2f0 100644
--- a/net/irda/ircomm/ircomm_event.c
+++ b/net/irda/ircomm/ircomm_event.c
@@ -108,7 +108,7 @@ static int ircomm_state_idle(struct ircomm_cb *self, IRCOMM_EVENT event, | |||
108 | ircomm_connect_indication(self, skb, info); | 108 | ircomm_connect_indication(self, skb, info); |
109 | break; | 109 | break; |
110 | default: | 110 | default: |
111 | IRDA_DEBUG(4, "%s(), unknown event: %s\n", __FUNCTION__ , | 111 | IRDA_DEBUG(4, "%s(), unknown event: %s\n", __func__ , |
112 | ircomm_event[event]); | 112 | ircomm_event[event]); |
113 | ret = -EINVAL; | 113 | ret = -EINVAL; |
114 | } | 114 | } |
@@ -138,7 +138,7 @@ static int ircomm_state_waiti(struct ircomm_cb *self, IRCOMM_EVENT event, | |||
138 | ircomm_disconnect_indication(self, skb, info); | 138 | ircomm_disconnect_indication(self, skb, info); |
139 | break; | 139 | break; |
140 | default: | 140 | default: |
141 | IRDA_DEBUG(0, "%s(), unknown event: %s\n", __FUNCTION__ , | 141 | IRDA_DEBUG(0, "%s(), unknown event: %s\n", __func__ , |
142 | ircomm_event[event]); | 142 | ircomm_event[event]); |
143 | ret = -EINVAL; | 143 | ret = -EINVAL; |
144 | } | 144 | } |
@@ -171,7 +171,7 @@ static int ircomm_state_waitr(struct ircomm_cb *self, IRCOMM_EVENT event, | |||
171 | ircomm_disconnect_indication(self, skb, info); | 171 | ircomm_disconnect_indication(self, skb, info); |
172 | break; | 172 | break; |
173 | default: | 173 | default: |
174 | IRDA_DEBUG(0, "%s(), unknown event = %s\n", __FUNCTION__ , | 174 | IRDA_DEBUG(0, "%s(), unknown event = %s\n", __func__ , |
175 | ircomm_event[event]); | 175 | ircomm_event[event]); |
176 | ret = -EINVAL; | 176 | ret = -EINVAL; |
177 | } | 177 | } |
@@ -213,7 +213,7 @@ static int ircomm_state_conn(struct ircomm_cb *self, IRCOMM_EVENT event, | |||
213 | ret = self->issue.disconnect_request(self, skb, info); | 213 | ret = self->issue.disconnect_request(self, skb, info); |
214 | break; | 214 | break; |
215 | default: | 215 | default: |
216 | IRDA_DEBUG(0, "%s(), unknown event = %s\n", __FUNCTION__ , | 216 | IRDA_DEBUG(0, "%s(), unknown event = %s\n", __func__ , |
217 | ircomm_event[event]); | 217 | ircomm_event[event]); |
218 | ret = -EINVAL; | 218 | ret = -EINVAL; |
219 | } | 219 | } |
@@ -229,7 +229,7 @@ static int ircomm_state_conn(struct ircomm_cb *self, IRCOMM_EVENT event, | |||
229 | int ircomm_do_event(struct ircomm_cb *self, IRCOMM_EVENT event, | 229 | int ircomm_do_event(struct ircomm_cb *self, IRCOMM_EVENT event, |
230 | struct sk_buff *skb, struct ircomm_info *info) | 230 | struct sk_buff *skb, struct ircomm_info *info) |
231 | { | 231 | { |
232 | IRDA_DEBUG(4, "%s: state=%s, event=%s\n", __FUNCTION__ , | 232 | IRDA_DEBUG(4, "%s: state=%s, event=%s\n", __func__ , |
233 | ircomm_state[self->state], ircomm_event[event]); | 233 | ircomm_state[self->state], ircomm_event[event]); |
234 | 234 | ||
235 | return (*state[self->state])(self, event, skb, info); | 235 | return (*state[self->state])(self, event, skb, info); |
@@ -245,6 +245,6 @@ void ircomm_next_state(struct ircomm_cb *self, IRCOMM_STATE state) | |||
245 | { | 245 | { |
246 | self->state = state; | 246 | self->state = state; |
247 | 247 | ||
248 | IRDA_DEBUG(4, "%s: next state=%s, service type=%d\n", __FUNCTION__ , | 248 | IRDA_DEBUG(4, "%s: next state=%s, service type=%d\n", __func__ , |
249 | ircomm_state[self->state], self->service_type); | 249 | ircomm_state[self->state], self->service_type); |
250 | } | 250 | } |
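The hunks in these IrCOMM files all make the same substitution: the GCC-specific __FUNCTION__ identifier is replaced with __func__, the predefined identifier that C99 guarantees inside every function body (GCC treats __FUNCTION__ as another name for it). Below is a minimal standalone sketch of the idiom, not taken from this patch — demo_debug() and demo_open() are made-up names for illustration only:

#include <stdio.h>

/* Toy stand-in for IRDA_DEBUG(): prints the name of the calling function. */
static void demo_debug(const char *fn, int line)
{
	printf("%s(), opening line %d\n", fn, line);
}

static int demo_open(int line)
{
	/* __func__ expands to "demo_open" here; the older GCC spelling
	 * __FUNCTION__ names the same string. */
	demo_debug(__func__, line);
	return 0;
}

int main(void)
{
	return demo_open(0);
}

Any C99 compiler, and GCC in its GNU dialect modes, accepts __func__, so the change simply standardizes on the portable spelling without altering behavior.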
diff --git a/net/irda/ircomm/ircomm_lmp.c b/net/irda/ircomm/ircomm_lmp.c
index 55860ee4e39e..67c99d20857f 100644
--- a/net/irda/ircomm/ircomm_lmp.c
+++ b/net/irda/ircomm/ircomm_lmp.c
@@ -53,7 +53,7 @@ static int ircomm_lmp_connect_request(struct ircomm_cb *self, | |||
53 | { | 53 | { |
54 | int ret = 0; | 54 | int ret = 0; |
55 | 55 | ||
56 | IRDA_DEBUG(0, "%s()\n", __FUNCTION__ ); | 56 | IRDA_DEBUG(0, "%s()\n", __func__ ); |
57 | 57 | ||
58 | /* Don't forget to refcount it - should be NULL anyway */ | 58 | /* Don't forget to refcount it - should be NULL anyway */ |
59 | if(userdata) | 59 | if(userdata) |
@@ -76,7 +76,7 @@ static int ircomm_lmp_connect_response(struct ircomm_cb *self, | |||
76 | struct sk_buff *tx_skb; | 76 | struct sk_buff *tx_skb; |
77 | int ret; | 77 | int ret; |
78 | 78 | ||
79 | IRDA_DEBUG(0, "%s()\n", __FUNCTION__ ); | 79 | IRDA_DEBUG(0, "%s()\n", __func__ ); |
80 | 80 | ||
81 | /* Any userdata supplied? */ | 81 | /* Any userdata supplied? */ |
82 | if (userdata == NULL) { | 82 | if (userdata == NULL) { |
@@ -111,7 +111,7 @@ static int ircomm_lmp_disconnect_request(struct ircomm_cb *self, | |||
111 | struct sk_buff *tx_skb; | 111 | struct sk_buff *tx_skb; |
112 | int ret; | 112 | int ret; |
113 | 113 | ||
114 | IRDA_DEBUG(0, "%s()\n", __FUNCTION__ ); | 114 | IRDA_DEBUG(0, "%s()\n", __func__ ); |
115 | 115 | ||
116 | if (!userdata) { | 116 | if (!userdata) { |
117 | tx_skb = alloc_skb(LMP_MAX_HEADER, GFP_ATOMIC); | 117 | tx_skb = alloc_skb(LMP_MAX_HEADER, GFP_ATOMIC); |
@@ -148,13 +148,13 @@ static void ircomm_lmp_flow_control(struct sk_buff *skb) | |||
148 | 148 | ||
149 | cb = (struct irda_skb_cb *) skb->cb; | 149 | cb = (struct irda_skb_cb *) skb->cb; |
150 | 150 | ||
151 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); | 151 | IRDA_DEBUG(2, "%s()\n", __func__ ); |
152 | 152 | ||
153 | line = cb->line; | 153 | line = cb->line; |
154 | 154 | ||
155 | self = (struct ircomm_cb *) hashbin_lock_find(ircomm, line, NULL); | 155 | self = (struct ircomm_cb *) hashbin_lock_find(ircomm, line, NULL); |
156 | if (!self) { | 156 | if (!self) { |
157 | IRDA_DEBUG(2, "%s(), didn't find myself\n", __FUNCTION__ ); | 157 | IRDA_DEBUG(2, "%s(), didn't find myself\n", __func__ ); |
158 | return; | 158 | return; |
159 | } | 159 | } |
160 | 160 | ||
@@ -164,7 +164,7 @@ static void ircomm_lmp_flow_control(struct sk_buff *skb) | |||
164 | self->pkt_count--; | 164 | self->pkt_count--; |
165 | 165 | ||
166 | if ((self->pkt_count < 2) && (self->flow_status == FLOW_STOP)) { | 166 | if ((self->pkt_count < 2) && (self->flow_status == FLOW_STOP)) { |
167 | IRDA_DEBUG(2, "%s(), asking TTY to start again!\n", __FUNCTION__ ); | 167 | IRDA_DEBUG(2, "%s(), asking TTY to start again!\n", __func__ ); |
168 | self->flow_status = FLOW_START; | 168 | self->flow_status = FLOW_START; |
169 | if (self->notify.flow_indication) | 169 | if (self->notify.flow_indication) |
170 | self->notify.flow_indication(self->notify.instance, | 170 | self->notify.flow_indication(self->notify.instance, |
@@ -191,7 +191,7 @@ static int ircomm_lmp_data_request(struct ircomm_cb *self, | |||
191 | 191 | ||
192 | cb->line = self->line; | 192 | cb->line = self->line; |
193 | 193 | ||
194 | IRDA_DEBUG(4, "%s(), sending frame\n", __FUNCTION__ ); | 194 | IRDA_DEBUG(4, "%s(), sending frame\n", __func__ ); |
195 | 195 | ||
196 | /* Don't forget to refcount it - see ircomm_tty_do_softint() */ | 196 | /* Don't forget to refcount it - see ircomm_tty_do_softint() */ |
197 | skb_get(skb); | 197 | skb_get(skb); |
@@ -199,7 +199,7 @@ static int ircomm_lmp_data_request(struct ircomm_cb *self, | |||
199 | skb->destructor = ircomm_lmp_flow_control; | 199 | skb->destructor = ircomm_lmp_flow_control; |
200 | 200 | ||
201 | if ((self->pkt_count++ > 7) && (self->flow_status == FLOW_START)) { | 201 | if ((self->pkt_count++ > 7) && (self->flow_status == FLOW_START)) { |
202 | IRDA_DEBUG(2, "%s(), asking TTY to slow down!\n", __FUNCTION__ ); | 202 | IRDA_DEBUG(2, "%s(), asking TTY to slow down!\n", __func__ ); |
203 | self->flow_status = FLOW_STOP; | 203 | self->flow_status = FLOW_STOP; |
204 | if (self->notify.flow_indication) | 204 | if (self->notify.flow_indication) |
205 | self->notify.flow_indication(self->notify.instance, | 205 | self->notify.flow_indication(self->notify.instance, |
@@ -207,7 +207,7 @@ static int ircomm_lmp_data_request(struct ircomm_cb *self, | |||
207 | } | 207 | } |
208 | ret = irlmp_data_request(self->lsap, skb); | 208 | ret = irlmp_data_request(self->lsap, skb); |
209 | if (ret) { | 209 | if (ret) { |
210 | IRDA_ERROR("%s(), failed\n", __FUNCTION__); | 210 | IRDA_ERROR("%s(), failed\n", __func__); |
211 | /* irlmp_data_request already free the packet */ | 211 | /* irlmp_data_request already free the packet */ |
212 | } | 212 | } |
213 | 213 | ||
@@ -225,7 +225,7 @@ static int ircomm_lmp_data_indication(void *instance, void *sap, | |||
225 | { | 225 | { |
226 | struct ircomm_cb *self = (struct ircomm_cb *) instance; | 226 | struct ircomm_cb *self = (struct ircomm_cb *) instance; |
227 | 227 | ||
228 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); | 228 | IRDA_DEBUG(4, "%s()\n", __func__ ); |
229 | 229 | ||
230 | IRDA_ASSERT(self != NULL, return -1;); | 230 | IRDA_ASSERT(self != NULL, return -1;); |
231 | IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -1;); | 231 | IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -1;); |
@@ -255,7 +255,7 @@ static void ircomm_lmp_connect_confirm(void *instance, void *sap, | |||
255 | struct ircomm_cb *self = (struct ircomm_cb *) instance; | 255 | struct ircomm_cb *self = (struct ircomm_cb *) instance; |
256 | struct ircomm_info info; | 256 | struct ircomm_info info; |
257 | 257 | ||
258 | IRDA_DEBUG(0, "%s()\n", __FUNCTION__ ); | 258 | IRDA_DEBUG(0, "%s()\n", __func__ ); |
259 | 259 | ||
260 | IRDA_ASSERT(self != NULL, return;); | 260 | IRDA_ASSERT(self != NULL, return;); |
261 | IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;); | 261 | IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;); |
@@ -288,7 +288,7 @@ static void ircomm_lmp_connect_indication(void *instance, void *sap, | |||
288 | struct ircomm_cb *self = (struct ircomm_cb *)instance; | 288 | struct ircomm_cb *self = (struct ircomm_cb *)instance; |
289 | struct ircomm_info info; | 289 | struct ircomm_info info; |
290 | 290 | ||
291 | IRDA_DEBUG(0, "%s()\n", __FUNCTION__ ); | 291 | IRDA_DEBUG(0, "%s()\n", __func__ ); |
292 | 292 | ||
293 | IRDA_ASSERT(self != NULL, return;); | 293 | IRDA_ASSERT(self != NULL, return;); |
294 | IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;); | 294 | IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;); |
@@ -318,7 +318,7 @@ static void ircomm_lmp_disconnect_indication(void *instance, void *sap, | |||
318 | struct ircomm_cb *self = (struct ircomm_cb *) instance; | 318 | struct ircomm_cb *self = (struct ircomm_cb *) instance; |
319 | struct ircomm_info info; | 319 | struct ircomm_info info; |
320 | 320 | ||
321 | IRDA_DEBUG(0, "%s()\n", __FUNCTION__ ); | 321 | IRDA_DEBUG(0, "%s()\n", __func__ ); |
322 | 322 | ||
323 | IRDA_ASSERT(self != NULL, return;); | 323 | IRDA_ASSERT(self != NULL, return;); |
324 | IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;); | 324 | IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;); |
@@ -341,7 +341,7 @@ int ircomm_open_lsap(struct ircomm_cb *self) | |||
341 | { | 341 | { |
342 | notify_t notify; | 342 | notify_t notify; |
343 | 343 | ||
344 | IRDA_DEBUG(0, "%s()\n", __FUNCTION__ ); | 344 | IRDA_DEBUG(0, "%s()\n", __func__ ); |
345 | 345 | ||
346 | /* Register callbacks */ | 346 | /* Register callbacks */ |
347 | irda_notify_init(¬ify); | 347 | irda_notify_init(¬ify); |
@@ -354,7 +354,7 @@ int ircomm_open_lsap(struct ircomm_cb *self) | |||
354 | 354 | ||
355 | self->lsap = irlmp_open_lsap(LSAP_ANY, ¬ify, 0); | 355 | self->lsap = irlmp_open_lsap(LSAP_ANY, ¬ify, 0); |
356 | if (!self->lsap) { | 356 | if (!self->lsap) { |
357 | IRDA_DEBUG(0,"%sfailed to allocate tsap\n", __FUNCTION__ ); | 357 | IRDA_DEBUG(0,"%sfailed to allocate tsap\n", __func__ ); |
358 | return -1; | 358 | return -1; |
359 | } | 359 | } |
360 | self->slsap_sel = self->lsap->slsap_sel; | 360 | self->slsap_sel = self->lsap->slsap_sel; |
diff --git a/net/irda/ircomm/ircomm_param.c b/net/irda/ircomm/ircomm_param.c
index 598dcbe4a501..d57aefd9fe77 100644
--- a/net/irda/ircomm/ircomm_param.c
+++ b/net/irda/ircomm/ircomm_param.c
@@ -103,7 +103,7 @@ int ircomm_param_request(struct ircomm_tty_cb *self, __u8 pi, int flush) | |||
103 | struct sk_buff *skb; | 103 | struct sk_buff *skb; |
104 | int count; | 104 | int count; |
105 | 105 | ||
106 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); | 106 | IRDA_DEBUG(2, "%s()\n", __func__ ); |
107 | 107 | ||
108 | IRDA_ASSERT(self != NULL, return -1;); | 108 | IRDA_ASSERT(self != NULL, return -1;); |
109 | IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); | 109 | IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); |
@@ -136,7 +136,7 @@ int ircomm_param_request(struct ircomm_tty_cb *self, __u8 pi, int flush) | |||
136 | count = irda_param_insert(self, pi, skb_tail_pointer(skb), | 136 | count = irda_param_insert(self, pi, skb_tail_pointer(skb), |
137 | skb_tailroom(skb), &ircomm_param_info); | 137 | skb_tailroom(skb), &ircomm_param_info); |
138 | if (count < 0) { | 138 | if (count < 0) { |
139 | IRDA_WARNING("%s(), no room for parameter!\n", __FUNCTION__); | 139 | IRDA_WARNING("%s(), no room for parameter!\n", __func__); |
140 | spin_unlock_irqrestore(&self->spinlock, flags); | 140 | spin_unlock_irqrestore(&self->spinlock, flags); |
141 | return -1; | 141 | return -1; |
142 | } | 142 | } |
@@ -144,7 +144,7 @@ int ircomm_param_request(struct ircomm_tty_cb *self, __u8 pi, int flush) | |||
144 | 144 | ||
145 | spin_unlock_irqrestore(&self->spinlock, flags); | 145 | spin_unlock_irqrestore(&self->spinlock, flags); |
146 | 146 | ||
147 | IRDA_DEBUG(2, "%s(), skb->len=%d\n", __FUNCTION__ , skb->len); | 147 | IRDA_DEBUG(2, "%s(), skb->len=%d\n", __func__ , skb->len); |
148 | 148 | ||
149 | if (flush) { | 149 | if (flush) { |
150 | /* ircomm_tty_do_softint will take care of the rest */ | 150 | /* ircomm_tty_do_softint will take care of the rest */ |
@@ -179,10 +179,10 @@ static int ircomm_param_service_type(void *instance, irda_param_t *param, | |||
179 | service_type &= self->service_type; | 179 | service_type &= self->service_type; |
180 | if (!service_type) { | 180 | if (!service_type) { |
181 | IRDA_DEBUG(2, | 181 | IRDA_DEBUG(2, |
182 | "%s(), No common service type to use!\n", __FUNCTION__ ); | 182 | "%s(), No common service type to use!\n", __func__ ); |
183 | return -1; | 183 | return -1; |
184 | } | 184 | } |
185 | IRDA_DEBUG(0, "%s(), services in common=%02x\n", __FUNCTION__ , | 185 | IRDA_DEBUG(0, "%s(), services in common=%02x\n", __func__ , |
186 | service_type); | 186 | service_type); |
187 | 187 | ||
188 | /* | 188 | /* |
@@ -197,7 +197,7 @@ static int ircomm_param_service_type(void *instance, irda_param_t *param, | |||
197 | else if (service_type & IRCOMM_3_WIRE_RAW) | 197 | else if (service_type & IRCOMM_3_WIRE_RAW) |
198 | self->settings.service_type = IRCOMM_3_WIRE_RAW; | 198 | self->settings.service_type = IRCOMM_3_WIRE_RAW; |
199 | 199 | ||
200 | IRDA_DEBUG(0, "%s(), resulting service type=0x%02x\n", __FUNCTION__ , | 200 | IRDA_DEBUG(0, "%s(), resulting service type=0x%02x\n", __func__ , |
201 | self->settings.service_type); | 201 | self->settings.service_type); |
202 | 202 | ||
203 | /* | 203 | /* |
@@ -240,7 +240,7 @@ static int ircomm_param_port_type(void *instance, irda_param_t *param, int get) | |||
240 | else { | 240 | else { |
241 | self->settings.port_type = (__u8) param->pv.i; | 241 | self->settings.port_type = (__u8) param->pv.i; |
242 | 242 | ||
243 | IRDA_DEBUG(0, "%s(), port type=%d\n", __FUNCTION__ , | 243 | IRDA_DEBUG(0, "%s(), port type=%d\n", __func__ , |
244 | self->settings.port_type); | 244 | self->settings.port_type); |
245 | } | 245 | } |
246 | return 0; | 246 | return 0; |
@@ -260,9 +260,9 @@ static int ircomm_param_port_name(void *instance, irda_param_t *param, int get) | |||
260 | IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); | 260 | IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); |
261 | 261 | ||
262 | if (get) { | 262 | if (get) { |
263 | IRDA_DEBUG(0, "%s(), not imp!\n", __FUNCTION__ ); | 263 | IRDA_DEBUG(0, "%s(), not imp!\n", __func__ ); |
264 | } else { | 264 | } else { |
265 | IRDA_DEBUG(0, "%s(), port-name=%s\n", __FUNCTION__ , param->pv.c); | 265 | IRDA_DEBUG(0, "%s(), port-name=%s\n", __func__ , param->pv.c); |
266 | strncpy(self->settings.port_name, param->pv.c, 32); | 266 | strncpy(self->settings.port_name, param->pv.c, 32); |
267 | } | 267 | } |
268 | 268 | ||
@@ -287,7 +287,7 @@ static int ircomm_param_data_rate(void *instance, irda_param_t *param, int get) | |||
287 | else | 287 | else |
288 | self->settings.data_rate = param->pv.i; | 288 | self->settings.data_rate = param->pv.i; |
289 | 289 | ||
290 | IRDA_DEBUG(2, "%s(), data rate = %d\n", __FUNCTION__ , param->pv.i); | 290 | IRDA_DEBUG(2, "%s(), data rate = %d\n", __func__ , param->pv.i); |
291 | 291 | ||
292 | return 0; | 292 | return 0; |
293 | } | 293 | } |
@@ -333,7 +333,7 @@ static int ircomm_param_flow_control(void *instance, irda_param_t *param, | |||
333 | else | 333 | else |
334 | self->settings.flow_control = (__u8) param->pv.i; | 334 | self->settings.flow_control = (__u8) param->pv.i; |
335 | 335 | ||
336 | IRDA_DEBUG(1, "%s(), flow control = 0x%02x\n", __FUNCTION__ , (__u8) param->pv.i); | 336 | IRDA_DEBUG(1, "%s(), flow control = 0x%02x\n", __func__ , (__u8) param->pv.i); |
337 | 337 | ||
338 | return 0; | 338 | return 0; |
339 | } | 339 | } |
@@ -359,7 +359,7 @@ static int ircomm_param_xon_xoff(void *instance, irda_param_t *param, int get) | |||
359 | self->settings.xonxoff[1] = (__u16) param->pv.i >> 8; | 359 | self->settings.xonxoff[1] = (__u16) param->pv.i >> 8; |
360 | } | 360 | } |
361 | 361 | ||
362 | IRDA_DEBUG(0, "%s(), XON/XOFF = 0x%02x,0x%02x\n", __FUNCTION__ , | 362 | IRDA_DEBUG(0, "%s(), XON/XOFF = 0x%02x,0x%02x\n", __func__ , |
363 | param->pv.i & 0xff, param->pv.i >> 8); | 363 | param->pv.i & 0xff, param->pv.i >> 8); |
364 | 364 | ||
365 | return 0; | 365 | return 0; |
@@ -386,7 +386,7 @@ static int ircomm_param_enq_ack(void *instance, irda_param_t *param, int get) | |||
386 | self->settings.enqack[1] = (__u16) param->pv.i >> 8; | 386 | self->settings.enqack[1] = (__u16) param->pv.i >> 8; |
387 | } | 387 | } |
388 | 388 | ||
389 | IRDA_DEBUG(0, "%s(), ENQ/ACK = 0x%02x,0x%02x\n", __FUNCTION__ , | 389 | IRDA_DEBUG(0, "%s(), ENQ/ACK = 0x%02x,0x%02x\n", __func__ , |
390 | param->pv.i & 0xff, param->pv.i >> 8); | 390 | param->pv.i & 0xff, param->pv.i >> 8); |
391 | 391 | ||
392 | return 0; | 392 | return 0; |
@@ -401,7 +401,7 @@ static int ircomm_param_enq_ack(void *instance, irda_param_t *param, int get) | |||
401 | static int ircomm_param_line_status(void *instance, irda_param_t *param, | 401 | static int ircomm_param_line_status(void *instance, irda_param_t *param, |
402 | int get) | 402 | int get) |
403 | { | 403 | { |
404 | IRDA_DEBUG(2, "%s(), not impl.\n", __FUNCTION__ ); | 404 | IRDA_DEBUG(2, "%s(), not impl.\n", __func__ ); |
405 | 405 | ||
406 | return 0; | 406 | return 0; |
407 | } | 407 | } |
@@ -462,7 +462,7 @@ static int ircomm_param_dce(void *instance, irda_param_t *param, int get) | |||
462 | struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance; | 462 | struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance; |
463 | __u8 dce; | 463 | __u8 dce; |
464 | 464 | ||
465 | IRDA_DEBUG(1, "%s(), dce = 0x%02x\n", __FUNCTION__ , (__u8) param->pv.i); | 465 | IRDA_DEBUG(1, "%s(), dce = 0x%02x\n", __func__ , (__u8) param->pv.i); |
466 | 466 | ||
467 | dce = (__u8) param->pv.i; | 467 | dce = (__u8) param->pv.i; |
468 | 468 | ||
@@ -474,7 +474,7 @@ static int ircomm_param_dce(void *instance, irda_param_t *param, int get) | |||
474 | /* Check if any of the settings have changed */ | 474 | /* Check if any of the settings have changed */ |
475 | if (dce & 0x0f) { | 475 | if (dce & 0x0f) { |
476 | if (dce & IRCOMM_DELTA_CTS) { | 476 | if (dce & IRCOMM_DELTA_CTS) { |
477 | IRDA_DEBUG(2, "%s(), CTS \n", __FUNCTION__ ); | 477 | IRDA_DEBUG(2, "%s(), CTS \n", __func__ ); |
478 | } | 478 | } |
479 | } | 479 | } |
480 | 480 | ||
diff --git a/net/irda/ircomm/ircomm_ttp.c b/net/irda/ircomm/ircomm_ttp.c
index 712eafd0cc76..6e6509f22f60 100644
--- a/net/irda/ircomm/ircomm_ttp.c
+++ b/net/irda/ircomm/ircomm_ttp.c
@@ -78,7 +78,7 @@ int ircomm_open_tsap(struct ircomm_cb *self) | |||
78 | { | 78 | { |
79 | notify_t notify; | 79 | notify_t notify; |
80 | 80 | ||
81 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); | 81 | IRDA_DEBUG(4, "%s()\n", __func__ ); |
82 | 82 | ||
83 | /* Register callbacks */ | 83 | /* Register callbacks */ |
84 | irda_notify_init(¬ify); | 84 | irda_notify_init(¬ify); |
@@ -93,7 +93,7 @@ int ircomm_open_tsap(struct ircomm_cb *self) | |||
93 | self->tsap = irttp_open_tsap(LSAP_ANY, DEFAULT_INITIAL_CREDIT, | 93 | self->tsap = irttp_open_tsap(LSAP_ANY, DEFAULT_INITIAL_CREDIT, |
94 | ¬ify); | 94 | ¬ify); |
95 | if (!self->tsap) { | 95 | if (!self->tsap) { |
96 | IRDA_DEBUG(0, "%sfailed to allocate tsap\n", __FUNCTION__ ); | 96 | IRDA_DEBUG(0, "%sfailed to allocate tsap\n", __func__ ); |
97 | return -1; | 97 | return -1; |
98 | } | 98 | } |
99 | self->slsap_sel = self->tsap->stsap_sel; | 99 | self->slsap_sel = self->tsap->stsap_sel; |
@@ -121,7 +121,7 @@ static int ircomm_ttp_connect_request(struct ircomm_cb *self, | |||
121 | { | 121 | { |
122 | int ret = 0; | 122 | int ret = 0; |
123 | 123 | ||
124 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); | 124 | IRDA_DEBUG(4, "%s()\n", __func__ ); |
125 | 125 | ||
126 | /* Don't forget to refcount it - should be NULL anyway */ | 126 | /* Don't forget to refcount it - should be NULL anyway */ |
127 | if(userdata) | 127 | if(userdata) |
@@ -145,7 +145,7 @@ static int ircomm_ttp_connect_response(struct ircomm_cb *self, | |||
145 | { | 145 | { |
146 | int ret; | 146 | int ret; |
147 | 147 | ||
148 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); | 148 | IRDA_DEBUG(4, "%s()\n", __func__ ); |
149 | 149 | ||
150 | /* Don't forget to refcount it - should be NULL anyway */ | 150 | /* Don't forget to refcount it - should be NULL anyway */ |
151 | if(userdata) | 151 | if(userdata) |
@@ -173,7 +173,7 @@ static int ircomm_ttp_data_request(struct ircomm_cb *self, | |||
173 | 173 | ||
174 | IRDA_ASSERT(skb != NULL, return -1;); | 174 | IRDA_ASSERT(skb != NULL, return -1;); |
175 | 175 | ||
176 | IRDA_DEBUG(2, "%s(), clen=%d\n", __FUNCTION__ , clen); | 176 | IRDA_DEBUG(2, "%s(), clen=%d\n", __func__ , clen); |
177 | 177 | ||
178 | /* | 178 | /* |
179 | * Insert clen field, currently we either send data only, or control | 179 | * Insert clen field, currently we either send data only, or control |
@@ -190,7 +190,7 @@ static int ircomm_ttp_data_request(struct ircomm_cb *self, | |||
190 | 190 | ||
191 | ret = irttp_data_request(self->tsap, skb); | 191 | ret = irttp_data_request(self->tsap, skb); |
192 | if (ret) { | 192 | if (ret) { |
193 | IRDA_ERROR("%s(), failed\n", __FUNCTION__); | 193 | IRDA_ERROR("%s(), failed\n", __func__); |
194 | /* irttp_data_request already free the packet */ | 194 | /* irttp_data_request already free the packet */ |
195 | } | 195 | } |
196 | 196 | ||
@@ -208,7 +208,7 @@ static int ircomm_ttp_data_indication(void *instance, void *sap, | |||
208 | { | 208 | { |
209 | struct ircomm_cb *self = (struct ircomm_cb *) instance; | 209 | struct ircomm_cb *self = (struct ircomm_cb *) instance; |
210 | 210 | ||
211 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); | 211 | IRDA_DEBUG(4, "%s()\n", __func__ ); |
212 | 212 | ||
213 | IRDA_ASSERT(self != NULL, return -1;); | 213 | IRDA_ASSERT(self != NULL, return -1;); |
214 | IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -1;); | 214 | IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -1;); |
@@ -231,7 +231,7 @@ static void ircomm_ttp_connect_confirm(void *instance, void *sap, | |||
231 | struct ircomm_cb *self = (struct ircomm_cb *) instance; | 231 | struct ircomm_cb *self = (struct ircomm_cb *) instance; |
232 | struct ircomm_info info; | 232 | struct ircomm_info info; |
233 | 233 | ||
234 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); | 234 | IRDA_DEBUG(4, "%s()\n", __func__ ); |
235 | 235 | ||
236 | IRDA_ASSERT(self != NULL, return;); | 236 | IRDA_ASSERT(self != NULL, return;); |
237 | IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;); | 237 | IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;); |
@@ -240,7 +240,7 @@ static void ircomm_ttp_connect_confirm(void *instance, void *sap, | |||
240 | 240 | ||
241 | if (max_sdu_size != TTP_SAR_DISABLE) { | 241 | if (max_sdu_size != TTP_SAR_DISABLE) { |
242 | IRDA_ERROR("%s(), SAR not allowed for IrCOMM!\n", | 242 | IRDA_ERROR("%s(), SAR not allowed for IrCOMM!\n", |
243 | __FUNCTION__); | 243 | __func__); |
244 | goto out; | 244 | goto out; |
245 | } | 245 | } |
246 | 246 | ||
@@ -272,7 +272,7 @@ static void ircomm_ttp_connect_indication(void *instance, void *sap, | |||
272 | struct ircomm_cb *self = (struct ircomm_cb *)instance; | 272 | struct ircomm_cb *self = (struct ircomm_cb *)instance; |
273 | struct ircomm_info info; | 273 | struct ircomm_info info; |
274 | 274 | ||
275 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); | 275 | IRDA_DEBUG(4, "%s()\n", __func__ ); |
276 | 276 | ||
277 | IRDA_ASSERT(self != NULL, return;); | 277 | IRDA_ASSERT(self != NULL, return;); |
278 | IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;); | 278 | IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;); |
@@ -281,7 +281,7 @@ static void ircomm_ttp_connect_indication(void *instance, void *sap, | |||
281 | 281 | ||
282 | if (max_sdu_size != TTP_SAR_DISABLE) { | 282 | if (max_sdu_size != TTP_SAR_DISABLE) { |
283 | IRDA_ERROR("%s(), SAR not allowed for IrCOMM!\n", | 283 | IRDA_ERROR("%s(), SAR not allowed for IrCOMM!\n", |
284 | __FUNCTION__); | 284 | __func__); |
285 | goto out; | 285 | goto out; |
286 | } | 286 | } |
287 | 287 | ||
@@ -331,7 +331,7 @@ static void ircomm_ttp_disconnect_indication(void *instance, void *sap, | |||
331 | struct ircomm_cb *self = (struct ircomm_cb *) instance; | 331 | struct ircomm_cb *self = (struct ircomm_cb *) instance; |
332 | struct ircomm_info info; | 332 | struct ircomm_info info; |
333 | 333 | ||
334 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); | 334 | IRDA_DEBUG(2, "%s()\n", __func__ ); |
335 | 335 | ||
336 | IRDA_ASSERT(self != NULL, return;); | 336 | IRDA_ASSERT(self != NULL, return;); |
337 | IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;); | 337 | IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;); |
@@ -356,7 +356,7 @@ static void ircomm_ttp_flow_indication(void *instance, void *sap, | |||
356 | { | 356 | { |
357 | struct ircomm_cb *self = (struct ircomm_cb *) instance; | 357 | struct ircomm_cb *self = (struct ircomm_cb *) instance; |
358 | 358 | ||
359 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); | 359 | IRDA_DEBUG(4, "%s()\n", __func__ ); |
360 | 360 | ||
361 | IRDA_ASSERT(self != NULL, return;); | 361 | IRDA_ASSERT(self != NULL, return;); |
362 | IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;); | 362 | IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;); |
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
index be627e1f04d8..d2620410cb0a 100644
--- a/net/irda/ircomm/ircomm_tty.c
+++ b/net/irda/ircomm/ircomm_tty.c
@@ -115,7 +115,7 @@ static int __init ircomm_tty_init(void) | |||
115 | return -ENOMEM; | 115 | return -ENOMEM; |
116 | ircomm_tty = hashbin_new(HB_LOCK); | 116 | ircomm_tty = hashbin_new(HB_LOCK); |
117 | if (ircomm_tty == NULL) { | 117 | if (ircomm_tty == NULL) { |
118 | IRDA_ERROR("%s(), can't allocate hashbin!\n", __FUNCTION__); | 118 | IRDA_ERROR("%s(), can't allocate hashbin!\n", __func__); |
119 | put_tty_driver(driver); | 119 | put_tty_driver(driver); |
120 | return -ENOMEM; | 120 | return -ENOMEM; |
121 | } | 121 | } |
@@ -133,7 +133,7 @@ static int __init ircomm_tty_init(void) | |||
133 | tty_set_operations(driver, &ops); | 133 | tty_set_operations(driver, &ops); |
134 | if (tty_register_driver(driver)) { | 134 | if (tty_register_driver(driver)) { |
135 | IRDA_ERROR("%s(): Couldn't register serial driver\n", | 135 | IRDA_ERROR("%s(): Couldn't register serial driver\n", |
136 | __FUNCTION__); | 136 | __func__); |
137 | put_tty_driver(driver); | 137 | put_tty_driver(driver); |
138 | return -1; | 138 | return -1; |
139 | } | 139 | } |
@@ -142,7 +142,7 @@ static int __init ircomm_tty_init(void) | |||
142 | 142 | ||
143 | static void __exit __ircomm_tty_cleanup(struct ircomm_tty_cb *self) | 143 | static void __exit __ircomm_tty_cleanup(struct ircomm_tty_cb *self) |
144 | { | 144 | { |
145 | IRDA_DEBUG(0, "%s()\n", __FUNCTION__ ); | 145 | IRDA_DEBUG(0, "%s()\n", __func__ ); |
146 | 146 | ||
147 | IRDA_ASSERT(self != NULL, return;); | 147 | IRDA_ASSERT(self != NULL, return;); |
148 | IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); | 148 | IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); |
@@ -163,12 +163,12 @@ static void __exit ircomm_tty_cleanup(void) | |||
163 | { | 163 | { |
164 | int ret; | 164 | int ret; |
165 | 165 | ||
166 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); | 166 | IRDA_DEBUG(4, "%s()\n", __func__ ); |
167 | 167 | ||
168 | ret = tty_unregister_driver(driver); | 168 | ret = tty_unregister_driver(driver); |
169 | if (ret) { | 169 | if (ret) { |
170 | IRDA_ERROR("%s(), failed to unregister driver\n", | 170 | IRDA_ERROR("%s(), failed to unregister driver\n", |
171 | __FUNCTION__); | 171 | __func__); |
172 | return; | 172 | return; |
173 | } | 173 | } |
174 | 174 | ||
@@ -187,14 +187,14 @@ static int ircomm_tty_startup(struct ircomm_tty_cb *self) | |||
187 | notify_t notify; | 187 | notify_t notify; |
188 | int ret = -ENODEV; | 188 | int ret = -ENODEV; |
189 | 189 | ||
190 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); | 190 | IRDA_DEBUG(2, "%s()\n", __func__ ); |
191 | 191 | ||
192 | IRDA_ASSERT(self != NULL, return -1;); | 192 | IRDA_ASSERT(self != NULL, return -1;); |
193 | IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); | 193 | IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); |
194 | 194 | ||
195 | /* Check if already open */ | 195 | /* Check if already open */ |
196 | if (test_and_set_bit(ASYNC_B_INITIALIZED, &self->flags)) { | 196 | if (test_and_set_bit(ASYNC_B_INITIALIZED, &self->flags)) { |
197 | IRDA_DEBUG(2, "%s(), already open so break out!\n", __FUNCTION__ ); | 197 | IRDA_DEBUG(2, "%s(), already open so break out!\n", __func__ ); |
198 | return 0; | 198 | return 0; |
199 | } | 199 | } |
200 | 200 | ||
@@ -224,7 +224,7 @@ static int ircomm_tty_startup(struct ircomm_tty_cb *self) | |||
224 | /* Connect IrCOMM link with remote device */ | 224 | /* Connect IrCOMM link with remote device */ |
225 | ret = ircomm_tty_attach_cable(self); | 225 | ret = ircomm_tty_attach_cable(self); |
226 | if (ret < 0) { | 226 | if (ret < 0) { |
227 | IRDA_ERROR("%s(), error attaching cable!\n", __FUNCTION__); | 227 | IRDA_ERROR("%s(), error attaching cable!\n", __func__); |
228 | goto err; | 228 | goto err; |
229 | } | 229 | } |
230 | 230 | ||
@@ -249,7 +249,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self, | |||
249 | unsigned long flags; | 249 | unsigned long flags; |
250 | struct tty_struct *tty; | 250 | struct tty_struct *tty; |
251 | 251 | ||
252 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); | 252 | IRDA_DEBUG(2, "%s()\n", __func__ ); |
253 | 253 | ||
254 | tty = self->tty; | 254 | tty = self->tty; |
255 | 255 | ||
@@ -260,12 +260,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self, | |||
260 | if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){ | 260 | if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){ |
261 | /* nonblock mode is set or port is not enabled */ | 261 | /* nonblock mode is set or port is not enabled */ |
262 | self->flags |= ASYNC_NORMAL_ACTIVE; | 262 | self->flags |= ASYNC_NORMAL_ACTIVE; |
263 | IRDA_DEBUG(1, "%s(), O_NONBLOCK requested!\n", __FUNCTION__ ); | 263 | IRDA_DEBUG(1, "%s(), O_NONBLOCK requested!\n", __func__ ); |
264 | return 0; | 264 | return 0; |
265 | } | 265 | } |
266 | 266 | ||
267 | if (tty->termios->c_cflag & CLOCAL) { | 267 | if (tty->termios->c_cflag & CLOCAL) { |
268 | IRDA_DEBUG(1, "%s(), doing CLOCAL!\n", __FUNCTION__ ); | 268 | IRDA_DEBUG(1, "%s(), doing CLOCAL!\n", __func__ ); |
269 | do_clocal = 1; | 269 | do_clocal = 1; |
270 | } | 270 | } |
271 | 271 | ||
@@ -368,7 +368,7 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp) | |||
368 | unsigned long flags; | 368 | unsigned long flags; |
369 | int ret; | 369 | int ret; |
370 | 370 | ||
371 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); | 371 | IRDA_DEBUG(2, "%s()\n", __func__ ); |
372 | 372 | ||
373 | line = tty->index; | 373 | line = tty->index; |
374 | if ((line < 0) || (line >= IRCOMM_TTY_PORTS)) { | 374 | if ((line < 0) || (line >= IRCOMM_TTY_PORTS)) { |
@@ -381,7 +381,7 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp) | |||
381 | /* No, so make new instance */ | 381 | /* No, so make new instance */ |
382 | self = kzalloc(sizeof(struct ircomm_tty_cb), GFP_KERNEL); | 382 | self = kzalloc(sizeof(struct ircomm_tty_cb), GFP_KERNEL); |
383 | if (self == NULL) { | 383 | if (self == NULL) { |
384 | IRDA_ERROR("%s(), kmalloc failed!\n", __FUNCTION__); | 384 | IRDA_ERROR("%s(), kmalloc failed!\n", __func__); |
385 | return -ENOMEM; | 385 | return -ENOMEM; |
386 | } | 386 | } |
387 | 387 | ||
@@ -420,7 +420,7 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp) | |||
420 | self->tty = tty; | 420 | self->tty = tty; |
421 | spin_unlock_irqrestore(&self->spinlock, flags); | 421 | spin_unlock_irqrestore(&self->spinlock, flags); |
422 | 422 | ||
423 | IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __FUNCTION__ , tty->driver->name, | 423 | IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name, |
424 | self->line, self->open_count); | 424 | self->line, self->open_count); |
425 | 425 | ||
426 | /* Not really used by us, but lets do it anyway */ | 426 | /* Not really used by us, but lets do it anyway */ |
@@ -442,7 +442,7 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp) | |||
442 | 442 | ||
443 | if (wait_event_interruptible(self->close_wait, !test_bit(ASYNC_B_CLOSING, &self->flags))) { | 443 | if (wait_event_interruptible(self->close_wait, !test_bit(ASYNC_B_CLOSING, &self->flags))) { |
444 | IRDA_WARNING("%s - got signal while blocking on ASYNC_CLOSING!\n", | 444 | IRDA_WARNING("%s - got signal while blocking on ASYNC_CLOSING!\n", |
445 | __FUNCTION__); | 445 | __func__); |
446 | return -ERESTARTSYS; | 446 | return -ERESTARTSYS; |
447 | } | 447 | } |
448 | 448 | ||
@@ -460,9 +460,9 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp) | |||
460 | self->settings.service_type = IRCOMM_9_WIRE; /* 9 wire as default */ | 460 | self->settings.service_type = IRCOMM_9_WIRE; /* 9 wire as default */ |
461 | /* Jan Kiszka -> add DSR/RI -> Conform to IrCOMM spec */ | 461 | /* Jan Kiszka -> add DSR/RI -> Conform to IrCOMM spec */ |
462 | self->settings.dce = IRCOMM_CTS | IRCOMM_CD | IRCOMM_DSR | IRCOMM_RI; /* Default line settings */ | 462 | self->settings.dce = IRCOMM_CTS | IRCOMM_CD | IRCOMM_DSR | IRCOMM_RI; /* Default line settings */ |
463 | IRDA_DEBUG(2, "%s(), IrCOMM device\n", __FUNCTION__ ); | 463 | IRDA_DEBUG(2, "%s(), IrCOMM device\n", __func__ ); |
464 | } else { | 464 | } else { |
465 | IRDA_DEBUG(2, "%s(), IrLPT device\n", __FUNCTION__ ); | 465 | IRDA_DEBUG(2, "%s(), IrLPT device\n", __func__ ); |
466 | self->service_type = IRCOMM_3_WIRE_RAW; | 466 | self->service_type = IRCOMM_3_WIRE_RAW; |
467 | self->settings.service_type = IRCOMM_3_WIRE_RAW; /* Default */ | 467 | self->settings.service_type = IRCOMM_3_WIRE_RAW; /* Default */ |
468 | } | 468 | } |
@@ -474,7 +474,7 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp) | |||
474 | ret = ircomm_tty_block_til_ready(self, filp); | 474 | ret = ircomm_tty_block_til_ready(self, filp); |
475 | if (ret) { | 475 | if (ret) { |
476 | IRDA_DEBUG(2, | 476 | IRDA_DEBUG(2, |
477 | "%s(), returning after block_til_ready with %d\n", __FUNCTION__ , | 477 | "%s(), returning after block_til_ready with %d\n", __func__ , |
478 | ret); | 478 | ret); |
479 | 479 | ||
480 | return ret; | 480 | return ret; |
@@ -493,7 +493,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp) | |||
493 | struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; | 493 | struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; |
494 | unsigned long flags; | 494 | unsigned long flags; |
495 | 495 | ||
496 | IRDA_DEBUG(0, "%s()\n", __FUNCTION__ ); | 496 | IRDA_DEBUG(0, "%s()\n", __func__ ); |
497 | 497 | ||
498 | if (!tty) | 498 | if (!tty) |
499 | return; | 499 | return; |
@@ -506,7 +506,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp) | |||
506 | if (tty_hung_up_p(filp)) { | 506 | if (tty_hung_up_p(filp)) { |
507 | spin_unlock_irqrestore(&self->spinlock, flags); | 507 | spin_unlock_irqrestore(&self->spinlock, flags); |
508 | 508 | ||
509 | IRDA_DEBUG(0, "%s(), returning 1\n", __FUNCTION__ ); | 509 | IRDA_DEBUG(0, "%s(), returning 1\n", __func__ ); |
510 | return; | 510 | return; |
511 | } | 511 | } |
512 | 512 | ||
@@ -519,20 +519,20 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp) | |||
519 | * serial port won't be shutdown. | 519 | * serial port won't be shutdown. |
520 | */ | 520 | */ |
521 | IRDA_DEBUG(0, "%s(), bad serial port count; " | 521 | IRDA_DEBUG(0, "%s(), bad serial port count; " |
522 | "tty->count is 1, state->count is %d\n", __FUNCTION__ , | 522 | "tty->count is 1, state->count is %d\n", __func__ , |
523 | self->open_count); | 523 | self->open_count); |
524 | self->open_count = 1; | 524 | self->open_count = 1; |
525 | } | 525 | } |
526 | 526 | ||
527 | if (--self->open_count < 0) { | 527 | if (--self->open_count < 0) { |
528 | IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n", | 528 | IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n", |
529 | __FUNCTION__, self->line, self->open_count); | 529 | __func__, self->line, self->open_count); |
530 | self->open_count = 0; | 530 | self->open_count = 0; |
531 | } | 531 | } |
532 | if (self->open_count) { | 532 | if (self->open_count) { |
533 | spin_unlock_irqrestore(&self->spinlock, flags); | 533 | spin_unlock_irqrestore(&self->spinlock, flags); |
534 | 534 | ||
535 | IRDA_DEBUG(0, "%s(), open count > 0\n", __FUNCTION__ ); | 535 | IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ ); |
536 | return; | 536 | return; |
537 | } | 537 | } |
538 | 538 | ||
@@ -608,7 +608,7 @@ static void ircomm_tty_do_softint(struct work_struct *work) | |||
608 | unsigned long flags; | 608 | unsigned long flags; |
609 | struct sk_buff *skb, *ctrl_skb; | 609 | struct sk_buff *skb, *ctrl_skb; |
610 | 610 | ||
611 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); | 611 | IRDA_DEBUG(2, "%s()\n", __func__ ); |
612 | 612 | ||
613 | if (!self || self->magic != IRCOMM_TTY_MAGIC) | 613 | if (!self || self->magic != IRCOMM_TTY_MAGIC) |
614 | return; | 614 | return; |
@@ -678,7 +678,7 @@ static int ircomm_tty_write(struct tty_struct *tty, | |||
678 | int len = 0; | 678 | int len = 0; |
679 | int size; | 679 | int size; |
680 | 680 | ||
681 | IRDA_DEBUG(2, "%s(), count=%d, hw_stopped=%d\n", __FUNCTION__ , count, | 681 | IRDA_DEBUG(2, "%s(), count=%d, hw_stopped=%d\n", __func__ , count, |
682 | tty->hw_stopped); | 682 | tty->hw_stopped); |
683 | 683 | ||
684 | IRDA_ASSERT(self != NULL, return -1;); | 684 | IRDA_ASSERT(self != NULL, return -1;); |
@@ -701,7 +701,7 @@ static int ircomm_tty_write(struct tty_struct *tty, | |||
701 | * we don't mess up the original "safe skb" (see tx_data_size). | 701 | * we don't mess up the original "safe skb" (see tx_data_size). |
702 | * Jean II */ | 702 | * Jean II */ |
703 | if (self->max_header_size == IRCOMM_TTY_HDR_UNINITIALISED) { | 703 | if (self->max_header_size == IRCOMM_TTY_HDR_UNINITIALISED) { |
704 | IRDA_DEBUG(1, "%s() : not initialised\n", __FUNCTION__); | 704 | IRDA_DEBUG(1, "%s() : not initialised\n", __func__); |
705 | #ifdef IRCOMM_NO_TX_BEFORE_INIT | 705 | #ifdef IRCOMM_NO_TX_BEFORE_INIT |
706 | /* We didn't consume anything, TTY will retry */ | 706 | /* We didn't consume anything, TTY will retry */ |
707 | return 0; | 707 | return 0; |
@@ -830,7 +830,7 @@ static int ircomm_tty_write_room(struct tty_struct *tty) | |||
830 | ret = self->max_data_size; | 830 | ret = self->max_data_size; |
831 | spin_unlock_irqrestore(&self->spinlock, flags); | 831 | spin_unlock_irqrestore(&self->spinlock, flags); |
832 | } | 832 | } |
833 | IRDA_DEBUG(2, "%s(), ret=%d\n", __FUNCTION__ , ret); | 833 | IRDA_DEBUG(2, "%s(), ret=%d\n", __func__ , ret); |
834 | 834 | ||
835 | return ret; | 835 | return ret; |
836 | } | 836 | } |
@@ -847,7 +847,7 @@ static void ircomm_tty_wait_until_sent(struct tty_struct *tty, int timeout) | |||
847 | unsigned long orig_jiffies, poll_time; | 847 | unsigned long orig_jiffies, poll_time; |
848 | unsigned long flags; | 848 | unsigned long flags; |
849 | 849 | ||
850 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); | 850 | IRDA_DEBUG(2, "%s()\n", __func__ ); |
851 | 851 | ||
852 | IRDA_ASSERT(self != NULL, return;); | 852 | IRDA_ASSERT(self != NULL, return;); |
853 | IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); | 853 | IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); |
@@ -882,7 +882,7 @@ static void ircomm_tty_throttle(struct tty_struct *tty) | |||
882 | { | 882 | { |
883 | struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; | 883 | struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; |
884 | 884 | ||
885 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); | 885 | IRDA_DEBUG(2, "%s()\n", __func__ ); |
886 | 886 | ||
887 | IRDA_ASSERT(self != NULL, return;); | 887 | IRDA_ASSERT(self != NULL, return;); |
888 | IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); | 888 | IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); |
@@ -913,7 +913,7 @@ static void ircomm_tty_unthrottle(struct tty_struct *tty) | |||
913 | { | 913 | { |
914 | struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; | 914 | struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; |
915 | 915 | ||
916 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); | 916 | IRDA_DEBUG(2, "%s()\n", __func__ ); |
917 | 917 | ||
918 | IRDA_ASSERT(self != NULL, return;); | 918 | IRDA_ASSERT(self != NULL, return;); |
919 | IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); | 919 | IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); |
@@ -928,7 +928,7 @@ static void ircomm_tty_unthrottle(struct tty_struct *tty) | |||
928 | self->settings.dte |= (IRCOMM_RTS|IRCOMM_DELTA_RTS); | 928 | self->settings.dte |= (IRCOMM_RTS|IRCOMM_DELTA_RTS); |
929 | 929 | ||
930 | ircomm_param_request(self, IRCOMM_DTE, TRUE); | 930 | ircomm_param_request(self, IRCOMM_DTE, TRUE); |
931 | IRDA_DEBUG(1, "%s(), FLOW_START\n", __FUNCTION__ ); | 931 | IRDA_DEBUG(1, "%s(), FLOW_START\n", __func__ ); |
932 | } | 932 | } |
933 | ircomm_flow_request(self->ircomm, FLOW_START); | 933 | ircomm_flow_request(self->ircomm, FLOW_START); |
934 | } | 934 | } |
@@ -965,7 +965,7 @@ static void ircomm_tty_shutdown(struct ircomm_tty_cb *self) | |||
965 | IRDA_ASSERT(self != NULL, return;); | 965 | IRDA_ASSERT(self != NULL, return;); |
966 | IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); | 966 | IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); |
967 | 967 | ||
968 | IRDA_DEBUG(0, "%s()\n", __FUNCTION__ ); | 968 | IRDA_DEBUG(0, "%s()\n", __func__ ); |
969 | 969 | ||
970 | if (!test_and_clear_bit(ASYNC_B_INITIALIZED, &self->flags)) | 970 | if (!test_and_clear_bit(ASYNC_B_INITIALIZED, &self->flags)) |
971 | return; | 971 | return; |
@@ -1008,7 +1008,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty) | |||
1008 | struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; | 1008 | struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; |
1009 | unsigned long flags; | 1009 | unsigned long flags; |
1010 | 1010 | ||
1011 | IRDA_DEBUG(0, "%s()\n", __FUNCTION__ ); | 1011 | IRDA_DEBUG(0, "%s()\n", __func__ ); |
1012 | 1012 | ||
1013 | IRDA_ASSERT(self != NULL, return;); | 1013 | IRDA_ASSERT(self != NULL, return;); |
1014 | IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); | 1014 | IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); |
@@ -1037,7 +1037,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty) | |||
1037 | */ | 1037 | */ |
1038 | static void ircomm_tty_send_xchar(struct tty_struct *tty, char ch) | 1038 | static void ircomm_tty_send_xchar(struct tty_struct *tty, char ch) |
1039 | { | 1039 | { |
1040 | IRDA_DEBUG(0, "%s(), not impl\n", __FUNCTION__ ); | 1040 | IRDA_DEBUG(0, "%s(), not impl\n", __func__ ); |
1041 | } | 1041 | } |
1042 | 1042 | ||
1043 | /* | 1043 | /* |
@@ -1081,7 +1081,7 @@ void ircomm_tty_check_modem_status(struct ircomm_tty_cb *self) | |||
1081 | struct tty_struct *tty; | 1081 | struct tty_struct *tty; |
1082 | int status; | 1082 | int status; |
1083 | 1083 | ||
1084 | IRDA_DEBUG(0, "%s()\n", __FUNCTION__ ); | 1084 | IRDA_DEBUG(0, "%s()\n", __func__ ); |
1085 | 1085 | ||
1086 | IRDA_ASSERT(self != NULL, return;); | 1086 | IRDA_ASSERT(self != NULL, return;); |
1087 | IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); | 1087 | IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); |
@@ -1095,14 +1095,14 @@ void ircomm_tty_check_modem_status(struct ircomm_tty_cb *self) | |||
1095 | } | 1095 | } |
1096 | if ((self->flags & ASYNC_CHECK_CD) && (status & IRCOMM_DELTA_CD)) { | 1096 | if ((self->flags & ASYNC_CHECK_CD) && (status & IRCOMM_DELTA_CD)) { |
1097 | IRDA_DEBUG(2, | 1097 | IRDA_DEBUG(2, |
1098 | "%s(), ircomm%d CD now %s...\n", __FUNCTION__ , self->line, | 1098 | "%s(), ircomm%d CD now %s...\n", __func__ , self->line, |
1099 | (status & IRCOMM_CD) ? "on" : "off"); | 1099 | (status & IRCOMM_CD) ? "on" : "off"); |
1100 | 1100 | ||
1101 | if (status & IRCOMM_CD) { | 1101 | if (status & IRCOMM_CD) { |
1102 | wake_up_interruptible(&self->open_wait); | 1102 | wake_up_interruptible(&self->open_wait); |
1103 | } else { | 1103 | } else { |
1104 | IRDA_DEBUG(2, | 1104 | IRDA_DEBUG(2, |
1105 | "%s(), Doing serial hangup..\n", __FUNCTION__ ); | 1105 | "%s(), Doing serial hangup..\n", __func__ ); |
1106 | if (tty) | 1106 | if (tty) |
1107 | tty_hangup(tty); | 1107 | tty_hangup(tty); |
1108 | 1108 | ||
@@ -1114,7 +1114,7 @@ void ircomm_tty_check_modem_status(struct ircomm_tty_cb *self) | |||
1114 | if (tty->hw_stopped) { | 1114 | if (tty->hw_stopped) { |
1115 | if (status & IRCOMM_CTS) { | 1115 | if (status & IRCOMM_CTS) { |
1116 | IRDA_DEBUG(2, | 1116 | IRDA_DEBUG(2, |
1117 | "%s(), CTS tx start...\n", __FUNCTION__ ); | 1117 | "%s(), CTS tx start...\n", __func__ ); |
1118 | tty->hw_stopped = 0; | 1118 | tty->hw_stopped = 0; |
1119 | 1119 | ||
1120 | /* Wake up processes blocked on open */ | 1120 | /* Wake up processes blocked on open */ |
@@ -1126,7 +1126,7 @@ void ircomm_tty_check_modem_status(struct ircomm_tty_cb *self) | |||
1126 | } else { | 1126 | } else { |
1127 | if (!(status & IRCOMM_CTS)) { | 1127 | if (!(status & IRCOMM_CTS)) { |
1128 | IRDA_DEBUG(2, | 1128 | IRDA_DEBUG(2, |
1129 | "%s(), CTS tx stop...\n", __FUNCTION__ ); | 1129 | "%s(), CTS tx stop...\n", __func__ ); |
1130 | tty->hw_stopped = 1; | 1130 | tty->hw_stopped = 1; |
1131 | } | 1131 | } |
1132 | } | 1132 | } |
@@ -1144,14 +1144,14 @@ static int ircomm_tty_data_indication(void *instance, void *sap, | |||
1144 | { | 1144 | { |
1145 | struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance; | 1145 | struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance; |
1146 | 1146 | ||
1147 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); | 1147 | IRDA_DEBUG(2, "%s()\n", __func__ ); |
1148 | 1148 | ||
1149 | IRDA_ASSERT(self != NULL, return -1;); | 1149 | IRDA_ASSERT(self != NULL, return -1;); |
1150 | IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); | 1150 | IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); |
1151 | IRDA_ASSERT(skb != NULL, return -1;); | 1151 | IRDA_ASSERT(skb != NULL, return -1;); |
1152 | 1152 | ||
1153 | if (!self->tty) { | 1153 | if (!self->tty) { |
1154 | IRDA_DEBUG(0, "%s(), no tty!\n", __FUNCTION__ ); | 1154 | IRDA_DEBUG(0, "%s(), no tty!\n", __func__ ); |
1155 | return 0; | 1155 | return 0; |
1156 | } | 1156 | } |
1157 | 1157 | ||
@@ -1162,7 +1162,7 @@ static int ircomm_tty_data_indication(void *instance, void *sap, | |||
1162 | * params, we can just as well declare the hardware for running. | 1162 | * params, we can just as well declare the hardware for running. |
1163 | */ | 1163 | */ |
1164 | if (self->tty->hw_stopped && (self->flow == FLOW_START)) { | 1164 | if (self->tty->hw_stopped && (self->flow == FLOW_START)) { |
1165 | IRDA_DEBUG(0, "%s(), polling for line settings!\n", __FUNCTION__ ); | 1165 | IRDA_DEBUG(0, "%s(), polling for line settings!\n", __func__ ); |
1166 | ircomm_param_request(self, IRCOMM_POLL, TRUE); | 1166 | ircomm_param_request(self, IRCOMM_POLL, TRUE); |
1167 | 1167 | ||
1168 | /* We can just as well declare the hardware for running */ | 1168 | /* We can just as well declare the hardware for running */ |
@@ -1194,7 +1194,7 @@ static int ircomm_tty_control_indication(void *instance, void *sap, | |||
1194 | struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance; | 1194 | struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance; |
1195 | int clen; | 1195 | int clen; |
1196 | 1196 | ||
1197 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); | 1197 | IRDA_DEBUG(4, "%s()\n", __func__ ); |
1198 | 1198 | ||
1199 | IRDA_ASSERT(self != NULL, return -1;); | 1199 | IRDA_ASSERT(self != NULL, return -1;); |
1200 | IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); | 1200 | IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); |
@@ -1230,7 +1230,7 @@ static void ircomm_tty_flow_indication(void *instance, void *sap, | |||
1230 | 1230 | ||
1231 | switch (cmd) { | 1231 | switch (cmd) { |
1232 | case FLOW_START: | 1232 | case FLOW_START: |
1233 | IRDA_DEBUG(2, "%s(), hw start!\n", __FUNCTION__ ); | 1233 | IRDA_DEBUG(2, "%s(), hw start!\n", __func__ ); |
1234 | tty->hw_stopped = 0; | 1234 | tty->hw_stopped = 0; |
1235 | 1235 | ||
1236 | /* ircomm_tty_do_softint will take care of the rest */ | 1236 | /* ircomm_tty_do_softint will take care of the rest */ |
@@ -1238,7 +1238,7 @@ static void ircomm_tty_flow_indication(void *instance, void *sap, | |||
1238 | break; | 1238 | break; |
1239 | default: /* If we get here, something is very wrong, better stop */ | 1239 | default: /* If we get here, something is very wrong, better stop */ |
1240 | case FLOW_STOP: | 1240 | case FLOW_STOP: |
1241 | IRDA_DEBUG(2, "%s(), hw stopped!\n", __FUNCTION__ ); | 1241 | IRDA_DEBUG(2, "%s(), hw stopped!\n", __func__ ); |
1242 | tty->hw_stopped = 1; | 1242 | tty->hw_stopped = 1; |
1243 | break; | 1243 | break; |
1244 | } | 1244 | } |
diff --git a/net/irda/ircomm/ircomm_tty_attach.c b/net/irda/ircomm/ircomm_tty_attach.c
index b5a13882c927..9032a1d1190d 100644
--- a/net/irda/ircomm/ircomm_tty_attach.c
+++ b/net/irda/ircomm/ircomm_tty_attach.c
@@ -129,14 +129,14 @@ static int (*state[])(struct ircomm_tty_cb *self, IRCOMM_TTY_EVENT event, | |||
129 | */ | 129 | */ |
130 | int ircomm_tty_attach_cable(struct ircomm_tty_cb *self) | 130 | int ircomm_tty_attach_cable(struct ircomm_tty_cb *self) |
131 | { | 131 | { |
132 | IRDA_DEBUG(0, "%s()\n", __FUNCTION__ ); | 132 | IRDA_DEBUG(0, "%s()\n", __func__ ); |
133 | 133 | ||
134 | IRDA_ASSERT(self != NULL, return -1;); | 134 | IRDA_ASSERT(self != NULL, return -1;); |
135 | IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); | 135 | IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); |
136 | 136 | ||
137 | /* Check if somebody has already connected to us */ | 137 | /* Check if somebody has already connected to us */ |
138 | if (ircomm_is_connected(self->ircomm)) { | 138 | if (ircomm_is_connected(self->ircomm)) { |
139 | IRDA_DEBUG(0, "%s(), already connected!\n", __FUNCTION__ ); | 139 | IRDA_DEBUG(0, "%s(), already connected!\n", __func__ ); |
140 | return 0; | 140 | return 0; |
141 | } | 141 | } |
142 | 142 | ||
@@ -158,7 +158,7 @@ int ircomm_tty_attach_cable(struct ircomm_tty_cb *self) | |||
158 | */ | 158 | */ |
159 | void ircomm_tty_detach_cable(struct ircomm_tty_cb *self) | 159 | void ircomm_tty_detach_cable(struct ircomm_tty_cb *self) |
160 | { | 160 | { |
161 | IRDA_DEBUG(0, "%s()\n", __FUNCTION__ ); | 161 | IRDA_DEBUG(0, "%s()\n", __func__ ); |
162 | 162 | ||
163 | IRDA_ASSERT(self != NULL, return;); | 163 | IRDA_ASSERT(self != NULL, return;); |
164 | IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); | 164 | IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); |
@@ -207,7 +207,7 @@ static void ircomm_tty_ias_register(struct ircomm_tty_cb *self) | |||
207 | __u8 oct_seq[6]; | 207 | __u8 oct_seq[6]; |
208 | __u16 hints; | 208 | __u16 hints; |
209 | 209 | ||
210 | IRDA_DEBUG(0, "%s()\n", __FUNCTION__ ); | 210 | IRDA_DEBUG(0, "%s()\n", __func__ ); |
211 | 211 | ||
212 | IRDA_ASSERT(self != NULL, return;); | 212 | IRDA_ASSERT(self != NULL, return;); |
213 | IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); | 213 | IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); |
@@ -308,16 +308,16 @@ int ircomm_tty_send_initial_parameters(struct ircomm_tty_cb *self) | |||
308 | * Set default values, but only if the application for some reason | 308 | * Set default values, but only if the application for some reason |
309 | * haven't set them already | 309 | * haven't set them already |
310 | */ | 310 | */ |
311 | IRDA_DEBUG(2, "%s(), data-rate = %d\n", __FUNCTION__ , | 311 | IRDA_DEBUG(2, "%s(), data-rate = %d\n", __func__ , |
312 | self->settings.data_rate); | 312 | self->settings.data_rate); |
313 | if (!self->settings.data_rate) | 313 | if (!self->settings.data_rate) |
314 | self->settings.data_rate = 9600; | 314 | self->settings.data_rate = 9600; |
315 | IRDA_DEBUG(2, "%s(), data-format = %d\n", __FUNCTION__ , | 315 | IRDA_DEBUG(2, "%s(), data-format = %d\n", __func__ , |
316 | self->settings.data_format); | 316 | self->settings.data_format); |
317 | if (!self->settings.data_format) | 317 | if (!self->settings.data_format) |
318 | self->settings.data_format = IRCOMM_WSIZE_8; /* 8N1 */ | 318 | self->settings.data_format = IRCOMM_WSIZE_8; /* 8N1 */ |
319 | 319 | ||
320 | IRDA_DEBUG(2, "%s(), flow-control = %d\n", __FUNCTION__ , | 320 | IRDA_DEBUG(2, "%s(), flow-control = %d\n", __func__ , |
321 | self->settings.flow_control); | 321 | self->settings.flow_control); |
322 | /*self->settings.flow_control = IRCOMM_RTS_CTS_IN|IRCOMM_RTS_CTS_OUT;*/ | 322 | /*self->settings.flow_control = IRCOMM_RTS_CTS_IN|IRCOMM_RTS_CTS_OUT;*/ |
323 | 323 | ||
@@ -362,7 +362,7 @@ static void ircomm_tty_discovery_indication(discinfo_t *discovery, | |||
362 | struct ircomm_tty_cb *self; | 362 | struct ircomm_tty_cb *self; |
363 | struct ircomm_tty_info info; | 363 | struct ircomm_tty_info info; |
364 | 364 | ||
365 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); | 365 | IRDA_DEBUG(2, "%s()\n", __func__ ); |
366 | 366 | ||
367 | /* Important note : | 367 | /* Important note : |
368 | * We need to drop all passive discoveries. | 368 | * We need to drop all passive discoveries. |
@@ -398,7 +398,7 @@ void ircomm_tty_disconnect_indication(void *instance, void *sap, | |||
398 | { | 398 | { |
399 | struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance; | 399 | struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance; |
400 | 400 | ||
401 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); | 401 | IRDA_DEBUG(2, "%s()\n", __func__ ); |
402 | 402 | ||
403 | IRDA_ASSERT(self != NULL, return;); | 403 | IRDA_ASSERT(self != NULL, return;); |
404 | IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); | 404 | IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); |
@@ -428,7 +428,7 @@ static void ircomm_tty_getvalue_confirm(int result, __u16 obj_id, | |||
428 | { | 428 | { |
429 | struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) priv; | 429 | struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) priv; |
430 | 430 | ||
431 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); | 431 | IRDA_DEBUG(2, "%s()\n", __func__ ); |
432 | 432 | ||
433 | IRDA_ASSERT(self != NULL, return;); | 433 | IRDA_ASSERT(self != NULL, return;); |
434 | IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); | 434 | IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); |
@@ -439,13 +439,13 @@ static void ircomm_tty_getvalue_confirm(int result, __u16 obj_id, | |||
439 | 439 | ||
440 | /* Check if request succeeded */ | 440 | /* Check if request succeeded */ |
441 | if (result != IAS_SUCCESS) { | 441 | if (result != IAS_SUCCESS) { |
442 | IRDA_DEBUG(4, "%s(), got NULL value!\n", __FUNCTION__ ); | 442 | IRDA_DEBUG(4, "%s(), got NULL value!\n", __func__ ); |
443 | return; | 443 | return; |
444 | } | 444 | } |
445 | 445 | ||
446 | switch (value->type) { | 446 | switch (value->type) { |
447 | case IAS_OCT_SEQ: | 447 | case IAS_OCT_SEQ: |
448 | IRDA_DEBUG(2, "%s(), got octet sequence\n", __FUNCTION__ ); | 448 | IRDA_DEBUG(2, "%s(), got octet sequence\n", __func__ ); |
449 | 449 | ||
450 | irda_param_extract_all(self, value->t.oct_seq, value->len, | 450 | irda_param_extract_all(self, value->t.oct_seq, value->len, |
451 | &ircomm_param_info); | 451 | &ircomm_param_info); |
@@ -455,21 +455,21 @@ static void ircomm_tty_getvalue_confirm(int result, __u16 obj_id, | |||
455 | break; | 455 | break; |
456 | case IAS_INTEGER: | 456 | case IAS_INTEGER: |
457 | /* Got LSAP selector */ | 457 | /* Got LSAP selector */ |
458 | IRDA_DEBUG(2, "%s(), got lsapsel = %d\n", __FUNCTION__ , | 458 | IRDA_DEBUG(2, "%s(), got lsapsel = %d\n", __func__ , |
459 | value->t.integer); | 459 | value->t.integer); |
460 | 460 | ||
461 | if (value->t.integer == -1) { | 461 | if (value->t.integer == -1) { |
462 | IRDA_DEBUG(0, "%s(), invalid value!\n", __FUNCTION__ ); | 462 | IRDA_DEBUG(0, "%s(), invalid value!\n", __func__ ); |
463 | } else | 463 | } else |
464 | self->dlsap_sel = value->t.integer; | 464 | self->dlsap_sel = value->t.integer; |
465 | 465 | ||
466 | ircomm_tty_do_event(self, IRCOMM_TTY_GOT_LSAPSEL, NULL, NULL); | 466 | ircomm_tty_do_event(self, IRCOMM_TTY_GOT_LSAPSEL, NULL, NULL); |
467 | break; | 467 | break; |
468 | case IAS_MISSING: | 468 | case IAS_MISSING: |
469 | IRDA_DEBUG(0, "%s(), got IAS_MISSING\n", __FUNCTION__ ); | 469 | IRDA_DEBUG(0, "%s(), got IAS_MISSING\n", __func__ ); |
470 | break; | 470 | break; |
471 | default: | 471 | default: |
472 | IRDA_DEBUG(0, "%s(), got unknown type!\n", __FUNCTION__ ); | 472 | IRDA_DEBUG(0, "%s(), got unknown type!\n", __func__ ); |
473 | break; | 473 | break; |
474 | } | 474 | } |
475 | irias_delete_value(value); | 475 | irias_delete_value(value); |
@@ -489,7 +489,7 @@ void ircomm_tty_connect_confirm(void *instance, void *sap, | |||
489 | { | 489 | { |
490 | struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance; | 490 | struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance; |
491 | 491 | ||
492 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); | 492 | IRDA_DEBUG(2, "%s()\n", __func__ ); |
493 | 493 | ||
494 | IRDA_ASSERT(self != NULL, return;); | 494 | IRDA_ASSERT(self != NULL, return;); |
495 | IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); | 495 | IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); |
@@ -520,7 +520,7 @@ void ircomm_tty_connect_indication(void *instance, void *sap, | |||
520 | struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance; | 520 | struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance; |
521 | int clen; | 521 | int clen; |
522 | 522 | ||
523 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); | 523 | IRDA_DEBUG(2, "%s()\n", __func__ ); |
524 | 524 | ||
525 | IRDA_ASSERT(self != NULL, return;); | 525 | IRDA_ASSERT(self != NULL, return;); |
526 | IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); | 526 | IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); |
@@ -549,7 +549,7 @@ void ircomm_tty_connect_indication(void *instance, void *sap, | |||
549 | */ | 549 | */ |
550 | void ircomm_tty_link_established(struct ircomm_tty_cb *self) | 550 | void ircomm_tty_link_established(struct ircomm_tty_cb *self) |
551 | { | 551 | { |
552 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); | 552 | IRDA_DEBUG(2, "%s()\n", __func__ ); |
553 | 553 | ||
554 | IRDA_ASSERT(self != NULL, return;); | 554 | IRDA_ASSERT(self != NULL, return;); |
555 | IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); | 555 | IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); |
@@ -566,10 +566,10 @@ void ircomm_tty_link_established(struct ircomm_tty_cb *self) | |||
566 | * line. | 566 | * line. |
567 | */ | 567 | */ |
568 | if ((self->flags & ASYNC_CTS_FLOW) && ((self->settings.dce & IRCOMM_CTS) == 0)) { | 568 | if ((self->flags & ASYNC_CTS_FLOW) && ((self->settings.dce & IRCOMM_CTS) == 0)) { |
569 | IRDA_DEBUG(0, "%s(), waiting for CTS ...\n", __FUNCTION__ ); | 569 | IRDA_DEBUG(0, "%s(), waiting for CTS ...\n", __func__ ); |
570 | return; | 570 | return; |
571 | } else { | 571 | } else { |
572 | IRDA_DEBUG(1, "%s(), starting hardware!\n", __FUNCTION__ ); | 572 | IRDA_DEBUG(1, "%s(), starting hardware!\n", __func__ ); |
573 | 573 | ||
574 | self->tty->hw_stopped = 0; | 574 | self->tty->hw_stopped = 0; |
575 | 575 | ||
@@ -607,7 +607,7 @@ static void ircomm_tty_watchdog_timer_expired(void *data) | |||
607 | { | 607 | { |
608 | struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) data; | 608 | struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) data; |
609 | 609 | ||
610 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); | 610 | IRDA_DEBUG(2, "%s()\n", __func__ ); |
611 | 611 | ||
612 | IRDA_ASSERT(self != NULL, return;); | 612 | IRDA_ASSERT(self != NULL, return;); |
613 | IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); | 613 | IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); |
@@ -628,7 +628,7 @@ int ircomm_tty_do_event(struct ircomm_tty_cb *self, IRCOMM_TTY_EVENT event, | |||
628 | IRDA_ASSERT(self != NULL, return -1;); | 628 | IRDA_ASSERT(self != NULL, return -1;); |
629 | IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); | 629 | IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); |
630 | 630 | ||
631 | IRDA_DEBUG(2, "%s: state=%s, event=%s\n", __FUNCTION__ , | 631 | IRDA_DEBUG(2, "%s: state=%s, event=%s\n", __func__ , |
632 | ircomm_tty_state[self->state], ircomm_tty_event[event]); | 632 | ircomm_tty_state[self->state], ircomm_tty_event[event]); |
633 | 633 | ||
634 | return (*state[self->state])(self, event, skb, info); | 634 | return (*state[self->state])(self, event, skb, info); |
@@ -646,7 +646,7 @@ static inline void ircomm_tty_next_state(struct ircomm_tty_cb *self, IRCOMM_TTY_ | |||
646 | IRDA_ASSERT(self != NULL, return;); | 646 | IRDA_ASSERT(self != NULL, return;); |
647 | IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); | 647 | IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); |
648 | 648 | ||
649 | IRDA_DEBUG(2, "%s: next state=%s, service type=%d\n", __FUNCTION__ , | 649 | IRDA_DEBUG(2, "%s: next state=%s, service type=%d\n", __func__ , |
650 | ircomm_tty_state[self->state], self->service_type); | 650 | ircomm_tty_state[self->state], self->service_type); |
651 | */ | 651 | */ |
652 | self->state = state; | 652 | self->state = state; |
@@ -665,7 +665,7 @@ static int ircomm_tty_state_idle(struct ircomm_tty_cb *self, | |||
665 | { | 665 | { |
666 | int ret = 0; | 666 | int ret = 0; |
667 | 667 | ||
668 | IRDA_DEBUG(2, "%s: state=%s, event=%s\n", __FUNCTION__ , | 668 | IRDA_DEBUG(2, "%s: state=%s, event=%s\n", __func__ , |
669 | ircomm_tty_state[self->state], ircomm_tty_event[event]); | 669 | ircomm_tty_state[self->state], ircomm_tty_event[event]); |
670 | switch (event) { | 670 | switch (event) { |
671 | case IRCOMM_TTY_ATTACH_CABLE: | 671 | case IRCOMM_TTY_ATTACH_CABLE: |
@@ -681,7 +681,7 @@ static int ircomm_tty_state_idle(struct ircomm_tty_cb *self, | |||
681 | 681 | ||
682 | if (self->iriap) { | 682 | if (self->iriap) { |
683 | IRDA_WARNING("%s(), busy with a previous query\n", | 683 | IRDA_WARNING("%s(), busy with a previous query\n", |
684 | __FUNCTION__); | 684 | __func__); |
685 | return -EBUSY; | 685 | return -EBUSY; |
686 | } | 686 | } |
687 | 687 | ||
@@ -709,7 +709,7 @@ static int ircomm_tty_state_idle(struct ircomm_tty_cb *self, | |||
709 | ircomm_tty_next_state(self, IRCOMM_TTY_IDLE); | 709 | ircomm_tty_next_state(self, IRCOMM_TTY_IDLE); |
710 | break; | 710 | break; |
711 | default: | 711 | default: |
712 | IRDA_DEBUG(2, "%s(), unknown event: %s\n", __FUNCTION__ , | 712 | IRDA_DEBUG(2, "%s(), unknown event: %s\n", __func__ , |
713 | ircomm_tty_event[event]); | 713 | ircomm_tty_event[event]); |
714 | ret = -EINVAL; | 714 | ret = -EINVAL; |
715 | } | 715 | } |
@@ -729,7 +729,7 @@ static int ircomm_tty_state_search(struct ircomm_tty_cb *self, | |||
729 | { | 729 | { |
730 | int ret = 0; | 730 | int ret = 0; |
731 | 731 | ||
732 | IRDA_DEBUG(2, "%s: state=%s, event=%s\n", __FUNCTION__ , | 732 | IRDA_DEBUG(2, "%s: state=%s, event=%s\n", __func__ , |
733 | ircomm_tty_state[self->state], ircomm_tty_event[event]); | 733 | ircomm_tty_state[self->state], ircomm_tty_event[event]); |
734 | 734 | ||
735 | switch (event) { | 735 | switch (event) { |
@@ -739,7 +739,7 @@ static int ircomm_tty_state_search(struct ircomm_tty_cb *self, | |||
739 | 739 | ||
740 | if (self->iriap) { | 740 | if (self->iriap) { |
741 | IRDA_WARNING("%s(), busy with a previous query\n", | 741 | IRDA_WARNING("%s(), busy with a previous query\n", |
742 | __FUNCTION__); | 742 | __func__); |
743 | return -EBUSY; | 743 | return -EBUSY; |
744 | } | 744 | } |
745 | 745 | ||
@@ -782,7 +782,7 @@ static int ircomm_tty_state_search(struct ircomm_tty_cb *self, | |||
782 | ircomm_tty_next_state(self, IRCOMM_TTY_IDLE); | 782 | ircomm_tty_next_state(self, IRCOMM_TTY_IDLE); |
783 | break; | 783 | break; |
784 | default: | 784 | default: |
785 | IRDA_DEBUG(2, "%s(), unknown event: %s\n", __FUNCTION__ , | 785 | IRDA_DEBUG(2, "%s(), unknown event: %s\n", __func__ , |
786 | ircomm_tty_event[event]); | 786 | ircomm_tty_event[event]); |
787 | ret = -EINVAL; | 787 | ret = -EINVAL; |
788 | } | 788 | } |
@@ -802,14 +802,14 @@ static int ircomm_tty_state_query_parameters(struct ircomm_tty_cb *self, | |||
802 | { | 802 | { |
803 | int ret = 0; | 803 | int ret = 0; |
804 | 804 | ||
805 | IRDA_DEBUG(2, "%s: state=%s, event=%s\n", __FUNCTION__ , | 805 | IRDA_DEBUG(2, "%s: state=%s, event=%s\n", __func__ , |
806 | ircomm_tty_state[self->state], ircomm_tty_event[event]); | 806 | ircomm_tty_state[self->state], ircomm_tty_event[event]); |
807 | 807 | ||
808 | switch (event) { | 808 | switch (event) { |
809 | case IRCOMM_TTY_GOT_PARAMETERS: | 809 | case IRCOMM_TTY_GOT_PARAMETERS: |
810 | if (self->iriap) { | 810 | if (self->iriap) { |
811 | IRDA_WARNING("%s(), busy with a previous query\n", | 811 | IRDA_WARNING("%s(), busy with a previous query\n", |
812 | __FUNCTION__); | 812 | __func__); |
813 | return -EBUSY; | 813 | return -EBUSY; |
814 | } | 814 | } |
815 | 815 | ||
@@ -840,7 +840,7 @@ static int ircomm_tty_state_query_parameters(struct ircomm_tty_cb *self, | |||
840 | ircomm_tty_next_state(self, IRCOMM_TTY_IDLE); | 840 | ircomm_tty_next_state(self, IRCOMM_TTY_IDLE); |
841 | break; | 841 | break; |
842 | default: | 842 | default: |
843 | IRDA_DEBUG(2, "%s(), unknown event: %s\n", __FUNCTION__ , | 843 | IRDA_DEBUG(2, "%s(), unknown event: %s\n", __func__ , |
844 | ircomm_tty_event[event]); | 844 | ircomm_tty_event[event]); |
845 | ret = -EINVAL; | 845 | ret = -EINVAL; |
846 | } | 846 | } |
@@ -860,7 +860,7 @@ static int ircomm_tty_state_query_lsap_sel(struct ircomm_tty_cb *self, | |||
860 | { | 860 | { |
861 | int ret = 0; | 861 | int ret = 0; |
862 | 862 | ||
863 | IRDA_DEBUG(2, "%s: state=%s, event=%s\n", __FUNCTION__ , | 863 | IRDA_DEBUG(2, "%s: state=%s, event=%s\n", __func__ , |
864 | ircomm_tty_state[self->state], ircomm_tty_event[event]); | 864 | ircomm_tty_state[self->state], ircomm_tty_event[event]); |
865 | 865 | ||
866 | switch (event) { | 866 | switch (event) { |
@@ -889,7 +889,7 @@ static int ircomm_tty_state_query_lsap_sel(struct ircomm_tty_cb *self, | |||
889 | ircomm_tty_next_state(self, IRCOMM_TTY_IDLE); | 889 | ircomm_tty_next_state(self, IRCOMM_TTY_IDLE); |
890 | break; | 890 | break; |
891 | default: | 891 | default: |
892 | IRDA_DEBUG(2, "%s(), unknown event: %s\n", __FUNCTION__ , | 892 | IRDA_DEBUG(2, "%s(), unknown event: %s\n", __func__ , |
893 | ircomm_tty_event[event]); | 893 | ircomm_tty_event[event]); |
894 | ret = -EINVAL; | 894 | ret = -EINVAL; |
895 | } | 895 | } |
@@ -909,7 +909,7 @@ static int ircomm_tty_state_setup(struct ircomm_tty_cb *self, | |||
909 | { | 909 | { |
910 | int ret = 0; | 910 | int ret = 0; |
911 | 911 | ||
912 | IRDA_DEBUG(2, "%s: state=%s, event=%s\n", __FUNCTION__ , | 912 | IRDA_DEBUG(2, "%s: state=%s, event=%s\n", __func__ , |
913 | ircomm_tty_state[self->state], ircomm_tty_event[event]); | 913 | ircomm_tty_state[self->state], ircomm_tty_event[event]); |
914 | 914 | ||
915 | switch (event) { | 915 | switch (event) { |
@@ -943,7 +943,7 @@ static int ircomm_tty_state_setup(struct ircomm_tty_cb *self, | |||
943 | ircomm_tty_next_state(self, IRCOMM_TTY_IDLE); | 943 | ircomm_tty_next_state(self, IRCOMM_TTY_IDLE); |
944 | break; | 944 | break; |
945 | default: | 945 | default: |
946 | IRDA_DEBUG(2, "%s(), unknown event: %s\n", __FUNCTION__ , | 946 | IRDA_DEBUG(2, "%s(), unknown event: %s\n", __func__ , |
947 | ircomm_tty_event[event]); | 947 | ircomm_tty_event[event]); |
948 | ret = -EINVAL; | 948 | ret = -EINVAL; |
949 | } | 949 | } |
@@ -981,13 +981,13 @@ static int ircomm_tty_state_ready(struct ircomm_tty_cb *self, | |||
981 | self->settings.dce = IRCOMM_DELTA_CD; | 981 | self->settings.dce = IRCOMM_DELTA_CD; |
982 | ircomm_tty_check_modem_status(self); | 982 | ircomm_tty_check_modem_status(self); |
983 | } else { | 983 | } else { |
984 | IRDA_DEBUG(0, "%s(), hanging up!\n", __FUNCTION__ ); | 984 | IRDA_DEBUG(0, "%s(), hanging up!\n", __func__ ); |
985 | if (self->tty) | 985 | if (self->tty) |
986 | tty_hangup(self->tty); | 986 | tty_hangup(self->tty); |
987 | } | 987 | } |
988 | break; | 988 | break; |
989 | default: | 989 | default: |
990 | IRDA_DEBUG(2, "%s(), unknown event: %s\n", __FUNCTION__ , | 990 | IRDA_DEBUG(2, "%s(), unknown event: %s\n", __func__ , |
991 | ircomm_tty_event[event]); | 991 | ircomm_tty_event[event]); |
992 | ret = -EINVAL; | 992 | ret = -EINVAL; |
993 | } | 993 | } |
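Several of the ircomm_tty_attach.c hunks sit inside the TTY attach state machine, which dispatches through an array of function pointers indexed by the current state (the (*state[self->state])(self, event, skb, info) call visible above), with each handler logging its own name and returning -EINVAL for events it does not recognize. A compact standalone illustration of that table-driven pattern, using made-up state and event names rather than the driver's real ones:

    #include <stdio.h>

    enum my_state { ST_IDLE, ST_SEARCH, ST_READY, ST_MAX };
    enum my_event { EV_ATTACH, EV_DETACH };

    typedef int (*state_fn)(enum my_event ev);

    /* Each handler logs its own name via __func__, like the IRDA_DEBUG lines above. */
    static int st_idle(enum my_event ev)   { printf("%s()\n", __func__); return 0; }
    static int st_search(enum my_event ev) { printf("%s()\n", __func__); return 0; }
    static int st_ready(enum my_event ev)  { printf("%s()\n", __func__); return 0; }

    static const state_fn state_table[ST_MAX] = {
        [ST_IDLE]   = st_idle,
        [ST_SEARCH] = st_search,
        [ST_READY]  = st_ready,
    };

    static int do_event(enum my_state s, enum my_event ev)
    {
        return state_table[s](ev);   /* same shape as (*state[self->state])(...) */
    }

    int main(void)
    {
        return do_event(ST_IDLE, EV_ATTACH);
    }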
diff --git a/net/irda/ircomm/ircomm_tty_ioctl.c b/net/irda/ircomm/ircomm_tty_ioctl.c index 6030947b6d93..24cb3aa2bbfb 100644 --- a/net/irda/ircomm/ircomm_tty_ioctl.c +++ b/net/irda/ircomm/ircomm_tty_ioctl.c | |||
@@ -57,7 +57,7 @@ static void ircomm_tty_change_speed(struct ircomm_tty_cb *self) | |||
57 | unsigned cflag, cval; | 57 | unsigned cflag, cval; |
58 | int baud; | 58 | int baud; |
59 | 59 | ||
60 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); | 60 | IRDA_DEBUG(2, "%s()\n", __func__ ); |
61 | 61 | ||
62 | if (!self->tty || !self->tty->termios || !self->ircomm) | 62 | if (!self->tty || !self->tty->termios || !self->ircomm) |
63 | return; | 63 | return; |
@@ -94,7 +94,7 @@ static void ircomm_tty_change_speed(struct ircomm_tty_cb *self) | |||
94 | self->settings.flow_control |= IRCOMM_RTS_CTS_IN; | 94 | self->settings.flow_control |= IRCOMM_RTS_CTS_IN; |
95 | /* This got me. Bummer. Jean II */ | 95 | /* This got me. Bummer. Jean II */ |
96 | if (self->service_type == IRCOMM_3_WIRE_RAW) | 96 | if (self->service_type == IRCOMM_3_WIRE_RAW) |
97 | IRDA_WARNING("%s(), enabling RTS/CTS on link that doesn't support it (3-wire-raw)\n", __FUNCTION__); | 97 | IRDA_WARNING("%s(), enabling RTS/CTS on link that doesn't support it (3-wire-raw)\n", __func__); |
98 | } else { | 98 | } else { |
99 | self->flags &= ~ASYNC_CTS_FLOW; | 99 | self->flags &= ~ASYNC_CTS_FLOW; |
100 | self->settings.flow_control &= ~IRCOMM_RTS_CTS_IN; | 100 | self->settings.flow_control &= ~IRCOMM_RTS_CTS_IN; |
@@ -150,7 +150,7 @@ void ircomm_tty_set_termios(struct tty_struct *tty, | |||
150 | struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; | 150 | struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; |
151 | unsigned int cflag = tty->termios->c_cflag; | 151 | unsigned int cflag = tty->termios->c_cflag; |
152 | 152 | ||
153 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); | 153 | IRDA_DEBUG(2, "%s()\n", __func__ ); |
154 | 154 | ||
155 | if ((cflag == old_termios->c_cflag) && | 155 | if ((cflag == old_termios->c_cflag) && |
156 | (RELEVANT_IFLAG(tty->termios->c_iflag) == | 156 | (RELEVANT_IFLAG(tty->termios->c_iflag) == |
@@ -199,7 +199,7 @@ int ircomm_tty_tiocmget(struct tty_struct *tty, struct file *file) | |||
199 | struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; | 199 | struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; |
200 | unsigned int result; | 200 | unsigned int result; |
201 | 201 | ||
202 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); | 202 | IRDA_DEBUG(2, "%s()\n", __func__ ); |
203 | 203 | ||
204 | if (tty->flags & (1 << TTY_IO_ERROR)) | 204 | if (tty->flags & (1 << TTY_IO_ERROR)) |
205 | return -EIO; | 205 | return -EIO; |
@@ -224,7 +224,7 @@ int ircomm_tty_tiocmset(struct tty_struct *tty, struct file *file, | |||
224 | { | 224 | { |
225 | struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; | 225 | struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; |
226 | 226 | ||
227 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); | 227 | IRDA_DEBUG(2, "%s()\n", __func__ ); |
228 | 228 | ||
229 | if (tty->flags & (1 << TTY_IO_ERROR)) | 229 | if (tty->flags & (1 << TTY_IO_ERROR)) |
230 | return -EIO; | 230 | return -EIO; |
@@ -266,7 +266,7 @@ static int ircomm_tty_get_serial_info(struct ircomm_tty_cb *self, | |||
266 | if (!retinfo) | 266 | if (!retinfo) |
267 | return -EFAULT; | 267 | return -EFAULT; |
268 | 268 | ||
269 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); | 269 | IRDA_DEBUG(2, "%s()\n", __func__ ); |
270 | 270 | ||
271 | memset(&info, 0, sizeof(info)); | 271 | memset(&info, 0, sizeof(info)); |
272 | info.line = self->line; | 272 | info.line = self->line; |
@@ -302,7 +302,7 @@ static int ircomm_tty_set_serial_info(struct ircomm_tty_cb *self, | |||
302 | struct serial_struct new_serial; | 302 | struct serial_struct new_serial; |
303 | struct ircomm_tty_cb old_state, *state; | 303 | struct ircomm_tty_cb old_state, *state; |
304 | 304 | ||
305 | IRDA_DEBUG(0, "%s()\n", __FUNCTION__ ); | 305 | IRDA_DEBUG(0, "%s()\n", __func__ ); |
306 | 306 | ||
307 | if (copy_from_user(&new_serial,new_info,sizeof(new_serial))) | 307 | if (copy_from_user(&new_serial,new_info,sizeof(new_serial))) |
308 | return -EFAULT; | 308 | return -EFAULT; |
@@ -376,7 +376,7 @@ int ircomm_tty_ioctl(struct tty_struct *tty, struct file *file, | |||
376 | struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; | 376 | struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; |
377 | int ret = 0; | 377 | int ret = 0; |
378 | 378 | ||
379 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); | 379 | IRDA_DEBUG(2, "%s()\n", __func__ ); |
380 | 380 | ||
381 | if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) && | 381 | if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) && |
382 | (cmd != TIOCSERCONFIG) && (cmd != TIOCSERGSTRUCT) && | 382 | (cmd != TIOCSERCONFIG) && (cmd != TIOCSERGSTRUCT) && |
@@ -397,7 +397,7 @@ int ircomm_tty_ioctl(struct tty_struct *tty, struct file *file, | |||
397 | break; | 397 | break; |
398 | 398 | ||
399 | case TIOCGICOUNT: | 399 | case TIOCGICOUNT: |
400 | IRDA_DEBUG(0, "%s(), TIOCGICOUNT not impl!\n", __FUNCTION__ ); | 400 | IRDA_DEBUG(0, "%s(), TIOCGICOUNT not impl!\n", __func__ ); |
401 | #if 0 | 401 | #if 0 |
402 | save_flags(flags); cli(); | 402 | save_flags(flags); cli(); |
403 | cnow = driver->icount; | 403 | cnow = driver->icount; |
diff --git a/net/irda/irda_device.c b/net/irda/irda_device.c index 87185910d0ee..ea319e3ddc18 100644 --- a/net/irda/irda_device.c +++ b/net/irda/irda_device.c | |||
@@ -90,7 +90,7 @@ static void leftover_dongle(void *arg) | |||
90 | 90 | ||
91 | void irda_device_cleanup(void) | 91 | void irda_device_cleanup(void) |
92 | { | 92 | { |
93 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__); | 93 | IRDA_DEBUG(4, "%s()\n", __func__); |
94 | 94 | ||
95 | hashbin_delete(tasks, (FREE_FUNC) __irda_task_delete); | 95 | hashbin_delete(tasks, (FREE_FUNC) __irda_task_delete); |
96 | 96 | ||
@@ -107,7 +107,7 @@ void irda_device_set_media_busy(struct net_device *dev, int status) | |||
107 | { | 107 | { |
108 | struct irlap_cb *self; | 108 | struct irlap_cb *self; |
109 | 109 | ||
110 | IRDA_DEBUG(4, "%s(%s)\n", __FUNCTION__, status ? "TRUE" : "FALSE"); | 110 | IRDA_DEBUG(4, "%s(%s)\n", __func__, status ? "TRUE" : "FALSE"); |
111 | 111 | ||
112 | self = (struct irlap_cb *) dev->atalk_ptr; | 112 | self = (struct irlap_cb *) dev->atalk_ptr; |
113 | 113 | ||
@@ -147,11 +147,11 @@ int irda_device_is_receiving(struct net_device *dev) | |||
147 | struct if_irda_req req; | 147 | struct if_irda_req req; |
148 | int ret; | 148 | int ret; |
149 | 149 | ||
150 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__); | 150 | IRDA_DEBUG(2, "%s()\n", __func__); |
151 | 151 | ||
152 | if (!dev->do_ioctl) { | 152 | if (!dev->do_ioctl) { |
153 | IRDA_ERROR("%s: do_ioctl not impl. by device driver\n", | 153 | IRDA_ERROR("%s: do_ioctl not impl. by device driver\n", |
154 | __FUNCTION__); | 154 | __func__); |
155 | return -1; | 155 | return -1; |
156 | } | 156 | } |
157 | 157 | ||
@@ -191,7 +191,7 @@ static int irda_task_kick(struct irda_task *task) | |||
191 | int count = 0; | 191 | int count = 0; |
192 | int timeout; | 192 | int timeout; |
193 | 193 | ||
194 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__); | 194 | IRDA_DEBUG(2, "%s()\n", __func__); |
195 | 195 | ||
196 | IRDA_ASSERT(task != NULL, return -1;); | 196 | IRDA_ASSERT(task != NULL, return -1;); |
197 | IRDA_ASSERT(task->magic == IRDA_TASK_MAGIC, return -1;); | 197 | IRDA_ASSERT(task->magic == IRDA_TASK_MAGIC, return -1;); |
@@ -201,14 +201,14 @@ static int irda_task_kick(struct irda_task *task) | |||
201 | timeout = task->function(task); | 201 | timeout = task->function(task); |
202 | if (count++ > 100) { | 202 | if (count++ > 100) { |
203 | IRDA_ERROR("%s: error in task handler!\n", | 203 | IRDA_ERROR("%s: error in task handler!\n", |
204 | __FUNCTION__); | 204 | __func__); |
205 | irda_task_delete(task); | 205 | irda_task_delete(task); |
206 | return TRUE; | 206 | return TRUE; |
207 | } | 207 | } |
208 | } while ((timeout == 0) && (task->state != IRDA_TASK_DONE)); | 208 | } while ((timeout == 0) && (task->state != IRDA_TASK_DONE)); |
209 | 209 | ||
210 | if (timeout < 0) { | 210 | if (timeout < 0) { |
211 | IRDA_ERROR("%s: Error executing task!\n", __FUNCTION__); | 211 | IRDA_ERROR("%s: Error executing task!\n", __func__); |
212 | irda_task_delete(task); | 212 | irda_task_delete(task); |
213 | return TRUE; | 213 | return TRUE; |
214 | } | 214 | } |
@@ -241,7 +241,7 @@ static int irda_task_kick(struct irda_task *task) | |||
241 | finished = FALSE; | 241 | finished = FALSE; |
242 | } else { | 242 | } else { |
243 | IRDA_DEBUG(0, "%s(), not finished, and no timeout!\n", | 243 | IRDA_DEBUG(0, "%s(), not finished, and no timeout!\n", |
244 | __FUNCTION__); | 244 | __func__); |
245 | finished = FALSE; | 245 | finished = FALSE; |
246 | } | 246 | } |
247 | 247 | ||
@@ -258,7 +258,7 @@ static void irda_task_timer_expired(void *data) | |||
258 | { | 258 | { |
259 | struct irda_task *task; | 259 | struct irda_task *task; |
260 | 260 | ||
261 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__); | 261 | IRDA_DEBUG(2, "%s()\n", __func__); |
262 | 262 | ||
263 | task = (struct irda_task *) data; | 263 | task = (struct irda_task *) data; |
264 | 264 | ||
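The IRDA_DEBUG, IRDA_WARNING and IRDA_ERROR calls in these files all take a printf-style format plus arguments, which is why the function name can simply be passed through as a "%s" argument. A rough sketch of how such a level-gated debug macro can be structured (an illustrative assumption for readability; MY_IRDA_DEBUG and irda_debug_level are hypothetical names, not the kernel's actual definition):

    #include <stdio.h>

    /* Stand-in for the subsystem's runtime debug level. */
    static unsigned int irda_debug_level = 5;

    /* Illustrative level-gated debug macro, a sketch rather than the real IRDA_DEBUG. */
    #define MY_IRDA_DEBUG(n, fmt, ...)                              \
        do {                                                        \
            if (irda_debug_level >= (unsigned int)(n))              \
                printf(fmt, ##__VA_ARGS__);                         \
        } while (0)

    static int my_device_is_receiving(void)
    {
        MY_IRDA_DEBUG(2, "%s()\n", __func__);   /* same shape as the hunks above */
        return 0;
    }

    int main(void)
    {
        return my_device_is_receiving();
    }

Because __func__ is a compiler-provided string object rather than a preprocessor macro, it passes through variadic macros like this without any special handling.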
diff --git a/net/irda/iriap.c b/net/irda/iriap.c index 390a790886eb..9e15c82960fe 100644 --- a/net/irda/iriap.c +++ b/net/irda/iriap.c | |||
@@ -108,7 +108,7 @@ int __init iriap_init(void) | |||
108 | irias_objects = hashbin_new(HB_LOCK); | 108 | irias_objects = hashbin_new(HB_LOCK); |
109 | if (!irias_objects) { | 109 | if (!irias_objects) { |
110 | IRDA_WARNING("%s: Can't allocate irias_objects hashbin!\n", | 110 | IRDA_WARNING("%s: Can't allocate irias_objects hashbin!\n", |
111 | __FUNCTION__); | 111 | __func__); |
112 | hashbin_delete(iriap, NULL); | 112 | hashbin_delete(iriap, NULL); |
113 | return -ENOMEM; | 113 | return -ENOMEM; |
114 | } | 114 | } |
@@ -139,7 +139,7 @@ int __init iriap_init(void) | |||
139 | */ | 139 | */ |
140 | server = iriap_open(LSAP_IAS, IAS_SERVER, NULL, NULL); | 140 | server = iriap_open(LSAP_IAS, IAS_SERVER, NULL, NULL); |
141 | if (!server) { | 141 | if (!server) { |
142 | IRDA_DEBUG(0, "%s(), unable to open server\n", __FUNCTION__); | 142 | IRDA_DEBUG(0, "%s(), unable to open server\n", __func__); |
143 | return -1; | 143 | return -1; |
144 | } | 144 | } |
145 | iriap_register_lsap(server, LSAP_IAS, IAS_SERVER); | 145 | iriap_register_lsap(server, LSAP_IAS, IAS_SERVER); |
@@ -171,11 +171,11 @@ struct iriap_cb *iriap_open(__u8 slsap_sel, int mode, void *priv, | |||
171 | { | 171 | { |
172 | struct iriap_cb *self; | 172 | struct iriap_cb *self; |
173 | 173 | ||
174 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__); | 174 | IRDA_DEBUG(2, "%s()\n", __func__); |
175 | 175 | ||
176 | self = kzalloc(sizeof(*self), GFP_ATOMIC); | 176 | self = kzalloc(sizeof(*self), GFP_ATOMIC); |
177 | if (!self) { | 177 | if (!self) { |
178 | IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__); | 178 | IRDA_WARNING("%s: Unable to kmalloc!\n", __func__); |
179 | return NULL; | 179 | return NULL; |
180 | } | 180 | } |
181 | 181 | ||
@@ -217,7 +217,7 @@ EXPORT_SYMBOL(iriap_open); | |||
217 | */ | 217 | */ |
218 | static void __iriap_close(struct iriap_cb *self) | 218 | static void __iriap_close(struct iriap_cb *self) |
219 | { | 219 | { |
220 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__); | 220 | IRDA_DEBUG(4, "%s()\n", __func__); |
221 | 221 | ||
222 | IRDA_ASSERT(self != NULL, return;); | 222 | IRDA_ASSERT(self != NULL, return;); |
223 | IRDA_ASSERT(self->magic == IAS_MAGIC, return;); | 223 | IRDA_ASSERT(self->magic == IAS_MAGIC, return;); |
@@ -241,7 +241,7 @@ void iriap_close(struct iriap_cb *self) | |||
241 | { | 241 | { |
242 | struct iriap_cb *entry; | 242 | struct iriap_cb *entry; |
243 | 243 | ||
244 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__); | 244 | IRDA_DEBUG(2, "%s()\n", __func__); |
245 | 245 | ||
246 | IRDA_ASSERT(self != NULL, return;); | 246 | IRDA_ASSERT(self != NULL, return;); |
247 | IRDA_ASSERT(self->magic == IAS_MAGIC, return;); | 247 | IRDA_ASSERT(self->magic == IAS_MAGIC, return;); |
@@ -262,7 +262,7 @@ static int iriap_register_lsap(struct iriap_cb *self, __u8 slsap_sel, int mode) | |||
262 | { | 262 | { |
263 | notify_t notify; | 263 | notify_t notify; |
264 | 264 | ||
265 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__); | 265 | IRDA_DEBUG(2, "%s()\n", __func__); |
266 | 266 | ||
267 | irda_notify_init(¬ify); | 267 | irda_notify_init(¬ify); |
268 | notify.connect_confirm = iriap_connect_confirm; | 268 | notify.connect_confirm = iriap_connect_confirm; |
@@ -277,7 +277,7 @@ static int iriap_register_lsap(struct iriap_cb *self, __u8 slsap_sel, int mode) | |||
277 | 277 | ||
278 | self->lsap = irlmp_open_lsap(slsap_sel, ¬ify, 0); | 278 | self->lsap = irlmp_open_lsap(slsap_sel, ¬ify, 0); |
279 | if (self->lsap == NULL) { | 279 | if (self->lsap == NULL) { |
280 | IRDA_ERROR("%s: Unable to allocated LSAP!\n", __FUNCTION__); | 280 | IRDA_ERROR("%s: Unable to allocated LSAP!\n", __func__); |
281 | return -1; | 281 | return -1; |
282 | } | 282 | } |
283 | self->slsap_sel = self->lsap->slsap_sel; | 283 | self->slsap_sel = self->lsap->slsap_sel; |
@@ -297,7 +297,7 @@ static void iriap_disconnect_indication(void *instance, void *sap, | |||
297 | { | 297 | { |
298 | struct iriap_cb *self; | 298 | struct iriap_cb *self; |
299 | 299 | ||
300 | IRDA_DEBUG(4, "%s(), reason=%s\n", __FUNCTION__, irlmp_reasons[reason]); | 300 | IRDA_DEBUG(4, "%s(), reason=%s\n", __func__, irlmp_reasons[reason]); |
301 | 301 | ||
302 | self = (struct iriap_cb *) instance; | 302 | self = (struct iriap_cb *) instance; |
303 | 303 | ||
@@ -313,7 +313,7 @@ static void iriap_disconnect_indication(void *instance, void *sap, | |||
313 | dev_kfree_skb(skb); | 313 | dev_kfree_skb(skb); |
314 | 314 | ||
315 | if (self->mode == IAS_CLIENT) { | 315 | if (self->mode == IAS_CLIENT) { |
316 | IRDA_DEBUG(4, "%s(), disconnect as client\n", __FUNCTION__); | 316 | IRDA_DEBUG(4, "%s(), disconnect as client\n", __func__); |
317 | 317 | ||
318 | 318 | ||
319 | iriap_do_client_event(self, IAP_LM_DISCONNECT_INDICATION, | 319 | iriap_do_client_event(self, IAP_LM_DISCONNECT_INDICATION, |
@@ -326,7 +326,7 @@ static void iriap_disconnect_indication(void *instance, void *sap, | |||
326 | if (self->confirm) | 326 | if (self->confirm) |
327 | self->confirm(IAS_DISCONNECT, 0, NULL, self->priv); | 327 | self->confirm(IAS_DISCONNECT, 0, NULL, self->priv); |
328 | } else { | 328 | } else { |
329 | IRDA_DEBUG(4, "%s(), disconnect as server\n", __FUNCTION__); | 329 | IRDA_DEBUG(4, "%s(), disconnect as server\n", __func__); |
330 | iriap_do_server_event(self, IAP_LM_DISCONNECT_INDICATION, | 330 | iriap_do_server_event(self, IAP_LM_DISCONNECT_INDICATION, |
331 | NULL); | 331 | NULL); |
332 | iriap_close(self); | 332 | iriap_close(self); |
@@ -340,7 +340,7 @@ static void iriap_disconnect_request(struct iriap_cb *self) | |||
340 | { | 340 | { |
341 | struct sk_buff *tx_skb; | 341 | struct sk_buff *tx_skb; |
342 | 342 | ||
343 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__); | 343 | IRDA_DEBUG(4, "%s()\n", __func__); |
344 | 344 | ||
345 | IRDA_ASSERT(self != NULL, return;); | 345 | IRDA_ASSERT(self != NULL, return;); |
346 | IRDA_ASSERT(self->magic == IAS_MAGIC, return;); | 346 | IRDA_ASSERT(self->magic == IAS_MAGIC, return;); |
@@ -349,7 +349,7 @@ static void iriap_disconnect_request(struct iriap_cb *self) | |||
349 | if (tx_skb == NULL) { | 349 | if (tx_skb == NULL) { |
350 | IRDA_DEBUG(0, | 350 | IRDA_DEBUG(0, |
351 | "%s(), Could not allocate an sk_buff of length %d\n", | 351 | "%s(), Could not allocate an sk_buff of length %d\n", |
352 | __FUNCTION__, LMP_MAX_HEADER); | 352 | __func__, LMP_MAX_HEADER); |
353 | return; | 353 | return; |
354 | } | 354 | } |
355 | 355 | ||
@@ -453,13 +453,13 @@ static void iriap_getvaluebyclass_confirm(struct iriap_cb *self, | |||
453 | /* Get length, MSB first */ | 453 | /* Get length, MSB first */ |
454 | len = be16_to_cpu(get_unaligned((__be16 *)(fp+n))); n += 2; | 454 | len = be16_to_cpu(get_unaligned((__be16 *)(fp+n))); n += 2; |
455 | 455 | ||
456 | IRDA_DEBUG(4, "%s(), len=%d\n", __FUNCTION__, len); | 456 | IRDA_DEBUG(4, "%s(), len=%d\n", __func__, len); |
457 | 457 | ||
458 | /* Get object ID, MSB first */ | 458 | /* Get object ID, MSB first */ |
459 | obj_id = be16_to_cpu(get_unaligned((__be16 *)(fp+n))); n += 2; | 459 | obj_id = be16_to_cpu(get_unaligned((__be16 *)(fp+n))); n += 2; |
460 | 460 | ||
461 | type = fp[n++]; | 461 | type = fp[n++]; |
462 | IRDA_DEBUG(4, "%s(), Value type = %d\n", __FUNCTION__, type); | 462 | IRDA_DEBUG(4, "%s(), Value type = %d\n", __func__, type); |
463 | 463 | ||
464 | switch (type) { | 464 | switch (type) { |
465 | case IAS_INTEGER: | 465 | case IAS_INTEGER: |
@@ -468,7 +468,7 @@ static void iriap_getvaluebyclass_confirm(struct iriap_cb *self, | |||
468 | value = irias_new_integer_value(tmp_cpu32); | 468 | value = irias_new_integer_value(tmp_cpu32); |
469 | 469 | ||
470 | /* Legal values restricted to 0x01-0x6f, page 15 irttp */ | 470 | /* Legal values restricted to 0x01-0x6f, page 15 irttp */ |
471 | IRDA_DEBUG(4, "%s(), lsap=%d\n", __FUNCTION__, value->t.integer); | 471 | IRDA_DEBUG(4, "%s(), lsap=%d\n", __func__, value->t.integer); |
472 | break; | 472 | break; |
473 | case IAS_STRING: | 473 | case IAS_STRING: |
474 | charset = fp[n++]; | 474 | charset = fp[n++]; |
@@ -488,7 +488,7 @@ static void iriap_getvaluebyclass_confirm(struct iriap_cb *self, | |||
488 | /* case CS_UNICODE: */ | 488 | /* case CS_UNICODE: */ |
489 | default: | 489 | default: |
490 | IRDA_DEBUG(0, "%s(), charset %s, not supported\n", | 490 | IRDA_DEBUG(0, "%s(), charset %s, not supported\n", |
491 | __FUNCTION__, ias_charset_types[charset]); | 491 | __func__, ias_charset_types[charset]); |
492 | 492 | ||
493 | /* Aborting, close connection! */ | 493 | /* Aborting, close connection! */ |
494 | iriap_disconnect_request(self); | 494 | iriap_disconnect_request(self); |
@@ -496,7 +496,7 @@ static void iriap_getvaluebyclass_confirm(struct iriap_cb *self, | |||
496 | /* break; */ | 496 | /* break; */ |
497 | } | 497 | } |
498 | value_len = fp[n++]; | 498 | value_len = fp[n++]; |
499 | IRDA_DEBUG(4, "%s(), strlen=%d\n", __FUNCTION__, value_len); | 499 | IRDA_DEBUG(4, "%s(), strlen=%d\n", __func__, value_len); |
500 | 500 | ||
501 | /* Make sure the string is null-terminated */ | 501 | /* Make sure the string is null-terminated */ |
502 | fp[n+value_len] = 0x00; | 502 | fp[n+value_len] = 0x00; |
@@ -526,7 +526,7 @@ static void iriap_getvaluebyclass_confirm(struct iriap_cb *self, | |||
526 | if (self->confirm) | 526 | if (self->confirm) |
527 | self->confirm(IAS_SUCCESS, obj_id, value, self->priv); | 527 | self->confirm(IAS_SUCCESS, obj_id, value, self->priv); |
528 | else { | 528 | else { |
529 | IRDA_DEBUG(0, "%s(), missing handler!\n", __FUNCTION__); | 529 | IRDA_DEBUG(0, "%s(), missing handler!\n", __func__); |
530 | irias_delete_value(value); | 530 | irias_delete_value(value); |
531 | } | 531 | } |
532 | } | 532 | } |
@@ -548,7 +548,7 @@ static void iriap_getvaluebyclass_response(struct iriap_cb *self, | |||
548 | __be16 tmp_be16; | 548 | __be16 tmp_be16; |
549 | __u8 *fp; | 549 | __u8 *fp; |
550 | 550 | ||
551 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__); | 551 | IRDA_DEBUG(4, "%s()\n", __func__); |
552 | 552 | ||
553 | IRDA_ASSERT(self != NULL, return;); | 553 | IRDA_ASSERT(self != NULL, return;); |
554 | IRDA_ASSERT(self->magic == IAS_MAGIC, return;); | 554 | IRDA_ASSERT(self->magic == IAS_MAGIC, return;); |
@@ -610,12 +610,12 @@ static void iriap_getvaluebyclass_response(struct iriap_cb *self, | |||
610 | memcpy(fp+n, value->t.oct_seq, value->len); n+=value->len; | 610 | memcpy(fp+n, value->t.oct_seq, value->len); n+=value->len; |
611 | break; | 611 | break; |
612 | case IAS_MISSING: | 612 | case IAS_MISSING: |
613 | IRDA_DEBUG( 3, "%s: sending IAS_MISSING\n", __FUNCTION__); | 613 | IRDA_DEBUG( 3, "%s: sending IAS_MISSING\n", __func__); |
614 | skb_put(tx_skb, 1); | 614 | skb_put(tx_skb, 1); |
615 | fp[n++] = value->type; | 615 | fp[n++] = value->type; |
616 | break; | 616 | break; |
617 | default: | 617 | default: |
618 | IRDA_DEBUG(0, "%s(), type not implemented!\n", __FUNCTION__); | 618 | IRDA_DEBUG(0, "%s(), type not implemented!\n", __func__); |
619 | break; | 619 | break; |
620 | } | 620 | } |
621 | iriap_do_r_connect_event(self, IAP_CALL_RESPONSE, tx_skb); | 621 | iriap_do_r_connect_event(self, IAP_CALL_RESPONSE, tx_skb); |
@@ -642,7 +642,7 @@ static void iriap_getvaluebyclass_indication(struct iriap_cb *self, | |||
642 | __u8 *fp; | 642 | __u8 *fp; |
643 | int n; | 643 | int n; |
644 | 644 | ||
645 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__); | 645 | IRDA_DEBUG(4, "%s()\n", __func__); |
646 | 646 | ||
647 | IRDA_ASSERT(self != NULL, return;); | 647 | IRDA_ASSERT(self != NULL, return;); |
648 | IRDA_ASSERT(self->magic == IAS_MAGIC, return;); | 648 | IRDA_ASSERT(self->magic == IAS_MAGIC, return;); |
@@ -697,7 +697,7 @@ void iriap_send_ack(struct iriap_cb *self) | |||
697 | struct sk_buff *tx_skb; | 697 | struct sk_buff *tx_skb; |
698 | __u8 *frame; | 698 | __u8 *frame; |
699 | 699 | ||
700 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__); | 700 | IRDA_DEBUG(2, "%s()\n", __func__); |
701 | 701 | ||
702 | IRDA_ASSERT(self != NULL, return;); | 702 | IRDA_ASSERT(self != NULL, return;); |
703 | IRDA_ASSERT(self->magic == IAS_MAGIC, return;); | 703 | IRDA_ASSERT(self->magic == IAS_MAGIC, return;); |
@@ -728,7 +728,7 @@ void iriap_connect_request(struct iriap_cb *self) | |||
728 | self->saddr, self->daddr, | 728 | self->saddr, self->daddr, |
729 | NULL, NULL); | 729 | NULL, NULL); |
730 | if (ret < 0) { | 730 | if (ret < 0) { |
731 | IRDA_DEBUG(0, "%s(), connect failed!\n", __FUNCTION__); | 731 | IRDA_DEBUG(0, "%s(), connect failed!\n", __func__); |
732 | self->confirm(IAS_DISCONNECT, 0, NULL, self->priv); | 732 | self->confirm(IAS_DISCONNECT, 0, NULL, self->priv); |
733 | } | 733 | } |
734 | } | 734 | } |
@@ -776,7 +776,7 @@ static void iriap_connect_indication(void *instance, void *sap, | |||
776 | { | 776 | { |
777 | struct iriap_cb *self, *new; | 777 | struct iriap_cb *self, *new; |
778 | 778 | ||
779 | IRDA_DEBUG(1, "%s()\n", __FUNCTION__); | 779 | IRDA_DEBUG(1, "%s()\n", __func__); |
780 | 780 | ||
781 | self = (struct iriap_cb *) instance; | 781 | self = (struct iriap_cb *) instance; |
782 | 782 | ||
@@ -787,14 +787,14 @@ static void iriap_connect_indication(void *instance, void *sap, | |||
787 | /* Start new server */ | 787 | /* Start new server */ |
788 | new = iriap_open(LSAP_IAS, IAS_SERVER, NULL, NULL); | 788 | new = iriap_open(LSAP_IAS, IAS_SERVER, NULL, NULL); |
789 | if (!new) { | 789 | if (!new) { |
790 | IRDA_DEBUG(0, "%s(), open failed\n", __FUNCTION__); | 790 | IRDA_DEBUG(0, "%s(), open failed\n", __func__); |
791 | goto out; | 791 | goto out; |
792 | } | 792 | } |
793 | 793 | ||
794 | /* Now attach up the new "socket" */ | 794 | /* Now attach up the new "socket" */ |
795 | new->lsap = irlmp_dup(self->lsap, new); | 795 | new->lsap = irlmp_dup(self->lsap, new); |
796 | if (!new->lsap) { | 796 | if (!new->lsap) { |
797 | IRDA_DEBUG(0, "%s(), dup failed!\n", __FUNCTION__); | 797 | IRDA_DEBUG(0, "%s(), dup failed!\n", __func__); |
798 | goto out; | 798 | goto out; |
799 | } | 799 | } |
800 | 800 | ||
@@ -824,7 +824,7 @@ static int iriap_data_indication(void *instance, void *sap, | |||
824 | __u8 *frame; | 824 | __u8 *frame; |
825 | __u8 opcode; | 825 | __u8 opcode; |
826 | 826 | ||
827 | IRDA_DEBUG(3, "%s()\n", __FUNCTION__); | 827 | IRDA_DEBUG(3, "%s()\n", __func__); |
828 | 828 | ||
829 | self = (struct iriap_cb *) instance; | 829 | self = (struct iriap_cb *) instance; |
830 | 830 | ||
@@ -836,7 +836,7 @@ static int iriap_data_indication(void *instance, void *sap, | |||
836 | 836 | ||
837 | if (self->mode == IAS_SERVER) { | 837 | if (self->mode == IAS_SERVER) { |
838 | /* Call server */ | 838 | /* Call server */ |
839 | IRDA_DEBUG(4, "%s(), Calling server!\n", __FUNCTION__); | 839 | IRDA_DEBUG(4, "%s(), Calling server!\n", __func__); |
840 | iriap_do_r_connect_event(self, IAP_RECV_F_LST, skb); | 840 | iriap_do_r_connect_event(self, IAP_RECV_F_LST, skb); |
841 | goto out; | 841 | goto out; |
842 | } | 842 | } |
@@ -844,13 +844,13 @@ static int iriap_data_indication(void *instance, void *sap, | |||
844 | if (~opcode & IAP_LST) { | 844 | if (~opcode & IAP_LST) { |
845 | IRDA_WARNING("%s:, IrIAS multiframe commands or " | 845 | IRDA_WARNING("%s:, IrIAS multiframe commands or " |
846 | "results is not implemented yet!\n", | 846 | "results is not implemented yet!\n", |
847 | __FUNCTION__); | 847 | __func__); |
848 | goto out; | 848 | goto out; |
849 | } | 849 | } |
850 | 850 | ||
851 | /* Check for ack frames since they don't contain any data */ | 851 | /* Check for ack frames since they don't contain any data */ |
852 | if (opcode & IAP_ACK) { | 852 | if (opcode & IAP_ACK) { |
853 | IRDA_DEBUG(0, "%s() Got ack frame!\n", __FUNCTION__); | 853 | IRDA_DEBUG(0, "%s() Got ack frame!\n", __func__); |
854 | goto out; | 854 | goto out; |
855 | } | 855 | } |
856 | 856 | ||
@@ -868,7 +868,7 @@ static int iriap_data_indication(void *instance, void *sap, | |||
868 | iriap_getvaluebyclass_confirm(self, skb); | 868 | iriap_getvaluebyclass_confirm(self, skb); |
869 | break; | 869 | break; |
870 | case IAS_CLASS_UNKNOWN: | 870 | case IAS_CLASS_UNKNOWN: |
871 | IRDA_DEBUG(1, "%s(), No such class!\n", __FUNCTION__); | 871 | IRDA_DEBUG(1, "%s(), No such class!\n", __func__); |
872 | /* Finished, close connection! */ | 872 | /* Finished, close connection! */ |
873 | iriap_disconnect_request(self); | 873 | iriap_disconnect_request(self); |
874 | 874 | ||
@@ -881,7 +881,7 @@ static int iriap_data_indication(void *instance, void *sap, | |||
881 | self->priv); | 881 | self->priv); |
882 | break; | 882 | break; |
883 | case IAS_ATTRIB_UNKNOWN: | 883 | case IAS_ATTRIB_UNKNOWN: |
884 | IRDA_DEBUG(1, "%s(), No such attribute!\n", __FUNCTION__); | 884 | IRDA_DEBUG(1, "%s(), No such attribute!\n", __func__); |
885 | /* Finished, close connection! */ | 885 | /* Finished, close connection! */ |
886 | iriap_disconnect_request(self); | 886 | iriap_disconnect_request(self); |
887 | 887 | ||
@@ -896,7 +896,7 @@ static int iriap_data_indication(void *instance, void *sap, | |||
896 | } | 896 | } |
897 | break; | 897 | break; |
898 | default: | 898 | default: |
899 | IRDA_DEBUG(0, "%s(), Unknown op-code: %02x\n", __FUNCTION__, | 899 | IRDA_DEBUG(0, "%s(), Unknown op-code: %02x\n", __func__, |
900 | opcode); | 900 | opcode); |
901 | break; | 901 | break; |
902 | } | 902 | } |
@@ -918,7 +918,7 @@ void iriap_call_indication(struct iriap_cb *self, struct sk_buff *skb) | |||
918 | __u8 *fp; | 918 | __u8 *fp; |
919 | __u8 opcode; | 919 | __u8 opcode; |
920 | 920 | ||
921 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__); | 921 | IRDA_DEBUG(4, "%s()\n", __func__); |
922 | 922 | ||
923 | IRDA_ASSERT(self != NULL, return;); | 923 | IRDA_ASSERT(self != NULL, return;); |
924 | IRDA_ASSERT(self->magic == IAS_MAGIC, return;); | 924 | IRDA_ASSERT(self->magic == IAS_MAGIC, return;); |
@@ -929,7 +929,7 @@ void iriap_call_indication(struct iriap_cb *self, struct sk_buff *skb) | |||
929 | opcode = fp[0]; | 929 | opcode = fp[0]; |
930 | if (~opcode & 0x80) { | 930 | if (~opcode & 0x80) { |
931 | IRDA_WARNING("%s: IrIAS multiframe commands or results " | 931 | IRDA_WARNING("%s: IrIAS multiframe commands or results " |
932 | "is not implemented yet!\n", __FUNCTION__); | 932 | "is not implemented yet!\n", __func__); |
933 | return; | 933 | return; |
934 | } | 934 | } |
935 | opcode &= 0x7f; /* Mask away LST bit */ | 935 | opcode &= 0x7f; /* Mask away LST bit */ |
@@ -937,7 +937,7 @@ void iriap_call_indication(struct iriap_cb *self, struct sk_buff *skb) | |||
937 | switch (opcode) { | 937 | switch (opcode) { |
938 | case GET_INFO_BASE: | 938 | case GET_INFO_BASE: |
939 | IRDA_WARNING("%s: GetInfoBaseDetails not implemented yet!\n", | 939 | IRDA_WARNING("%s: GetInfoBaseDetails not implemented yet!\n", |
940 | __FUNCTION__); | 940 | __func__); |
941 | break; | 941 | break; |
942 | case GET_VALUE_BY_CLASS: | 942 | case GET_VALUE_BY_CLASS: |
943 | iriap_getvaluebyclass_indication(self, skb); | 943 | iriap_getvaluebyclass_indication(self, skb); |
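The GetValueByClass confirm path in iriap.c pulls multi-byte fields out of the received frame with be16_to_cpu(get_unaligned(...)): IAS frames carry lengths and object IDs big-endian ("MSB first") at offsets that may not be aligned. A portable userspace sketch of the same extraction, assuming only standard C rather than the kernel helpers (frame contents are made up for the example):

    #include <stdint.h>
    #include <stdio.h>

    /* Read a big-endian 16-bit field at offset n without assuming alignment. */
    static uint16_t get_be16(const uint8_t *fp, size_t n)
    {
        return (uint16_t)((fp[n] << 8) | fp[n + 1]);
    }

    int main(void)
    {
        const uint8_t frame[] = { 0x00, 0x1a, 0x12, 0x34 };  /* len=26, obj_id=0x1234 */
        size_t n = 0;

        uint16_t len    = get_be16(frame, n); n += 2;
        uint16_t obj_id = get_be16(frame, n); n += 2;

        printf("len=%u obj_id=0x%04x\n", len, obj_id);
        return 0;
    }

The kernel code reaches the same result with get_unaligned() plus be16_to_cpu(), which additionally avoids unaligned-access faults on architectures that require aligned loads.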
diff --git a/net/irda/iriap_event.c b/net/irda/iriap_event.c index 8fb9d7277ca8..a301cbd93785 100644 --- a/net/irda/iriap_event.c +++ b/net/irda/iriap_event.c | |||
@@ -185,7 +185,7 @@ static void state_s_disconnect(struct iriap_cb *self, IRIAP_EVENT event, | |||
185 | case IAP_LM_DISCONNECT_INDICATION: | 185 | case IAP_LM_DISCONNECT_INDICATION: |
186 | break; | 186 | break; |
187 | default: | 187 | default: |
188 | IRDA_DEBUG(0, "%s(), Unknown event %d\n", __FUNCTION__, event); | 188 | IRDA_DEBUG(0, "%s(), Unknown event %d\n", __func__, event); |
189 | break; | 189 | break; |
190 | } | 190 | } |
191 | } | 191 | } |
@@ -217,7 +217,7 @@ static void state_s_connecting(struct iriap_cb *self, IRIAP_EVENT event, | |||
217 | iriap_next_client_state(self, S_DISCONNECT); | 217 | iriap_next_client_state(self, S_DISCONNECT); |
218 | break; | 218 | break; |
219 | default: | 219 | default: |
220 | IRDA_DEBUG(0, "%s(), Unknown event %d\n", __FUNCTION__, event); | 220 | IRDA_DEBUG(0, "%s(), Unknown event %d\n", __func__, event); |
221 | break; | 221 | break; |
222 | } | 222 | } |
223 | } | 223 | } |
@@ -269,7 +269,7 @@ static void state_s_make_call(struct iriap_cb *self, IRIAP_EVENT event, | |||
269 | iriap_next_call_state(self, S_OUTSTANDING); | 269 | iriap_next_call_state(self, S_OUTSTANDING); |
270 | break; | 270 | break; |
271 | default: | 271 | default: |
272 | IRDA_DEBUG(0, "%s(), Unknown event %d\n", __FUNCTION__, event); | 272 | IRDA_DEBUG(0, "%s(), Unknown event %d\n", __func__, event); |
273 | break; | 273 | break; |
274 | } | 274 | } |
275 | } | 275 | } |
@@ -283,7 +283,7 @@ static void state_s_make_call(struct iriap_cb *self, IRIAP_EVENT event, | |||
283 | static void state_s_calling(struct iriap_cb *self, IRIAP_EVENT event, | 283 | static void state_s_calling(struct iriap_cb *self, IRIAP_EVENT event, |
284 | struct sk_buff *skb) | 284 | struct sk_buff *skb) |
285 | { | 285 | { |
286 | IRDA_DEBUG(0, "%s(), Not implemented\n", __FUNCTION__); | 286 | IRDA_DEBUG(0, "%s(), Not implemented\n", __func__); |
287 | } | 287 | } |
288 | 288 | ||
289 | /* | 289 | /* |
@@ -305,7 +305,7 @@ static void state_s_outstanding(struct iriap_cb *self, IRIAP_EVENT event, | |||
305 | iriap_next_call_state(self, S_WAIT_FOR_CALL); | 305 | iriap_next_call_state(self, S_WAIT_FOR_CALL); |
306 | break; | 306 | break; |
307 | default: | 307 | default: |
308 | IRDA_DEBUG(0, "%s(), Unknown event %d\n", __FUNCTION__, event); | 308 | IRDA_DEBUG(0, "%s(), Unknown event %d\n", __func__, event); |
309 | break; | 309 | break; |
310 | } | 310 | } |
311 | } | 311 | } |
@@ -318,7 +318,7 @@ static void state_s_outstanding(struct iriap_cb *self, IRIAP_EVENT event, | |||
318 | static void state_s_replying(struct iriap_cb *self, IRIAP_EVENT event, | 318 | static void state_s_replying(struct iriap_cb *self, IRIAP_EVENT event, |
319 | struct sk_buff *skb) | 319 | struct sk_buff *skb) |
320 | { | 320 | { |
321 | IRDA_DEBUG(0, "%s(), Not implemented\n", __FUNCTION__); | 321 | IRDA_DEBUG(0, "%s(), Not implemented\n", __func__); |
322 | } | 322 | } |
323 | 323 | ||
324 | /* | 324 | /* |
@@ -330,7 +330,7 @@ static void state_s_replying(struct iriap_cb *self, IRIAP_EVENT event, | |||
330 | static void state_s_wait_for_call(struct iriap_cb *self, IRIAP_EVENT event, | 330 | static void state_s_wait_for_call(struct iriap_cb *self, IRIAP_EVENT event, |
331 | struct sk_buff *skb) | 331 | struct sk_buff *skb) |
332 | { | 332 | { |
333 | IRDA_DEBUG(0, "%s(), Not implemented\n", __FUNCTION__); | 333 | IRDA_DEBUG(0, "%s(), Not implemented\n", __func__); |
334 | } | 334 | } |
335 | 335 | ||
336 | 336 | ||
@@ -343,7 +343,7 @@ static void state_s_wait_for_call(struct iriap_cb *self, IRIAP_EVENT event, | |||
343 | static void state_s_wait_active(struct iriap_cb *self, IRIAP_EVENT event, | 343 | static void state_s_wait_active(struct iriap_cb *self, IRIAP_EVENT event, |
344 | struct sk_buff *skb) | 344 | struct sk_buff *skb) |
345 | { | 345 | { |
346 | IRDA_DEBUG(0, "%s(), Not implemented\n", __FUNCTION__); | 346 | IRDA_DEBUG(0, "%s(), Not implemented\n", __func__); |
347 | } | 347 | } |
348 | 348 | ||
349 | /************************************************************************** | 349 | /************************************************************************** |
@@ -367,7 +367,7 @@ static void state_r_disconnect(struct iriap_cb *self, IRIAP_EVENT event, | |||
367 | case IAP_LM_CONNECT_INDICATION: | 367 | case IAP_LM_CONNECT_INDICATION: |
368 | tx_skb = alloc_skb(LMP_MAX_HEADER, GFP_ATOMIC); | 368 | tx_skb = alloc_skb(LMP_MAX_HEADER, GFP_ATOMIC); |
369 | if (tx_skb == NULL) { | 369 | if (tx_skb == NULL) { |
370 | IRDA_WARNING("%s: unable to malloc!\n", __FUNCTION__); | 370 | IRDA_WARNING("%s: unable to malloc!\n", __func__); |
371 | return; | 371 | return; |
372 | } | 372 | } |
373 | 373 | ||
@@ -386,7 +386,7 @@ static void state_r_disconnect(struct iriap_cb *self, IRIAP_EVENT event, | |||
386 | iriap_next_r_connect_state(self, R_RECEIVING); | 386 | iriap_next_r_connect_state(self, R_RECEIVING); |
387 | break; | 387 | break; |
388 | default: | 388 | default: |
389 | IRDA_DEBUG(0, "%s(), unknown event %d\n", __FUNCTION__, event); | 389 | IRDA_DEBUG(0, "%s(), unknown event %d\n", __func__, event); |
390 | break; | 390 | break; |
391 | } | 391 | } |
392 | } | 392 | } |
@@ -397,7 +397,7 @@ static void state_r_disconnect(struct iriap_cb *self, IRIAP_EVENT event, | |||
397 | static void state_r_call(struct iriap_cb *self, IRIAP_EVENT event, | 397 | static void state_r_call(struct iriap_cb *self, IRIAP_EVENT event, |
398 | struct sk_buff *skb) | 398 | struct sk_buff *skb) |
399 | { | 399 | { |
400 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__); | 400 | IRDA_DEBUG(4, "%s()\n", __func__); |
401 | 401 | ||
402 | switch (event) { | 402 | switch (event) { |
403 | case IAP_LM_DISCONNECT_INDICATION: | 403 | case IAP_LM_DISCONNECT_INDICATION: |
@@ -406,7 +406,7 @@ static void state_r_call(struct iriap_cb *self, IRIAP_EVENT event, | |||
406 | iriap_next_r_connect_state(self, R_WAITING); | 406 | iriap_next_r_connect_state(self, R_WAITING); |
407 | break; | 407 | break; |
408 | default: | 408 | default: |
409 | IRDA_DEBUG(0, "%s(), unknown event!\n", __FUNCTION__); | 409 | IRDA_DEBUG(0, "%s(), unknown event!\n", __func__); |
410 | break; | 410 | break; |
411 | } | 411 | } |
412 | } | 412 | } |
@@ -421,13 +421,13 @@ static void state_r_call(struct iriap_cb *self, IRIAP_EVENT event, | |||
421 | static void state_r_waiting(struct iriap_cb *self, IRIAP_EVENT event, | 421 | static void state_r_waiting(struct iriap_cb *self, IRIAP_EVENT event, |
422 | struct sk_buff *skb) | 422 | struct sk_buff *skb) |
423 | { | 423 | { |
424 | IRDA_DEBUG(0, "%s(), Not implemented\n", __FUNCTION__); | 424 | IRDA_DEBUG(0, "%s(), Not implemented\n", __func__); |
425 | } | 425 | } |
426 | 426 | ||
427 | static void state_r_wait_active(struct iriap_cb *self, IRIAP_EVENT event, | 427 | static void state_r_wait_active(struct iriap_cb *self, IRIAP_EVENT event, |
428 | struct sk_buff *skb) | 428 | struct sk_buff *skb) |
429 | { | 429 | { |
430 | IRDA_DEBUG(0, "%s(), Not implemented\n", __FUNCTION__); | 430 | IRDA_DEBUG(0, "%s(), Not implemented\n", __func__); |
431 | } | 431 | } |
432 | 432 | ||
433 | /* | 433 | /* |
@@ -439,7 +439,7 @@ static void state_r_wait_active(struct iriap_cb *self, IRIAP_EVENT event, | |||
439 | static void state_r_receiving(struct iriap_cb *self, IRIAP_EVENT event, | 439 | static void state_r_receiving(struct iriap_cb *self, IRIAP_EVENT event, |
440 | struct sk_buff *skb) | 440 | struct sk_buff *skb) |
441 | { | 441 | { |
442 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__); | 442 | IRDA_DEBUG(4, "%s()\n", __func__); |
443 | 443 | ||
444 | switch (event) { | 444 | switch (event) { |
445 | case IAP_RECV_F_LST: | 445 | case IAP_RECV_F_LST: |
@@ -448,7 +448,7 @@ static void state_r_receiving(struct iriap_cb *self, IRIAP_EVENT event, | |||
448 | iriap_call_indication(self, skb); | 448 | iriap_call_indication(self, skb); |
449 | break; | 449 | break; |
450 | default: | 450 | default: |
451 | IRDA_DEBUG(0, "%s(), unknown event!\n", __FUNCTION__); | 451 | IRDA_DEBUG(0, "%s(), unknown event!\n", __func__); |
452 | break; | 452 | break; |
453 | } | 453 | } |
454 | } | 454 | } |
@@ -462,7 +462,7 @@ static void state_r_receiving(struct iriap_cb *self, IRIAP_EVENT event, | |||
462 | static void state_r_execute(struct iriap_cb *self, IRIAP_EVENT event, | 462 | static void state_r_execute(struct iriap_cb *self, IRIAP_EVENT event, |
463 | struct sk_buff *skb) | 463 | struct sk_buff *skb) |
464 | { | 464 | { |
465 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__); | 465 | IRDA_DEBUG(4, "%s()\n", __func__); |
466 | 466 | ||
467 | IRDA_ASSERT(skb != NULL, return;); | 467 | IRDA_ASSERT(skb != NULL, return;); |
468 | IRDA_ASSERT(self != NULL, return;); | 468 | IRDA_ASSERT(self != NULL, return;); |
@@ -483,7 +483,7 @@ static void state_r_execute(struct iriap_cb *self, IRIAP_EVENT event, | |||
483 | irlmp_data_request(self->lsap, skb); | 483 | irlmp_data_request(self->lsap, skb); |
484 | break; | 484 | break; |
485 | default: | 485 | default: |
486 | IRDA_DEBUG(0, "%s(), unknown event!\n", __FUNCTION__); | 486 | IRDA_DEBUG(0, "%s(), unknown event!\n", __func__); |
487 | break; | 487 | break; |
488 | } | 488 | } |
489 | } | 489 | } |
@@ -491,7 +491,7 @@ static void state_r_execute(struct iriap_cb *self, IRIAP_EVENT event, | |||
491 | static void state_r_returning(struct iriap_cb *self, IRIAP_EVENT event, | 491 | static void state_r_returning(struct iriap_cb *self, IRIAP_EVENT event, |
492 | struct sk_buff *skb) | 492 | struct sk_buff *skb) |
493 | { | 493 | { |
494 | IRDA_DEBUG(0, "%s(), event=%d\n", __FUNCTION__, event); | 494 | IRDA_DEBUG(0, "%s(), event=%d\n", __func__, event); |
495 | 495 | ||
496 | switch (event) { | 496 | switch (event) { |
497 | case IAP_RECV_F_LST: | 497 | case IAP_RECV_F_LST: |
diff --git a/net/irda/irias_object.c b/net/irda/irias_object.c index cbcf04380f3a..99ebb96f1386 100644 --- a/net/irda/irias_object.c +++ b/net/irda/irias_object.c | |||
@@ -47,12 +47,12 @@ struct ias_object *irias_new_object( char *name, int id) | |||
47 | { | 47 | { |
48 | struct ias_object *obj; | 48 | struct ias_object *obj; |
49 | 49 | ||
50 | IRDA_DEBUG( 4, "%s()\n", __FUNCTION__); | 50 | IRDA_DEBUG( 4, "%s()\n", __func__); |
51 | 51 | ||
52 | obj = kzalloc(sizeof(struct ias_object), GFP_ATOMIC); | 52 | obj = kzalloc(sizeof(struct ias_object), GFP_ATOMIC); |
53 | if (obj == NULL) { | 53 | if (obj == NULL) { |
54 | IRDA_WARNING("%s(), Unable to allocate object!\n", | 54 | IRDA_WARNING("%s(), Unable to allocate object!\n", |
55 | __FUNCTION__); | 55 | __func__); |
56 | return NULL; | 56 | return NULL; |
57 | } | 57 | } |
58 | 58 | ||
@@ -60,7 +60,7 @@ struct ias_object *irias_new_object( char *name, int id) | |||
60 | obj->name = kstrndup(name, IAS_MAX_CLASSNAME, GFP_ATOMIC); | 60 | obj->name = kstrndup(name, IAS_MAX_CLASSNAME, GFP_ATOMIC); |
61 | if (!obj->name) { | 61 | if (!obj->name) { |
62 | IRDA_WARNING("%s(), Unable to allocate name!\n", | 62 | IRDA_WARNING("%s(), Unable to allocate name!\n", |
63 | __FUNCTION__); | 63 | __func__); |
64 | kfree(obj); | 64 | kfree(obj); |
65 | return NULL; | 65 | return NULL; |
66 | } | 66 | } |
@@ -73,7 +73,7 @@ struct ias_object *irias_new_object( char *name, int id) | |||
73 | 73 | ||
74 | if (obj->attribs == NULL) { | 74 | if (obj->attribs == NULL) { |
75 | IRDA_WARNING("%s(), Unable to allocate attribs!\n", | 75 | IRDA_WARNING("%s(), Unable to allocate attribs!\n", |
76 | __FUNCTION__); | 76 | __func__); |
77 | kfree(obj->name); | 77 | kfree(obj->name); |
78 | kfree(obj); | 78 | kfree(obj); |
79 | return NULL; | 79 | return NULL; |
@@ -134,7 +134,7 @@ int irias_delete_object(struct ias_object *obj) | |||
134 | node = hashbin_remove_this(irias_objects, (irda_queue_t *) obj); | 134 | node = hashbin_remove_this(irias_objects, (irda_queue_t *) obj); |
135 | if (!node) | 135 | if (!node) |
136 | IRDA_DEBUG( 0, "%s(), object already removed!\n", | 136 | IRDA_DEBUG( 0, "%s(), object already removed!\n", |
137 | __FUNCTION__); | 137 | __func__); |
138 | 138 | ||
139 | /* Destroy */ | 139 | /* Destroy */ |
140 | __irias_delete_object(obj); | 140 | __irias_delete_object(obj); |
@@ -268,7 +268,7 @@ int irias_object_change_attribute(char *obj_name, char *attrib_name, | |||
268 | /* Find object */ | 268 | /* Find object */ |
269 | obj = hashbin_lock_find(irias_objects, 0, obj_name); | 269 | obj = hashbin_lock_find(irias_objects, 0, obj_name); |
270 | if (obj == NULL) { | 270 | if (obj == NULL) { |
271 | IRDA_WARNING("%s: Unable to find object: %s\n", __FUNCTION__, | 271 | IRDA_WARNING("%s: Unable to find object: %s\n", __func__, |
272 | obj_name); | 272 | obj_name); |
273 | return -1; | 273 | return -1; |
274 | } | 274 | } |
@@ -280,14 +280,14 @@ int irias_object_change_attribute(char *obj_name, char *attrib_name, | |||
280 | attrib = hashbin_find(obj->attribs, 0, attrib_name); | 280 | attrib = hashbin_find(obj->attribs, 0, attrib_name); |
281 | if (attrib == NULL) { | 281 | if (attrib == NULL) { |
282 | IRDA_WARNING("%s: Unable to find attribute: %s\n", | 282 | IRDA_WARNING("%s: Unable to find attribute: %s\n", |
283 | __FUNCTION__, attrib_name); | 283 | __func__, attrib_name); |
284 | spin_unlock_irqrestore(&obj->attribs->hb_spinlock, flags); | 284 | spin_unlock_irqrestore(&obj->attribs->hb_spinlock, flags); |
285 | return -1; | 285 | return -1; |
286 | } | 286 | } |
287 | 287 | ||
288 | if ( attrib->value->type != new_value->type) { | 288 | if ( attrib->value->type != new_value->type) { |
289 | IRDA_DEBUG( 0, "%s(), changing value type not allowed!\n", | 289 | IRDA_DEBUG( 0, "%s(), changing value type not allowed!\n", |
290 | __FUNCTION__); | 290 | __func__); |
291 | spin_unlock_irqrestore(&obj->attribs->hb_spinlock, flags); | 291 | spin_unlock_irqrestore(&obj->attribs->hb_spinlock, flags); |
292 | return -1; | 292 | return -1; |
293 | } | 293 | } |
@@ -322,7 +322,7 @@ void irias_add_integer_attrib(struct ias_object *obj, char *name, int value, | |||
322 | attrib = kzalloc(sizeof(struct ias_attrib), GFP_ATOMIC); | 322 | attrib = kzalloc(sizeof(struct ias_attrib), GFP_ATOMIC); |
323 | if (attrib == NULL) { | 323 | if (attrib == NULL) { |
324 | IRDA_WARNING("%s: Unable to allocate attribute!\n", | 324 | IRDA_WARNING("%s: Unable to allocate attribute!\n", |
325 | __FUNCTION__); | 325 | __func__); |
326 | return; | 326 | return; |
327 | } | 327 | } |
328 | 328 | ||
@@ -333,7 +333,7 @@ void irias_add_integer_attrib(struct ias_object *obj, char *name, int value, | |||
333 | attrib->value = irias_new_integer_value(value); | 333 | attrib->value = irias_new_integer_value(value); |
334 | if (!attrib->name || !attrib->value) { | 334 | if (!attrib->name || !attrib->value) { |
335 | IRDA_WARNING("%s: Unable to allocate attribute!\n", | 335 | IRDA_WARNING("%s: Unable to allocate attribute!\n", |
336 | __FUNCTION__); | 336 | __func__); |
337 | if (attrib->value) | 337 | if (attrib->value) |
338 | irias_delete_value(attrib->value); | 338 | irias_delete_value(attrib->value); |
339 | kfree(attrib->name); | 339 | kfree(attrib->name); |
@@ -366,7 +366,7 @@ void irias_add_octseq_attrib(struct ias_object *obj, char *name, __u8 *octets, | |||
366 | attrib = kzalloc(sizeof(struct ias_attrib), GFP_ATOMIC); | 366 | attrib = kzalloc(sizeof(struct ias_attrib), GFP_ATOMIC); |
367 | if (attrib == NULL) { | 367 | if (attrib == NULL) { |
368 | IRDA_WARNING("%s: Unable to allocate attribute!\n", | 368 | IRDA_WARNING("%s: Unable to allocate attribute!\n", |
369 | __FUNCTION__); | 369 | __func__); |
370 | return; | 370 | return; |
371 | } | 371 | } |
372 | 372 | ||
@@ -376,7 +376,7 @@ void irias_add_octseq_attrib(struct ias_object *obj, char *name, __u8 *octets, | |||
376 | attrib->value = irias_new_octseq_value( octets, len); | 376 | attrib->value = irias_new_octseq_value( octets, len); |
377 | if (!attrib->name || !attrib->value) { | 377 | if (!attrib->name || !attrib->value) { |
378 | IRDA_WARNING("%s: Unable to allocate attribute!\n", | 378 | IRDA_WARNING("%s: Unable to allocate attribute!\n", |
379 | __FUNCTION__); | 379 | __func__); |
380 | if (attrib->value) | 380 | if (attrib->value) |
381 | irias_delete_value(attrib->value); | 381 | irias_delete_value(attrib->value); |
382 | kfree(attrib->name); | 382 | kfree(attrib->name); |
@@ -408,7 +408,7 @@ void irias_add_string_attrib(struct ias_object *obj, char *name, char *value, | |||
408 | attrib = kzalloc(sizeof( struct ias_attrib), GFP_ATOMIC); | 408 | attrib = kzalloc(sizeof( struct ias_attrib), GFP_ATOMIC); |
409 | if (attrib == NULL) { | 409 | if (attrib == NULL) { |
410 | IRDA_WARNING("%s: Unable to allocate attribute!\n", | 410 | IRDA_WARNING("%s: Unable to allocate attribute!\n", |
411 | __FUNCTION__); | 411 | __func__); |
412 | return; | 412 | return; |
413 | } | 413 | } |
414 | 414 | ||
@@ -418,7 +418,7 @@ void irias_add_string_attrib(struct ias_object *obj, char *name, char *value, | |||
418 | attrib->value = irias_new_string_value(value); | 418 | attrib->value = irias_new_string_value(value); |
419 | if (!attrib->name || !attrib->value) { | 419 | if (!attrib->name || !attrib->value) { |
420 | IRDA_WARNING("%s: Unable to allocate attribute!\n", | 420 | IRDA_WARNING("%s: Unable to allocate attribute!\n", |
421 | __FUNCTION__); | 421 | __func__); |
422 | if (attrib->value) | 422 | if (attrib->value) |
423 | irias_delete_value(attrib->value); | 423 | irias_delete_value(attrib->value); |
424 | kfree(attrib->name); | 424 | kfree(attrib->name); |
@@ -442,7 +442,7 @@ struct ias_value *irias_new_integer_value(int integer) | |||
442 | 442 | ||
443 | value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC); | 443 | value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC); |
444 | if (value == NULL) { | 444 | if (value == NULL) { |
445 | IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__); | 445 | IRDA_WARNING("%s: Unable to kmalloc!\n", __func__); |
446 | return NULL; | 446 | return NULL; |
447 | } | 447 | } |
448 | 448 | ||
@@ -467,7 +467,7 @@ struct ias_value *irias_new_string_value(char *string) | |||
467 | 467 | ||
468 | value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC); | 468 | value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC); |
469 | if (value == NULL) { | 469 | if (value == NULL) { |
470 | IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__); | 470 | IRDA_WARNING("%s: Unable to kmalloc!\n", __func__); |
471 | return NULL; | 471 | return NULL; |
472 | } | 472 | } |
473 | 473 | ||
@@ -475,7 +475,7 @@ struct ias_value *irias_new_string_value(char *string) | |||
475 | value->charset = CS_ASCII; | 475 | value->charset = CS_ASCII; |
476 | value->t.string = kstrndup(string, IAS_MAX_STRING, GFP_ATOMIC); | 476 | value->t.string = kstrndup(string, IAS_MAX_STRING, GFP_ATOMIC); |
477 | if (!value->t.string) { | 477 | if (!value->t.string) { |
478 | IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__); | 478 | IRDA_WARNING("%s: Unable to kmalloc!\n", __func__); |
479 | kfree(value); | 479 | kfree(value); |
480 | return NULL; | 480 | return NULL; |
481 | } | 481 | } |
@@ -498,7 +498,7 @@ struct ias_value *irias_new_octseq_value(__u8 *octseq , int len) | |||
498 | 498 | ||
499 | value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC); | 499 | value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC); |
500 | if (value == NULL) { | 500 | if (value == NULL) { |
501 | IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__); | 501 | IRDA_WARNING("%s: Unable to kmalloc!\n", __func__); |
502 | return NULL; | 502 | return NULL; |
503 | } | 503 | } |
504 | 504 | ||
@@ -510,7 +510,7 @@ struct ias_value *irias_new_octseq_value(__u8 *octseq , int len) | |||
510 | 510 | ||
511 | value->t.oct_seq = kmemdup(octseq, len, GFP_ATOMIC); | 511 | value->t.oct_seq = kmemdup(octseq, len, GFP_ATOMIC); |
512 | if (value->t.oct_seq == NULL){ | 512 | if (value->t.oct_seq == NULL){ |
513 | IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__); | 513 | IRDA_WARNING("%s: Unable to kmalloc!\n", __func__); |
514 | kfree(value); | 514 | kfree(value); |
515 | return NULL; | 515 | return NULL; |
516 | } | 516 | } |
@@ -523,7 +523,7 @@ struct ias_value *irias_new_missing_value(void) | |||
523 | 523 | ||
524 | value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC); | 524 | value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC); |
525 | if (value == NULL) { | 525 | if (value == NULL) { |
526 | IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__); | 526 | IRDA_WARNING("%s: Unable to kmalloc!\n", __func__); |
527 | return NULL; | 527 | return NULL; |
528 | } | 528 | } |
529 | 529 | ||
@@ -540,7 +540,7 @@ struct ias_value *irias_new_missing_value(void) | |||
540 | */ | 540 | */ |
541 | void irias_delete_value(struct ias_value *value) | 541 | void irias_delete_value(struct ias_value *value) |
542 | { | 542 | { |
543 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__); | 543 | IRDA_DEBUG(4, "%s()\n", __func__); |
544 | 544 | ||
545 | IRDA_ASSERT(value != NULL, return;); | 545 | IRDA_ASSERT(value != NULL, return;); |
546 | 546 | ||
@@ -558,7 +558,7 @@ void irias_delete_value(struct ias_value *value) | |||
558 | kfree(value->t.oct_seq); | 558 | kfree(value->t.oct_seq); |
559 | break; | 559 | break; |
560 | default: | 560 | default: |
561 | IRDA_DEBUG(0, "%s(), Unknown value type!\n", __FUNCTION__); | 561 | IRDA_DEBUG(0, "%s(), Unknown value type!\n", __func__); |
562 | break; | 562 | break; |
563 | } | 563 | } |
564 | kfree(value); | 564 | kfree(value); |
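A minimal standalone sketch (not part of the patch) of the identifier swap performed throughout these hunks: __func__ is the predefined identifier standardized by C99, while __FUNCTION__ is the older GCC-specific spelling being replaced. Both expand to the name of the enclosing function, so the debug output is unchanged; the IRDA_DEBUG-style call is imitated here with plain printf.

	#include <stdio.h>

	/* __func__ (C99) and __FUNCTION__ (GCC extension) both name the
	 * enclosing function; only the former is standard C. */
	static void state_example(void)
	{
		/* mirrors the kernel's IRDA_DEBUG(4, "%s()\n", __func__) */
		printf("%s()\n", __func__);	/* prints "state_example()" */
	}

	int main(void)
	{
		state_example();
		return 0;
	}
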
diff --git a/net/irda/irlan/irlan_client.c b/net/irda/irlan/irlan_client.c index fff52d57a200..6be1ec26b30c 100644 --- a/net/irda/irlan/irlan_client.c +++ b/net/irda/irlan/irlan_client.c | |||
@@ -72,7 +72,7 @@ static void irlan_client_kick_timer_expired(void *data) | |||
72 | { | 72 | { |
73 | struct irlan_cb *self = (struct irlan_cb *) data; | 73 | struct irlan_cb *self = (struct irlan_cb *) data; |
74 | 74 | ||
75 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); | 75 | IRDA_DEBUG(2, "%s()\n", __func__ ); |
76 | 76 | ||
77 | IRDA_ASSERT(self != NULL, return;); | 77 | IRDA_ASSERT(self != NULL, return;); |
78 | IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); | 78 | IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); |
@@ -91,7 +91,7 @@ static void irlan_client_kick_timer_expired(void *data) | |||
91 | 91 | ||
92 | static void irlan_client_start_kick_timer(struct irlan_cb *self, int timeout) | 92 | static void irlan_client_start_kick_timer(struct irlan_cb *self, int timeout) |
93 | { | 93 | { |
94 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); | 94 | IRDA_DEBUG(4, "%s()\n", __func__ ); |
95 | 95 | ||
96 | irda_start_timer(&self->client.kick_timer, timeout, (void *) self, | 96 | irda_start_timer(&self->client.kick_timer, timeout, (void *) self, |
97 | irlan_client_kick_timer_expired); | 97 | irlan_client_kick_timer_expired); |
@@ -105,7 +105,7 @@ static void irlan_client_start_kick_timer(struct irlan_cb *self, int timeout) | |||
105 | */ | 105 | */ |
106 | void irlan_client_wakeup(struct irlan_cb *self, __u32 saddr, __u32 daddr) | 106 | void irlan_client_wakeup(struct irlan_cb *self, __u32 saddr, __u32 daddr) |
107 | { | 107 | { |
108 | IRDA_DEBUG(1, "%s()\n", __FUNCTION__ ); | 108 | IRDA_DEBUG(1, "%s()\n", __func__ ); |
109 | 109 | ||
110 | IRDA_ASSERT(self != NULL, return;); | 110 | IRDA_ASSERT(self != NULL, return;); |
111 | IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); | 111 | IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); |
@@ -117,7 +117,7 @@ void irlan_client_wakeup(struct irlan_cb *self, __u32 saddr, __u32 daddr) | |||
117 | if ((self->client.state != IRLAN_IDLE) || | 117 | if ((self->client.state != IRLAN_IDLE) || |
118 | (self->provider.access_type == ACCESS_DIRECT)) | 118 | (self->provider.access_type == ACCESS_DIRECT)) |
119 | { | 119 | { |
120 | IRDA_DEBUG(0, "%s(), already awake!\n", __FUNCTION__ ); | 120 | IRDA_DEBUG(0, "%s(), already awake!\n", __func__ ); |
121 | return; | 121 | return; |
122 | } | 122 | } |
123 | 123 | ||
@@ -126,7 +126,7 @@ void irlan_client_wakeup(struct irlan_cb *self, __u32 saddr, __u32 daddr) | |||
126 | self->daddr = daddr; | 126 | self->daddr = daddr; |
127 | 127 | ||
128 | if (self->disconnect_reason == LM_USER_REQUEST) { | 128 | if (self->disconnect_reason == LM_USER_REQUEST) { |
129 | IRDA_DEBUG(0, "%s(), still stopped by user\n", __FUNCTION__ ); | 129 | IRDA_DEBUG(0, "%s(), still stopped by user\n", __func__ ); |
130 | return; | 130 | return; |
131 | } | 131 | } |
132 | 132 | ||
@@ -153,7 +153,7 @@ void irlan_client_discovery_indication(discinfo_t *discovery, | |||
153 | struct irlan_cb *self; | 153 | struct irlan_cb *self; |
154 | __u32 saddr, daddr; | 154 | __u32 saddr, daddr; |
155 | 155 | ||
156 | IRDA_DEBUG(1, "%s()\n", __FUNCTION__ ); | 156 | IRDA_DEBUG(1, "%s()\n", __func__ ); |
157 | 157 | ||
158 | IRDA_ASSERT(discovery != NULL, return;); | 158 | IRDA_ASSERT(discovery != NULL, return;); |
159 | 159 | ||
@@ -175,7 +175,7 @@ void irlan_client_discovery_indication(discinfo_t *discovery, | |||
175 | if (self) { | 175 | if (self) { |
176 | IRDA_ASSERT(self->magic == IRLAN_MAGIC, goto out;); | 176 | IRDA_ASSERT(self->magic == IRLAN_MAGIC, goto out;); |
177 | 177 | ||
178 | IRDA_DEBUG(1, "%s(), Found instance (%08x)!\n", __FUNCTION__ , | 178 | IRDA_DEBUG(1, "%s(), Found instance (%08x)!\n", __func__ , |
179 | daddr); | 179 | daddr); |
180 | 180 | ||
181 | irlan_client_wakeup(self, saddr, daddr); | 181 | irlan_client_wakeup(self, saddr, daddr); |
@@ -195,7 +195,7 @@ static int irlan_client_ctrl_data_indication(void *instance, void *sap, | |||
195 | { | 195 | { |
196 | struct irlan_cb *self; | 196 | struct irlan_cb *self; |
197 | 197 | ||
198 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); | 198 | IRDA_DEBUG(2, "%s()\n", __func__ ); |
199 | 199 | ||
200 | self = (struct irlan_cb *) instance; | 200 | self = (struct irlan_cb *) instance; |
201 | 201 | ||
@@ -206,7 +206,7 @@ static int irlan_client_ctrl_data_indication(void *instance, void *sap, | |||
206 | irlan_do_client_event(self, IRLAN_DATA_INDICATION, skb); | 206 | irlan_do_client_event(self, IRLAN_DATA_INDICATION, skb); |
207 | 207 | ||
208 | /* Ready for a new command */ | 208 | /* Ready for a new command */ |
209 | IRDA_DEBUG(2, "%s(), clearing tx_busy\n", __FUNCTION__ ); | 209 | IRDA_DEBUG(2, "%s(), clearing tx_busy\n", __func__ ); |
210 | self->client.tx_busy = FALSE; | 210 | self->client.tx_busy = FALSE; |
211 | 211 | ||
212 | /* Check if we have some queued commands waiting to be sent */ | 212 | /* Check if we have some queued commands waiting to be sent */ |
@@ -223,7 +223,7 @@ static void irlan_client_ctrl_disconnect_indication(void *instance, void *sap, | |||
223 | struct tsap_cb *tsap; | 223 | struct tsap_cb *tsap; |
224 | struct sk_buff *skb; | 224 | struct sk_buff *skb; |
225 | 225 | ||
226 | IRDA_DEBUG(4, "%s(), reason=%d\n", __FUNCTION__ , reason); | 226 | IRDA_DEBUG(4, "%s(), reason=%d\n", __func__ , reason); |
227 | 227 | ||
228 | self = (struct irlan_cb *) instance; | 228 | self = (struct irlan_cb *) instance; |
229 | tsap = (struct tsap_cb *) sap; | 229 | tsap = (struct tsap_cb *) sap; |
@@ -255,7 +255,7 @@ static void irlan_client_open_ctrl_tsap(struct irlan_cb *self) | |||
255 | struct tsap_cb *tsap; | 255 | struct tsap_cb *tsap; |
256 | notify_t notify; | 256 | notify_t notify; |
257 | 257 | ||
258 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); | 258 | IRDA_DEBUG(4, "%s()\n", __func__ ); |
259 | 259 | ||
260 | IRDA_ASSERT(self != NULL, return;); | 260 | IRDA_ASSERT(self != NULL, return;); |
261 | IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); | 261 | IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); |
@@ -275,7 +275,7 @@ static void irlan_client_open_ctrl_tsap(struct irlan_cb *self) | |||
275 | 275 | ||
276 | tsap = irttp_open_tsap(LSAP_ANY, DEFAULT_INITIAL_CREDIT, ¬ify); | 276 | tsap = irttp_open_tsap(LSAP_ANY, DEFAULT_INITIAL_CREDIT, ¬ify); |
277 | if (!tsap) { | 277 | if (!tsap) { |
278 | IRDA_DEBUG(2, "%s(), Got no tsap!\n", __FUNCTION__ ); | 278 | IRDA_DEBUG(2, "%s(), Got no tsap!\n", __func__ ); |
279 | return; | 279 | return; |
280 | } | 280 | } |
281 | self->client.tsap_ctrl = tsap; | 281 | self->client.tsap_ctrl = tsap; |
@@ -295,7 +295,7 @@ static void irlan_client_ctrl_connect_confirm(void *instance, void *sap, | |||
295 | { | 295 | { |
296 | struct irlan_cb *self; | 296 | struct irlan_cb *self; |
297 | 297 | ||
298 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); | 298 | IRDA_DEBUG(4, "%s()\n", __func__ ); |
299 | 299 | ||
300 | self = (struct irlan_cb *) instance; | 300 | self = (struct irlan_cb *) instance; |
301 | 301 | ||
@@ -374,13 +374,13 @@ void irlan_client_parse_response(struct irlan_cb *self, struct sk_buff *skb) | |||
374 | 374 | ||
375 | IRDA_ASSERT(skb != NULL, return;); | 375 | IRDA_ASSERT(skb != NULL, return;); |
376 | 376 | ||
377 | IRDA_DEBUG(4, "%s() skb->len=%d\n", __FUNCTION__ , (int) skb->len); | 377 | IRDA_DEBUG(4, "%s() skb->len=%d\n", __func__ , (int) skb->len); |
378 | 378 | ||
379 | IRDA_ASSERT(self != NULL, return;); | 379 | IRDA_ASSERT(self != NULL, return;); |
380 | IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); | 380 | IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); |
381 | 381 | ||
382 | if (!skb) { | 382 | if (!skb) { |
383 | IRDA_ERROR("%s(), Got NULL skb!\n", __FUNCTION__); | 383 | IRDA_ERROR("%s(), Got NULL skb!\n", __func__); |
384 | return; | 384 | return; |
385 | } | 385 | } |
386 | frame = skb->data; | 386 | frame = skb->data; |
@@ -405,7 +405,7 @@ void irlan_client_parse_response(struct irlan_cb *self, struct sk_buff *skb) | |||
405 | /* How many parameters? */ | 405 | /* How many parameters? */ |
406 | count = frame[1]; | 406 | count = frame[1]; |
407 | 407 | ||
408 | IRDA_DEBUG(4, "%s(), got %d parameters\n", __FUNCTION__ , count); | 408 | IRDA_DEBUG(4, "%s(), got %d parameters\n", __func__ , count); |
409 | 409 | ||
410 | ptr = frame+2; | 410 | ptr = frame+2; |
411 | 411 | ||
@@ -413,7 +413,7 @@ void irlan_client_parse_response(struct irlan_cb *self, struct sk_buff *skb) | |||
413 | for (i=0; i<count;i++) { | 413 | for (i=0; i<count;i++) { |
414 | ret = irlan_extract_param(ptr, name, value, &val_len); | 414 | ret = irlan_extract_param(ptr, name, value, &val_len); |
415 | if (ret < 0) { | 415 | if (ret < 0) { |
416 | IRDA_DEBUG(2, "%s(), IrLAN, Error!\n", __FUNCTION__ ); | 416 | IRDA_DEBUG(2, "%s(), IrLAN, Error!\n", __func__ ); |
417 | break; | 417 | break; |
418 | } | 418 | } |
419 | ptr += ret; | 419 | ptr += ret; |
@@ -438,7 +438,7 @@ static void irlan_check_response_param(struct irlan_cb *self, char *param, | |||
438 | int i; | 438 | int i; |
439 | DECLARE_MAC_BUF(mac); | 439 | DECLARE_MAC_BUF(mac); |
440 | 440 | ||
441 | IRDA_DEBUG(4, "%s(), parm=%s\n", __FUNCTION__ , param); | 441 | IRDA_DEBUG(4, "%s(), parm=%s\n", __func__ , param); |
442 | 442 | ||
443 | IRDA_ASSERT(self != NULL, return;); | 443 | IRDA_ASSERT(self != NULL, return;); |
444 | IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); | 444 | IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); |
@@ -476,7 +476,7 @@ static void irlan_check_response_param(struct irlan_cb *self, char *param, | |||
476 | else if (strcmp(value, "HOSTED") == 0) | 476 | else if (strcmp(value, "HOSTED") == 0) |
477 | self->client.access_type = ACCESS_HOSTED; | 477 | self->client.access_type = ACCESS_HOSTED; |
478 | else { | 478 | else { |
479 | IRDA_DEBUG(2, "%s(), unknown access type!\n", __FUNCTION__ ); | 479 | IRDA_DEBUG(2, "%s(), unknown access type!\n", __func__ ); |
480 | } | 480 | } |
481 | } | 481 | } |
482 | /* IRLAN version */ | 482 | /* IRLAN version */ |
@@ -498,14 +498,14 @@ static void irlan_check_response_param(struct irlan_cb *self, char *param, | |||
498 | memcpy(&tmp_cpu, value, 2); /* Align value */ | 498 | memcpy(&tmp_cpu, value, 2); /* Align value */ |
499 | le16_to_cpus(&tmp_cpu); /* Convert to host order */ | 499 | le16_to_cpus(&tmp_cpu); /* Convert to host order */ |
500 | self->client.recv_arb_val = tmp_cpu; | 500 | self->client.recv_arb_val = tmp_cpu; |
501 | IRDA_DEBUG(2, "%s(), receive arb val=%d\n", __FUNCTION__ , | 501 | IRDA_DEBUG(2, "%s(), receive arb val=%d\n", __func__ , |
502 | self->client.recv_arb_val); | 502 | self->client.recv_arb_val); |
503 | } | 503 | } |
504 | if (strcmp(param, "MAX_FRAME") == 0) { | 504 | if (strcmp(param, "MAX_FRAME") == 0) { |
505 | memcpy(&tmp_cpu, value, 2); /* Align value */ | 505 | memcpy(&tmp_cpu, value, 2); /* Align value */ |
506 | le16_to_cpus(&tmp_cpu); /* Convert to host order */ | 506 | le16_to_cpus(&tmp_cpu); /* Convert to host order */ |
507 | self->client.max_frame = tmp_cpu; | 507 | self->client.max_frame = tmp_cpu; |
508 | IRDA_DEBUG(4, "%s(), max frame=%d\n", __FUNCTION__ , | 508 | IRDA_DEBUG(4, "%s(), max frame=%d\n", __func__ , |
509 | self->client.max_frame); | 509 | self->client.max_frame); |
510 | } | 510 | } |
511 | 511 | ||
@@ -539,7 +539,7 @@ void irlan_client_get_value_confirm(int result, __u16 obj_id, | |||
539 | { | 539 | { |
540 | struct irlan_cb *self; | 540 | struct irlan_cb *self; |
541 | 541 | ||
542 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); | 542 | IRDA_DEBUG(4, "%s()\n", __func__ ); |
543 | 543 | ||
544 | IRDA_ASSERT(priv != NULL, return;); | 544 | IRDA_ASSERT(priv != NULL, return;); |
545 | 545 | ||
@@ -552,7 +552,7 @@ void irlan_client_get_value_confirm(int result, __u16 obj_id, | |||
552 | 552 | ||
553 | /* Check if request succeeded */ | 553 | /* Check if request succeeded */ |
554 | if (result != IAS_SUCCESS) { | 554 | if (result != IAS_SUCCESS) { |
555 | IRDA_DEBUG(2, "%s(), got NULL value!\n", __FUNCTION__ ); | 555 | IRDA_DEBUG(2, "%s(), got NULL value!\n", __func__ ); |
556 | irlan_do_client_event(self, IRLAN_IAS_PROVIDER_NOT_AVAIL, | 556 | irlan_do_client_event(self, IRLAN_IAS_PROVIDER_NOT_AVAIL, |
557 | NULL); | 557 | NULL); |
558 | return; | 558 | return; |
@@ -570,7 +570,7 @@ void irlan_client_get_value_confirm(int result, __u16 obj_id, | |||
570 | irias_delete_value(value); | 570 | irias_delete_value(value); |
571 | break; | 571 | break; |
572 | default: | 572 | default: |
573 | IRDA_DEBUG(2, "%s(), unknown type!\n", __FUNCTION__ ); | 573 | IRDA_DEBUG(2, "%s(), unknown type!\n", __func__ ); |
574 | break; | 574 | break; |
575 | } | 575 | } |
576 | irlan_do_client_event(self, IRLAN_IAS_PROVIDER_NOT_AVAIL, NULL); | 576 | irlan_do_client_event(self, IRLAN_IAS_PROVIDER_NOT_AVAIL, NULL); |
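The RECV_ARB_VAL and MAX_FRAME hunks above keep the existing memcpy-then-le16_to_cpus idiom for reading a 16-bit little-endian parameter value that may sit unaligned inside the received frame. A hedged userspace sketch of the same idea follows; get_le16() is a hypothetical stand-in for the kernel helpers, written with plain shifts so it is independent of host byte order and alignment.

	#include <stdint.h>
	#include <stdio.h>

	/* Read two bytes LSB-first without assuming alignment or host byte order. */
	static uint16_t get_le16(const unsigned char *p)
	{
		return (uint16_t)(p[0] | (p[1] << 8));
	}

	int main(void)
	{
		unsigned char value[] = { 0x2c, 0x01 };	/* 0x012c == 300, LSB first */

		printf("max frame = %u\n", (unsigned int)get_le16(value));	/* prints 300 */
		return 0;
	}
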
diff --git a/net/irda/irlan/irlan_client_event.c b/net/irda/irlan/irlan_client_event.c index 6afcee59e906..8d5a8ebc444f 100644 --- a/net/irda/irlan/irlan_client_event.c +++ b/net/irda/irlan/irlan_client_event.c | |||
@@ -92,7 +92,7 @@ void irlan_do_client_event(struct irlan_cb *self, IRLAN_EVENT event, | |||
92 | static int irlan_client_state_idle(struct irlan_cb *self, IRLAN_EVENT event, | 92 | static int irlan_client_state_idle(struct irlan_cb *self, IRLAN_EVENT event, |
93 | struct sk_buff *skb) | 93 | struct sk_buff *skb) |
94 | { | 94 | { |
95 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); | 95 | IRDA_DEBUG(4, "%s()\n", __func__ ); |
96 | 96 | ||
97 | IRDA_ASSERT(self != NULL, return -1;); | 97 | IRDA_ASSERT(self != NULL, return -1;); |
98 | IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;); | 98 | IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;); |
@@ -101,7 +101,7 @@ static int irlan_client_state_idle(struct irlan_cb *self, IRLAN_EVENT event, | |||
101 | case IRLAN_DISCOVERY_INDICATION: | 101 | case IRLAN_DISCOVERY_INDICATION: |
102 | if (self->client.iriap) { | 102 | if (self->client.iriap) { |
103 | IRDA_WARNING("%s(), busy with a previous query\n", | 103 | IRDA_WARNING("%s(), busy with a previous query\n", |
104 | __FUNCTION__); | 104 | __func__); |
105 | return -EBUSY; | 105 | return -EBUSY; |
106 | } | 106 | } |
107 | 107 | ||
@@ -114,10 +114,10 @@ static int irlan_client_state_idle(struct irlan_cb *self, IRLAN_EVENT event, | |||
114 | "IrLAN", "IrDA:TinyTP:LsapSel"); | 114 | "IrLAN", "IrDA:TinyTP:LsapSel"); |
115 | break; | 115 | break; |
116 | case IRLAN_WATCHDOG_TIMEOUT: | 116 | case IRLAN_WATCHDOG_TIMEOUT: |
117 | IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __FUNCTION__ ); | 117 | IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__ ); |
118 | break; | 118 | break; |
119 | default: | 119 | default: |
120 | IRDA_DEBUG(4, "%s(), Unknown event %d\n", __FUNCTION__ , event); | 120 | IRDA_DEBUG(4, "%s(), Unknown event %d\n", __func__ , event); |
121 | break; | 121 | break; |
122 | } | 122 | } |
123 | if (skb) | 123 | if (skb) |
@@ -136,7 +136,7 @@ static int irlan_client_state_idle(struct irlan_cb *self, IRLAN_EVENT event, | |||
136 | static int irlan_client_state_query(struct irlan_cb *self, IRLAN_EVENT event, | 136 | static int irlan_client_state_query(struct irlan_cb *self, IRLAN_EVENT event, |
137 | struct sk_buff *skb) | 137 | struct sk_buff *skb) |
138 | { | 138 | { |
139 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); | 139 | IRDA_DEBUG(4, "%s()\n", __func__ ); |
140 | 140 | ||
141 | IRDA_ASSERT(self != NULL, return -1;); | 141 | IRDA_ASSERT(self != NULL, return -1;); |
142 | IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;); | 142 | IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;); |
@@ -154,7 +154,7 @@ static int irlan_client_state_query(struct irlan_cb *self, IRLAN_EVENT event, | |||
154 | irlan_next_client_state(self, IRLAN_CONN); | 154 | irlan_next_client_state(self, IRLAN_CONN); |
155 | break; | 155 | break; |
156 | case IRLAN_IAS_PROVIDER_NOT_AVAIL: | 156 | case IRLAN_IAS_PROVIDER_NOT_AVAIL: |
157 | IRDA_DEBUG(2, "%s(), IAS_PROVIDER_NOT_AVAIL\n", __FUNCTION__ ); | 157 | IRDA_DEBUG(2, "%s(), IAS_PROVIDER_NOT_AVAIL\n", __func__ ); |
158 | irlan_next_client_state(self, IRLAN_IDLE); | 158 | irlan_next_client_state(self, IRLAN_IDLE); |
159 | 159 | ||
160 | /* Give the client a kick! */ | 160 | /* Give the client a kick! */ |
@@ -167,10 +167,10 @@ static int irlan_client_state_query(struct irlan_cb *self, IRLAN_EVENT event, | |||
167 | irlan_next_client_state(self, IRLAN_IDLE); | 167 | irlan_next_client_state(self, IRLAN_IDLE); |
168 | break; | 168 | break; |
169 | case IRLAN_WATCHDOG_TIMEOUT: | 169 | case IRLAN_WATCHDOG_TIMEOUT: |
170 | IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __FUNCTION__ ); | 170 | IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__ ); |
171 | break; | 171 | break; |
172 | default: | 172 | default: |
173 | IRDA_DEBUG(2, "%s(), Unknown event %d\n", __FUNCTION__ , event); | 173 | IRDA_DEBUG(2, "%s(), Unknown event %d\n", __func__ , event); |
174 | break; | 174 | break; |
175 | } | 175 | } |
176 | if (skb) | 176 | if (skb) |
@@ -189,7 +189,7 @@ static int irlan_client_state_query(struct irlan_cb *self, IRLAN_EVENT event, | |||
189 | static int irlan_client_state_conn(struct irlan_cb *self, IRLAN_EVENT event, | 189 | static int irlan_client_state_conn(struct irlan_cb *self, IRLAN_EVENT event, |
190 | struct sk_buff *skb) | 190 | struct sk_buff *skb) |
191 | { | 191 | { |
192 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); | 192 | IRDA_DEBUG(4, "%s()\n", __func__ ); |
193 | 193 | ||
194 | IRDA_ASSERT(self != NULL, return -1;); | 194 | IRDA_ASSERT(self != NULL, return -1;); |
195 | 195 | ||
@@ -204,10 +204,10 @@ static int irlan_client_state_conn(struct irlan_cb *self, IRLAN_EVENT event, | |||
204 | irlan_next_client_state(self, IRLAN_IDLE); | 204 | irlan_next_client_state(self, IRLAN_IDLE); |
205 | break; | 205 | break; |
206 | case IRLAN_WATCHDOG_TIMEOUT: | 206 | case IRLAN_WATCHDOG_TIMEOUT: |
207 | IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __FUNCTION__ ); | 207 | IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__ ); |
208 | break; | 208 | break; |
209 | default: | 209 | default: |
210 | IRDA_DEBUG(2, "%s(), Unknown event %d\n", __FUNCTION__ , event); | 210 | IRDA_DEBUG(2, "%s(), Unknown event %d\n", __func__ , event); |
211 | break; | 211 | break; |
212 | } | 212 | } |
213 | if (skb) | 213 | if (skb) |
@@ -224,7 +224,7 @@ static int irlan_client_state_conn(struct irlan_cb *self, IRLAN_EVENT event, | |||
224 | static int irlan_client_state_info(struct irlan_cb *self, IRLAN_EVENT event, | 224 | static int irlan_client_state_info(struct irlan_cb *self, IRLAN_EVENT event, |
225 | struct sk_buff *skb) | 225 | struct sk_buff *skb) |
226 | { | 226 | { |
227 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); | 227 | IRDA_DEBUG(4, "%s()\n", __func__ ); |
228 | 228 | ||
229 | IRDA_ASSERT(self != NULL, return -1;); | 229 | IRDA_ASSERT(self != NULL, return -1;); |
230 | 230 | ||
@@ -244,10 +244,10 @@ static int irlan_client_state_info(struct irlan_cb *self, IRLAN_EVENT event, | |||
244 | irlan_next_client_state(self, IRLAN_IDLE); | 244 | irlan_next_client_state(self, IRLAN_IDLE); |
245 | break; | 245 | break; |
246 | case IRLAN_WATCHDOG_TIMEOUT: | 246 | case IRLAN_WATCHDOG_TIMEOUT: |
247 | IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __FUNCTION__ ); | 247 | IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__ ); |
248 | break; | 248 | break; |
249 | default: | 249 | default: |
250 | IRDA_DEBUG(2, "%s(), Unknown event %d\n", __FUNCTION__ , event); | 250 | IRDA_DEBUG(2, "%s(), Unknown event %d\n", __func__ , event); |
251 | break; | 251 | break; |
252 | } | 252 | } |
253 | if (skb) | 253 | if (skb) |
@@ -266,7 +266,7 @@ static int irlan_client_state_info(struct irlan_cb *self, IRLAN_EVENT event, | |||
266 | static int irlan_client_state_media(struct irlan_cb *self, IRLAN_EVENT event, | 266 | static int irlan_client_state_media(struct irlan_cb *self, IRLAN_EVENT event, |
267 | struct sk_buff *skb) | 267 | struct sk_buff *skb) |
268 | { | 268 | { |
269 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); | 269 | IRDA_DEBUG(4, "%s()\n", __func__ ); |
270 | 270 | ||
271 | IRDA_ASSERT(self != NULL, return -1;); | 271 | IRDA_ASSERT(self != NULL, return -1;); |
272 | 272 | ||
@@ -281,10 +281,10 @@ static int irlan_client_state_media(struct irlan_cb *self, IRLAN_EVENT event, | |||
281 | irlan_next_client_state(self, IRLAN_IDLE); | 281 | irlan_next_client_state(self, IRLAN_IDLE); |
282 | break; | 282 | break; |
283 | case IRLAN_WATCHDOG_TIMEOUT: | 283 | case IRLAN_WATCHDOG_TIMEOUT: |
284 | IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __FUNCTION__ ); | 284 | IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__ ); |
285 | break; | 285 | break; |
286 | default: | 286 | default: |
287 | IRDA_DEBUG(2, "%s(), Unknown event %d\n", __FUNCTION__ , event); | 287 | IRDA_DEBUG(2, "%s(), Unknown event %d\n", __func__ , event); |
288 | break; | 288 | break; |
289 | } | 289 | } |
290 | if (skb) | 290 | if (skb) |
@@ -305,7 +305,7 @@ static int irlan_client_state_open(struct irlan_cb *self, IRLAN_EVENT event, | |||
305 | { | 305 | { |
306 | struct qos_info qos; | 306 | struct qos_info qos; |
307 | 307 | ||
308 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); | 308 | IRDA_DEBUG(4, "%s()\n", __func__ ); |
309 | 309 | ||
310 | IRDA_ASSERT(self != NULL, return -1;); | 310 | IRDA_ASSERT(self != NULL, return -1;); |
311 | 311 | ||
@@ -344,7 +344,7 @@ static int irlan_client_state_open(struct irlan_cb *self, IRLAN_EVENT event, | |||
344 | irlan_next_client_state(self, IRLAN_DATA); | 344 | irlan_next_client_state(self, IRLAN_DATA); |
345 | break; | 345 | break; |
346 | default: | 346 | default: |
347 | IRDA_DEBUG(2, "%s(), unknown access type!\n", __FUNCTION__ ); | 347 | IRDA_DEBUG(2, "%s(), unknown access type!\n", __func__ ); |
348 | break; | 348 | break; |
349 | } | 349 | } |
350 | break; | 350 | break; |
@@ -353,10 +353,10 @@ static int irlan_client_state_open(struct irlan_cb *self, IRLAN_EVENT event, | |||
353 | irlan_next_client_state(self, IRLAN_IDLE); | 353 | irlan_next_client_state(self, IRLAN_IDLE); |
354 | break; | 354 | break; |
355 | case IRLAN_WATCHDOG_TIMEOUT: | 355 | case IRLAN_WATCHDOG_TIMEOUT: |
356 | IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __FUNCTION__ ); | 356 | IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__ ); |
357 | break; | 357 | break; |
358 | default: | 358 | default: |
359 | IRDA_DEBUG(2, "%s(), Unknown event %d\n", __FUNCTION__ , event); | 359 | IRDA_DEBUG(2, "%s(), Unknown event %d\n", __func__ , event); |
360 | break; | 360 | break; |
361 | } | 361 | } |
362 | 362 | ||
@@ -376,7 +376,7 @@ static int irlan_client_state_open(struct irlan_cb *self, IRLAN_EVENT event, | |||
376 | static int irlan_client_state_wait(struct irlan_cb *self, IRLAN_EVENT event, | 376 | static int irlan_client_state_wait(struct irlan_cb *self, IRLAN_EVENT event, |
377 | struct sk_buff *skb) | 377 | struct sk_buff *skb) |
378 | { | 378 | { |
379 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); | 379 | IRDA_DEBUG(4, "%s()\n", __func__ ); |
380 | 380 | ||
381 | IRDA_ASSERT(self != NULL, return -1;); | 381 | IRDA_ASSERT(self != NULL, return -1;); |
382 | 382 | ||
@@ -390,10 +390,10 @@ static int irlan_client_state_wait(struct irlan_cb *self, IRLAN_EVENT event, | |||
390 | irlan_next_client_state(self, IRLAN_IDLE); | 390 | irlan_next_client_state(self, IRLAN_IDLE); |
391 | break; | 391 | break; |
392 | case IRLAN_WATCHDOG_TIMEOUT: | 392 | case IRLAN_WATCHDOG_TIMEOUT: |
393 | IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __FUNCTION__ ); | 393 | IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__ ); |
394 | break; | 394 | break; |
395 | default: | 395 | default: |
396 | IRDA_DEBUG(2, "%s(), Unknown event %d\n", __FUNCTION__ , event); | 396 | IRDA_DEBUG(2, "%s(), Unknown event %d\n", __func__ , event); |
397 | break; | 397 | break; |
398 | } | 398 | } |
399 | if (skb) | 399 | if (skb) |
@@ -407,7 +407,7 @@ static int irlan_client_state_arb(struct irlan_cb *self, IRLAN_EVENT event, | |||
407 | { | 407 | { |
408 | struct qos_info qos; | 408 | struct qos_info qos; |
409 | 409 | ||
410 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); | 410 | IRDA_DEBUG(2, "%s()\n", __func__ ); |
411 | 411 | ||
412 | IRDA_ASSERT(self != NULL, return -1;); | 412 | IRDA_ASSERT(self != NULL, return -1;); |
413 | 413 | ||
@@ -429,7 +429,7 @@ static int irlan_client_state_arb(struct irlan_cb *self, IRLAN_EVENT event, | |||
429 | } else if (self->client.recv_arb_val > | 429 | } else if (self->client.recv_arb_val > |
430 | self->provider.send_arb_val) | 430 | self->provider.send_arb_val) |
431 | { | 431 | { |
432 | IRDA_DEBUG(2, "%s(), lost the battle :-(\n", __FUNCTION__ ); | 432 | IRDA_DEBUG(2, "%s(), lost the battle :-(\n", __func__ ); |
433 | } | 433 | } |
434 | break; | 434 | break; |
435 | case IRLAN_DATA_CONNECT_INDICATION: | 435 | case IRLAN_DATA_CONNECT_INDICATION: |
@@ -440,10 +440,10 @@ static int irlan_client_state_arb(struct irlan_cb *self, IRLAN_EVENT event, | |||
440 | irlan_next_client_state(self, IRLAN_IDLE); | 440 | irlan_next_client_state(self, IRLAN_IDLE); |
441 | break; | 441 | break; |
442 | case IRLAN_WATCHDOG_TIMEOUT: | 442 | case IRLAN_WATCHDOG_TIMEOUT: |
443 | IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __FUNCTION__ ); | 443 | IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__ ); |
444 | break; | 444 | break; |
445 | default: | 445 | default: |
446 | IRDA_DEBUG(2, "%s(), Unknown event %d\n", __FUNCTION__ , event); | 446 | IRDA_DEBUG(2, "%s(), Unknown event %d\n", __func__ , event); |
447 | break; | 447 | break; |
448 | } | 448 | } |
449 | if (skb) | 449 | if (skb) |
@@ -462,7 +462,7 @@ static int irlan_client_state_arb(struct irlan_cb *self, IRLAN_EVENT event, | |||
462 | static int irlan_client_state_data(struct irlan_cb *self, IRLAN_EVENT event, | 462 | static int irlan_client_state_data(struct irlan_cb *self, IRLAN_EVENT event, |
463 | struct sk_buff *skb) | 463 | struct sk_buff *skb) |
464 | { | 464 | { |
465 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); | 465 | IRDA_DEBUG(4, "%s()\n", __func__ ); |
466 | 466 | ||
467 | IRDA_ASSERT(self != NULL, return -1;); | 467 | IRDA_ASSERT(self != NULL, return -1;); |
468 | IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;); | 468 | IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;); |
@@ -476,7 +476,7 @@ static int irlan_client_state_data(struct irlan_cb *self, IRLAN_EVENT event, | |||
476 | irlan_next_client_state(self, IRLAN_IDLE); | 476 | irlan_next_client_state(self, IRLAN_IDLE); |
477 | break; | 477 | break; |
478 | default: | 478 | default: |
479 | IRDA_DEBUG(2, "%s(), Unknown event %d\n", __FUNCTION__ , event); | 479 | IRDA_DEBUG(2, "%s(), Unknown event %d\n", __func__ , event); |
480 | break; | 480 | break; |
481 | } | 481 | } |
482 | if (skb) | 482 | if (skb) |
@@ -494,7 +494,7 @@ static int irlan_client_state_data(struct irlan_cb *self, IRLAN_EVENT event, | |||
494 | static int irlan_client_state_close(struct irlan_cb *self, IRLAN_EVENT event, | 494 | static int irlan_client_state_close(struct irlan_cb *self, IRLAN_EVENT event, |
495 | struct sk_buff *skb) | 495 | struct sk_buff *skb) |
496 | { | 496 | { |
497 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); | 497 | IRDA_DEBUG(2, "%s()\n", __func__ ); |
498 | 498 | ||
499 | if (skb) | 499 | if (skb) |
500 | dev_kfree_skb(skb); | 500 | dev_kfree_skb(skb); |
@@ -511,7 +511,7 @@ static int irlan_client_state_close(struct irlan_cb *self, IRLAN_EVENT event, | |||
511 | static int irlan_client_state_sync(struct irlan_cb *self, IRLAN_EVENT event, | 511 | static int irlan_client_state_sync(struct irlan_cb *self, IRLAN_EVENT event, |
512 | struct sk_buff *skb) | 512 | struct sk_buff *skb) |
513 | { | 513 | { |
514 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); | 514 | IRDA_DEBUG(2, "%s()\n", __func__ ); |
515 | 515 | ||
516 | if (skb) | 516 | if (skb) |
517 | dev_kfree_skb(skb); | 517 | dev_kfree_skb(skb); |
diff --git a/net/irda/irlan/irlan_common.c b/net/irda/irlan/irlan_common.c index 1eb4bbcb1c9e..75bb6a9dcaaa 100644 --- a/net/irda/irlan/irlan_common.c +++ b/net/irda/irlan/irlan_common.c | |||
@@ -124,7 +124,7 @@ static int __init irlan_init(void) | |||
124 | struct irlan_cb *new; | 124 | struct irlan_cb *new; |
125 | __u16 hints; | 125 | __u16 hints; |
126 | 126 | ||
127 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); | 127 | IRDA_DEBUG(2, "%s()\n", __func__ ); |
128 | 128 | ||
129 | #ifdef CONFIG_PROC_FS | 129 | #ifdef CONFIG_PROC_FS |
130 | { struct proc_dir_entry *proc; | 130 | { struct proc_dir_entry *proc; |
@@ -136,7 +136,7 @@ static int __init irlan_init(void) | |||
136 | } | 136 | } |
137 | #endif /* CONFIG_PROC_FS */ | 137 | #endif /* CONFIG_PROC_FS */ |
138 | 138 | ||
139 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); | 139 | IRDA_DEBUG(4, "%s()\n", __func__ ); |
140 | hints = irlmp_service_to_hint(S_LAN); | 140 | hints = irlmp_service_to_hint(S_LAN); |
141 | 141 | ||
142 | /* Register with IrLMP as a client */ | 142 | /* Register with IrLMP as a client */ |
@@ -179,7 +179,7 @@ static void __exit irlan_cleanup(void) | |||
179 | { | 179 | { |
180 | struct irlan_cb *self, *next; | 180 | struct irlan_cb *self, *next; |
181 | 181 | ||
182 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); | 182 | IRDA_DEBUG(4, "%s()\n", __func__ ); |
183 | 183 | ||
184 | irlmp_unregister_client(ckey); | 184 | irlmp_unregister_client(ckey); |
185 | irlmp_unregister_service(skey); | 185 | irlmp_unregister_service(skey); |
@@ -207,7 +207,7 @@ static struct irlan_cb *irlan_open(__u32 saddr, __u32 daddr) | |||
207 | struct net_device *dev; | 207 | struct net_device *dev; |
208 | struct irlan_cb *self; | 208 | struct irlan_cb *self; |
209 | 209 | ||
210 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); | 210 | IRDA_DEBUG(2, "%s()\n", __func__ ); |
211 | 211 | ||
212 | /* Create network device with irlan */ | 212 | /* Create network device with irlan */ |
213 | dev = alloc_irlandev(eth ? "eth%d" : "irlan%d"); | 213 | dev = alloc_irlandev(eth ? "eth%d" : "irlan%d"); |
@@ -252,7 +252,7 @@ static struct irlan_cb *irlan_open(__u32 saddr, __u32 daddr) | |||
252 | 252 | ||
253 | if (register_netdev(dev)) { | 253 | if (register_netdev(dev)) { |
254 | IRDA_DEBUG(2, "%s(), register_netdev() failed!\n", | 254 | IRDA_DEBUG(2, "%s(), register_netdev() failed!\n", |
255 | __FUNCTION__ ); | 255 | __func__ ); |
256 | self = NULL; | 256 | self = NULL; |
257 | free_netdev(dev); | 257 | free_netdev(dev); |
258 | } else { | 258 | } else { |
@@ -272,7 +272,7 @@ static struct irlan_cb *irlan_open(__u32 saddr, __u32 daddr) | |||
272 | */ | 272 | */ |
273 | static void __irlan_close(struct irlan_cb *self) | 273 | static void __irlan_close(struct irlan_cb *self) |
274 | { | 274 | { |
275 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); | 275 | IRDA_DEBUG(2, "%s()\n", __func__ ); |
276 | 276 | ||
277 | ASSERT_RTNL(); | 277 | ASSERT_RTNL(); |
278 | IRDA_ASSERT(self != NULL, return;); | 278 | IRDA_ASSERT(self != NULL, return;); |
@@ -320,7 +320,7 @@ static void irlan_connect_indication(void *instance, void *sap, | |||
320 | struct irlan_cb *self; | 320 | struct irlan_cb *self; |
321 | struct tsap_cb *tsap; | 321 | struct tsap_cb *tsap; |
322 | 322 | ||
323 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); | 323 | IRDA_DEBUG(2, "%s()\n", __func__ ); |
324 | 324 | ||
325 | self = (struct irlan_cb *) instance; | 325 | self = (struct irlan_cb *) instance; |
326 | tsap = (struct tsap_cb *) sap; | 326 | tsap = (struct tsap_cb *) sap; |
@@ -332,7 +332,7 @@ static void irlan_connect_indication(void *instance, void *sap, | |||
332 | self->max_sdu_size = max_sdu_size; | 332 | self->max_sdu_size = max_sdu_size; |
333 | self->max_header_size = max_header_size; | 333 | self->max_header_size = max_header_size; |
334 | 334 | ||
335 | IRDA_DEBUG(0, "%s: We are now connected!\n", __FUNCTION__); | 335 | IRDA_DEBUG(0, "%s: We are now connected!\n", __func__); |
336 | 336 | ||
337 | del_timer(&self->watchdog_timer); | 337 | del_timer(&self->watchdog_timer); |
338 | 338 | ||
@@ -376,7 +376,7 @@ static void irlan_connect_confirm(void *instance, void *sap, | |||
376 | 376 | ||
377 | /* TODO: we could set the MTU depending on the max_sdu_size */ | 377 | /* TODO: we could set the MTU depending on the max_sdu_size */ |
378 | 378 | ||
379 | IRDA_DEBUG(0, "%s: We are now connected!\n", __FUNCTION__); | 379 | IRDA_DEBUG(0, "%s: We are now connected!\n", __func__); |
380 | del_timer(&self->watchdog_timer); | 380 | del_timer(&self->watchdog_timer); |
381 | 381 | ||
382 | /* | 382 | /* |
@@ -412,7 +412,7 @@ static void irlan_disconnect_indication(void *instance, | |||
412 | struct irlan_cb *self; | 412 | struct irlan_cb *self; |
413 | struct tsap_cb *tsap; | 413 | struct tsap_cb *tsap; |
414 | 414 | ||
415 | IRDA_DEBUG(0, "%s(), reason=%d\n", __FUNCTION__ , reason); | 415 | IRDA_DEBUG(0, "%s(), reason=%d\n", __func__ , reason); |
416 | 416 | ||
417 | self = (struct irlan_cb *) instance; | 417 | self = (struct irlan_cb *) instance; |
418 | tsap = (struct tsap_cb *) sap; | 418 | tsap = (struct tsap_cb *) sap; |
@@ -431,22 +431,22 @@ static void irlan_disconnect_indication(void *instance, | |||
431 | 431 | ||
432 | switch (reason) { | 432 | switch (reason) { |
433 | case LM_USER_REQUEST: /* User request */ | 433 | case LM_USER_REQUEST: /* User request */ |
434 | IRDA_DEBUG(2, "%s(), User requested\n", __FUNCTION__ ); | 434 | IRDA_DEBUG(2, "%s(), User requested\n", __func__ ); |
435 | break; | 435 | break; |
436 | case LM_LAP_DISCONNECT: /* Unexpected IrLAP disconnect */ | 436 | case LM_LAP_DISCONNECT: /* Unexpected IrLAP disconnect */ |
437 | IRDA_DEBUG(2, "%s(), Unexpected IrLAP disconnect\n", __FUNCTION__ ); | 437 | IRDA_DEBUG(2, "%s(), Unexpected IrLAP disconnect\n", __func__ ); |
438 | break; | 438 | break; |
439 | case LM_CONNECT_FAILURE: /* Failed to establish IrLAP connection */ | 439 | case LM_CONNECT_FAILURE: /* Failed to establish IrLAP connection */ |
440 | IRDA_DEBUG(2, "%s(), IrLAP connect failed\n", __FUNCTION__ ); | 440 | IRDA_DEBUG(2, "%s(), IrLAP connect failed\n", __func__ ); |
441 | break; | 441 | break; |
442 | case LM_LAP_RESET: /* IrLAP reset */ | 442 | case LM_LAP_RESET: /* IrLAP reset */ |
443 | IRDA_DEBUG(2, "%s(), IrLAP reset\n", __FUNCTION__ ); | 443 | IRDA_DEBUG(2, "%s(), IrLAP reset\n", __func__ ); |
444 | break; | 444 | break; |
445 | case LM_INIT_DISCONNECT: | 445 | case LM_INIT_DISCONNECT: |
446 | IRDA_DEBUG(2, "%s(), IrLMP connect failed\n", __FUNCTION__ ); | 446 | IRDA_DEBUG(2, "%s(), IrLMP connect failed\n", __func__ ); |
447 | break; | 447 | break; |
448 | default: | 448 | default: |
449 | IRDA_ERROR("%s(), Unknown disconnect reason\n", __FUNCTION__); | 449 | IRDA_ERROR("%s(), Unknown disconnect reason\n", __func__); |
450 | break; | 450 | break; |
451 | } | 451 | } |
452 | 452 | ||
@@ -468,7 +468,7 @@ void irlan_open_data_tsap(struct irlan_cb *self) | |||
468 | struct tsap_cb *tsap; | 468 | struct tsap_cb *tsap; |
469 | notify_t notify; | 469 | notify_t notify; |
470 | 470 | ||
471 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); | 471 | IRDA_DEBUG(2, "%s()\n", __func__ ); |
472 | 472 | ||
473 | IRDA_ASSERT(self != NULL, return;); | 473 | IRDA_ASSERT(self != NULL, return;); |
474 | IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); | 474 | IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); |
@@ -490,7 +490,7 @@ void irlan_open_data_tsap(struct irlan_cb *self) | |||
490 | 490 | ||
491 | tsap = irttp_open_tsap(LSAP_ANY, DEFAULT_INITIAL_CREDIT, ¬ify); | 491 | tsap = irttp_open_tsap(LSAP_ANY, DEFAULT_INITIAL_CREDIT, ¬ify); |
492 | if (!tsap) { | 492 | if (!tsap) { |
493 | IRDA_DEBUG(2, "%s(), Got no tsap!\n", __FUNCTION__ ); | 493 | IRDA_DEBUG(2, "%s(), Got no tsap!\n", __func__ ); |
494 | return; | 494 | return; |
495 | } | 495 | } |
496 | self->tsap_data = tsap; | 496 | self->tsap_data = tsap; |
@@ -504,7 +504,7 @@ void irlan_open_data_tsap(struct irlan_cb *self) | |||
504 | 504 | ||
505 | void irlan_close_tsaps(struct irlan_cb *self) | 505 | void irlan_close_tsaps(struct irlan_cb *self) |
506 | { | 506 | { |
507 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); | 507 | IRDA_DEBUG(4, "%s()\n", __func__ ); |
508 | 508 | ||
509 | IRDA_ASSERT(self != NULL, return;); | 509 | IRDA_ASSERT(self != NULL, return;); |
510 | IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); | 510 | IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); |
@@ -594,7 +594,7 @@ int irlan_run_ctrl_tx_queue(struct irlan_cb *self) | |||
594 | { | 594 | { |
595 | struct sk_buff *skb; | 595 | struct sk_buff *skb; |
596 | 596 | ||
597 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); | 597 | IRDA_DEBUG(2, "%s()\n", __func__ ); |
598 | 598 | ||
599 | if (irda_lock(&self->client.tx_busy) == FALSE) | 599 | if (irda_lock(&self->client.tx_busy) == FALSE) |
600 | return -EBUSY; | 600 | return -EBUSY; |
@@ -613,7 +613,7 @@ int irlan_run_ctrl_tx_queue(struct irlan_cb *self) | |||
613 | dev_kfree_skb(skb); | 613 | dev_kfree_skb(skb); |
614 | return -1; | 614 | return -1; |
615 | } | 615 | } |
616 | IRDA_DEBUG(2, "%s(), sending ...\n", __FUNCTION__ ); | 616 | IRDA_DEBUG(2, "%s(), sending ...\n", __func__ ); |
617 | 617 | ||
618 | return irttp_data_request(self->client.tsap_ctrl, skb); | 618 | return irttp_data_request(self->client.tsap_ctrl, skb); |
619 | } | 619 | } |
@@ -626,7 +626,7 @@ int irlan_run_ctrl_tx_queue(struct irlan_cb *self) | |||
626 | */ | 626 | */ |
627 | static void irlan_ctrl_data_request(struct irlan_cb *self, struct sk_buff *skb) | 627 | static void irlan_ctrl_data_request(struct irlan_cb *self, struct sk_buff *skb) |
628 | { | 628 | { |
629 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); | 629 | IRDA_DEBUG(2, "%s()\n", __func__ ); |
630 | 630 | ||
631 | /* Queue command */ | 631 | /* Queue command */ |
632 | skb_queue_tail(&self->client.txq, skb); | 632 | skb_queue_tail(&self->client.txq, skb); |
@@ -646,7 +646,7 @@ void irlan_get_provider_info(struct irlan_cb *self) | |||
646 | struct sk_buff *skb; | 646 | struct sk_buff *skb; |
647 | __u8 *frame; | 647 | __u8 *frame; |
648 | 648 | ||
649 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); | 649 | IRDA_DEBUG(4, "%s()\n", __func__ ); |
650 | 650 | ||
651 | IRDA_ASSERT(self != NULL, return;); | 651 | IRDA_ASSERT(self != NULL, return;); |
652 | IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); | 652 | IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); |
@@ -679,7 +679,7 @@ void irlan_open_data_channel(struct irlan_cb *self) | |||
679 | struct sk_buff *skb; | 679 | struct sk_buff *skb; |
680 | __u8 *frame; | 680 | __u8 *frame; |
681 | 681 | ||
682 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); | 682 | IRDA_DEBUG(4, "%s()\n", __func__ ); |
683 | 683 | ||
684 | IRDA_ASSERT(self != NULL, return;); | 684 | IRDA_ASSERT(self != NULL, return;); |
685 | IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); | 685 | IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); |
@@ -714,7 +714,7 @@ void irlan_close_data_channel(struct irlan_cb *self) | |||
714 | struct sk_buff *skb; | 714 | struct sk_buff *skb; |
715 | __u8 *frame; | 715 | __u8 *frame; |
716 | 716 | ||
717 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); | 717 | IRDA_DEBUG(4, "%s()\n", __func__ ); |
718 | 718 | ||
719 | IRDA_ASSERT(self != NULL, return;); | 719 | IRDA_ASSERT(self != NULL, return;); |
720 | IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); | 720 | IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); |
@@ -755,7 +755,7 @@ static void irlan_open_unicast_addr(struct irlan_cb *self) | |||
755 | struct sk_buff *skb; | 755 | struct sk_buff *skb; |
756 | __u8 *frame; | 756 | __u8 *frame; |
757 | 757 | ||
758 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); | 758 | IRDA_DEBUG(4, "%s()\n", __func__ ); |
759 | 759 | ||
760 | IRDA_ASSERT(self != NULL, return;); | 760 | IRDA_ASSERT(self != NULL, return;); |
761 | IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); | 761 | IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); |
@@ -797,7 +797,7 @@ void irlan_set_broadcast_filter(struct irlan_cb *self, int status) | |||
797 | struct sk_buff *skb; | 797 | struct sk_buff *skb; |
798 | __u8 *frame; | 798 | __u8 *frame; |
799 | 799 | ||
800 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); | 800 | IRDA_DEBUG(2, "%s()\n", __func__ ); |
801 | 801 | ||
802 | IRDA_ASSERT(self != NULL, return;); | 802 | IRDA_ASSERT(self != NULL, return;); |
803 | IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); | 803 | IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); |
@@ -841,7 +841,7 @@ void irlan_set_multicast_filter(struct irlan_cb *self, int status) | |||
841 | struct sk_buff *skb; | 841 | struct sk_buff *skb; |
842 | __u8 *frame; | 842 | __u8 *frame; |
843 | 843 | ||
844 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); | 844 | IRDA_DEBUG(2, "%s()\n", __func__ ); |
845 | 845 | ||
846 | IRDA_ASSERT(self != NULL, return;); | 846 | IRDA_ASSERT(self != NULL, return;); |
847 | IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); | 847 | IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); |
@@ -886,7 +886,7 @@ static void irlan_get_unicast_addr(struct irlan_cb *self) | |||
886 | struct sk_buff *skb; | 886 | struct sk_buff *skb; |
887 | __u8 *frame; | 887 | __u8 *frame; |
888 | 888 | ||
889 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); | 889 | IRDA_DEBUG(2, "%s()\n", __func__ ); |
890 | 890 | ||
891 | IRDA_ASSERT(self != NULL, return;); | 891 | IRDA_ASSERT(self != NULL, return;); |
892 | IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); | 892 | IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); |
@@ -926,7 +926,7 @@ void irlan_get_media_char(struct irlan_cb *self) | |||
926 | struct sk_buff *skb; | 926 | struct sk_buff *skb; |
927 | __u8 *frame; | 927 | __u8 *frame; |
928 | 928 | ||
929 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); | 929 | IRDA_DEBUG(4, "%s()\n", __func__ ); |
930 | 930 | ||
931 | IRDA_ASSERT(self != NULL, return;); | 931 | IRDA_ASSERT(self != NULL, return;); |
932 | IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); | 932 | IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); |
@@ -1014,7 +1014,7 @@ static int __irlan_insert_param(struct sk_buff *skb, char *param, int type, | |||
1014 | int n=0; | 1014 | int n=0; |
1015 | 1015 | ||
1016 | if (skb == NULL) { | 1016 | if (skb == NULL) { |
1017 | IRDA_DEBUG(2, "%s(), Got NULL skb\n", __FUNCTION__ ); | 1017 | IRDA_DEBUG(2, "%s(), Got NULL skb\n", __func__ ); |
1018 | return 0; | 1018 | return 0; |
1019 | } | 1019 | } |
1020 | 1020 | ||
@@ -1031,7 +1031,7 @@ static int __irlan_insert_param(struct sk_buff *skb, char *param, int type, | |||
1031 | IRDA_ASSERT(value_len > 0, return 0;); | 1031 | IRDA_ASSERT(value_len > 0, return 0;); |
1032 | break; | 1032 | break; |
1033 | default: | 1033 | default: |
1034 | IRDA_DEBUG(2, "%s(), Unknown parameter type!\n", __FUNCTION__ ); | 1034 | IRDA_DEBUG(2, "%s(), Unknown parameter type!\n", __func__ ); |
1035 | return 0; | 1035 | return 0; |
1036 | break; | 1036 | break; |
1037 | } | 1037 | } |
@@ -1041,7 +1041,7 @@ static int __irlan_insert_param(struct sk_buff *skb, char *param, int type, | |||
1041 | 1041 | ||
1042 | /* Make space for data */ | 1042 | /* Make space for data */ |
1043 | if (skb_tailroom(skb) < (param_len+value_len+3)) { | 1043 | if (skb_tailroom(skb) < (param_len+value_len+3)) { |
1044 | IRDA_DEBUG(2, "%s(), No more space at end of skb\n", __FUNCTION__ ); | 1044 | IRDA_DEBUG(2, "%s(), No more space at end of skb\n", __func__ ); |
1045 | return 0; | 1045 | return 0; |
1046 | } | 1046 | } |
1047 | skb_put(skb, param_len+value_len+3); | 1047 | skb_put(skb, param_len+value_len+3); |
@@ -1088,13 +1088,13 @@ int irlan_extract_param(__u8 *buf, char *name, char *value, __u16 *len) | |||
1088 | __u16 val_len; | 1088 | __u16 val_len; |
1089 | int n=0; | 1089 | int n=0; |
1090 | 1090 | ||
1091 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); | 1091 | IRDA_DEBUG(4, "%s()\n", __func__ ); |
1092 | 1092 | ||
1093 | /* get length of parameter name (1 byte) */ | 1093 | /* get length of parameter name (1 byte) */ |
1094 | name_len = buf[n++]; | 1094 | name_len = buf[n++]; |
1095 | 1095 | ||
1096 | if (name_len > 254) { | 1096 | if (name_len > 254) { |
1097 | IRDA_DEBUG(2, "%s(), name_len > 254\n", __FUNCTION__ ); | 1097 | IRDA_DEBUG(2, "%s(), name_len > 254\n", __func__ ); |
1098 | return -RSP_INVALID_COMMAND_FORMAT; | 1098 | return -RSP_INVALID_COMMAND_FORMAT; |
1099 | } | 1099 | } |
1100 | 1100 | ||
@@ -1111,7 +1111,7 @@ int irlan_extract_param(__u8 *buf, char *name, char *value, __u16 *len) | |||
1111 | le16_to_cpus(&val_len); n+=2; | 1111 | le16_to_cpus(&val_len); n+=2; |
1112 | 1112 | ||
1113 | if (val_len > 1016) { | 1113 | if (val_len > 1016) { |
1114 | IRDA_DEBUG(2, "%s(), parameter length to long\n", __FUNCTION__ ); | 1114 | IRDA_DEBUG(2, "%s(), parameter length to long\n", __func__ ); |
1115 | return -RSP_INVALID_COMMAND_FORMAT; | 1115 | return -RSP_INVALID_COMMAND_FORMAT; |
1116 | } | 1116 | } |
1117 | *len = val_len; | 1117 | *len = val_len; |
diff --git a/net/irda/irlan/irlan_eth.c b/net/irda/irlan/irlan_eth.c index 1ab91f787cc1..7a6b14ab1e7f 100644 --- a/net/irda/irlan/irlan_eth.c +++ b/net/irda/irlan/irlan_eth.c | |||
@@ -103,7 +103,7 @@ static int irlan_eth_open(struct net_device *dev) | |||
103 | { | 103 | { |
104 | struct irlan_cb *self = netdev_priv(dev); | 104 | struct irlan_cb *self = netdev_priv(dev); |
105 | 105 | ||
106 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); | 106 | IRDA_DEBUG(2, "%s()\n", __func__ ); |
107 | 107 | ||
108 | /* Ready to play! */ | 108 | /* Ready to play! */ |
109 | netif_stop_queue(dev); /* Wait until data link is ready */ | 109 | netif_stop_queue(dev); /* Wait until data link is ready */ |
@@ -130,7 +130,7 @@ static int irlan_eth_close(struct net_device *dev) | |||
130 | { | 130 | { |
131 | struct irlan_cb *self = netdev_priv(dev); | 131 | struct irlan_cb *self = netdev_priv(dev); |
132 | 132 | ||
133 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); | 133 | IRDA_DEBUG(2, "%s()\n", __func__ ); |
134 | 134 | ||
135 | /* Stop device */ | 135 | /* Stop device */ |
136 | netif_stop_queue(dev); | 136 | netif_stop_queue(dev); |
@@ -221,7 +221,7 @@ int irlan_eth_receive(void *instance, void *sap, struct sk_buff *skb) | |||
221 | } | 221 | } |
222 | if (skb->len < ETH_HLEN) { | 222 | if (skb->len < ETH_HLEN) { |
223 | IRDA_DEBUG(0, "%s() : IrLAN frame too short (%d)\n", | 223 | IRDA_DEBUG(0, "%s() : IrLAN frame too short (%d)\n", |
224 | __FUNCTION__, skb->len); | 224 | __func__, skb->len); |
225 | ++self->stats.rx_dropped; | 225 | ++self->stats.rx_dropped; |
226 | dev_kfree_skb(skb); | 226 | dev_kfree_skb(skb); |
227 | return 0; | 227 | return 0; |
@@ -270,7 +270,7 @@ void irlan_eth_flow_indication(void *instance, void *sap, LOCAL_FLOW flow) | |||
270 | 270 | ||
271 | IRDA_ASSERT(dev != NULL, return;); | 271 | IRDA_ASSERT(dev != NULL, return;); |
272 | 272 | ||
273 | IRDA_DEBUG(0, "%s() : flow %s ; running %d\n", __FUNCTION__, | 273 | IRDA_DEBUG(0, "%s() : flow %s ; running %d\n", __func__, |
274 | flow == FLOW_STOP ? "FLOW_STOP" : "FLOW_START", | 274 | flow == FLOW_STOP ? "FLOW_STOP" : "FLOW_START", |
275 | netif_running(dev)); | 275 | netif_running(dev)); |
276 | 276 | ||
@@ -332,11 +332,11 @@ static void irlan_eth_set_multicast_list(struct net_device *dev) | |||
332 | { | 332 | { |
333 | struct irlan_cb *self = netdev_priv(dev); | 333 | struct irlan_cb *self = netdev_priv(dev); |
334 | 334 | ||
335 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); | 335 | IRDA_DEBUG(2, "%s()\n", __func__ ); |
336 | 336 | ||
337 | /* Check if data channel has been connected yet */ | 337 | /* Check if data channel has been connected yet */ |
338 | if (self->client.state != IRLAN_DATA) { | 338 | if (self->client.state != IRLAN_DATA) { |
339 | IRDA_DEBUG(1, "%s(), delaying!\n", __FUNCTION__ ); | 339 | IRDA_DEBUG(1, "%s(), delaying!\n", __func__ ); |
340 | return; | 340 | return; |
341 | } | 341 | } |
342 | 342 | ||
@@ -346,20 +346,20 @@ static void irlan_eth_set_multicast_list(struct net_device *dev) | |||
346 | } | 346 | } |
347 | else if ((dev->flags & IFF_ALLMULTI) || dev->mc_count > HW_MAX_ADDRS) { | 347 | else if ((dev->flags & IFF_ALLMULTI) || dev->mc_count > HW_MAX_ADDRS) { |
348 | /* Disable promiscuous mode, use normal mode. */ | 348 | /* Disable promiscuous mode, use normal mode. */ |
349 | IRDA_DEBUG(4, "%s(), Setting multicast filter\n", __FUNCTION__ ); | 349 | IRDA_DEBUG(4, "%s(), Setting multicast filter\n", __func__ ); |
350 | /* hardware_set_filter(NULL); */ | 350 | /* hardware_set_filter(NULL); */ |
351 | 351 | ||
352 | irlan_set_multicast_filter(self, TRUE); | 352 | irlan_set_multicast_filter(self, TRUE); |
353 | } | 353 | } |
354 | else if (dev->mc_count) { | 354 | else if (dev->mc_count) { |
355 | IRDA_DEBUG(4, "%s(), Setting multicast filter\n", __FUNCTION__ ); | 355 | IRDA_DEBUG(4, "%s(), Setting multicast filter\n", __func__ ); |
356 | /* Walk the address list, and load the filter */ | 356 | /* Walk the address list, and load the filter */ |
357 | /* hardware_set_filter(dev->mc_list); */ | 357 | /* hardware_set_filter(dev->mc_list); */ |
358 | 358 | ||
359 | irlan_set_multicast_filter(self, TRUE); | 359 | irlan_set_multicast_filter(self, TRUE); |
360 | } | 360 | } |
361 | else { | 361 | else { |
362 | IRDA_DEBUG(4, "%s(), Clearing multicast filter\n", __FUNCTION__ ); | 362 | IRDA_DEBUG(4, "%s(), Clearing multicast filter\n", __func__ ); |
363 | irlan_set_multicast_filter(self, FALSE); | 363 | irlan_set_multicast_filter(self, FALSE); |
364 | } | 364 | } |
365 | 365 | ||
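The irlan_eth.c calls above all share the same shape: a verbosity level plus a printf-style format whose first "%s" is filled by __func__. A hedged sketch of a level-gated debug macro with that calling convention, written only to show why the substitution is drop-in; this is an assumed, simplified stand-in, and the real IRDA_DEBUG in the kernel's IrDA headers may gate on a different variable and report via printk:

#include <stdio.h>

/*
 * Hedged sketch only: an assumed stand-in for IRDA_DEBUG, showing the
 * calling shape used in the hunks above (a level plus a printf-style
 * format). The real macro lives in the IrDA headers and may differ.
 */
static int irda_debug_level = 2;	/* assumed threshold, illustration only */

#define MY_IRDA_DEBUG(n, fmt, ...)					\
	do {								\
		if ((n) <= irda_debug_level)				\
			printf(fmt, ##__VA_ARGS__);			\
	} while (0)

static int example_eth_open(void)
{
	/* __func__ slots into "%s" exactly as __FUNCTION__ did */
	MY_IRDA_DEBUG(2, "%s()\n", __func__);
	return 0;
}

int main(void)
{
	return example_eth_open();
}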
diff --git a/net/irda/irlan/irlan_event.c b/net/irda/irlan/irlan_event.c index a9750a801388..cbcb4eb54037 100644 --- a/net/irda/irlan/irlan_event.c +++ b/net/irda/irlan/irlan_event.c | |||
@@ -40,7 +40,7 @@ char *irlan_state[] = { | |||
40 | 40 | ||
41 | void irlan_next_client_state(struct irlan_cb *self, IRLAN_STATE state) | 41 | void irlan_next_client_state(struct irlan_cb *self, IRLAN_STATE state) |
42 | { | 42 | { |
43 | IRDA_DEBUG(2, "%s(), %s\n", __FUNCTION__ , irlan_state[state]); | 43 | IRDA_DEBUG(2, "%s(), %s\n", __func__ , irlan_state[state]); |
44 | 44 | ||
45 | IRDA_ASSERT(self != NULL, return;); | 45 | IRDA_ASSERT(self != NULL, return;); |
46 | IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); | 46 | IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); |
@@ -50,7 +50,7 @@ void irlan_next_client_state(struct irlan_cb *self, IRLAN_STATE state) | |||
50 | 50 | ||
51 | void irlan_next_provider_state(struct irlan_cb *self, IRLAN_STATE state) | 51 | void irlan_next_provider_state(struct irlan_cb *self, IRLAN_STATE state) |
52 | { | 52 | { |
53 | IRDA_DEBUG(2, "%s(), %s\n", __FUNCTION__ , irlan_state[state]); | 53 | IRDA_DEBUG(2, "%s(), %s\n", __func__ , irlan_state[state]); |
54 | 54 | ||
55 | IRDA_ASSERT(self != NULL, return;); | 55 | IRDA_ASSERT(self != NULL, return;); |
56 | IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); | 56 | IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); |
diff --git a/net/irda/irlan/irlan_filter.c b/net/irda/irlan/irlan_filter.c index 4384be9a6888..9ff7823abec7 100644 --- a/net/irda/irlan/irlan_filter.c +++ b/net/irda/irlan/irlan_filter.c | |||
@@ -145,7 +145,7 @@ void irlan_check_command_param(struct irlan_cb *self, char *param, char *value) | |||
145 | { | 145 | { |
146 | __u8 *bytes; | 146 | __u8 *bytes; |
147 | 147 | ||
148 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); | 148 | IRDA_DEBUG(4, "%s()\n", __func__ ); |
149 | 149 | ||
150 | bytes = value; | 150 | bytes = value; |
151 | 151 | ||
@@ -158,7 +158,7 @@ void irlan_check_command_param(struct irlan_cb *self, char *param, char *value) | |||
158 | * This is experimental!! DB. | 158 | * This is experimental!! DB. |
159 | */ | 159 | */ |
160 | if (strcmp(param, "MODE") == 0) { | 160 | if (strcmp(param, "MODE") == 0) { |
161 | IRDA_DEBUG(0, "%s()\n", __FUNCTION__ ); | 161 | IRDA_DEBUG(0, "%s()\n", __func__ ); |
162 | self->use_udata = TRUE; | 162 | self->use_udata = TRUE; |
163 | return; | 163 | return; |
164 | } | 164 | } |
diff --git a/net/irda/irlan/irlan_provider.c b/net/irda/irlan/irlan_provider.c index 13db942812e4..3f81f81b2dfa 100644 --- a/net/irda/irlan/irlan_provider.c +++ b/net/irda/irlan/irlan_provider.c | |||
@@ -70,7 +70,7 @@ static int irlan_provider_data_indication(void *instance, void *sap, | |||
70 | struct irlan_cb *self; | 70 | struct irlan_cb *self; |
71 | __u8 code; | 71 | __u8 code; |
72 | 72 | ||
73 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); | 73 | IRDA_DEBUG(4, "%s()\n", __func__ ); |
74 | 74 | ||
75 | self = (struct irlan_cb *) instance; | 75 | self = (struct irlan_cb *) instance; |
76 | 76 | ||
@@ -99,15 +99,15 @@ static int irlan_provider_data_indication(void *instance, void *sap, | |||
99 | irlan_do_provider_event(self, IRLAN_FILTER_CONFIG_CMD, skb); | 99 | irlan_do_provider_event(self, IRLAN_FILTER_CONFIG_CMD, skb); |
100 | break; | 100 | break; |
101 | case CMD_RECONNECT_DATA_CHAN: | 101 | case CMD_RECONNECT_DATA_CHAN: |
102 | IRDA_DEBUG(2, "%s(), Got RECONNECT_DATA_CHAN command\n", __FUNCTION__ ); | 102 | IRDA_DEBUG(2, "%s(), Got RECONNECT_DATA_CHAN command\n", __func__ ); |
103 | IRDA_DEBUG(2, "%s(), NOT IMPLEMENTED\n", __FUNCTION__ ); | 103 | IRDA_DEBUG(2, "%s(), NOT IMPLEMENTED\n", __func__ ); |
104 | break; | 104 | break; |
105 | case CMD_CLOSE_DATA_CHAN: | 105 | case CMD_CLOSE_DATA_CHAN: |
106 | IRDA_DEBUG(2, "Got CLOSE_DATA_CHAN command!\n"); | 106 | IRDA_DEBUG(2, "Got CLOSE_DATA_CHAN command!\n"); |
107 | IRDA_DEBUG(2, "%s(), NOT IMPLEMENTED\n", __FUNCTION__ ); | 107 | IRDA_DEBUG(2, "%s(), NOT IMPLEMENTED\n", __func__ ); |
108 | break; | 108 | break; |
109 | default: | 109 | default: |
110 | IRDA_DEBUG(2, "%s(), Unknown command!\n", __FUNCTION__ ); | 110 | IRDA_DEBUG(2, "%s(), Unknown command!\n", __func__ ); |
111 | break; | 111 | break; |
112 | } | 112 | } |
113 | return 0; | 113 | return 0; |
@@ -129,7 +129,7 @@ static void irlan_provider_connect_indication(void *instance, void *sap, | |||
129 | struct tsap_cb *tsap; | 129 | struct tsap_cb *tsap; |
130 | __u32 saddr, daddr; | 130 | __u32 saddr, daddr; |
131 | 131 | ||
132 | IRDA_DEBUG(0, "%s()\n", __FUNCTION__ ); | 132 | IRDA_DEBUG(0, "%s()\n", __func__ ); |
133 | 133 | ||
134 | self = (struct irlan_cb *) instance; | 134 | self = (struct irlan_cb *) instance; |
135 | tsap = (struct tsap_cb *) sap; | 135 | tsap = (struct tsap_cb *) sap; |
@@ -182,7 +182,7 @@ static void irlan_provider_disconnect_indication(void *instance, void *sap, | |||
182 | struct irlan_cb *self; | 182 | struct irlan_cb *self; |
183 | struct tsap_cb *tsap; | 183 | struct tsap_cb *tsap; |
184 | 184 | ||
185 | IRDA_DEBUG(4, "%s(), reason=%d\n", __FUNCTION__ , reason); | 185 | IRDA_DEBUG(4, "%s(), reason=%d\n", __func__ , reason); |
186 | 186 | ||
187 | self = (struct irlan_cb *) instance; | 187 | self = (struct irlan_cb *) instance; |
188 | tsap = (struct tsap_cb *) sap; | 188 | tsap = (struct tsap_cb *) sap; |
@@ -236,7 +236,7 @@ int irlan_provider_parse_command(struct irlan_cb *self, int cmd, | |||
236 | 236 | ||
237 | IRDA_ASSERT(skb != NULL, return -RSP_PROTOCOL_ERROR;); | 237 | IRDA_ASSERT(skb != NULL, return -RSP_PROTOCOL_ERROR;); |
238 | 238 | ||
239 | IRDA_DEBUG(4, "%s(), skb->len=%d\n", __FUNCTION__ , (int)skb->len); | 239 | IRDA_DEBUG(4, "%s(), skb->len=%d\n", __func__ , (int)skb->len); |
240 | 240 | ||
241 | IRDA_ASSERT(self != NULL, return -RSP_PROTOCOL_ERROR;); | 241 | IRDA_ASSERT(self != NULL, return -RSP_PROTOCOL_ERROR;); |
242 | IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -RSP_PROTOCOL_ERROR;); | 242 | IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -RSP_PROTOCOL_ERROR;); |
@@ -266,7 +266,7 @@ int irlan_provider_parse_command(struct irlan_cb *self, int cmd, | |||
266 | for (i=0; i<count;i++) { | 266 | for (i=0; i<count;i++) { |
267 | ret = irlan_extract_param(ptr, name, value, &val_len); | 267 | ret = irlan_extract_param(ptr, name, value, &val_len); |
268 | if (ret < 0) { | 268 | if (ret < 0) { |
269 | IRDA_DEBUG(2, "%s(), IrLAN, Error!\n", __FUNCTION__ ); | 269 | IRDA_DEBUG(2, "%s(), IrLAN, Error!\n", __func__ ); |
270 | break; | 270 | break; |
271 | } | 271 | } |
272 | ptr+=ret; | 272 | ptr+=ret; |
@@ -291,7 +291,7 @@ void irlan_provider_send_reply(struct irlan_cb *self, int command, | |||
291 | { | 291 | { |
292 | struct sk_buff *skb; | 292 | struct sk_buff *skb; |
293 | 293 | ||
294 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); | 294 | IRDA_DEBUG(4, "%s()\n", __func__ ); |
295 | 295 | ||
296 | IRDA_ASSERT(self != NULL, return;); | 296 | IRDA_ASSERT(self != NULL, return;); |
297 | IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); | 297 | IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); |
@@ -323,7 +323,7 @@ void irlan_provider_send_reply(struct irlan_cb *self, int command, | |||
323 | irlan_insert_string_param(skb, "MEDIA", "802.5"); | 323 | irlan_insert_string_param(skb, "MEDIA", "802.5"); |
324 | break; | 324 | break; |
325 | default: | 325 | default: |
326 | IRDA_DEBUG(2, "%s(), unknown media type!\n", __FUNCTION__ ); | 326 | IRDA_DEBUG(2, "%s(), unknown media type!\n", __func__ ); |
327 | break; | 327 | break; |
328 | } | 328 | } |
329 | irlan_insert_short_param(skb, "IRLAN_VER", 0x0101); | 329 | irlan_insert_short_param(skb, "IRLAN_VER", 0x0101); |
@@ -347,7 +347,7 @@ void irlan_provider_send_reply(struct irlan_cb *self, int command, | |||
347 | irlan_insert_string_param(skb, "ACCESS_TYPE", "HOSTED"); | 347 | irlan_insert_string_param(skb, "ACCESS_TYPE", "HOSTED"); |
348 | break; | 348 | break; |
349 | default: | 349 | default: |
350 | IRDA_DEBUG(2, "%s(), Unknown access type\n", __FUNCTION__ ); | 350 | IRDA_DEBUG(2, "%s(), Unknown access type\n", __func__ ); |
351 | break; | 351 | break; |
352 | } | 352 | } |
353 | irlan_insert_short_param(skb, "MAX_FRAME", 0x05ee); | 353 | irlan_insert_short_param(skb, "MAX_FRAME", 0x05ee); |
@@ -367,7 +367,7 @@ void irlan_provider_send_reply(struct irlan_cb *self, int command, | |||
367 | irlan_filter_request(self, skb); | 367 | irlan_filter_request(self, skb); |
368 | break; | 368 | break; |
369 | default: | 369 | default: |
370 | IRDA_DEBUG(2, "%s(), Unknown command!\n", __FUNCTION__ ); | 370 | IRDA_DEBUG(2, "%s(), Unknown command!\n", __func__ ); |
371 | break; | 371 | break; |
372 | } | 372 | } |
373 | 373 | ||
@@ -385,7 +385,7 @@ int irlan_provider_open_ctrl_tsap(struct irlan_cb *self) | |||
385 | struct tsap_cb *tsap; | 385 | struct tsap_cb *tsap; |
386 | notify_t notify; | 386 | notify_t notify; |
387 | 387 | ||
388 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); | 388 | IRDA_DEBUG(4, "%s()\n", __func__ ); |
389 | 389 | ||
390 | IRDA_ASSERT(self != NULL, return -1;); | 390 | IRDA_ASSERT(self != NULL, return -1;); |
391 | IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;); | 391 | IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;); |
@@ -406,7 +406,7 @@ int irlan_provider_open_ctrl_tsap(struct irlan_cb *self) | |||
406 | 406 | ||
407 | tsap = irttp_open_tsap(LSAP_ANY, 1, ¬ify); | 407 | tsap = irttp_open_tsap(LSAP_ANY, 1, ¬ify); |
408 | if (!tsap) { | 408 | if (!tsap) { |
409 | IRDA_DEBUG(2, "%s(), Got no tsap!\n", __FUNCTION__ ); | 409 | IRDA_DEBUG(2, "%s(), Got no tsap!\n", __func__ ); |
410 | return -1; | 410 | return -1; |
411 | } | 411 | } |
412 | self->provider.tsap_ctrl = tsap; | 412 | self->provider.tsap_ctrl = tsap; |
diff --git a/net/irda/irlan/irlan_provider_event.c b/net/irda/irlan/irlan_provider_event.c index 10ece5a47522..01a9d7c993ee 100644 --- a/net/irda/irlan/irlan_provider_event.c +++ b/net/irda/irlan/irlan_provider_event.c | |||
@@ -72,7 +72,7 @@ void irlan_do_provider_event(struct irlan_cb *self, IRLAN_EVENT event, | |||
72 | static int irlan_provider_state_idle(struct irlan_cb *self, IRLAN_EVENT event, | 72 | static int irlan_provider_state_idle(struct irlan_cb *self, IRLAN_EVENT event, |
73 | struct sk_buff *skb) | 73 | struct sk_buff *skb) |
74 | { | 74 | { |
75 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); | 75 | IRDA_DEBUG(4, "%s()\n", __func__ ); |
76 | 76 | ||
77 | IRDA_ASSERT(self != NULL, return -1;); | 77 | IRDA_ASSERT(self != NULL, return -1;); |
78 | 78 | ||
@@ -82,7 +82,7 @@ static int irlan_provider_state_idle(struct irlan_cb *self, IRLAN_EVENT event, | |||
82 | irlan_next_provider_state( self, IRLAN_INFO); | 82 | irlan_next_provider_state( self, IRLAN_INFO); |
83 | break; | 83 | break; |
84 | default: | 84 | default: |
85 | IRDA_DEBUG(4, "%s(), Unknown event %d\n", __FUNCTION__ , event); | 85 | IRDA_DEBUG(4, "%s(), Unknown event %d\n", __func__ , event); |
86 | break; | 86 | break; |
87 | } | 87 | } |
88 | if (skb) | 88 | if (skb) |
@@ -101,7 +101,7 @@ static int irlan_provider_state_info(struct irlan_cb *self, IRLAN_EVENT event, | |||
101 | { | 101 | { |
102 | int ret; | 102 | int ret; |
103 | 103 | ||
104 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); | 104 | IRDA_DEBUG(4, "%s()\n", __func__ ); |
105 | 105 | ||
106 | IRDA_ASSERT(self != NULL, return -1;); | 106 | IRDA_ASSERT(self != NULL, return -1;); |
107 | 107 | ||
@@ -147,7 +147,7 @@ static int irlan_provider_state_info(struct irlan_cb *self, IRLAN_EVENT event, | |||
147 | irlan_next_provider_state(self, IRLAN_IDLE); | 147 | irlan_next_provider_state(self, IRLAN_IDLE); |
148 | break; | 148 | break; |
149 | default: | 149 | default: |
150 | IRDA_DEBUG( 0, "%s(), Unknown event %d\n", __FUNCTION__ , event); | 150 | IRDA_DEBUG( 0, "%s(), Unknown event %d\n", __func__ , event); |
151 | break; | 151 | break; |
152 | } | 152 | } |
153 | if (skb) | 153 | if (skb) |
@@ -166,7 +166,7 @@ static int irlan_provider_state_info(struct irlan_cb *self, IRLAN_EVENT event, | |||
166 | static int irlan_provider_state_open(struct irlan_cb *self, IRLAN_EVENT event, | 166 | static int irlan_provider_state_open(struct irlan_cb *self, IRLAN_EVENT event, |
167 | struct sk_buff *skb) | 167 | struct sk_buff *skb) |
168 | { | 168 | { |
169 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); | 169 | IRDA_DEBUG(4, "%s()\n", __func__ ); |
170 | 170 | ||
171 | IRDA_ASSERT(self != NULL, return -1;); | 171 | IRDA_ASSERT(self != NULL, return -1;); |
172 | 172 | ||
@@ -186,7 +186,7 @@ static int irlan_provider_state_open(struct irlan_cb *self, IRLAN_EVENT event, | |||
186 | irlan_next_provider_state(self, IRLAN_IDLE); | 186 | irlan_next_provider_state(self, IRLAN_IDLE); |
187 | break; | 187 | break; |
188 | default: | 188 | default: |
189 | IRDA_DEBUG(2, "%s(), Unknown event %d\n", __FUNCTION__ , event); | 189 | IRDA_DEBUG(2, "%s(), Unknown event %d\n", __func__ , event); |
190 | break; | 190 | break; |
191 | } | 191 | } |
192 | if (skb) | 192 | if (skb) |
@@ -205,7 +205,7 @@ static int irlan_provider_state_open(struct irlan_cb *self, IRLAN_EVENT event, | |||
205 | static int irlan_provider_state_data(struct irlan_cb *self, IRLAN_EVENT event, | 205 | static int irlan_provider_state_data(struct irlan_cb *self, IRLAN_EVENT event, |
206 | struct sk_buff *skb) | 206 | struct sk_buff *skb) |
207 | { | 207 | { |
208 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); | 208 | IRDA_DEBUG(4, "%s()\n", __func__ ); |
209 | 209 | ||
210 | IRDA_ASSERT(self != NULL, return -1;); | 210 | IRDA_ASSERT(self != NULL, return -1;); |
211 | IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;); | 211 | IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;); |
@@ -221,7 +221,7 @@ static int irlan_provider_state_data(struct irlan_cb *self, IRLAN_EVENT event, | |||
221 | irlan_next_provider_state(self, IRLAN_IDLE); | 221 | irlan_next_provider_state(self, IRLAN_IDLE); |
222 | break; | 222 | break; |
223 | default: | 223 | default: |
224 | IRDA_DEBUG( 0, "%s(), Unknown event %d\n", __FUNCTION__ , event); | 224 | IRDA_DEBUG( 0, "%s(), Unknown event %d\n", __func__ , event); |
225 | break; | 225 | break; |
226 | } | 226 | } |
227 | if (skb) | 227 | if (skb) |
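The provider event handlers above follow one pattern: switch on the event, move to the next state via irlan_next_provider_state() on known events, and log unknown ones from the default branch, which is where most of the __func__ conversions land. A hedged, self-contained sketch of that handler shape; all names here are hypothetical and only the structure mirrors the kernel code:

#include <stdio.h>

/*
 * Hedged sketch of the handler shape used in irlan_provider_event.c.
 * Hypothetical names throughout; only the structure is taken from the
 * hunks above.
 */
enum my_state { MY_IDLE, MY_INFO };
enum my_event { MY_CONNECT, MY_UNKNOWN };

struct my_cb {
	enum my_state state;
};

static void my_next_state(struct my_cb *self, enum my_state state)
{
	self->state = state;
}

static int my_state_idle(struct my_cb *self, enum my_event event)
{
	switch (event) {
	case MY_CONNECT:
		my_next_state(self, MY_INFO);
		break;
	default:
		printf("%s(), Unknown event %d\n", __func__, event);
		break;
	}
	return 0;
}

int main(void)
{
	struct my_cb cb = { .state = MY_IDLE };

	my_state_idle(&cb, MY_CONNECT);	/* known event: state advances */
	my_state_idle(&cb, MY_UNKNOWN);	/* unknown event: logged via __func__ */
	return 0;
}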
diff --git a/net/irda/irlap.c b/net/irda/irlap.c index f3236acc8d22..e4965b764b9b 100644 --- a/net/irda/irlap.c +++ b/net/irda/irlap.c | |||
@@ -88,7 +88,7 @@ int __init irlap_init(void) | |||
88 | irlap = hashbin_new(HB_LOCK); | 88 | irlap = hashbin_new(HB_LOCK); |
89 | if (irlap == NULL) { | 89 | if (irlap == NULL) { |
90 | IRDA_ERROR("%s: can't allocate irlap hashbin!\n", | 90 | IRDA_ERROR("%s: can't allocate irlap hashbin!\n", |
91 | __FUNCTION__); | 91 | __func__); |
92 | return -ENOMEM; | 92 | return -ENOMEM; |
93 | } | 93 | } |
94 | 94 | ||
@@ -113,7 +113,7 @@ struct irlap_cb *irlap_open(struct net_device *dev, struct qos_info *qos, | |||
113 | { | 113 | { |
114 | struct irlap_cb *self; | 114 | struct irlap_cb *self; |
115 | 115 | ||
116 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__); | 116 | IRDA_DEBUG(4, "%s()\n", __func__); |
117 | 117 | ||
118 | /* Initialize the irlap structure. */ | 118 | /* Initialize the irlap structure. */ |
119 | self = kzalloc(sizeof(struct irlap_cb), GFP_KERNEL); | 119 | self = kzalloc(sizeof(struct irlap_cb), GFP_KERNEL); |
@@ -215,7 +215,7 @@ void irlap_close(struct irlap_cb *self) | |||
215 | { | 215 | { |
216 | struct irlap_cb *lap; | 216 | struct irlap_cb *lap; |
217 | 217 | ||
218 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__); | 218 | IRDA_DEBUG(4, "%s()\n", __func__); |
219 | 219 | ||
220 | IRDA_ASSERT(self != NULL, return;); | 220 | IRDA_ASSERT(self != NULL, return;); |
221 | IRDA_ASSERT(self->magic == LAP_MAGIC, return;); | 221 | IRDA_ASSERT(self->magic == LAP_MAGIC, return;); |
@@ -231,7 +231,7 @@ void irlap_close(struct irlap_cb *self) | |||
231 | /* Be sure that we manage to remove ourself from the hash */ | 231 | /* Be sure that we manage to remove ourself from the hash */ |
232 | lap = hashbin_remove(irlap, self->saddr, NULL); | 232 | lap = hashbin_remove(irlap, self->saddr, NULL); |
233 | if (!lap) { | 233 | if (!lap) { |
234 | IRDA_DEBUG(1, "%s(), Didn't find myself!\n", __FUNCTION__); | 234 | IRDA_DEBUG(1, "%s(), Didn't find myself!\n", __func__); |
235 | return; | 235 | return; |
236 | } | 236 | } |
237 | __irlap_close(lap); | 237 | __irlap_close(lap); |
@@ -246,7 +246,7 @@ EXPORT_SYMBOL(irlap_close); | |||
246 | */ | 246 | */ |
247 | void irlap_connect_indication(struct irlap_cb *self, struct sk_buff *skb) | 247 | void irlap_connect_indication(struct irlap_cb *self, struct sk_buff *skb) |
248 | { | 248 | { |
249 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__); | 249 | IRDA_DEBUG(4, "%s()\n", __func__); |
250 | 250 | ||
251 | IRDA_ASSERT(self != NULL, return;); | 251 | IRDA_ASSERT(self != NULL, return;); |
252 | IRDA_ASSERT(self->magic == LAP_MAGIC, return;); | 252 | IRDA_ASSERT(self->magic == LAP_MAGIC, return;); |
@@ -265,7 +265,7 @@ void irlap_connect_indication(struct irlap_cb *self, struct sk_buff *skb) | |||
265 | */ | 265 | */ |
266 | void irlap_connect_response(struct irlap_cb *self, struct sk_buff *userdata) | 266 | void irlap_connect_response(struct irlap_cb *self, struct sk_buff *userdata) |
267 | { | 267 | { |
268 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__); | 268 | IRDA_DEBUG(4, "%s()\n", __func__); |
269 | 269 | ||
270 | irlap_do_event(self, CONNECT_RESPONSE, userdata, NULL); | 270 | irlap_do_event(self, CONNECT_RESPONSE, userdata, NULL); |
271 | } | 271 | } |
@@ -280,7 +280,7 @@ void irlap_connect_response(struct irlap_cb *self, struct sk_buff *userdata) | |||
280 | void irlap_connect_request(struct irlap_cb *self, __u32 daddr, | 280 | void irlap_connect_request(struct irlap_cb *self, __u32 daddr, |
281 | struct qos_info *qos_user, int sniff) | 281 | struct qos_info *qos_user, int sniff) |
282 | { | 282 | { |
283 | IRDA_DEBUG(3, "%s(), daddr=0x%08x\n", __FUNCTION__, daddr); | 283 | IRDA_DEBUG(3, "%s(), daddr=0x%08x\n", __func__, daddr); |
284 | 284 | ||
285 | IRDA_ASSERT(self != NULL, return;); | 285 | IRDA_ASSERT(self != NULL, return;); |
286 | IRDA_ASSERT(self->magic == LAP_MAGIC, return;); | 286 | IRDA_ASSERT(self->magic == LAP_MAGIC, return;); |
@@ -307,7 +307,7 @@ void irlap_connect_request(struct irlap_cb *self, __u32 daddr, | |||
307 | */ | 307 | */ |
308 | void irlap_connect_confirm(struct irlap_cb *self, struct sk_buff *skb) | 308 | void irlap_connect_confirm(struct irlap_cb *self, struct sk_buff *skb) |
309 | { | 309 | { |
310 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__); | 310 | IRDA_DEBUG(4, "%s()\n", __func__); |
311 | 311 | ||
312 | IRDA_ASSERT(self != NULL, return;); | 312 | IRDA_ASSERT(self != NULL, return;); |
313 | IRDA_ASSERT(self->magic == LAP_MAGIC, return;); | 313 | IRDA_ASSERT(self->magic == LAP_MAGIC, return;); |
@@ -344,7 +344,7 @@ void irlap_data_request(struct irlap_cb *self, struct sk_buff *skb, | |||
344 | IRDA_ASSERT(self != NULL, return;); | 344 | IRDA_ASSERT(self != NULL, return;); |
345 | IRDA_ASSERT(self->magic == LAP_MAGIC, return;); | 345 | IRDA_ASSERT(self->magic == LAP_MAGIC, return;); |
346 | 346 | ||
347 | IRDA_DEBUG(3, "%s()\n", __FUNCTION__); | 347 | IRDA_DEBUG(3, "%s()\n", __func__); |
348 | 348 | ||
349 | IRDA_ASSERT(skb_headroom(skb) >= (LAP_ADDR_HEADER+LAP_CTRL_HEADER), | 349 | IRDA_ASSERT(skb_headroom(skb) >= (LAP_ADDR_HEADER+LAP_CTRL_HEADER), |
350 | return;); | 350 | return;); |
@@ -391,7 +391,7 @@ void irlap_unitdata_request(struct irlap_cb *self, struct sk_buff *skb) | |||
391 | IRDA_ASSERT(self != NULL, return;); | 391 | IRDA_ASSERT(self != NULL, return;); |
392 | IRDA_ASSERT(self->magic == LAP_MAGIC, return;); | 392 | IRDA_ASSERT(self->magic == LAP_MAGIC, return;); |
393 | 393 | ||
394 | IRDA_DEBUG(3, "%s()\n", __FUNCTION__); | 394 | IRDA_DEBUG(3, "%s()\n", __func__); |
395 | 395 | ||
396 | IRDA_ASSERT(skb_headroom(skb) >= (LAP_ADDR_HEADER+LAP_CTRL_HEADER), | 396 | IRDA_ASSERT(skb_headroom(skb) >= (LAP_ADDR_HEADER+LAP_CTRL_HEADER), |
397 | return;); | 397 | return;); |
@@ -417,7 +417,7 @@ void irlap_unitdata_request(struct irlap_cb *self, struct sk_buff *skb) | |||
417 | #ifdef CONFIG_IRDA_ULTRA | 417 | #ifdef CONFIG_IRDA_ULTRA |
418 | void irlap_unitdata_indication(struct irlap_cb *self, struct sk_buff *skb) | 418 | void irlap_unitdata_indication(struct irlap_cb *self, struct sk_buff *skb) |
419 | { | 419 | { |
420 | IRDA_DEBUG(1, "%s()\n", __FUNCTION__); | 420 | IRDA_DEBUG(1, "%s()\n", __func__); |
421 | 421 | ||
422 | IRDA_ASSERT(self != NULL, return;); | 422 | IRDA_ASSERT(self != NULL, return;); |
423 | IRDA_ASSERT(self->magic == LAP_MAGIC, return;); | 423 | IRDA_ASSERT(self->magic == LAP_MAGIC, return;); |
@@ -437,7 +437,7 @@ void irlap_unitdata_indication(struct irlap_cb *self, struct sk_buff *skb) | |||
437 | */ | 437 | */ |
438 | void irlap_disconnect_request(struct irlap_cb *self) | 438 | void irlap_disconnect_request(struct irlap_cb *self) |
439 | { | 439 | { |
440 | IRDA_DEBUG(3, "%s()\n", __FUNCTION__); | 440 | IRDA_DEBUG(3, "%s()\n", __func__); |
441 | 441 | ||
442 | IRDA_ASSERT(self != NULL, return;); | 442 | IRDA_ASSERT(self != NULL, return;); |
443 | IRDA_ASSERT(self->magic == LAP_MAGIC, return;); | 443 | IRDA_ASSERT(self->magic == LAP_MAGIC, return;); |
@@ -458,7 +458,7 @@ void irlap_disconnect_request(struct irlap_cb *self) | |||
458 | irlap_do_event(self, DISCONNECT_REQUEST, NULL, NULL); | 458 | irlap_do_event(self, DISCONNECT_REQUEST, NULL, NULL); |
459 | break; | 459 | break; |
460 | default: | 460 | default: |
461 | IRDA_DEBUG(2, "%s(), disconnect pending!\n", __FUNCTION__); | 461 | IRDA_DEBUG(2, "%s(), disconnect pending!\n", __func__); |
462 | self->disconnect_pending = TRUE; | 462 | self->disconnect_pending = TRUE; |
463 | break; | 463 | break; |
464 | } | 464 | } |
@@ -472,7 +472,7 @@ void irlap_disconnect_request(struct irlap_cb *self) | |||
472 | */ | 472 | */ |
473 | void irlap_disconnect_indication(struct irlap_cb *self, LAP_REASON reason) | 473 | void irlap_disconnect_indication(struct irlap_cb *self, LAP_REASON reason) |
474 | { | 474 | { |
475 | IRDA_DEBUG(1, "%s(), reason=%s\n", __FUNCTION__, lap_reasons[reason]); | 475 | IRDA_DEBUG(1, "%s(), reason=%s\n", __func__, lap_reasons[reason]); |
476 | 476 | ||
477 | IRDA_ASSERT(self != NULL, return;); | 477 | IRDA_ASSERT(self != NULL, return;); |
478 | IRDA_ASSERT(self->magic == LAP_MAGIC, return;); | 478 | IRDA_ASSERT(self->magic == LAP_MAGIC, return;); |
@@ -482,7 +482,7 @@ void irlap_disconnect_indication(struct irlap_cb *self, LAP_REASON reason) | |||
482 | 482 | ||
483 | switch (reason) { | 483 | switch (reason) { |
484 | case LAP_RESET_INDICATION: | 484 | case LAP_RESET_INDICATION: |
485 | IRDA_DEBUG(1, "%s(), Sending reset request!\n", __FUNCTION__); | 485 | IRDA_DEBUG(1, "%s(), Sending reset request!\n", __func__); |
486 | irlap_do_event(self, RESET_REQUEST, NULL, NULL); | 486 | irlap_do_event(self, RESET_REQUEST, NULL, NULL); |
487 | break; | 487 | break; |
488 | case LAP_NO_RESPONSE: /* FALLTROUGH */ | 488 | case LAP_NO_RESPONSE: /* FALLTROUGH */ |
@@ -493,7 +493,7 @@ void irlap_disconnect_indication(struct irlap_cb *self, LAP_REASON reason) | |||
493 | reason, NULL); | 493 | reason, NULL); |
494 | break; | 494 | break; |
495 | default: | 495 | default: |
496 | IRDA_ERROR("%s: Unknown reason %d\n", __FUNCTION__, reason); | 496 | IRDA_ERROR("%s: Unknown reason %d\n", __func__, reason); |
497 | } | 497 | } |
498 | } | 498 | } |
499 | 499 | ||
@@ -511,7 +511,7 @@ void irlap_discovery_request(struct irlap_cb *self, discovery_t *discovery) | |||
511 | IRDA_ASSERT(self->magic == LAP_MAGIC, return;); | 511 | IRDA_ASSERT(self->magic == LAP_MAGIC, return;); |
512 | IRDA_ASSERT(discovery != NULL, return;); | 512 | IRDA_ASSERT(discovery != NULL, return;); |
513 | 513 | ||
514 | IRDA_DEBUG(4, "%s(), nslots = %d\n", __FUNCTION__, discovery->nslots); | 514 | IRDA_DEBUG(4, "%s(), nslots = %d\n", __func__, discovery->nslots); |
515 | 515 | ||
516 | IRDA_ASSERT((discovery->nslots == 1) || (discovery->nslots == 6) || | 516 | IRDA_ASSERT((discovery->nslots == 1) || (discovery->nslots == 6) || |
517 | (discovery->nslots == 8) || (discovery->nslots == 16), | 517 | (discovery->nslots == 8) || (discovery->nslots == 16), |
@@ -520,7 +520,7 @@ void irlap_discovery_request(struct irlap_cb *self, discovery_t *discovery) | |||
520 | /* Discovery is only possible in NDM mode */ | 520 | /* Discovery is only possible in NDM mode */ |
521 | if (self->state != LAP_NDM) { | 521 | if (self->state != LAP_NDM) { |
522 | IRDA_DEBUG(4, "%s(), discovery only possible in NDM mode\n", | 522 | IRDA_DEBUG(4, "%s(), discovery only possible in NDM mode\n", |
523 | __FUNCTION__); | 523 | __func__); |
524 | irlap_discovery_confirm(self, NULL); | 524 | irlap_discovery_confirm(self, NULL); |
525 | /* Note : in theory, if we are not in NDM, we could postpone | 525 | /* Note : in theory, if we are not in NDM, we could postpone |
526 | * the discovery like we do for connection request. | 526 | * the discovery like we do for connection request. |
@@ -543,7 +543,7 @@ void irlap_discovery_request(struct irlap_cb *self, discovery_t *discovery) | |||
543 | 543 | ||
544 | if (self->discovery_log == NULL) { | 544 | if (self->discovery_log == NULL) { |
545 | IRDA_WARNING("%s(), Unable to allocate discovery log!\n", | 545 | IRDA_WARNING("%s(), Unable to allocate discovery log!\n", |
546 | __FUNCTION__); | 546 | __func__); |
547 | return; | 547 | return; |
548 | } | 548 | } |
549 | 549 | ||
@@ -598,7 +598,7 @@ void irlap_discovery_confirm(struct irlap_cb *self, hashbin_t *discovery_log) | |||
598 | */ | 598 | */ |
599 | void irlap_discovery_indication(struct irlap_cb *self, discovery_t *discovery) | 599 | void irlap_discovery_indication(struct irlap_cb *self, discovery_t *discovery) |
600 | { | 600 | { |
601 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__); | 601 | IRDA_DEBUG(4, "%s()\n", __func__); |
602 | 602 | ||
603 | IRDA_ASSERT(self != NULL, return;); | 603 | IRDA_ASSERT(self != NULL, return;); |
604 | IRDA_ASSERT(self->magic == LAP_MAGIC, return;); | 604 | IRDA_ASSERT(self->magic == LAP_MAGIC, return;); |
@@ -644,7 +644,7 @@ void irlap_status_indication(struct irlap_cb *self, int quality_of_link) | |||
644 | */ | 644 | */ |
645 | void irlap_reset_indication(struct irlap_cb *self) | 645 | void irlap_reset_indication(struct irlap_cb *self) |
646 | { | 646 | { |
647 | IRDA_DEBUG(1, "%s()\n", __FUNCTION__); | 647 | IRDA_DEBUG(1, "%s()\n", __func__); |
648 | 648 | ||
649 | IRDA_ASSERT(self != NULL, return;); | 649 | IRDA_ASSERT(self != NULL, return;); |
650 | IRDA_ASSERT(self->magic == LAP_MAGIC, return;); | 650 | IRDA_ASSERT(self->magic == LAP_MAGIC, return;); |
@@ -660,7 +660,7 @@ void irlap_reset_indication(struct irlap_cb *self) | |||
660 | */ | 660 | */ |
661 | void irlap_reset_confirm(void) | 661 | void irlap_reset_confirm(void) |
662 | { | 662 | { |
663 | IRDA_DEBUG(1, "%s()\n", __FUNCTION__); | 663 | IRDA_DEBUG(1, "%s()\n", __func__); |
664 | } | 664 | } |
665 | 665 | ||
666 | /* | 666 | /* |
@@ -760,7 +760,7 @@ int irlap_validate_nr_received(struct irlap_cb *self, int nr) | |||
760 | { | 760 | { |
761 | /* nr as expected? */ | 761 | /* nr as expected? */ |
762 | if (nr == self->vs) { | 762 | if (nr == self->vs) { |
763 | IRDA_DEBUG(4, "%s(), expected!\n", __FUNCTION__); | 763 | IRDA_DEBUG(4, "%s(), expected!\n", __func__); |
764 | return NR_EXPECTED; | 764 | return NR_EXPECTED; |
765 | } | 765 | } |
766 | 766 | ||
@@ -788,7 +788,7 @@ int irlap_validate_nr_received(struct irlap_cb *self, int nr) | |||
788 | */ | 788 | */ |
789 | void irlap_initiate_connection_state(struct irlap_cb *self) | 789 | void irlap_initiate_connection_state(struct irlap_cb *self) |
790 | { | 790 | { |
791 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__); | 791 | IRDA_DEBUG(4, "%s()\n", __func__); |
792 | 792 | ||
793 | IRDA_ASSERT(self != NULL, return;); | 793 | IRDA_ASSERT(self != NULL, return;); |
794 | IRDA_ASSERT(self->magic == LAP_MAGIC, return;); | 794 | IRDA_ASSERT(self->magic == LAP_MAGIC, return;); |
@@ -871,7 +871,7 @@ static void irlap_change_speed(struct irlap_cb *self, __u32 speed, int now) | |||
871 | { | 871 | { |
872 | struct sk_buff *skb; | 872 | struct sk_buff *skb; |
873 | 873 | ||
874 | IRDA_DEBUG(0, "%s(), setting speed to %d\n", __FUNCTION__, speed); | 874 | IRDA_DEBUG(0, "%s(), setting speed to %d\n", __func__, speed); |
875 | 875 | ||
876 | IRDA_ASSERT(self != NULL, return;); | 876 | IRDA_ASSERT(self != NULL, return;); |
877 | IRDA_ASSERT(self->magic == LAP_MAGIC, return;); | 877 | IRDA_ASSERT(self->magic == LAP_MAGIC, return;); |
@@ -914,7 +914,7 @@ static void irlap_init_qos_capabilities(struct irlap_cb *self, | |||
914 | * user may not have set all of them. | 914 | * user may not have set all of them. |
915 | */ | 915 | */ |
916 | if (qos_user) { | 916 | if (qos_user) { |
917 | IRDA_DEBUG(1, "%s(), Found user specified QoS!\n", __FUNCTION__); | 917 | IRDA_DEBUG(1, "%s(), Found user specified QoS!\n", __func__); |
918 | 918 | ||
919 | if (qos_user->baud_rate.bits) | 919 | if (qos_user->baud_rate.bits) |
920 | self->qos_rx.baud_rate.bits &= qos_user->baud_rate.bits; | 920 | self->qos_rx.baud_rate.bits &= qos_user->baud_rate.bits; |
@@ -944,7 +944,7 @@ static void irlap_init_qos_capabilities(struct irlap_cb *self, | |||
944 | */ | 944 | */ |
945 | void irlap_apply_default_connection_parameters(struct irlap_cb *self) | 945 | void irlap_apply_default_connection_parameters(struct irlap_cb *self) |
946 | { | 946 | { |
947 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__); | 947 | IRDA_DEBUG(4, "%s()\n", __func__); |
948 | 948 | ||
949 | IRDA_ASSERT(self != NULL, return;); | 949 | IRDA_ASSERT(self != NULL, return;); |
950 | IRDA_ASSERT(self->magic == LAP_MAGIC, return;); | 950 | IRDA_ASSERT(self->magic == LAP_MAGIC, return;); |
@@ -1007,7 +1007,7 @@ void irlap_apply_default_connection_parameters(struct irlap_cb *self) | |||
1007 | */ | 1007 | */ |
1008 | void irlap_apply_connection_parameters(struct irlap_cb *self, int now) | 1008 | void irlap_apply_connection_parameters(struct irlap_cb *self, int now) |
1009 | { | 1009 | { |
1010 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__); | 1010 | IRDA_DEBUG(4, "%s()\n", __func__); |
1011 | 1011 | ||
1012 | IRDA_ASSERT(self != NULL, return;); | 1012 | IRDA_ASSERT(self != NULL, return;); |
1013 | IRDA_ASSERT(self->magic == LAP_MAGIC, return;); | 1013 | IRDA_ASSERT(self->magic == LAP_MAGIC, return;); |
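The irlap.c hunks above lean heavily on IRDA_ASSERT(cond, action;), whose second argument is a whole recovery statement, typically "return ...;", executed when the check fails. A hedged sketch of a macro with that calling convention; this is an assumed stand-in, and the real macro in the IrDA headers reports through the kernel log rather than printf:

#include <stdio.h>

/*
 * Hedged sketch only: an assumed stand-in for IRDA_ASSERT, written to
 * show its calling convention. The second argument is a full recovery
 * statement executed when the check fails, which is why call sites in
 * the hunks above end with "return ...;" inside the parentheses.
 */
#define MY_IRDA_ASSERT(expr, action)					\
	do {								\
		if (!(expr)) {						\
			printf("%s: assertion '%s' failed\n",		\
			       __func__, #expr);			\
			action						\
		}							\
	} while (0)

struct lap_cb {
	int magic;
};

#define MY_LAP_MAGIC 0x4c41	/* hypothetical magic value */

static int my_validate(struct lap_cb *self)
{
	MY_IRDA_ASSERT(self != NULL, return -1;);
	MY_IRDA_ASSERT(self->magic == MY_LAP_MAGIC, return -1;);
	return 0;
}

int main(void)
{
	struct lap_cb good = { .magic = MY_LAP_MAGIC };

	my_validate(&good);	/* passes both checks silently */
	my_validate(NULL);	/* first check fails and returns -1 */
	return 0;
}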
diff --git a/net/irda/irlap_event.c b/net/irda/irlap_event.c index 6af86eba7463..16c4ef0f5c1a 100644 --- a/net/irda/irlap_event.c +++ b/net/irda/irlap_event.c | |||
@@ -217,7 +217,7 @@ static void irlap_start_poll_timer(struct irlap_cb *self, int timeout) | |||
217 | } else | 217 | } else |
218 | self->fast_RR = FALSE; | 218 | self->fast_RR = FALSE; |
219 | 219 | ||
220 | IRDA_DEBUG(3, "%s(), timeout=%d (%ld)\n", __FUNCTION__, timeout, jiffies); | 220 | IRDA_DEBUG(3, "%s(), timeout=%d (%ld)\n", __func__, timeout, jiffies); |
221 | #endif /* CONFIG_IRDA_FAST_RR */ | 221 | #endif /* CONFIG_IRDA_FAST_RR */ |
222 | 222 | ||
223 | if (timeout == 0) | 223 | if (timeout == 0) |
@@ -241,7 +241,7 @@ void irlap_do_event(struct irlap_cb *self, IRLAP_EVENT event, | |||
241 | if (!self || self->magic != LAP_MAGIC) | 241 | if (!self || self->magic != LAP_MAGIC) |
242 | return; | 242 | return; |
243 | 243 | ||
244 | IRDA_DEBUG(3, "%s(), event = %s, state = %s\n", __FUNCTION__, | 244 | IRDA_DEBUG(3, "%s(), event = %s, state = %s\n", __func__, |
245 | irlap_event[event], irlap_state[self->state]); | 245 | irlap_event[event], irlap_state[self->state]); |
246 | 246 | ||
247 | ret = (*state[self->state])(self, event, skb, info); | 247 | ret = (*state[self->state])(self, event, skb, info); |
@@ -259,7 +259,7 @@ void irlap_do_event(struct irlap_cb *self, IRLAP_EVENT event, | |||
259 | * try to disconnect link if we send any data frames, since | 259 | * try to disconnect link if we send any data frames, since |
260 | * that will change the state away form XMIT | 260 | * that will change the state away form XMIT |
261 | */ | 261 | */ |
262 | IRDA_DEBUG(2, "%s() : queue len = %d\n", __FUNCTION__, | 262 | IRDA_DEBUG(2, "%s() : queue len = %d\n", __func__, |
263 | skb_queue_len(&self->txq)); | 263 | skb_queue_len(&self->txq)); |
264 | 264 | ||
265 | if (!skb_queue_empty(&self->txq)) { | 265 | if (!skb_queue_empty(&self->txq)) { |
@@ -340,7 +340,7 @@ static int irlap_state_ndm(struct irlap_cb *self, IRLAP_EVENT event, | |||
340 | * media busy in irlap_connect_request() and | 340 | * media busy in irlap_connect_request() and |
341 | * postpone the event... - Jean II */ | 341 | * postpone the event... - Jean II */ |
342 | IRDA_DEBUG(0, "%s(), CONNECT_REQUEST: media busy!\n", | 342 | IRDA_DEBUG(0, "%s(), CONNECT_REQUEST: media busy!\n", |
343 | __FUNCTION__); | 343 | __func__); |
344 | 344 | ||
345 | /* Always switch state before calling upper layers */ | 345 | /* Always switch state before calling upper layers */ |
346 | irlap_next_state(self, LAP_NDM); | 346 | irlap_next_state(self, LAP_NDM); |
@@ -367,7 +367,7 @@ static int irlap_state_ndm(struct irlap_cb *self, IRLAP_EVENT event, | |||
367 | irlap_connect_indication(self, skb); | 367 | irlap_connect_indication(self, skb); |
368 | } else { | 368 | } else { |
369 | IRDA_DEBUG(0, "%s(), SNRM frame does not " | 369 | IRDA_DEBUG(0, "%s(), SNRM frame does not " |
370 | "contain an I field!\n", __FUNCTION__); | 370 | "contain an I field!\n", __func__); |
371 | } | 371 | } |
372 | break; | 372 | break; |
373 | case DISCOVERY_REQUEST: | 373 | case DISCOVERY_REQUEST: |
@@ -375,7 +375,7 @@ static int irlap_state_ndm(struct irlap_cb *self, IRLAP_EVENT event, | |||
375 | 375 | ||
376 | if (self->media_busy) { | 376 | if (self->media_busy) { |
377 | IRDA_DEBUG(1, "%s(), DISCOVERY_REQUEST: media busy!\n", | 377 | IRDA_DEBUG(1, "%s(), DISCOVERY_REQUEST: media busy!\n", |
378 | __FUNCTION__); | 378 | __func__); |
379 | /* irlap->log.condition = MEDIA_BUSY; */ | 379 | /* irlap->log.condition = MEDIA_BUSY; */ |
380 | 380 | ||
381 | /* This will make IrLMP try again */ | 381 | /* This will make IrLMP try again */ |
@@ -441,7 +441,7 @@ static int irlap_state_ndm(struct irlap_cb *self, IRLAP_EVENT event, | |||
441 | * those cases... | 441 | * those cases... |
442 | * Jean II | 442 | * Jean II |
443 | */ | 443 | */ |
444 | IRDA_DEBUG(1, "%s(), Receiving final discovery request, missed the discovery slots :-(\n", __FUNCTION__); | 444 | IRDA_DEBUG(1, "%s(), Receiving final discovery request, missed the discovery slots :-(\n", __func__); |
445 | 445 | ||
446 | /* Last discovery request -> in the log */ | 446 | /* Last discovery request -> in the log */ |
447 | irlap_discovery_indication(self, info->discovery); | 447 | irlap_discovery_indication(self, info->discovery); |
@@ -520,7 +520,7 @@ static int irlap_state_ndm(struct irlap_cb *self, IRLAP_EVENT event, | |||
520 | /* Only accept broadcast frames in NDM mode */ | 520 | /* Only accept broadcast frames in NDM mode */ |
521 | if (info->caddr != CBROADCAST) { | 521 | if (info->caddr != CBROADCAST) { |
522 | IRDA_DEBUG(0, "%s(), not a broadcast frame!\n", | 522 | IRDA_DEBUG(0, "%s(), not a broadcast frame!\n", |
523 | __FUNCTION__); | 523 | __func__); |
524 | } else | 524 | } else |
525 | irlap_unitdata_indication(self, skb); | 525 | irlap_unitdata_indication(self, skb); |
526 | break; | 526 | break; |
@@ -536,10 +536,10 @@ static int irlap_state_ndm(struct irlap_cb *self, IRLAP_EVENT event, | |||
536 | irlap_send_test_frame(self, CBROADCAST, info->daddr, skb); | 536 | irlap_send_test_frame(self, CBROADCAST, info->daddr, skb); |
537 | break; | 537 | break; |
538 | case RECV_TEST_RSP: | 538 | case RECV_TEST_RSP: |
539 | IRDA_DEBUG(0, "%s() not implemented!\n", __FUNCTION__); | 539 | IRDA_DEBUG(0, "%s() not implemented!\n", __func__); |
540 | break; | 540 | break; |
541 | default: | 541 | default: |
542 | IRDA_DEBUG(2, "%s(), Unknown event %s\n", __FUNCTION__, | 542 | IRDA_DEBUG(2, "%s(), Unknown event %s\n", __func__, |
543 | irlap_event[event]); | 543 | irlap_event[event]); |
544 | 544 | ||
545 | ret = -1; | 545 | ret = -1; |
@@ -567,13 +567,13 @@ static int irlap_state_query(struct irlap_cb *self, IRLAP_EVENT event, | |||
567 | IRDA_ASSERT(info != NULL, return -1;); | 567 | IRDA_ASSERT(info != NULL, return -1;); |
568 | IRDA_ASSERT(info->discovery != NULL, return -1;); | 568 | IRDA_ASSERT(info->discovery != NULL, return -1;); |
569 | 569 | ||
570 | IRDA_DEBUG(4, "%s(), daddr=%08x\n", __FUNCTION__, | 570 | IRDA_DEBUG(4, "%s(), daddr=%08x\n", __func__, |
571 | info->discovery->data.daddr); | 571 | info->discovery->data.daddr); |
572 | 572 | ||
573 | if (!self->discovery_log) { | 573 | if (!self->discovery_log) { |
574 | IRDA_WARNING("%s: discovery log is gone! " | 574 | IRDA_WARNING("%s: discovery log is gone! " |
575 | "maybe the discovery timeout has been set" | 575 | "maybe the discovery timeout has been set" |
576 | " too short?\n", __FUNCTION__); | 576 | " too short?\n", __func__); |
577 | break; | 577 | break; |
578 | } | 578 | } |
579 | hashbin_insert(self->discovery_log, | 579 | hashbin_insert(self->discovery_log, |
@@ -598,7 +598,7 @@ static int irlap_state_query(struct irlap_cb *self, IRLAP_EVENT event, | |||
598 | 598 | ||
599 | IRDA_ASSERT(info != NULL, return -1;); | 599 | IRDA_ASSERT(info != NULL, return -1;); |
600 | 600 | ||
601 | IRDA_DEBUG(1, "%s(), Receiving discovery request (s = %d) while performing discovery :-(\n", __FUNCTION__, info->s); | 601 | IRDA_DEBUG(1, "%s(), Receiving discovery request (s = %d) while performing discovery :-(\n", __func__, info->s); |
602 | 602 | ||
603 | /* Last discovery request ? */ | 603 | /* Last discovery request ? */ |
604 | if (info->s == 0xff) | 604 | if (info->s == 0xff) |
@@ -613,7 +613,7 @@ static int irlap_state_query(struct irlap_cb *self, IRLAP_EVENT event, | |||
613 | */ | 613 | */ |
614 | if (irda_device_is_receiving(self->netdev) && !self->add_wait) { | 614 | if (irda_device_is_receiving(self->netdev) && !self->add_wait) { |
615 | IRDA_DEBUG(2, "%s(), device is slow to answer, " | 615 | IRDA_DEBUG(2, "%s(), device is slow to answer, " |
616 | "waiting some more!\n", __FUNCTION__); | 616 | "waiting some more!\n", __func__); |
617 | irlap_start_slot_timer(self, msecs_to_jiffies(10)); | 617 | irlap_start_slot_timer(self, msecs_to_jiffies(10)); |
618 | self->add_wait = TRUE; | 618 | self->add_wait = TRUE; |
619 | return ret; | 619 | return ret; |
@@ -649,7 +649,7 @@ static int irlap_state_query(struct irlap_cb *self, IRLAP_EVENT event, | |||
649 | } | 649 | } |
650 | break; | 650 | break; |
651 | default: | 651 | default: |
652 | IRDA_DEBUG(2, "%s(), Unknown event %s\n", __FUNCTION__, | 652 | IRDA_DEBUG(2, "%s(), Unknown event %s\n", __func__, |
653 | irlap_event[event]); | 653 | irlap_event[event]); |
654 | 654 | ||
655 | ret = -1; | 655 | ret = -1; |
@@ -671,7 +671,7 @@ static int irlap_state_reply(struct irlap_cb *self, IRLAP_EVENT event, | |||
671 | discovery_t *discovery_rsp; | 671 | discovery_t *discovery_rsp; |
672 | int ret=0; | 672 | int ret=0; |
673 | 673 | ||
674 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__); | 674 | IRDA_DEBUG(4, "%s()\n", __func__); |
675 | 675 | ||
676 | IRDA_ASSERT(self != NULL, return -1;); | 676 | IRDA_ASSERT(self != NULL, return -1;); |
677 | IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;); | 677 | IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;); |
@@ -679,7 +679,7 @@ static int irlap_state_reply(struct irlap_cb *self, IRLAP_EVENT event, | |||
679 | switch (event) { | 679 | switch (event) { |
680 | case QUERY_TIMER_EXPIRED: | 680 | case QUERY_TIMER_EXPIRED: |
681 | IRDA_DEBUG(0, "%s(), QUERY_TIMER_EXPIRED <%ld>\n", | 681 | IRDA_DEBUG(0, "%s(), QUERY_TIMER_EXPIRED <%ld>\n", |
682 | __FUNCTION__, jiffies); | 682 | __func__, jiffies); |
683 | irlap_next_state(self, LAP_NDM); | 683 | irlap_next_state(self, LAP_NDM); |
684 | break; | 684 | break; |
685 | case RECV_DISCOVERY_XID_CMD: | 685 | case RECV_DISCOVERY_XID_CMD: |
@@ -717,7 +717,7 @@ static int irlap_state_reply(struct irlap_cb *self, IRLAP_EVENT event, | |||
717 | } | 717 | } |
718 | break; | 718 | break; |
719 | default: | 719 | default: |
720 | IRDA_DEBUG(1, "%s(), Unknown event %d, %s\n", __FUNCTION__, | 720 | IRDA_DEBUG(1, "%s(), Unknown event %d, %s\n", __func__, |
721 | event, irlap_event[event]); | 721 | event, irlap_event[event]); |
722 | 722 | ||
723 | ret = -1; | 723 | ret = -1; |
@@ -738,7 +738,7 @@ static int irlap_state_conn(struct irlap_cb *self, IRLAP_EVENT event, | |||
738 | { | 738 | { |
739 | int ret = 0; | 739 | int ret = 0; |
740 | 740 | ||
741 | IRDA_DEBUG(4, "%s(), event=%s\n", __FUNCTION__, irlap_event[ event]); | 741 | IRDA_DEBUG(4, "%s(), event=%s\n", __func__, irlap_event[ event]); |
742 | 742 | ||
743 | IRDA_ASSERT(self != NULL, return -1;); | 743 | IRDA_ASSERT(self != NULL, return -1;); |
744 | IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;); | 744 | IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;); |
@@ -799,18 +799,18 @@ static int irlap_state_conn(struct irlap_cb *self, IRLAP_EVENT event, | |||
799 | break; | 799 | break; |
800 | case RECV_DISCOVERY_XID_CMD: | 800 | case RECV_DISCOVERY_XID_CMD: |
801 | IRDA_DEBUG(3, "%s(), event RECV_DISCOVER_XID_CMD!\n", | 801 | IRDA_DEBUG(3, "%s(), event RECV_DISCOVER_XID_CMD!\n", |
802 | __FUNCTION__); | 802 | __func__); |
803 | irlap_next_state(self, LAP_NDM); | 803 | irlap_next_state(self, LAP_NDM); |
804 | 804 | ||
805 | break; | 805 | break; |
806 | case DISCONNECT_REQUEST: | 806 | case DISCONNECT_REQUEST: |
807 | IRDA_DEBUG(0, "%s(), Disconnect request!\n", __FUNCTION__); | 807 | IRDA_DEBUG(0, "%s(), Disconnect request!\n", __func__); |
808 | irlap_send_dm_frame(self); | 808 | irlap_send_dm_frame(self); |
809 | irlap_next_state( self, LAP_NDM); | 809 | irlap_next_state( self, LAP_NDM); |
810 | irlap_disconnect_indication(self, LAP_DISC_INDICATION); | 810 | irlap_disconnect_indication(self, LAP_DISC_INDICATION); |
811 | break; | 811 | break; |
812 | default: | 812 | default: |
813 | IRDA_DEBUG(1, "%s(), Unknown event %d, %s\n", __FUNCTION__, | 813 | IRDA_DEBUG(1, "%s(), Unknown event %d, %s\n", __func__, |
814 | event, irlap_event[event]); | 814 | event, irlap_event[event]); |
815 | 815 | ||
816 | ret = -1; | 816 | ret = -1; |
@@ -832,7 +832,7 @@ static int irlap_state_setup(struct irlap_cb *self, IRLAP_EVENT event, | |||
832 | { | 832 | { |
833 | int ret = 0; | 833 | int ret = 0; |
834 | 834 | ||
835 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__); | 835 | IRDA_DEBUG(4, "%s()\n", __func__); |
836 | 836 | ||
837 | IRDA_ASSERT(self != NULL, return -1;); | 837 | IRDA_ASSERT(self != NULL, return -1;); |
838 | IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;); | 838 | IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;); |
@@ -861,7 +861,7 @@ static int irlap_state_setup(struct irlap_cb *self, IRLAP_EVENT event, | |||
861 | self->retry_count++; | 861 | self->retry_count++; |
862 | break; | 862 | break; |
863 | case RECV_SNRM_CMD: | 863 | case RECV_SNRM_CMD: |
864 | IRDA_DEBUG(4, "%s(), SNRM battle!\n", __FUNCTION__); | 864 | IRDA_DEBUG(4, "%s(), SNRM battle!\n", __func__); |
865 | 865 | ||
866 | IRDA_ASSERT(skb != NULL, return 0;); | 866 | IRDA_ASSERT(skb != NULL, return 0;); |
867 | IRDA_ASSERT(info != NULL, return 0;); | 867 | IRDA_ASSERT(info != NULL, return 0;); |
@@ -948,7 +948,7 @@ static int irlap_state_setup(struct irlap_cb *self, IRLAP_EVENT event, | |||
948 | irlap_disconnect_indication(self, LAP_DISC_INDICATION); | 948 | irlap_disconnect_indication(self, LAP_DISC_INDICATION); |
949 | break; | 949 | break; |
950 | default: | 950 | default: |
951 | IRDA_DEBUG(1, "%s(), Unknown event %d, %s\n", __FUNCTION__, | 951 | IRDA_DEBUG(1, "%s(), Unknown event %d, %s\n", __func__, |
952 | event, irlap_event[event]); | 952 | event, irlap_event[event]); |
953 | 953 | ||
954 | ret = -1; | 954 | ret = -1; |
@@ -966,7 +966,7 @@ static int irlap_state_setup(struct irlap_cb *self, IRLAP_EVENT event, | |||
966 | static int irlap_state_offline(struct irlap_cb *self, IRLAP_EVENT event, | 966 | static int irlap_state_offline(struct irlap_cb *self, IRLAP_EVENT event, |
967 | struct sk_buff *skb, struct irlap_info *info) | 967 | struct sk_buff *skb, struct irlap_info *info) |
968 | { | 968 | { |
969 | IRDA_DEBUG( 0, "%s(), Unknown event\n", __FUNCTION__); | 969 | IRDA_DEBUG( 0, "%s(), Unknown event\n", __func__); |
970 | 970 | ||
971 | return -1; | 971 | return -1; |
972 | } | 972 | } |
@@ -1030,7 +1030,7 @@ static int irlap_state_xmit_p(struct irlap_cb *self, IRLAP_EVENT event, | |||
1030 | */ | 1030 | */ |
1031 | if((!nextfit) && (skb->len > self->bytes_left)) { | 1031 | if((!nextfit) && (skb->len > self->bytes_left)) { |
1032 | IRDA_DEBUG(0, "%s(), Not allowed to transmit" | 1032 | IRDA_DEBUG(0, "%s(), Not allowed to transmit" |
1033 | " more bytes!\n", __FUNCTION__); | 1033 | " more bytes!\n", __func__); |
1034 | /* Requeue the skb */ | 1034 | /* Requeue the skb */ |
1035 | skb_queue_head(&self->txq, skb_get(skb)); | 1035 | skb_queue_head(&self->txq, skb_get(skb)); |
1036 | /* | 1036 | /* |
@@ -1082,7 +1082,7 @@ static int irlap_state_xmit_p(struct irlap_cb *self, IRLAP_EVENT event, | |||
1082 | #endif /* CONFIG_IRDA_FAST_RR */ | 1082 | #endif /* CONFIG_IRDA_FAST_RR */ |
1083 | } else { | 1083 | } else { |
1084 | IRDA_DEBUG(4, "%s(), Unable to send! remote busy?\n", | 1084 | IRDA_DEBUG(4, "%s(), Unable to send! remote busy?\n", |
1085 | __FUNCTION__); | 1085 | __func__); |
1086 | skb_queue_head(&self->txq, skb_get(skb)); | 1086 | skb_queue_head(&self->txq, skb_get(skb)); |
1087 | 1087 | ||
1088 | /* | 1088 | /* |
@@ -1094,7 +1094,7 @@ static int irlap_state_xmit_p(struct irlap_cb *self, IRLAP_EVENT event, | |||
1094 | break; | 1094 | break; |
1095 | case POLL_TIMER_EXPIRED: | 1095 | case POLL_TIMER_EXPIRED: |
1096 | IRDA_DEBUG(3, "%s(), POLL_TIMER_EXPIRED <%ld>\n", | 1096 | IRDA_DEBUG(3, "%s(), POLL_TIMER_EXPIRED <%ld>\n", |
1097 | __FUNCTION__, jiffies); | 1097 | __func__, jiffies); |
1098 | irlap_send_rr_frame(self, CMD_FRAME); | 1098 | irlap_send_rr_frame(self, CMD_FRAME); |
1099 | /* Return to NRM properly - Jean II */ | 1099 | /* Return to NRM properly - Jean II */ |
1100 | self->window = self->window_size; | 1100 | self->window = self->window_size; |
@@ -1120,7 +1120,7 @@ static int irlap_state_xmit_p(struct irlap_cb *self, IRLAP_EVENT event, | |||
1120 | break; | 1120 | break; |
1121 | default: | 1121 | default: |
1122 | IRDA_DEBUG(0, "%s(), Unknown event %s\n", | 1122 | IRDA_DEBUG(0, "%s(), Unknown event %s\n", |
1123 | __FUNCTION__, irlap_event[event]); | 1123 | __func__, irlap_event[event]); |
1124 | 1124 | ||
1125 | ret = -EINVAL; | 1125 | ret = -EINVAL; |
1126 | break; | 1126 | break; |
@@ -1138,7 +1138,7 @@ static int irlap_state_pclose(struct irlap_cb *self, IRLAP_EVENT event, | |||
1138 | { | 1138 | { |
1139 | int ret = 0; | 1139 | int ret = 0; |
1140 | 1140 | ||
1141 | IRDA_DEBUG(1, "%s()\n", __FUNCTION__); | 1141 | IRDA_DEBUG(1, "%s()\n", __func__); |
1142 | 1142 | ||
1143 | IRDA_ASSERT(self != NULL, return -1;); | 1143 | IRDA_ASSERT(self != NULL, return -1;); |
1144 | IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;); | 1144 | IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;); |
@@ -1173,7 +1173,7 @@ static int irlap_state_pclose(struct irlap_cb *self, IRLAP_EVENT event, | |||
1173 | } | 1173 | } |
1174 | break; | 1174 | break; |
1175 | default: | 1175 | default: |
1176 | IRDA_DEBUG(1, "%s(), Unknown event %d\n", __FUNCTION__, event); | 1176 | IRDA_DEBUG(1, "%s(), Unknown event %d\n", __func__, event); |
1177 | 1177 | ||
1178 | ret = -1; | 1178 | ret = -1; |
1179 | break; | 1179 | break; |
@@ -1297,7 +1297,7 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event, | |||
1297 | } else { | 1297 | } else { |
1298 | IRDA_DEBUG(4, | 1298 | IRDA_DEBUG(4, |
1299 | "%s(), missing or duplicate frame!\n", | 1299 | "%s(), missing or duplicate frame!\n", |
1300 | __FUNCTION__); | 1300 | __func__); |
1301 | 1301 | ||
1302 | /* Update Nr received */ | 1302 | /* Update Nr received */ |
1303 | irlap_update_nr_received(self, info->nr); | 1303 | irlap_update_nr_received(self, info->nr); |
@@ -1367,7 +1367,7 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event, | |||
1367 | (nr_status == NR_UNEXPECTED)) | 1367 | (nr_status == NR_UNEXPECTED)) |
1368 | { | 1368 | { |
1369 | IRDA_DEBUG(4, "%s(), unexpected nr and ns!\n", | 1369 | IRDA_DEBUG(4, "%s(), unexpected nr and ns!\n", |
1370 | __FUNCTION__); | 1370 | __func__); |
1371 | if (info->pf) { | 1371 | if (info->pf) { |
1372 | /* Resend rejected frames */ | 1372 | /* Resend rejected frames */ |
1373 | irlap_resend_rejected_frames(self, CMD_FRAME); | 1373 | irlap_resend_rejected_frames(self, CMD_FRAME); |
@@ -1407,9 +1407,9 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event, | |||
1407 | } | 1407 | } |
1408 | break; | 1408 | break; |
1409 | } | 1409 | } |
1410 | IRDA_DEBUG(1, "%s(), Not implemented!\n", __FUNCTION__); | 1410 | IRDA_DEBUG(1, "%s(), Not implemented!\n", __func__); |
1411 | IRDA_DEBUG(1, "%s(), event=%s, ns_status=%d, nr_status=%d\n", | 1411 | IRDA_DEBUG(1, "%s(), event=%s, ns_status=%d, nr_status=%d\n", |
1412 | __FUNCTION__, irlap_event[event], ns_status, nr_status); | 1412 | __func__, irlap_event[event], ns_status, nr_status); |
1413 | break; | 1413 | break; |
1414 | case RECV_UI_FRAME: | 1414 | case RECV_UI_FRAME: |
1415 | /* Poll bit cleared? */ | 1415 | /* Poll bit cleared? */ |
@@ -1420,7 +1420,7 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event, | |||
1420 | del_timer(&self->final_timer); | 1420 | del_timer(&self->final_timer); |
1421 | irlap_data_indication(self, skb, TRUE); | 1421 | irlap_data_indication(self, skb, TRUE); |
1422 | irlap_next_state(self, LAP_XMIT_P); | 1422 | irlap_next_state(self, LAP_XMIT_P); |
1423 | IRDA_DEBUG(1, "%s: RECV_UI_FRAME: next state %s\n", __FUNCTION__, irlap_state[self->state]); | 1423 | IRDA_DEBUG(1, "%s: RECV_UI_FRAME: next state %s\n", __func__, irlap_state[self->state]); |
1424 | irlap_start_poll_timer(self, self->poll_timeout); | 1424 | irlap_start_poll_timer(self, self->poll_timeout); |
1425 | } | 1425 | } |
1426 | break; | 1426 | break; |
@@ -1475,7 +1475,7 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event, | |||
1475 | irlap_next_state(self, LAP_NRM_P); | 1475 | irlap_next_state(self, LAP_NRM_P); |
1476 | } else if (ret == NR_INVALID) { | 1476 | } else if (ret == NR_INVALID) { |
1477 | IRDA_DEBUG(1, "%s(), Received RR with " | 1477 | IRDA_DEBUG(1, "%s(), Received RR with " |
1478 | "invalid nr !\n", __FUNCTION__); | 1478 | "invalid nr !\n", __func__); |
1479 | 1479 | ||
1480 | irlap_next_state(self, LAP_RESET_WAIT); | 1480 | irlap_next_state(self, LAP_RESET_WAIT); |
1481 | 1481 | ||
@@ -1580,7 +1580,7 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event, | |||
1580 | irlap_start_final_timer(self, 2 * self->final_timeout); | 1580 | irlap_start_final_timer(self, 2 * self->final_timeout); |
1581 | break; | 1581 | break; |
1582 | case RECV_RD_RSP: | 1582 | case RECV_RD_RSP: |
1583 | IRDA_DEBUG(1, "%s(), RECV_RD_RSP\n", __FUNCTION__); | 1583 | IRDA_DEBUG(1, "%s(), RECV_RD_RSP\n", __func__); |
1584 | 1584 | ||
1585 | irlap_flush_all_queues(self); | 1585 | irlap_flush_all_queues(self); |
1586 | irlap_next_state(self, LAP_XMIT_P); | 1586 | irlap_next_state(self, LAP_XMIT_P); |
@@ -1589,7 +1589,7 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event, | |||
1589 | break; | 1589 | break; |
1590 | default: | 1590 | default: |
1591 | IRDA_DEBUG(1, "%s(), Unknown event %s\n", | 1591 | IRDA_DEBUG(1, "%s(), Unknown event %s\n", |
1592 | __FUNCTION__, irlap_event[event]); | 1592 | __func__, irlap_event[event]); |
1593 | 1593 | ||
1594 | ret = -1; | 1594 | ret = -1; |
1595 | break; | 1595 | break; |
@@ -1609,7 +1609,7 @@ static int irlap_state_reset_wait(struct irlap_cb *self, IRLAP_EVENT event, | |||
1609 | { | 1609 | { |
1610 | int ret = 0; | 1610 | int ret = 0; |
1611 | 1611 | ||
1612 | IRDA_DEBUG(3, "%s(), event = %s\n", __FUNCTION__, irlap_event[event]); | 1612 | IRDA_DEBUG(3, "%s(), event = %s\n", __func__, irlap_event[event]); |
1613 | 1613 | ||
1614 | IRDA_ASSERT(self != NULL, return -1;); | 1614 | IRDA_ASSERT(self != NULL, return -1;); |
1615 | IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;); | 1615 | IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;); |
@@ -1635,7 +1635,7 @@ static int irlap_state_reset_wait(struct irlap_cb *self, IRLAP_EVENT event, | |||
1635 | irlap_next_state( self, LAP_PCLOSE); | 1635 | irlap_next_state( self, LAP_PCLOSE); |
1636 | break; | 1636 | break; |
1637 | default: | 1637 | default: |
1638 | IRDA_DEBUG(2, "%s(), Unknown event %s\n", __FUNCTION__, | 1638 | IRDA_DEBUG(2, "%s(), Unknown event %s\n", __func__, |
1639 | irlap_event[event]); | 1639 | irlap_event[event]); |
1640 | 1640 | ||
1641 | ret = -1; | 1641 | ret = -1; |
@@ -1656,7 +1656,7 @@ static int irlap_state_reset(struct irlap_cb *self, IRLAP_EVENT event, | |||
1656 | { | 1656 | { |
1657 | int ret = 0; | 1657 | int ret = 0; |
1658 | 1658 | ||
1659 | IRDA_DEBUG(3, "%s(), event = %s\n", __FUNCTION__, irlap_event[event]); | 1659 | IRDA_DEBUG(3, "%s(), event = %s\n", __func__, irlap_event[event]); |
1660 | 1660 | ||
1661 | IRDA_ASSERT(self != NULL, return -1;); | 1661 | IRDA_ASSERT(self != NULL, return -1;); |
1662 | IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;); | 1662 | IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;); |
@@ -1714,7 +1714,7 @@ static int irlap_state_reset(struct irlap_cb *self, IRLAP_EVENT event, | |||
1714 | * state | 1714 | * state |
1715 | */ | 1715 | */ |
1716 | if (!info) { | 1716 | if (!info) { |
1717 | IRDA_DEBUG(3, "%s(), RECV_SNRM_CMD\n", __FUNCTION__); | 1717 | IRDA_DEBUG(3, "%s(), RECV_SNRM_CMD\n", __func__); |
1718 | irlap_initiate_connection_state(self); | 1718 | irlap_initiate_connection_state(self); |
1719 | irlap_wait_min_turn_around(self, &self->qos_tx); | 1719 | irlap_wait_min_turn_around(self, &self->qos_tx); |
1720 | irlap_send_ua_response_frame(self, &self->qos_rx); | 1720 | irlap_send_ua_response_frame(self, &self->qos_rx); |
@@ -1724,12 +1724,12 @@ static int irlap_state_reset(struct irlap_cb *self, IRLAP_EVENT event, | |||
1724 | } else { | 1724 | } else { |
1725 | IRDA_DEBUG(0, | 1725 | IRDA_DEBUG(0, |
1726 | "%s(), SNRM frame contained an I field!\n", | 1726 | "%s(), SNRM frame contained an I field!\n", |
1727 | __FUNCTION__); | 1727 | __func__); |
1728 | } | 1728 | } |
1729 | break; | 1729 | break; |
1730 | default: | 1730 | default: |
1731 | IRDA_DEBUG(1, "%s(), Unknown event %s\n", | 1731 | IRDA_DEBUG(1, "%s(), Unknown event %s\n", |
1732 | __FUNCTION__, irlap_event[event]); | 1732 | __func__, irlap_event[event]); |
1733 | 1733 | ||
1734 | ret = -1; | 1734 | ret = -1; |
1735 | break; | 1735 | break; |
@@ -1749,7 +1749,7 @@ static int irlap_state_xmit_s(struct irlap_cb *self, IRLAP_EVENT event, | |||
1749 | { | 1749 | { |
1750 | int ret = 0; | 1750 | int ret = 0; |
1751 | 1751 | ||
1752 | IRDA_DEBUG(4, "%s(), event=%s\n", __FUNCTION__, irlap_event[event]); | 1752 | IRDA_DEBUG(4, "%s(), event=%s\n", __func__, irlap_event[event]); |
1753 | 1753 | ||
1754 | IRDA_ASSERT(self != NULL, return -ENODEV;); | 1754 | IRDA_ASSERT(self != NULL, return -ENODEV;); |
1755 | IRDA_ASSERT(self->magic == LAP_MAGIC, return -EBADR;); | 1755 | IRDA_ASSERT(self->magic == LAP_MAGIC, return -EBADR;); |
@@ -1786,7 +1786,7 @@ static int irlap_state_xmit_s(struct irlap_cb *self, IRLAP_EVENT event, | |||
1786 | */ | 1786 | */ |
1787 | if((!nextfit) && (skb->len > self->bytes_left)) { | 1787 | if((!nextfit) && (skb->len > self->bytes_left)) { |
1788 | IRDA_DEBUG(0, "%s(), Not allowed to transmit" | 1788 | IRDA_DEBUG(0, "%s(), Not allowed to transmit" |
1789 | " more bytes!\n", __FUNCTION__); | 1789 | " more bytes!\n", __func__); |
1790 | /* Requeue the skb */ | 1790 | /* Requeue the skb */ |
1791 | skb_queue_head(&self->txq, skb_get(skb)); | 1791 | skb_queue_head(&self->txq, skb_get(skb)); |
1792 | 1792 | ||
@@ -1832,7 +1832,7 @@ static int irlap_state_xmit_s(struct irlap_cb *self, IRLAP_EVENT event, | |||
1832 | ret = -EPROTO; | 1832 | ret = -EPROTO; |
1833 | } | 1833 | } |
1834 | } else { | 1834 | } else { |
1835 | IRDA_DEBUG(2, "%s(), Unable to send!\n", __FUNCTION__); | 1835 | IRDA_DEBUG(2, "%s(), Unable to send!\n", __func__); |
1836 | skb_queue_head(&self->txq, skb_get(skb)); | 1836 | skb_queue_head(&self->txq, skb_get(skb)); |
1837 | ret = -EPROTO; | 1837 | ret = -EPROTO; |
1838 | } | 1838 | } |
@@ -1848,7 +1848,7 @@ static int irlap_state_xmit_s(struct irlap_cb *self, IRLAP_EVENT event, | |||
1848 | * when we return... - Jean II */ | 1848 | * when we return... - Jean II */ |
1849 | break; | 1849 | break; |
1850 | default: | 1850 | default: |
1851 | IRDA_DEBUG(2, "%s(), Unknown event %s\n", __FUNCTION__, | 1851 | IRDA_DEBUG(2, "%s(), Unknown event %s\n", __func__, |
1852 | irlap_event[event]); | 1852 | irlap_event[event]); |
1853 | 1853 | ||
1854 | ret = -EINVAL; | 1854 | ret = -EINVAL; |
@@ -1871,7 +1871,7 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event, | |||
1871 | int nr_status; | 1871 | int nr_status; |
1872 | int ret = 0; | 1872 | int ret = 0; |
1873 | 1873 | ||
1874 | IRDA_DEBUG(4, "%s(), event=%s\n", __FUNCTION__, irlap_event[ event]); | 1874 | IRDA_DEBUG(4, "%s(), event=%s\n", __func__, irlap_event[ event]); |
1875 | 1875 | ||
1876 | IRDA_ASSERT(self != NULL, return -1;); | 1876 | IRDA_ASSERT(self != NULL, return -1;); |
1877 | IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;); | 1877 | IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;); |
@@ -1880,7 +1880,7 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event, | |||
1880 | case RECV_I_CMD: /* Optimize for the common case */ | 1880 | case RECV_I_CMD: /* Optimize for the common case */ |
1881 | /* FIXME: must check for remote_busy below */ | 1881 | /* FIXME: must check for remote_busy below */ |
1882 | IRDA_DEBUG(4, "%s(), event=%s nr=%d, vs=%d, ns=%d, " | 1882 | IRDA_DEBUG(4, "%s(), event=%s nr=%d, vs=%d, ns=%d, " |
1883 | "vr=%d, pf=%d\n", __FUNCTION__, | 1883 | "vr=%d, pf=%d\n", __func__, |
1884 | irlap_event[event], info->nr, | 1884 | irlap_event[event], info->nr, |
1885 | self->vs, info->ns, self->vr, info->pf); | 1885 | self->vs, info->ns, self->vr, info->pf); |
1886 | 1886 | ||
@@ -2112,21 +2112,21 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event, | |||
2112 | irlap_next_state(self, LAP_NRM_S); | 2112 | irlap_next_state(self, LAP_NRM_S); |
2113 | } else { | 2113 | } else { |
2114 | IRDA_DEBUG(1, "%s(), invalid nr not implemented!\n", | 2114 | IRDA_DEBUG(1, "%s(), invalid nr not implemented!\n", |
2115 | __FUNCTION__); | 2115 | __func__); |
2116 | } | 2116 | } |
2117 | break; | 2117 | break; |
2118 | case RECV_SNRM_CMD: | 2118 | case RECV_SNRM_CMD: |
2119 | /* SNRM frame is not allowed to contain an I-field */ | 2119 | /* SNRM frame is not allowed to contain an I-field */ |
2120 | if (!info) { | 2120 | if (!info) { |
2121 | del_timer(&self->wd_timer); | 2121 | del_timer(&self->wd_timer); |
2122 | IRDA_DEBUG(1, "%s(), received SNRM cmd\n", __FUNCTION__); | 2122 | IRDA_DEBUG(1, "%s(), received SNRM cmd\n", __func__); |
2123 | irlap_next_state(self, LAP_RESET_CHECK); | 2123 | irlap_next_state(self, LAP_RESET_CHECK); |
2124 | 2124 | ||
2125 | irlap_reset_indication(self); | 2125 | irlap_reset_indication(self); |
2126 | } else { | 2126 | } else { |
2127 | IRDA_DEBUG(0, | 2127 | IRDA_DEBUG(0, |
2128 | "%s(), SNRM frame contained an I-field!\n", | 2128 | "%s(), SNRM frame contained an I-field!\n", |
2129 | __FUNCTION__); | 2129 | __func__); |
2130 | 2130 | ||
2131 | } | 2131 | } |
2132 | break; | 2132 | break; |
@@ -2158,7 +2158,7 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event, | |||
2158 | * which explain why we use (self->N2 / 2) here !!! | 2158 | * which explain why we use (self->N2 / 2) here !!! |
2159 | * Jean II | 2159 | * Jean II |
2160 | */ | 2160 | */ |
2161 | IRDA_DEBUG(1, "%s(), retry_count = %d\n", __FUNCTION__, | 2161 | IRDA_DEBUG(1, "%s(), retry_count = %d\n", __func__, |
2162 | self->retry_count); | 2162 | self->retry_count); |
2163 | 2163 | ||
2164 | if (self->retry_count < (self->N2 / 2)) { | 2164 | if (self->retry_count < (self->N2 / 2)) { |
@@ -2211,7 +2211,7 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event, | |||
2211 | irlap_send_test_frame(self, self->caddr, info->daddr, skb); | 2211 | irlap_send_test_frame(self, self->caddr, info->daddr, skb); |
2212 | break; | 2212 | break; |
2213 | default: | 2213 | default: |
2214 | IRDA_DEBUG(1, "%s(), Unknown event %d, (%s)\n", __FUNCTION__, | 2214 | IRDA_DEBUG(1, "%s(), Unknown event %d, (%s)\n", __func__, |
2215 | event, irlap_event[event]); | 2215 | event, irlap_event[event]); |
2216 | 2216 | ||
2217 | ret = -EINVAL; | 2217 | ret = -EINVAL; |
@@ -2228,7 +2228,7 @@ static int irlap_state_sclose(struct irlap_cb *self, IRLAP_EVENT event, | |||
2228 | { | 2228 | { |
2229 | int ret = 0; | 2229 | int ret = 0; |
2230 | 2230 | ||
2231 | IRDA_DEBUG(1, "%s()\n", __FUNCTION__); | 2231 | IRDA_DEBUG(1, "%s()\n", __func__); |
2232 | 2232 | ||
2233 | IRDA_ASSERT(self != NULL, return -ENODEV;); | 2233 | IRDA_ASSERT(self != NULL, return -ENODEV;); |
2234 | IRDA_ASSERT(self->magic == LAP_MAGIC, return -EBADR;); | 2234 | IRDA_ASSERT(self->magic == LAP_MAGIC, return -EBADR;); |
@@ -2285,7 +2285,7 @@ static int irlap_state_sclose(struct irlap_cb *self, IRLAP_EVENT event, | |||
2285 | break; /* stay in SCLOSE */ | 2285 | break; /* stay in SCLOSE */ |
2286 | } | 2286 | } |
2287 | 2287 | ||
2288 | IRDA_DEBUG(1, "%s(), Unknown event %d, (%s)\n", __FUNCTION__, | 2288 | IRDA_DEBUG(1, "%s(), Unknown event %d, (%s)\n", __func__, |
2289 | event, irlap_event[event]); | 2289 | event, irlap_event[event]); |
2290 | 2290 | ||
2291 | ret = -EINVAL; | 2291 | ret = -EINVAL; |
@@ -2301,7 +2301,7 @@ static int irlap_state_reset_check( struct irlap_cb *self, IRLAP_EVENT event, | |||
2301 | { | 2301 | { |
2302 | int ret = 0; | 2302 | int ret = 0; |
2303 | 2303 | ||
2304 | IRDA_DEBUG(1, "%s(), event=%s\n", __FUNCTION__, irlap_event[event]); | 2304 | IRDA_DEBUG(1, "%s(), event=%s\n", __func__, irlap_event[event]); |
2305 | 2305 | ||
2306 | IRDA_ASSERT(self != NULL, return -ENODEV;); | 2306 | IRDA_ASSERT(self != NULL, return -ENODEV;); |
2307 | IRDA_ASSERT(self->magic == LAP_MAGIC, return -EBADR;); | 2307 | IRDA_ASSERT(self->magic == LAP_MAGIC, return -EBADR;); |
@@ -2322,7 +2322,7 @@ static int irlap_state_reset_check( struct irlap_cb *self, IRLAP_EVENT event, | |||
2322 | irlap_next_state(self, LAP_SCLOSE); | 2322 | irlap_next_state(self, LAP_SCLOSE); |
2323 | break; | 2323 | break; |
2324 | default: | 2324 | default: |
2325 | IRDA_DEBUG(1, "%s(), Unknown event %d, (%s)\n", __FUNCTION__, | 2325 | IRDA_DEBUG(1, "%s(), Unknown event %d, (%s)\n", __func__, |
2326 | event, irlap_event[event]); | 2326 | event, irlap_event[event]); |
2327 | 2327 | ||
2328 | ret = -EINVAL; | 2328 | ret = -EINVAL; |
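The hunks above and below are a mechanical substitution: the GCC-specific `__FUNCTION__` identifier in the IrDA debug and error macros is replaced by the C99-standard `__func__`, which expands to the same enclosing-function name. A minimal user-space sketch (not kernel code; the `DBG()` macro below is a simplified stand-in for `IRDA_DEBUG`, not the kernel's actual definition) shows why the two spellings are interchangeable in these call sites:

```c
/*
 * Minimal user-space sketch: __func__ is the C99-standard predefined
 * identifier, __FUNCTION__ the older GCC spelling, and both expand to
 * the name of the enclosing function. DBG() is a simplified stand-in
 * for IRDA_DEBUG, not the kernel's actual macro.
 */
#include <stdio.h>

#define DBG(fmt, ...) printf(fmt, ##__VA_ARGS__)

static void demo(void)
{
	DBG("%s(), old spelling\n", __FUNCTION__);	/* GCC extension */
	DBG("%s(), new spelling\n", __func__);		/* C99 standard */
}

int main(void)
{
	demo();		/* both lines print "demo(), ..." */
	return 0;
}
```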
diff --git a/net/irda/irlap_frame.c b/net/irda/irlap_frame.c index 7c132d6342af..a38b231c8689 100644 --- a/net/irda/irlap_frame.c +++ b/net/irda/irlap_frame.c | |||
@@ -102,7 +102,7 @@ void irlap_queue_xmit(struct irlap_cb *self, struct sk_buff *skb) | |||
102 | irlap_insert_info(self, skb); | 102 | irlap_insert_info(self, skb); |
103 | 103 | ||
104 | if (unlikely(self->mode & IRDA_MODE_MONITOR)) { | 104 | if (unlikely(self->mode & IRDA_MODE_MONITOR)) { |
105 | IRDA_DEBUG(3, "%s(): %s is in monitor mode\n", __FUNCTION__, | 105 | IRDA_DEBUG(3, "%s(): %s is in monitor mode\n", __func__, |
106 | self->netdev->name); | 106 | self->netdev->name); |
107 | dev_kfree_skb(skb); | 107 | dev_kfree_skb(skb); |
108 | return; | 108 | return; |
@@ -182,7 +182,7 @@ static void irlap_recv_snrm_cmd(struct irlap_cb *self, struct sk_buff *skb, | |||
182 | /* Check if the new connection address is valid */ | 182 | /* Check if the new connection address is valid */ |
183 | if ((info->caddr == 0x00) || (info->caddr == 0xfe)) { | 183 | if ((info->caddr == 0x00) || (info->caddr == 0xfe)) { |
184 | IRDA_DEBUG(3, "%s(), invalid connection address!\n", | 184 | IRDA_DEBUG(3, "%s(), invalid connection address!\n", |
185 | __FUNCTION__); | 185 | __func__); |
186 | return; | 186 | return; |
187 | } | 187 | } |
188 | 188 | ||
@@ -193,7 +193,7 @@ static void irlap_recv_snrm_cmd(struct irlap_cb *self, struct sk_buff *skb, | |||
193 | /* Only accept if addressed directly to us */ | 193 | /* Only accept if addressed directly to us */ |
194 | if (info->saddr != self->saddr) { | 194 | if (info->saddr != self->saddr) { |
195 | IRDA_DEBUG(2, "%s(), not addressed to us!\n", | 195 | IRDA_DEBUG(2, "%s(), not addressed to us!\n", |
196 | __FUNCTION__); | 196 | __func__); |
197 | return; | 197 | return; |
198 | } | 198 | } |
199 | irlap_do_event(self, RECV_SNRM_CMD, skb, info); | 199 | irlap_do_event(self, RECV_SNRM_CMD, skb, info); |
@@ -215,7 +215,7 @@ void irlap_send_ua_response_frame(struct irlap_cb *self, struct qos_info *qos) | |||
215 | struct ua_frame *frame; | 215 | struct ua_frame *frame; |
216 | int ret; | 216 | int ret; |
217 | 217 | ||
218 | IRDA_DEBUG(2, "%s() <%ld>\n", __FUNCTION__, jiffies); | 218 | IRDA_DEBUG(2, "%s() <%ld>\n", __func__, jiffies); |
219 | 219 | ||
220 | IRDA_ASSERT(self != NULL, return;); | 220 | IRDA_ASSERT(self != NULL, return;); |
221 | IRDA_ASSERT(self->magic == LAP_MAGIC, return;); | 221 | IRDA_ASSERT(self->magic == LAP_MAGIC, return;); |
@@ -290,7 +290,7 @@ void irlap_send_disc_frame(struct irlap_cb *self) | |||
290 | struct sk_buff *tx_skb = NULL; | 290 | struct sk_buff *tx_skb = NULL; |
291 | struct disc_frame *frame; | 291 | struct disc_frame *frame; |
292 | 292 | ||
293 | IRDA_DEBUG(3, "%s()\n", __FUNCTION__); | 293 | IRDA_DEBUG(3, "%s()\n", __func__); |
294 | 294 | ||
295 | IRDA_ASSERT(self != NULL, return;); | 295 | IRDA_ASSERT(self != NULL, return;); |
296 | IRDA_ASSERT(self->magic == LAP_MAGIC, return;); | 296 | IRDA_ASSERT(self->magic == LAP_MAGIC, return;); |
@@ -321,7 +321,7 @@ void irlap_send_discovery_xid_frame(struct irlap_cb *self, int S, __u8 s, | |||
321 | __u32 bcast = BROADCAST; | 321 | __u32 bcast = BROADCAST; |
322 | __u8 *info; | 322 | __u8 *info; |
323 | 323 | ||
324 | IRDA_DEBUG(4, "%s(), s=%d, S=%d, command=%d\n", __FUNCTION__, | 324 | IRDA_DEBUG(4, "%s(), s=%d, S=%d, command=%d\n", __func__, |
325 | s, S, command); | 325 | s, S, command); |
326 | 326 | ||
327 | IRDA_ASSERT(self != NULL, return;); | 327 | IRDA_ASSERT(self != NULL, return;); |
@@ -414,13 +414,13 @@ static void irlap_recv_discovery_xid_rsp(struct irlap_cb *self, | |||
414 | __u8 *discovery_info; | 414 | __u8 *discovery_info; |
415 | char *text; | 415 | char *text; |
416 | 416 | ||
417 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__); | 417 | IRDA_DEBUG(4, "%s()\n", __func__); |
418 | 418 | ||
419 | IRDA_ASSERT(self != NULL, return;); | 419 | IRDA_ASSERT(self != NULL, return;); |
420 | IRDA_ASSERT(self->magic == LAP_MAGIC, return;); | 420 | IRDA_ASSERT(self->magic == LAP_MAGIC, return;); |
421 | 421 | ||
422 | if (!pskb_may_pull(skb, sizeof(struct xid_frame))) { | 422 | if (!pskb_may_pull(skb, sizeof(struct xid_frame))) { |
423 | IRDA_ERROR("%s: frame too short!\n", __FUNCTION__); | 423 | IRDA_ERROR("%s: frame too short!\n", __func__); |
424 | return; | 424 | return; |
425 | } | 425 | } |
426 | 426 | ||
@@ -432,12 +432,12 @@ static void irlap_recv_discovery_xid_rsp(struct irlap_cb *self, | |||
432 | /* Make sure frame is addressed to us */ | 432 | /* Make sure frame is addressed to us */ |
433 | if ((info->saddr != self->saddr) && (info->saddr != BROADCAST)) { | 433 | if ((info->saddr != self->saddr) && (info->saddr != BROADCAST)) { |
434 | IRDA_DEBUG(0, "%s(), frame is not addressed to us!\n", | 434 | IRDA_DEBUG(0, "%s(), frame is not addressed to us!\n", |
435 | __FUNCTION__); | 435 | __func__); |
436 | return; | 436 | return; |
437 | } | 437 | } |
438 | 438 | ||
439 | if ((discovery = kzalloc(sizeof(discovery_t), GFP_ATOMIC)) == NULL) { | 439 | if ((discovery = kzalloc(sizeof(discovery_t), GFP_ATOMIC)) == NULL) { |
440 | IRDA_WARNING("%s: kmalloc failed!\n", __FUNCTION__); | 440 | IRDA_WARNING("%s: kmalloc failed!\n", __func__); |
441 | return; | 441 | return; |
442 | } | 442 | } |
443 | 443 | ||
@@ -445,7 +445,7 @@ static void irlap_recv_discovery_xid_rsp(struct irlap_cb *self, | |||
445 | discovery->data.saddr = self->saddr; | 445 | discovery->data.saddr = self->saddr; |
446 | discovery->timestamp = jiffies; | 446 | discovery->timestamp = jiffies; |
447 | 447 | ||
448 | IRDA_DEBUG(4, "%s(), daddr=%08x\n", __FUNCTION__, | 448 | IRDA_DEBUG(4, "%s(), daddr=%08x\n", __func__, |
449 | discovery->data.daddr); | 449 | discovery->data.daddr); |
450 | 450 | ||
451 | discovery_info = skb_pull(skb, sizeof(struct xid_frame)); | 451 | discovery_info = skb_pull(skb, sizeof(struct xid_frame)); |
@@ -491,7 +491,7 @@ static void irlap_recv_discovery_xid_cmd(struct irlap_cb *self, | |||
491 | char *text; | 491 | char *text; |
492 | 492 | ||
493 | if (!pskb_may_pull(skb, sizeof(struct xid_frame))) { | 493 | if (!pskb_may_pull(skb, sizeof(struct xid_frame))) { |
494 | IRDA_ERROR("%s: frame too short!\n", __FUNCTION__); | 494 | IRDA_ERROR("%s: frame too short!\n", __func__); |
495 | return; | 495 | return; |
496 | } | 496 | } |
497 | 497 | ||
@@ -503,7 +503,7 @@ static void irlap_recv_discovery_xid_cmd(struct irlap_cb *self, | |||
503 | /* Make sure frame is addressed to us */ | 503 | /* Make sure frame is addressed to us */ |
504 | if ((info->saddr != self->saddr) && (info->saddr != BROADCAST)) { | 504 | if ((info->saddr != self->saddr) && (info->saddr != BROADCAST)) { |
505 | IRDA_DEBUG(0, "%s(), frame is not addressed to us!\n", | 505 | IRDA_DEBUG(0, "%s(), frame is not addressed to us!\n", |
506 | __FUNCTION__); | 506 | __func__); |
507 | return; | 507 | return; |
508 | } | 508 | } |
509 | 509 | ||
@@ -536,7 +536,7 @@ static void irlap_recv_discovery_xid_cmd(struct irlap_cb *self, | |||
536 | if((discovery_info == NULL) || | 536 | if((discovery_info == NULL) || |
537 | !pskb_may_pull(skb, 3)) { | 537 | !pskb_may_pull(skb, 3)) { |
538 | IRDA_ERROR("%s: discovery frame too short!\n", | 538 | IRDA_ERROR("%s: discovery frame too short!\n", |
539 | __FUNCTION__); | 539 | __func__); |
540 | return; | 540 | return; |
541 | } | 541 | } |
542 | 542 | ||
@@ -545,7 +545,7 @@ static void irlap_recv_discovery_xid_cmd(struct irlap_cb *self, | |||
545 | */ | 545 | */ |
546 | discovery = kmalloc(sizeof(discovery_t), GFP_ATOMIC); | 546 | discovery = kmalloc(sizeof(discovery_t), GFP_ATOMIC); |
547 | if (!discovery) { | 547 | if (!discovery) { |
548 | IRDA_WARNING("%s: unable to malloc!\n", __FUNCTION__); | 548 | IRDA_WARNING("%s: unable to malloc!\n", __func__); |
549 | return; | 549 | return; |
550 | } | 550 | } |
551 | 551 | ||
@@ -657,7 +657,7 @@ static void irlap_recv_rnr_frame(struct irlap_cb *self, struct sk_buff *skb, | |||
657 | { | 657 | { |
658 | info->nr = skb->data[1] >> 5; | 658 | info->nr = skb->data[1] >> 5; |
659 | 659 | ||
660 | IRDA_DEBUG(4, "%s(), nr=%d, %ld\n", __FUNCTION__, info->nr, jiffies); | 660 | IRDA_DEBUG(4, "%s(), nr=%d, %ld\n", __func__, info->nr, jiffies); |
661 | 661 | ||
662 | if (command) | 662 | if (command) |
663 | irlap_do_event(self, RECV_RNR_CMD, skb, info); | 663 | irlap_do_event(self, RECV_RNR_CMD, skb, info); |
@@ -668,7 +668,7 @@ static void irlap_recv_rnr_frame(struct irlap_cb *self, struct sk_buff *skb, | |||
668 | static void irlap_recv_rej_frame(struct irlap_cb *self, struct sk_buff *skb, | 668 | static void irlap_recv_rej_frame(struct irlap_cb *self, struct sk_buff *skb, |
669 | struct irlap_info *info, int command) | 669 | struct irlap_info *info, int command) |
670 | { | 670 | { |
671 | IRDA_DEBUG(0, "%s()\n", __FUNCTION__); | 671 | IRDA_DEBUG(0, "%s()\n", __func__); |
672 | 672 | ||
673 | info->nr = skb->data[1] >> 5; | 673 | info->nr = skb->data[1] >> 5; |
674 | 674 | ||
@@ -682,7 +682,7 @@ static void irlap_recv_rej_frame(struct irlap_cb *self, struct sk_buff *skb, | |||
682 | static void irlap_recv_srej_frame(struct irlap_cb *self, struct sk_buff *skb, | 682 | static void irlap_recv_srej_frame(struct irlap_cb *self, struct sk_buff *skb, |
683 | struct irlap_info *info, int command) | 683 | struct irlap_info *info, int command) |
684 | { | 684 | { |
685 | IRDA_DEBUG(0, "%s()\n", __FUNCTION__); | 685 | IRDA_DEBUG(0, "%s()\n", __func__); |
686 | 686 | ||
687 | info->nr = skb->data[1] >> 5; | 687 | info->nr = skb->data[1] >> 5; |
688 | 688 | ||
@@ -696,7 +696,7 @@ static void irlap_recv_srej_frame(struct irlap_cb *self, struct sk_buff *skb, | |||
696 | static void irlap_recv_disc_frame(struct irlap_cb *self, struct sk_buff *skb, | 696 | static void irlap_recv_disc_frame(struct irlap_cb *self, struct sk_buff *skb, |
697 | struct irlap_info *info, int command) | 697 | struct irlap_info *info, int command) |
698 | { | 698 | { |
699 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__); | 699 | IRDA_DEBUG(2, "%s()\n", __func__); |
700 | 700 | ||
701 | /* Check if this is a command or a response frame */ | 701 | /* Check if this is a command or a response frame */ |
702 | if (command) | 702 | if (command) |
@@ -755,7 +755,7 @@ void irlap_send_data_primary(struct irlap_cb *self, struct sk_buff *skb) | |||
755 | 755 | ||
756 | irlap_send_i_frame( self, tx_skb, CMD_FRAME); | 756 | irlap_send_i_frame( self, tx_skb, CMD_FRAME); |
757 | } else { | 757 | } else { |
758 | IRDA_DEBUG(4, "%s(), sending unreliable frame\n", __FUNCTION__); | 758 | IRDA_DEBUG(4, "%s(), sending unreliable frame\n", __func__); |
759 | irlap_send_ui_frame(self, skb_get(skb), self->caddr, CMD_FRAME); | 759 | irlap_send_ui_frame(self, skb_get(skb), self->caddr, CMD_FRAME); |
760 | self->window -= 1; | 760 | self->window -= 1; |
761 | } | 761 | } |
@@ -808,7 +808,7 @@ void irlap_send_data_primary_poll(struct irlap_cb *self, struct sk_buff *skb) | |||
808 | irlap_next_state(self, LAP_NRM_P); | 808 | irlap_next_state(self, LAP_NRM_P); |
809 | irlap_send_i_frame(self, tx_skb, CMD_FRAME); | 809 | irlap_send_i_frame(self, tx_skb, CMD_FRAME); |
810 | } else { | 810 | } else { |
811 | IRDA_DEBUG(4, "%s(), sending unreliable frame\n", __FUNCTION__); | 811 | IRDA_DEBUG(4, "%s(), sending unreliable frame\n", __func__); |
812 | 812 | ||
813 | if (self->ack_required) { | 813 | if (self->ack_required) { |
814 | irlap_send_ui_frame(self, skb_get(skb), self->caddr, CMD_FRAME); | 814 | irlap_send_ui_frame(self, skb_get(skb), self->caddr, CMD_FRAME); |
@@ -835,7 +835,7 @@ void irlap_send_data_primary_poll(struct irlap_cb *self, struct sk_buff *skb) | |||
835 | * See max_line_capacities[][] in qos.c for details. Jean II */ | 835 | * See max_line_capacities[][] in qos.c for details. Jean II */ |
836 | transmission_time -= (self->final_timeout * self->bytes_left | 836 | transmission_time -= (self->final_timeout * self->bytes_left |
837 | / self->line_capacity); | 837 | / self->line_capacity); |
838 | IRDA_DEBUG(4, "%s() adjusting transmission_time : ft=%d, bl=%d, lc=%d -> tt=%d\n", __FUNCTION__, self->final_timeout, self->bytes_left, self->line_capacity, transmission_time); | 838 | IRDA_DEBUG(4, "%s() adjusting transmission_time : ft=%d, bl=%d, lc=%d -> tt=%d\n", __func__, self->final_timeout, self->bytes_left, self->line_capacity, transmission_time); |
839 | 839 | ||
840 | /* We are allowed to transmit a maximum number of bytes again. */ | 840 | /* We are allowed to transmit a maximum number of bytes again. */ |
841 | self->bytes_left = self->line_capacity; | 841 | self->bytes_left = self->line_capacity; |
@@ -1001,7 +1001,7 @@ void irlap_resend_rejected_frames(struct irlap_cb *self, int command) | |||
1001 | /* tx_skb = skb_clone( skb, GFP_ATOMIC); */ | 1001 | /* tx_skb = skb_clone( skb, GFP_ATOMIC); */ |
1002 | tx_skb = skb_copy(skb, GFP_ATOMIC); | 1002 | tx_skb = skb_copy(skb, GFP_ATOMIC); |
1003 | if (!tx_skb) { | 1003 | if (!tx_skb) { |
1004 | IRDA_DEBUG(0, "%s(), unable to copy\n", __FUNCTION__); | 1004 | IRDA_DEBUG(0, "%s(), unable to copy\n", __func__); |
1005 | return; | 1005 | return; |
1006 | } | 1006 | } |
1007 | 1007 | ||
@@ -1033,7 +1033,7 @@ void irlap_resend_rejected_frames(struct irlap_cb *self, int command) | |||
1033 | */ | 1033 | */ |
1034 | while (!skb_queue_empty(&self->txq)) { | 1034 | while (!skb_queue_empty(&self->txq)) { |
1035 | 1035 | ||
1036 | IRDA_DEBUG(0, "%s(), sending additional frames!\n", __FUNCTION__); | 1036 | IRDA_DEBUG(0, "%s(), sending additional frames!\n", __func__); |
1037 | if (self->window > 0) { | 1037 | if (self->window > 0) { |
1038 | skb = skb_dequeue( &self->txq); | 1038 | skb = skb_dequeue( &self->txq); |
1039 | IRDA_ASSERT(skb != NULL, return;); | 1039 | IRDA_ASSERT(skb != NULL, return;); |
@@ -1073,7 +1073,7 @@ void irlap_resend_rejected_frame(struct irlap_cb *self, int command) | |||
1073 | /* tx_skb = skb_clone( skb, GFP_ATOMIC); */ | 1073 | /* tx_skb = skb_clone( skb, GFP_ATOMIC); */ |
1074 | tx_skb = skb_copy(skb, GFP_ATOMIC); | 1074 | tx_skb = skb_copy(skb, GFP_ATOMIC); |
1075 | if (!tx_skb) { | 1075 | if (!tx_skb) { |
1076 | IRDA_DEBUG(0, "%s(), unable to copy\n", __FUNCTION__); | 1076 | IRDA_DEBUG(0, "%s(), unable to copy\n", __func__); |
1077 | return; | 1077 | return; |
1078 | } | 1078 | } |
1079 | 1079 | ||
@@ -1096,7 +1096,7 @@ void irlap_resend_rejected_frame(struct irlap_cb *self, int command) | |||
1096 | void irlap_send_ui_frame(struct irlap_cb *self, struct sk_buff *skb, | 1096 | void irlap_send_ui_frame(struct irlap_cb *self, struct sk_buff *skb, |
1097 | __u8 caddr, int command) | 1097 | __u8 caddr, int command) |
1098 | { | 1098 | { |
1099 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__); | 1099 | IRDA_DEBUG(4, "%s()\n", __func__); |
1100 | 1100 | ||
1101 | IRDA_ASSERT(self != NULL, return;); | 1101 | IRDA_ASSERT(self != NULL, return;); |
1102 | IRDA_ASSERT(self->magic == LAP_MAGIC, return;); | 1102 | IRDA_ASSERT(self->magic == LAP_MAGIC, return;); |
@@ -1156,7 +1156,7 @@ static inline void irlap_recv_i_frame(struct irlap_cb *self, | |||
1156 | static void irlap_recv_ui_frame(struct irlap_cb *self, struct sk_buff *skb, | 1156 | static void irlap_recv_ui_frame(struct irlap_cb *self, struct sk_buff *skb, |
1157 | struct irlap_info *info) | 1157 | struct irlap_info *info) |
1158 | { | 1158 | { |
1159 | IRDA_DEBUG( 4, "%s()\n", __FUNCTION__); | 1159 | IRDA_DEBUG( 4, "%s()\n", __func__); |
1160 | 1160 | ||
1161 | info->pf = skb->data[1] & PF_BIT; /* Final bit */ | 1161 | info->pf = skb->data[1] & PF_BIT; /* Final bit */ |
1162 | 1162 | ||
@@ -1175,7 +1175,7 @@ static void irlap_recv_frmr_frame(struct irlap_cb *self, struct sk_buff *skb, | |||
1175 | __u8 *frame; | 1175 | __u8 *frame; |
1176 | int w, x, y, z; | 1176 | int w, x, y, z; |
1177 | 1177 | ||
1178 | IRDA_DEBUG(0, "%s()\n", __FUNCTION__); | 1178 | IRDA_DEBUG(0, "%s()\n", __func__); |
1179 | 1179 | ||
1180 | IRDA_ASSERT(self != NULL, return;); | 1180 | IRDA_ASSERT(self != NULL, return;); |
1181 | IRDA_ASSERT(self->magic == LAP_MAGIC, return;); | 1181 | IRDA_ASSERT(self->magic == LAP_MAGIC, return;); |
@@ -1183,7 +1183,7 @@ static void irlap_recv_frmr_frame(struct irlap_cb *self, struct sk_buff *skb, | |||
1183 | IRDA_ASSERT(info != NULL, return;); | 1183 | IRDA_ASSERT(info != NULL, return;); |
1184 | 1184 | ||
1185 | if (!pskb_may_pull(skb, 4)) { | 1185 | if (!pskb_may_pull(skb, 4)) { |
1186 | IRDA_ERROR("%s: frame too short!\n", __FUNCTION__); | 1186 | IRDA_ERROR("%s: frame too short!\n", __func__); |
1187 | return; | 1187 | return; |
1188 | } | 1188 | } |
1189 | 1189 | ||
@@ -1269,10 +1269,10 @@ static void irlap_recv_test_frame(struct irlap_cb *self, struct sk_buff *skb, | |||
1269 | { | 1269 | { |
1270 | struct test_frame *frame; | 1270 | struct test_frame *frame; |
1271 | 1271 | ||
1272 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__); | 1272 | IRDA_DEBUG(2, "%s()\n", __func__); |
1273 | 1273 | ||
1274 | if (!pskb_may_pull(skb, sizeof(*frame))) { | 1274 | if (!pskb_may_pull(skb, sizeof(*frame))) { |
1275 | IRDA_ERROR("%s: frame too short!\n", __FUNCTION__); | 1275 | IRDA_ERROR("%s: frame too short!\n", __func__); |
1276 | return; | 1276 | return; |
1277 | } | 1277 | } |
1278 | frame = (struct test_frame *) skb->data; | 1278 | frame = (struct test_frame *) skb->data; |
@@ -1281,7 +1281,7 @@ static void irlap_recv_test_frame(struct irlap_cb *self, struct sk_buff *skb, | |||
1281 | if (info->caddr == CBROADCAST) { | 1281 | if (info->caddr == CBROADCAST) { |
1282 | if (skb->len < sizeof(struct test_frame)) { | 1282 | if (skb->len < sizeof(struct test_frame)) { |
1283 | IRDA_DEBUG(0, "%s() test frame too short!\n", | 1283 | IRDA_DEBUG(0, "%s() test frame too short!\n", |
1284 | __FUNCTION__); | 1284 | __func__); |
1285 | return; | 1285 | return; |
1286 | } | 1286 | } |
1287 | 1287 | ||
@@ -1342,14 +1342,14 @@ int irlap_driver_rcv(struct sk_buff *skb, struct net_device *dev, | |||
1342 | * share and non linear skbs. This should never happen, so | 1342 | * share and non linear skbs. This should never happen, so |
1343 | * we don't need to be clever about it. Jean II */ | 1343 | * we don't need to be clever about it. Jean II */ |
1344 | if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) { | 1344 | if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) { |
1345 | IRDA_ERROR("%s: can't clone shared skb!\n", __FUNCTION__); | 1345 | IRDA_ERROR("%s: can't clone shared skb!\n", __func__); |
1346 | dev_kfree_skb(skb); | 1346 | dev_kfree_skb(skb); |
1347 | return -1; | 1347 | return -1; |
1348 | } | 1348 | } |
1349 | 1349 | ||
1350 | /* Check if frame is large enough for parsing */ | 1350 | /* Check if frame is large enough for parsing */ |
1351 | if (!pskb_may_pull(skb, 2)) { | 1351 | if (!pskb_may_pull(skb, 2)) { |
1352 | IRDA_ERROR("%s: frame too short!\n", __FUNCTION__); | 1352 | IRDA_ERROR("%s: frame too short!\n", __func__); |
1353 | dev_kfree_skb(skb); | 1353 | dev_kfree_skb(skb); |
1354 | return -1; | 1354 | return -1; |
1355 | } | 1355 | } |
@@ -1365,7 +1365,7 @@ int irlap_driver_rcv(struct sk_buff *skb, struct net_device *dev, | |||
1365 | /* First we check if this frame has a valid connection address */ | 1365 | /* First we check if this frame has a valid connection address */ |
1366 | if ((info.caddr != self->caddr) && (info.caddr != CBROADCAST)) { | 1366 | if ((info.caddr != self->caddr) && (info.caddr != CBROADCAST)) { |
1367 | IRDA_DEBUG(0, "%s(), wrong connection address!\n", | 1367 | IRDA_DEBUG(0, "%s(), wrong connection address!\n", |
1368 | __FUNCTION__); | 1368 | __func__); |
1369 | goto out; | 1369 | goto out; |
1370 | } | 1370 | } |
1371 | /* | 1371 | /* |
@@ -1400,7 +1400,7 @@ int irlap_driver_rcv(struct sk_buff *skb, struct net_device *dev, | |||
1400 | break; | 1400 | break; |
1401 | default: | 1401 | default: |
1402 | IRDA_WARNING("%s: Unknown S-frame %02x received!\n", | 1402 | IRDA_WARNING("%s: Unknown S-frame %02x received!\n", |
1403 | __FUNCTION__, info.control); | 1403 | __func__, info.control); |
1404 | break; | 1404 | break; |
1405 | } | 1405 | } |
1406 | goto out; | 1406 | goto out; |
@@ -1438,7 +1438,7 @@ int irlap_driver_rcv(struct sk_buff *skb, struct net_device *dev, | |||
1438 | break; | 1438 | break; |
1439 | default: | 1439 | default: |
1440 | IRDA_WARNING("%s: Unknown frame %02x received!\n", | 1440 | IRDA_WARNING("%s: Unknown frame %02x received!\n", |
1441 | __FUNCTION__, info.control); | 1441 | __func__, info.control); |
1442 | break; | 1442 | break; |
1443 | } | 1443 | } |
1444 | out: | 1444 | out: |
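Several of the irlap_frame.c hunks above sit inside length guards: the receive paths call `pskb_may_pull()` to make sure enough linear data is present before reading header fields, and log the rejecting function via `__func__`. The following is a user-space analogue of that guard pattern, using a plain buffer instead of an skb and an illustrative header layout that is not the kernel's:

```c
/*
 * User-space analogue of the check-length-before-parse pattern seen
 * in irlap_recv_*(): reject short frames before touching any header
 * field, and report which function rejected them via __func__.
 * The xid_header layout here is illustrative only.
 */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct xid_header {		/* illustrative layout, not the kernel's */
	uint8_t caddr;
	uint8_t control;
	uint8_t format;
};

static int parse_frame(const uint8_t *buf, size_t len)
{
	if (len < sizeof(struct xid_header)) {
		fprintf(stderr, "%s: frame too short!\n", __func__);
		return -1;	/* bail out before reading any field */
	}
	const struct xid_header *hdr = (const struct xid_header *)buf;
	printf("%s(): caddr=%02x control=%02x\n", __func__,
	       hdr->caddr, hdr->control);
	return 0;
}

int main(void)
{
	uint8_t frame[3] = { 0xfe, 0x3f, 0x01 };

	parse_frame(frame, sizeof(frame));	/* parsed */
	parse_frame(frame, 2);			/* rejected: too short */
	return 0;
}
```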
diff --git a/net/irda/irlmp.c b/net/irda/irlmp.c index 135ac6907bbf..1f81f8e7c61d 100644 --- a/net/irda/irlmp.c +++ b/net/irda/irlmp.c | |||
@@ -76,7 +76,7 @@ const char *irlmp_reasons[] = { | |||
76 | */ | 76 | */ |
77 | int __init irlmp_init(void) | 77 | int __init irlmp_init(void) |
78 | { | 78 | { |
79 | IRDA_DEBUG(1, "%s()\n", __FUNCTION__); | 79 | IRDA_DEBUG(1, "%s()\n", __func__); |
80 | /* Initialize the irlmp structure. */ | 80 | /* Initialize the irlmp structure. */ |
81 | irlmp = kzalloc( sizeof(struct irlmp_cb), GFP_KERNEL); | 81 | irlmp = kzalloc( sizeof(struct irlmp_cb), GFP_KERNEL); |
82 | if (irlmp == NULL) | 82 | if (irlmp == NULL) |
@@ -164,7 +164,7 @@ struct lsap_cb *irlmp_open_lsap(__u8 slsap_sel, notify_t *notify, __u8 pid) | |||
164 | /* Allocate new instance of a LSAP connection */ | 164 | /* Allocate new instance of a LSAP connection */ |
165 | self = kzalloc(sizeof(struct lsap_cb), GFP_ATOMIC); | 165 | self = kzalloc(sizeof(struct lsap_cb), GFP_ATOMIC); |
166 | if (self == NULL) { | 166 | if (self == NULL) { |
167 | IRDA_ERROR("%s: can't allocate memory\n", __FUNCTION__); | 167 | IRDA_ERROR("%s: can't allocate memory\n", __func__); |
168 | return NULL; | 168 | return NULL; |
169 | } | 169 | } |
170 | 170 | ||
@@ -202,7 +202,7 @@ EXPORT_SYMBOL(irlmp_open_lsap); | |||
202 | */ | 202 | */ |
203 | static void __irlmp_close_lsap(struct lsap_cb *self) | 203 | static void __irlmp_close_lsap(struct lsap_cb *self) |
204 | { | 204 | { |
205 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__); | 205 | IRDA_DEBUG(4, "%s()\n", __func__); |
206 | 206 | ||
207 | IRDA_ASSERT(self != NULL, return;); | 207 | IRDA_ASSERT(self != NULL, return;); |
208 | IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;); | 208 | IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;); |
@@ -264,7 +264,7 @@ void irlmp_close_lsap(struct lsap_cb *self) | |||
264 | if (!lsap) { | 264 | if (!lsap) { |
265 | IRDA_DEBUG(0, | 265 | IRDA_DEBUG(0, |
266 | "%s(), Looks like somebody has removed me already!\n", | 266 | "%s(), Looks like somebody has removed me already!\n", |
267 | __FUNCTION__); | 267 | __func__); |
268 | return; | 268 | return; |
269 | } | 269 | } |
270 | __irlmp_close_lsap(self); | 270 | __irlmp_close_lsap(self); |
@@ -291,7 +291,7 @@ void irlmp_register_link(struct irlap_cb *irlap, __u32 saddr, notify_t *notify) | |||
291 | */ | 291 | */ |
292 | lap = kzalloc(sizeof(struct lap_cb), GFP_KERNEL); | 292 | lap = kzalloc(sizeof(struct lap_cb), GFP_KERNEL); |
293 | if (lap == NULL) { | 293 | if (lap == NULL) { |
294 | IRDA_ERROR("%s: unable to kmalloc\n", __FUNCTION__); | 294 | IRDA_ERROR("%s: unable to kmalloc\n", __func__); |
295 | return; | 295 | return; |
296 | } | 296 | } |
297 | 297 | ||
@@ -304,7 +304,7 @@ void irlmp_register_link(struct irlap_cb *irlap, __u32 saddr, notify_t *notify) | |||
304 | #endif | 304 | #endif |
305 | lap->lsaps = hashbin_new(HB_LOCK); | 305 | lap->lsaps = hashbin_new(HB_LOCK); |
306 | if (lap->lsaps == NULL) { | 306 | if (lap->lsaps == NULL) { |
307 | IRDA_WARNING("%s(), unable to kmalloc lsaps\n", __FUNCTION__); | 307 | IRDA_WARNING("%s(), unable to kmalloc lsaps\n", __func__); |
308 | kfree(lap); | 308 | kfree(lap); |
309 | return; | 309 | return; |
310 | } | 310 | } |
@@ -336,7 +336,7 @@ void irlmp_unregister_link(__u32 saddr) | |||
336 | { | 336 | { |
337 | struct lap_cb *link; | 337 | struct lap_cb *link; |
338 | 338 | ||
339 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__); | 339 | IRDA_DEBUG(4, "%s()\n", __func__); |
340 | 340 | ||
341 | /* We must remove ourselves from the hashbin *first*. This ensure | 341 | /* We must remove ourselves from the hashbin *first*. This ensure |
342 | * that no more LSAPs will be open on this link and no discovery | 342 | * that no more LSAPs will be open on this link and no discovery |
@@ -381,7 +381,7 @@ int irlmp_connect_request(struct lsap_cb *self, __u8 dlsap_sel, | |||
381 | 381 | ||
382 | IRDA_DEBUG(2, | 382 | IRDA_DEBUG(2, |
383 | "%s(), slsap_sel=%02x, dlsap_sel=%02x, saddr=%08x, daddr=%08x\n", | 383 | "%s(), slsap_sel=%02x, dlsap_sel=%02x, saddr=%08x, daddr=%08x\n", |
384 | __FUNCTION__, self->slsap_sel, dlsap_sel, saddr, daddr); | 384 | __func__, self->slsap_sel, dlsap_sel, saddr, daddr); |
385 | 385 | ||
386 | if (test_bit(0, &self->connected)) { | 386 | if (test_bit(0, &self->connected)) { |
387 | ret = -EISCONN; | 387 | ret = -EISCONN; |
@@ -425,7 +425,7 @@ int irlmp_connect_request(struct lsap_cb *self, __u8 dlsap_sel, | |||
425 | if (daddr != DEV_ADDR_ANY) | 425 | if (daddr != DEV_ADDR_ANY) |
426 | discovery = hashbin_find(irlmp->cachelog, daddr, NULL); | 426 | discovery = hashbin_find(irlmp->cachelog, daddr, NULL); |
427 | else { | 427 | else { |
428 | IRDA_DEBUG(2, "%s(), no daddr\n", __FUNCTION__); | 428 | IRDA_DEBUG(2, "%s(), no daddr\n", __func__); |
429 | discovery = (discovery_t *) | 429 | discovery = (discovery_t *) |
430 | hashbin_get_first(irlmp->cachelog); | 430 | hashbin_get_first(irlmp->cachelog); |
431 | } | 431 | } |
@@ -438,7 +438,7 @@ int irlmp_connect_request(struct lsap_cb *self, __u8 dlsap_sel, | |||
438 | } | 438 | } |
439 | lap = hashbin_lock_find(irlmp->links, saddr, NULL); | 439 | lap = hashbin_lock_find(irlmp->links, saddr, NULL); |
440 | if (lap == NULL) { | 440 | if (lap == NULL) { |
441 | IRDA_DEBUG(1, "%s(), Unable to find a usable link!\n", __FUNCTION__); | 441 | IRDA_DEBUG(1, "%s(), Unable to find a usable link!\n", __func__); |
442 | ret = -EHOSTUNREACH; | 442 | ret = -EHOSTUNREACH; |
443 | goto err; | 443 | goto err; |
444 | } | 444 | } |
@@ -453,14 +453,14 @@ int irlmp_connect_request(struct lsap_cb *self, __u8 dlsap_sel, | |||
453 | * disconnected yet (waiting for timeout in LAP). | 453 | * disconnected yet (waiting for timeout in LAP). |
454 | * Maybe we could give LAP a bit of help in this case. | 454 | * Maybe we could give LAP a bit of help in this case. |
455 | */ | 455 | */ |
456 | IRDA_DEBUG(0, "%s(), sorry, but I'm waiting for LAP to timeout!\n", __FUNCTION__); | 456 | IRDA_DEBUG(0, "%s(), sorry, but I'm waiting for LAP to timeout!\n", __func__); |
457 | ret = -EAGAIN; | 457 | ret = -EAGAIN; |
458 | goto err; | 458 | goto err; |
459 | } | 459 | } |
460 | 460 | ||
461 | /* LAP is already connected to a different node, and LAP | 461 | /* LAP is already connected to a different node, and LAP |
462 | * can only talk to one node at a time */ | 462 | * can only talk to one node at a time */ |
463 | IRDA_DEBUG(0, "%s(), sorry, but link is busy!\n", __FUNCTION__); | 463 | IRDA_DEBUG(0, "%s(), sorry, but link is busy!\n", __func__); |
464 | ret = -EBUSY; | 464 | ret = -EBUSY; |
465 | goto err; | 465 | goto err; |
466 | } | 466 | } |
@@ -522,7 +522,7 @@ void irlmp_connect_indication(struct lsap_cb *self, struct sk_buff *skb) | |||
522 | IRDA_ASSERT(self->lap != NULL, return;); | 522 | IRDA_ASSERT(self->lap != NULL, return;); |
523 | 523 | ||
524 | IRDA_DEBUG(2, "%s(), slsap_sel=%02x, dlsap_sel=%02x\n", | 524 | IRDA_DEBUG(2, "%s(), slsap_sel=%02x, dlsap_sel=%02x\n", |
525 | __FUNCTION__, self->slsap_sel, self->dlsap_sel); | 525 | __func__, self->slsap_sel, self->dlsap_sel); |
526 | 526 | ||
527 | /* Note : self->lap is set in irlmp_link_data_indication(), | 527 | /* Note : self->lap is set in irlmp_link_data_indication(), |
528 | * (case CONNECT_CMD:) because we have no way to set it here. | 528 | * (case CONNECT_CMD:) because we have no way to set it here. |
@@ -563,7 +563,7 @@ int irlmp_connect_response(struct lsap_cb *self, struct sk_buff *userdata) | |||
563 | * in the state machine itself. Jean II */ | 563 | * in the state machine itself. Jean II */ |
564 | 564 | ||
565 | IRDA_DEBUG(2, "%s(), slsap_sel=%02x, dlsap_sel=%02x\n", | 565 | IRDA_DEBUG(2, "%s(), slsap_sel=%02x, dlsap_sel=%02x\n", |
566 | __FUNCTION__, self->slsap_sel, self->dlsap_sel); | 566 | __func__, self->slsap_sel, self->dlsap_sel); |
567 | 567 | ||
568 | /* Make room for MUX control header (3 bytes) */ | 568 | /* Make room for MUX control header (3 bytes) */ |
569 | IRDA_ASSERT(skb_headroom(userdata) >= LMP_CONTROL_HEADER, return -1;); | 569 | IRDA_ASSERT(skb_headroom(userdata) >= LMP_CONTROL_HEADER, return -1;); |
@@ -589,7 +589,7 @@ void irlmp_connect_confirm(struct lsap_cb *self, struct sk_buff *skb) | |||
589 | int lap_header_size; | 589 | int lap_header_size; |
590 | int max_seg_size; | 590 | int max_seg_size; |
591 | 591 | ||
592 | IRDA_DEBUG(3, "%s()\n", __FUNCTION__); | 592 | IRDA_DEBUG(3, "%s()\n", __func__); |
593 | 593 | ||
594 | IRDA_ASSERT(skb != NULL, return;); | 594 | IRDA_ASSERT(skb != NULL, return;); |
595 | IRDA_ASSERT(self != NULL, return;); | 595 | IRDA_ASSERT(self != NULL, return;); |
@@ -603,7 +603,7 @@ void irlmp_connect_confirm(struct lsap_cb *self, struct sk_buff *skb) | |||
603 | max_header_size = LMP_HEADER + lap_header_size; | 603 | max_header_size = LMP_HEADER + lap_header_size; |
604 | 604 | ||
605 | IRDA_DEBUG(2, "%s(), max_header_size=%d\n", | 605 | IRDA_DEBUG(2, "%s(), max_header_size=%d\n", |
606 | __FUNCTION__, max_header_size); | 606 | __func__, max_header_size); |
607 | 607 | ||
608 | /* Hide LMP_CONTROL_HEADER header from layer above */ | 608 | /* Hide LMP_CONTROL_HEADER header from layer above */ |
609 | skb_pull(skb, LMP_CONTROL_HEADER); | 609 | skb_pull(skb, LMP_CONTROL_HEADER); |
@@ -629,7 +629,7 @@ struct lsap_cb *irlmp_dup(struct lsap_cb *orig, void *instance) | |||
629 | struct lsap_cb *new; | 629 | struct lsap_cb *new; |
630 | unsigned long flags; | 630 | unsigned long flags; |
631 | 631 | ||
632 | IRDA_DEBUG(1, "%s()\n", __FUNCTION__); | 632 | IRDA_DEBUG(1, "%s()\n", __func__); |
633 | 633 | ||
634 | spin_lock_irqsave(&irlmp->unconnected_lsaps->hb_spinlock, flags); | 634 | spin_lock_irqsave(&irlmp->unconnected_lsaps->hb_spinlock, flags); |
635 | 635 | ||
@@ -638,7 +638,7 @@ struct lsap_cb *irlmp_dup(struct lsap_cb *orig, void *instance) | |||
638 | if ((!hashbin_find(irlmp->unconnected_lsaps, (long) orig, NULL)) || | 638 | if ((!hashbin_find(irlmp->unconnected_lsaps, (long) orig, NULL)) || |
639 | (orig->lap == NULL)) { | 639 | (orig->lap == NULL)) { |
640 | IRDA_DEBUG(0, "%s(), invalid LSAP (wrong state)\n", | 640 | IRDA_DEBUG(0, "%s(), invalid LSAP (wrong state)\n", |
641 | __FUNCTION__); | 641 | __func__); |
642 | spin_unlock_irqrestore(&irlmp->unconnected_lsaps->hb_spinlock, | 642 | spin_unlock_irqrestore(&irlmp->unconnected_lsaps->hb_spinlock, |
643 | flags); | 643 | flags); |
644 | return NULL; | 644 | return NULL; |
@@ -647,7 +647,7 @@ struct lsap_cb *irlmp_dup(struct lsap_cb *orig, void *instance) | |||
647 | /* Allocate a new instance */ | 647 | /* Allocate a new instance */ |
648 | new = kmemdup(orig, sizeof(*new), GFP_ATOMIC); | 648 | new = kmemdup(orig, sizeof(*new), GFP_ATOMIC); |
649 | if (!new) { | 649 | if (!new) { |
650 | IRDA_DEBUG(0, "%s(), unable to kmalloc\n", __FUNCTION__); | 650 | IRDA_DEBUG(0, "%s(), unable to kmalloc\n", __func__); |
651 | spin_unlock_irqrestore(&irlmp->unconnected_lsaps->hb_spinlock, | 651 | spin_unlock_irqrestore(&irlmp->unconnected_lsaps->hb_spinlock, |
652 | flags); | 652 | flags); |
653 | return NULL; | 653 | return NULL; |
@@ -693,7 +693,7 @@ int irlmp_disconnect_request(struct lsap_cb *self, struct sk_buff *userdata) | |||
693 | * and us that might mess up the hashbins below. This fixes it. | 693 | * and us that might mess up the hashbins below. This fixes it. |
694 | * Jean II */ | 694 | * Jean II */ |
695 | if (! test_and_clear_bit(0, &self->connected)) { | 695 | if (! test_and_clear_bit(0, &self->connected)) { |
696 | IRDA_DEBUG(0, "%s(), already disconnected!\n", __FUNCTION__); | 696 | IRDA_DEBUG(0, "%s(), already disconnected!\n", __func__); |
697 | dev_kfree_skb(userdata); | 697 | dev_kfree_skb(userdata); |
698 | return -1; | 698 | return -1; |
699 | } | 699 | } |
@@ -747,19 +747,19 @@ void irlmp_disconnect_indication(struct lsap_cb *self, LM_REASON reason, | |||
747 | { | 747 | { |
748 | struct lsap_cb *lsap; | 748 | struct lsap_cb *lsap; |
749 | 749 | ||
750 | IRDA_DEBUG(1, "%s(), reason=%s\n", __FUNCTION__, irlmp_reasons[reason]); | 750 | IRDA_DEBUG(1, "%s(), reason=%s\n", __func__, irlmp_reasons[reason]); |
751 | IRDA_ASSERT(self != NULL, return;); | 751 | IRDA_ASSERT(self != NULL, return;); |
752 | IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;); | 752 | IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;); |
753 | 753 | ||
754 | IRDA_DEBUG(3, "%s(), slsap_sel=%02x, dlsap_sel=%02x\n", | 754 | IRDA_DEBUG(3, "%s(), slsap_sel=%02x, dlsap_sel=%02x\n", |
755 | __FUNCTION__, self->slsap_sel, self->dlsap_sel); | 755 | __func__, self->slsap_sel, self->dlsap_sel); |
756 | 756 | ||
757 | /* Already disconnected ? | 757 | /* Already disconnected ? |
758 | * There is a race condition between irlmp_disconnect_request() | 758 | * There is a race condition between irlmp_disconnect_request() |
759 | * and us that might mess up the hashbins below. This fixes it. | 759 | * and us that might mess up the hashbins below. This fixes it. |
760 | * Jean II */ | 760 | * Jean II */ |
761 | if (! test_and_clear_bit(0, &self->connected)) { | 761 | if (! test_and_clear_bit(0, &self->connected)) { |
762 | IRDA_DEBUG(0, "%s(), already disconnected!\n", __FUNCTION__); | 762 | IRDA_DEBUG(0, "%s(), already disconnected!\n", __func__); |
763 | return; | 763 | return; |
764 | } | 764 | } |
765 | 765 | ||
@@ -792,7 +792,7 @@ void irlmp_disconnect_indication(struct lsap_cb *self, LM_REASON reason, | |||
792 | self->notify.disconnect_indication(self->notify.instance, | 792 | self->notify.disconnect_indication(self->notify.instance, |
793 | self, reason, skb); | 793 | self, reason, skb); |
794 | } else { | 794 | } else { |
795 | IRDA_DEBUG(0, "%s(), no handler\n", __FUNCTION__); | 795 | IRDA_DEBUG(0, "%s(), no handler\n", __func__); |
796 | } | 796 | } |
797 | } | 797 | } |
798 | 798 | ||
@@ -845,7 +845,7 @@ void irlmp_do_discovery(int nslots) | |||
845 | /* Make sure the value is sane */ | 845 | /* Make sure the value is sane */ |
846 | if ((nslots != 1) && (nslots != 6) && (nslots != 8) && (nslots != 16)){ | 846 | if ((nslots != 1) && (nslots != 6) && (nslots != 8) && (nslots != 16)){ |
847 | IRDA_WARNING("%s: invalid value for number of slots!\n", | 847 | IRDA_WARNING("%s: invalid value for number of slots!\n", |
848 | __FUNCTION__); | 848 | __func__); |
849 | nslots = sysctl_discovery_slots = 8; | 849 | nslots = sysctl_discovery_slots = 8; |
850 | } | 850 | } |
851 | 851 | ||
@@ -963,7 +963,7 @@ irlmp_notify_client(irlmp_client_t *client, | |||
963 | int number; /* Number of nodes in the log */ | 963 | int number; /* Number of nodes in the log */ |
964 | int i; | 964 | int i; |
965 | 965 | ||
966 | IRDA_DEBUG(3, "%s()\n", __FUNCTION__); | 966 | IRDA_DEBUG(3, "%s()\n", __func__); |
967 | 967 | ||
968 | /* Check if client wants or not partial/selective log (optimisation) */ | 968 | /* Check if client wants or not partial/selective log (optimisation) */ |
969 | if (!client->disco_callback) | 969 | if (!client->disco_callback) |
@@ -1014,7 +1014,7 @@ void irlmp_discovery_confirm(hashbin_t *log, DISCOVERY_MODE mode) | |||
1014 | irlmp_client_t *client; | 1014 | irlmp_client_t *client; |
1015 | irlmp_client_t *client_next; | 1015 | irlmp_client_t *client_next; |
1016 | 1016 | ||
1017 | IRDA_DEBUG(3, "%s()\n", __FUNCTION__); | 1017 | IRDA_DEBUG(3, "%s()\n", __func__); |
1018 | 1018 | ||
1019 | IRDA_ASSERT(log != NULL, return;); | 1019 | IRDA_ASSERT(log != NULL, return;); |
1020 | 1020 | ||
@@ -1049,7 +1049,7 @@ void irlmp_discovery_expiry(discinfo_t *expiries, int number) | |||
1049 | irlmp_client_t *client_next; | 1049 | irlmp_client_t *client_next; |
1050 | int i; | 1050 | int i; |
1051 | 1051 | ||
1052 | IRDA_DEBUG(3, "%s()\n", __FUNCTION__); | 1052 | IRDA_DEBUG(3, "%s()\n", __func__); |
1053 | 1053 | ||
1054 | IRDA_ASSERT(expiries != NULL, return;); | 1054 | IRDA_ASSERT(expiries != NULL, return;); |
1055 | 1055 | ||
@@ -1082,7 +1082,7 @@ void irlmp_discovery_expiry(discinfo_t *expiries, int number) | |||
1082 | */ | 1082 | */ |
1083 | discovery_t *irlmp_get_discovery_response(void) | 1083 | discovery_t *irlmp_get_discovery_response(void) |
1084 | { | 1084 | { |
1085 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__); | 1085 | IRDA_DEBUG(4, "%s()\n", __func__); |
1086 | 1086 | ||
1087 | IRDA_ASSERT(irlmp != NULL, return NULL;); | 1087 | IRDA_ASSERT(irlmp != NULL, return NULL;); |
1088 | 1088 | ||
@@ -1160,7 +1160,7 @@ int irlmp_udata_request(struct lsap_cb *self, struct sk_buff *userdata) | |||
1160 | { | 1160 | { |
1161 | int ret; | 1161 | int ret; |
1162 | 1162 | ||
1163 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__); | 1163 | IRDA_DEBUG(4, "%s()\n", __func__); |
1164 | 1164 | ||
1165 | IRDA_ASSERT(userdata != NULL, return -1;); | 1165 | IRDA_ASSERT(userdata != NULL, return -1;); |
1166 | 1166 | ||
@@ -1184,7 +1184,7 @@ int irlmp_udata_request(struct lsap_cb *self, struct sk_buff *userdata) | |||
1184 | */ | 1184 | */ |
1185 | void irlmp_udata_indication(struct lsap_cb *self, struct sk_buff *skb) | 1185 | void irlmp_udata_indication(struct lsap_cb *self, struct sk_buff *skb) |
1186 | { | 1186 | { |
1187 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__); | 1187 | IRDA_DEBUG(4, "%s()\n", __func__); |
1188 | 1188 | ||
1189 | IRDA_ASSERT(self != NULL, return;); | 1189 | IRDA_ASSERT(self != NULL, return;); |
1190 | IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;); | 1190 | IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;); |
@@ -1211,7 +1211,7 @@ int irlmp_connless_data_request(struct lsap_cb *self, struct sk_buff *userdata, | |||
1211 | struct sk_buff *clone_skb; | 1211 | struct sk_buff *clone_skb; |
1212 | struct lap_cb *lap; | 1212 | struct lap_cb *lap; |
1213 | 1213 | ||
1214 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__); | 1214 | IRDA_DEBUG(4, "%s()\n", __func__); |
1215 | 1215 | ||
1216 | IRDA_ASSERT(userdata != NULL, return -1;); | 1216 | IRDA_ASSERT(userdata != NULL, return -1;); |
1217 | 1217 | ||
@@ -1262,7 +1262,7 @@ int irlmp_connless_data_request(struct lsap_cb *self, struct sk_buff *userdata, | |||
1262 | #ifdef CONFIG_IRDA_ULTRA | 1262 | #ifdef CONFIG_IRDA_ULTRA |
1263 | void irlmp_connless_data_indication(struct lsap_cb *self, struct sk_buff *skb) | 1263 | void irlmp_connless_data_indication(struct lsap_cb *self, struct sk_buff *skb) |
1264 | { | 1264 | { |
1265 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__); | 1265 | IRDA_DEBUG(4, "%s()\n", __func__); |
1266 | 1266 | ||
1267 | IRDA_ASSERT(self != NULL, return;); | 1267 | IRDA_ASSERT(self != NULL, return;); |
1268 | IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;); | 1268 | IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;); |
@@ -1305,7 +1305,7 @@ void irlmp_status_indication(struct lap_cb *self, | |||
1305 | curr->notify.status_indication(curr->notify.instance, | 1305 | curr->notify.status_indication(curr->notify.instance, |
1306 | link, lock); | 1306 | link, lock); |
1307 | else | 1307 | else |
1308 | IRDA_DEBUG(2, "%s(), no handler\n", __FUNCTION__); | 1308 | IRDA_DEBUG(2, "%s(), no handler\n", __func__); |
1309 | 1309 | ||
1310 | curr = next; | 1310 | curr = next; |
1311 | } | 1311 | } |
@@ -1333,7 +1333,7 @@ void irlmp_flow_indication(struct lap_cb *self, LOCAL_FLOW flow) | |||
1333 | /* Get the number of lsap. That's the only safe way to know | 1333 | /* Get the number of lsap. That's the only safe way to know |
1334 | * that we have looped around... - Jean II */ | 1334 | * that we have looped around... - Jean II */ |
1335 | lsap_todo = HASHBIN_GET_SIZE(self->lsaps); | 1335 | lsap_todo = HASHBIN_GET_SIZE(self->lsaps); |
1336 | IRDA_DEBUG(4, "%s() : %d lsaps to scan\n", __FUNCTION__, lsap_todo); | 1336 | IRDA_DEBUG(4, "%s() : %d lsaps to scan\n", __func__, lsap_todo); |
1337 | 1337 | ||
1338 | /* Poll lsap in order until the queue is full or until we | 1338 | /* Poll lsap in order until the queue is full or until we |
1339 | * tried them all. | 1339 | * tried them all. |
@@ -1352,14 +1352,14 @@ void irlmp_flow_indication(struct lap_cb *self, LOCAL_FLOW flow) | |||
1352 | /* Uh-oh... Paranoia */ | 1352 | /* Uh-oh... Paranoia */ |
1353 | if(curr == NULL) | 1353 | if(curr == NULL) |
1354 | break; | 1354 | break; |
1355 | IRDA_DEBUG(4, "%s() : curr is %p, next was %p and is now %p, still %d to go - queue len = %d\n", __FUNCTION__, curr, next, self->flow_next, lsap_todo, IRLAP_GET_TX_QUEUE_LEN(self->irlap)); | 1355 | IRDA_DEBUG(4, "%s() : curr is %p, next was %p and is now %p, still %d to go - queue len = %d\n", __func__, curr, next, self->flow_next, lsap_todo, IRLAP_GET_TX_QUEUE_LEN(self->irlap)); |
1356 | 1356 | ||
1357 | /* Inform lsap user that it can send one more packet. */ | 1357 | /* Inform lsap user that it can send one more packet. */ |
1358 | if (curr->notify.flow_indication != NULL) | 1358 | if (curr->notify.flow_indication != NULL) |
1359 | curr->notify.flow_indication(curr->notify.instance, | 1359 | curr->notify.flow_indication(curr->notify.instance, |
1360 | curr, flow); | 1360 | curr, flow); |
1361 | else | 1361 | else |
1362 | IRDA_DEBUG(1, "%s(), no handler\n", __FUNCTION__); | 1362 | IRDA_DEBUG(1, "%s(), no handler\n", __func__); |
1363 | } | 1363 | } |
1364 | } | 1364 | } |
1365 | 1365 | ||
@@ -1381,7 +1381,7 @@ __u8 *irlmp_hint_to_service(__u8 *hint) | |||
1381 | */ | 1381 | */ |
1382 | service = kmalloc(16, GFP_ATOMIC); | 1382 | service = kmalloc(16, GFP_ATOMIC); |
1383 | if (!service) { | 1383 | if (!service) { |
1384 | IRDA_DEBUG(1, "%s(), Unable to kmalloc!\n", __FUNCTION__); | 1384 | IRDA_DEBUG(1, "%s(), Unable to kmalloc!\n", __func__); |
1385 | return NULL; | 1385 | return NULL; |
1386 | } | 1386 | } |
1387 | 1387 | ||
@@ -1482,12 +1482,12 @@ void *irlmp_register_service(__u16 hints) | |||
1482 | { | 1482 | { |
1483 | irlmp_service_t *service; | 1483 | irlmp_service_t *service; |
1484 | 1484 | ||
1485 | IRDA_DEBUG(4, "%s(), hints = %04x\n", __FUNCTION__, hints); | 1485 | IRDA_DEBUG(4, "%s(), hints = %04x\n", __func__, hints); |
1486 | 1486 | ||
1487 | /* Make a new registration */ | 1487 | /* Make a new registration */ |
1488 | service = kmalloc(sizeof(irlmp_service_t), GFP_ATOMIC); | 1488 | service = kmalloc(sizeof(irlmp_service_t), GFP_ATOMIC); |
1489 | if (!service) { | 1489 | if (!service) { |
1490 | IRDA_DEBUG(1, "%s(), Unable to kmalloc!\n", __FUNCTION__); | 1490 | IRDA_DEBUG(1, "%s(), Unable to kmalloc!\n", __func__); |
1491 | return NULL; | 1491 | return NULL; |
1492 | } | 1492 | } |
1493 | service->hints.word = hints; | 1493 | service->hints.word = hints; |
@@ -1512,7 +1512,7 @@ int irlmp_unregister_service(void *handle) | |||
1512 | irlmp_service_t *service; | 1512 | irlmp_service_t *service; |
1513 | unsigned long flags; | 1513 | unsigned long flags; |
1514 | 1514 | ||
1515 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__); | 1515 | IRDA_DEBUG(4, "%s()\n", __func__); |
1516 | 1516 | ||
1517 | if (!handle) | 1517 | if (!handle) |
1518 | return -1; | 1518 | return -1; |
@@ -1520,7 +1520,7 @@ int irlmp_unregister_service(void *handle) | |||
1520 | /* Caller may call with invalid handle (it's legal) - Jean II */ | 1520 | /* Caller may call with invalid handle (it's legal) - Jean II */ |
1521 | service = hashbin_lock_find(irlmp->services, (long) handle, NULL); | 1521 | service = hashbin_lock_find(irlmp->services, (long) handle, NULL); |
1522 | if (!service) { | 1522 | if (!service) { |
1523 | IRDA_DEBUG(1, "%s(), Unknown service!\n", __FUNCTION__); | 1523 | IRDA_DEBUG(1, "%s(), Unknown service!\n", __func__); |
1524 | return -1; | 1524 | return -1; |
1525 | } | 1525 | } |
1526 | 1526 | ||
@@ -1557,13 +1557,13 @@ void *irlmp_register_client(__u16 hint_mask, DISCOVERY_CALLBACK1 disco_clb, | |||
1557 | { | 1557 | { |
1558 | irlmp_client_t *client; | 1558 | irlmp_client_t *client; |
1559 | 1559 | ||
1560 | IRDA_DEBUG(1, "%s()\n", __FUNCTION__); | 1560 | IRDA_DEBUG(1, "%s()\n", __func__); |
1561 | IRDA_ASSERT(irlmp != NULL, return NULL;); | 1561 | IRDA_ASSERT(irlmp != NULL, return NULL;); |
1562 | 1562 | ||
1563 | /* Make a new registration */ | 1563 | /* Make a new registration */ |
1564 | client = kmalloc(sizeof(irlmp_client_t), GFP_ATOMIC); | 1564 | client = kmalloc(sizeof(irlmp_client_t), GFP_ATOMIC); |
1565 | if (!client) { | 1565 | if (!client) { |
1566 | IRDA_DEBUG( 1, "%s(), Unable to kmalloc!\n", __FUNCTION__); | 1566 | IRDA_DEBUG( 1, "%s(), Unable to kmalloc!\n", __func__); |
1567 | return NULL; | 1567 | return NULL; |
1568 | } | 1568 | } |
1569 | 1569 | ||
@@ -1599,7 +1599,7 @@ int irlmp_update_client(void *handle, __u16 hint_mask, | |||
1599 | 1599 | ||
1600 | client = hashbin_lock_find(irlmp->clients, (long) handle, NULL); | 1600 | client = hashbin_lock_find(irlmp->clients, (long) handle, NULL); |
1601 | if (!client) { | 1601 | if (!client) { |
1602 | IRDA_DEBUG(1, "%s(), Unknown client!\n", __FUNCTION__); | 1602 | IRDA_DEBUG(1, "%s(), Unknown client!\n", __func__); |
1603 | return -1; | 1603 | return -1; |
1604 | } | 1604 | } |
1605 | 1605 | ||
@@ -1622,7 +1622,7 @@ int irlmp_unregister_client(void *handle) | |||
1622 | { | 1622 | { |
1623 | struct irlmp_client *client; | 1623 | struct irlmp_client *client; |
1624 | 1624 | ||
1625 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__); | 1625 | IRDA_DEBUG(4, "%s()\n", __func__); |
1626 | 1626 | ||
1627 | if (!handle) | 1627 | if (!handle) |
1628 | return -1; | 1628 | return -1; |
@@ -1630,11 +1630,11 @@ int irlmp_unregister_client(void *handle) | |||
1630 | /* Caller may call with invalid handle (it's legal) - Jean II */ | 1630 | /* Caller may call with invalid handle (it's legal) - Jean II */ |
1631 | client = hashbin_lock_find(irlmp->clients, (long) handle, NULL); | 1631 | client = hashbin_lock_find(irlmp->clients, (long) handle, NULL); |
1632 | if (!client) { | 1632 | if (!client) { |
1633 | IRDA_DEBUG(1, "%s(), Unknown client!\n", __FUNCTION__); | 1633 | IRDA_DEBUG(1, "%s(), Unknown client!\n", __func__); |
1634 | return -1; | 1634 | return -1; |
1635 | } | 1635 | } |
1636 | 1636 | ||
1637 | IRDA_DEBUG(4, "%s(), removing client!\n", __FUNCTION__); | 1637 | IRDA_DEBUG(4, "%s(), removing client!\n", __func__); |
1638 | hashbin_remove_this(irlmp->clients, (irda_queue_t *) client); | 1638 | hashbin_remove_this(irlmp->clients, (irda_queue_t *) client); |
1639 | kfree(client); | 1639 | kfree(client); |
1640 | 1640 | ||
@@ -1663,7 +1663,7 @@ static int irlmp_slsap_inuse(__u8 slsap_sel) | |||
1663 | IRDA_ASSERT(irlmp->magic == LMP_MAGIC, return TRUE;); | 1663 | IRDA_ASSERT(irlmp->magic == LMP_MAGIC, return TRUE;); |
1664 | IRDA_ASSERT(slsap_sel != LSAP_ANY, return TRUE;); | 1664 | IRDA_ASSERT(slsap_sel != LSAP_ANY, return TRUE;); |
1665 | 1665 | ||
1666 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__); | 1666 | IRDA_DEBUG(4, "%s()\n", __func__); |
1667 | 1667 | ||
1668 | #ifdef CONFIG_IRDA_ULTRA | 1668 | #ifdef CONFIG_IRDA_ULTRA |
1669 | /* Accept all bindings to the connectionless LSAP */ | 1669 | /* Accept all bindings to the connectionless LSAP */ |
@@ -1790,7 +1790,7 @@ static __u8 irlmp_find_free_slsap(void) | |||
1790 | /* Make sure we terminate the loop */ | 1790 | /* Make sure we terminate the loop */ |
1791 | if (wrapped++) { | 1791 | if (wrapped++) { |
1792 | IRDA_ERROR("%s: no more free LSAPs !\n", | 1792 | IRDA_ERROR("%s: no more free LSAPs !\n", |
1793 | __FUNCTION__); | 1793 | __func__); |
1794 | return 0; | 1794 | return 0; |
1795 | } | 1795 | } |
1796 | } | 1796 | } |
@@ -1805,7 +1805,7 @@ static __u8 irlmp_find_free_slsap(void) | |||
1805 | /* Got it ! */ | 1805 | /* Got it ! */ |
1806 | lsap_sel = irlmp->last_lsap_sel; | 1806 | lsap_sel = irlmp->last_lsap_sel; |
1807 | IRDA_DEBUG(4, "%s(), found free lsap_sel=%02x\n", | 1807 | IRDA_DEBUG(4, "%s(), found free lsap_sel=%02x\n", |
1808 | __FUNCTION__, lsap_sel); | 1808 | __func__, lsap_sel); |
1809 | 1809 | ||
1810 | return lsap_sel; | 1810 | return lsap_sel; |
1811 | } | 1811 | } |
@@ -1823,26 +1823,26 @@ LM_REASON irlmp_convert_lap_reason( LAP_REASON lap_reason) | |||
1823 | 1823 | ||
1824 | switch (lap_reason) { | 1824 | switch (lap_reason) { |
1825 | case LAP_DISC_INDICATION: /* Received a disconnect request from peer */ | 1825 | case LAP_DISC_INDICATION: /* Received a disconnect request from peer */ |
1826 | IRDA_DEBUG( 1, "%s(), LAP_DISC_INDICATION\n", __FUNCTION__); | 1826 | IRDA_DEBUG( 1, "%s(), LAP_DISC_INDICATION\n", __func__); |
1827 | reason = LM_USER_REQUEST; | 1827 | reason = LM_USER_REQUEST; |
1828 | break; | 1828 | break; |
1829 | case LAP_NO_RESPONSE: /* To many retransmits without response */ | 1829 | case LAP_NO_RESPONSE: /* To many retransmits without response */ |
1830 | IRDA_DEBUG( 1, "%s(), LAP_NO_RESPONSE\n", __FUNCTION__); | 1830 | IRDA_DEBUG( 1, "%s(), LAP_NO_RESPONSE\n", __func__); |
1831 | reason = LM_LAP_DISCONNECT; | 1831 | reason = LM_LAP_DISCONNECT; |
1832 | break; | 1832 | break; |
1833 | case LAP_RESET_INDICATION: | 1833 | case LAP_RESET_INDICATION: |
1834 | IRDA_DEBUG( 1, "%s(), LAP_RESET_INDICATION\n", __FUNCTION__); | 1834 | IRDA_DEBUG( 1, "%s(), LAP_RESET_INDICATION\n", __func__); |
1835 | reason = LM_LAP_RESET; | 1835 | reason = LM_LAP_RESET; |
1836 | break; | 1836 | break; |
1837 | case LAP_FOUND_NONE: | 1837 | case LAP_FOUND_NONE: |
1838 | case LAP_MEDIA_BUSY: | 1838 | case LAP_MEDIA_BUSY: |
1839 | case LAP_PRIMARY_CONFLICT: | 1839 | case LAP_PRIMARY_CONFLICT: |
1840 | IRDA_DEBUG(1, "%s(), LAP_FOUND_NONE, LAP_MEDIA_BUSY or LAP_PRIMARY_CONFLICT\n", __FUNCTION__); | 1840 | IRDA_DEBUG(1, "%s(), LAP_FOUND_NONE, LAP_MEDIA_BUSY or LAP_PRIMARY_CONFLICT\n", __func__); |
1841 | reason = LM_CONNECT_FAILURE; | 1841 | reason = LM_CONNECT_FAILURE; |
1842 | break; | 1842 | break; |
1843 | default: | 1843 | default: |
1844 | IRDA_DEBUG(1, "%s(), Unknow IrLAP disconnect reason %d!\n", | 1844 | IRDA_DEBUG(1, "%s(), Unknow IrLAP disconnect reason %d!\n", |
1845 | __FUNCTION__, lap_reason); | 1845 | __func__, lap_reason); |
1846 | reason = LM_LAP_DISCONNECT; | 1846 | reason = LM_LAP_DISCONNECT; |
1847 | break; | 1847 | break; |
1848 | } | 1848 | } |
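The hunks above are representative of the whole series: every call site shown feeds the function name through a "%s" conversion, so swapping the GCC-specific __FUNCTION__ spelling for the C99 predefined identifier __func__ is a mechanical rename. A minimal standalone sketch of the two spellings (illustrative only, not code from the kernel tree):

#include <stdio.h>

static void demo(void)
{
	/*
	 * C99 defines __func__ as if every function body began with
	 *     static const char __func__[] = "function-name";
	 * __FUNCTION__ is the older GCC spelling of the same identifier.
	 * Both work as a "%s" argument, which is the only way the IrDA
	 * debug calls shown here use them.
	 */
	printf("%s(), entering\n", __func__);

	/*
	 * The one difference that could bite such a conversion: neither
	 * spelling is a string literal in modern compilers, so
	 *     printf("in " __func__ "\n");
	 * does not compile. None of the call sites shown here relies on
	 * compile-time concatenation.
	 */
}

int main(void)
{
	demo();
	return 0;
}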
diff --git a/net/irda/irlmp_event.c b/net/irda/irlmp_event.c index 150cd3f1129a..78cce0cb073f 100644 --- a/net/irda/irlmp_event.c +++ b/net/irda/irlmp_event.c | |||
@@ -120,7 +120,7 @@ static inline void irlmp_next_lap_state(struct lap_cb *self, | |||
120 | IRLMP_STATE state) | 120 | IRLMP_STATE state) |
121 | { | 121 | { |
122 | /* | 122 | /* |
123 | IRDA_DEBUG(4, "%s(), LMP LAP = %s\n", __FUNCTION__, irlmp_state[state]); | 123 | IRDA_DEBUG(4, "%s(), LMP LAP = %s\n", __func__, irlmp_state[state]); |
124 | */ | 124 | */ |
125 | self->lap_state = state; | 125 | self->lap_state = state; |
126 | } | 126 | } |
@@ -130,7 +130,7 @@ static inline void irlmp_next_lsap_state(struct lsap_cb *self, | |||
130 | { | 130 | { |
131 | /* | 131 | /* |
132 | IRDA_ASSERT(self != NULL, return;); | 132 | IRDA_ASSERT(self != NULL, return;); |
133 | IRDA_DEBUG(4, "%s(), LMP LSAP = %s\n", __FUNCTION__, irlsap_state[state]); | 133 | IRDA_DEBUG(4, "%s(), LMP LSAP = %s\n", __func__, irlsap_state[state]); |
134 | */ | 134 | */ |
135 | self->lsap_state = state; | 135 | self->lsap_state = state; |
136 | } | 136 | } |
@@ -143,7 +143,7 @@ int irlmp_do_lsap_event(struct lsap_cb *self, IRLMP_EVENT event, | |||
143 | IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return -1;); | 143 | IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return -1;); |
144 | 144 | ||
145 | IRDA_DEBUG(4, "%s(), EVENT = %s, STATE = %s\n", | 145 | IRDA_DEBUG(4, "%s(), EVENT = %s, STATE = %s\n", |
146 | __FUNCTION__, irlmp_event[event], irlsap_state[ self->lsap_state]); | 146 | __func__, irlmp_event[event], irlsap_state[ self->lsap_state]); |
147 | 147 | ||
148 | return (*lsap_state[self->lsap_state]) (self, event, skb); | 148 | return (*lsap_state[self->lsap_state]) (self, event, skb); |
149 | } | 149 | } |
@@ -160,7 +160,7 @@ void irlmp_do_lap_event(struct lap_cb *self, IRLMP_EVENT event, | |||
160 | IRDA_ASSERT(self != NULL, return;); | 160 | IRDA_ASSERT(self != NULL, return;); |
161 | IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;); | 161 | IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;); |
162 | 162 | ||
163 | IRDA_DEBUG(4, "%s(), EVENT = %s, STATE = %s\n", __FUNCTION__, | 163 | IRDA_DEBUG(4, "%s(), EVENT = %s, STATE = %s\n", __func__, |
164 | irlmp_event[event], | 164 | irlmp_event[event], |
165 | irlmp_state[self->lap_state]); | 165 | irlmp_state[self->lap_state]); |
166 | 166 | ||
@@ -169,7 +169,7 @@ void irlmp_do_lap_event(struct lap_cb *self, IRLMP_EVENT event, | |||
169 | 169 | ||
170 | void irlmp_discovery_timer_expired(void *data) | 170 | void irlmp_discovery_timer_expired(void *data) |
171 | { | 171 | { |
172 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__); | 172 | IRDA_DEBUG(4, "%s()\n", __func__); |
173 | 173 | ||
174 | /* We always cleanup the log (active & passive discovery) */ | 174 | /* We always cleanup the log (active & passive discovery) */ |
175 | irlmp_do_expiry(); | 175 | irlmp_do_expiry(); |
@@ -184,7 +184,7 @@ void irlmp_watchdog_timer_expired(void *data) | |||
184 | { | 184 | { |
185 | struct lsap_cb *self = (struct lsap_cb *) data; | 185 | struct lsap_cb *self = (struct lsap_cb *) data; |
186 | 186 | ||
187 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__); | 187 | IRDA_DEBUG(2, "%s()\n", __func__); |
188 | 188 | ||
189 | IRDA_ASSERT(self != NULL, return;); | 189 | IRDA_ASSERT(self != NULL, return;); |
190 | IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;); | 190 | IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;); |
@@ -196,7 +196,7 @@ void irlmp_idle_timer_expired(void *data) | |||
196 | { | 196 | { |
197 | struct lap_cb *self = (struct lap_cb *) data; | 197 | struct lap_cb *self = (struct lap_cb *) data; |
198 | 198 | ||
199 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__); | 199 | IRDA_DEBUG(2, "%s()\n", __func__); |
200 | 200 | ||
201 | IRDA_ASSERT(self != NULL, return;); | 201 | IRDA_ASSERT(self != NULL, return;); |
202 | IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;); | 202 | IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;); |
@@ -256,7 +256,7 @@ irlmp_do_all_lsap_event(hashbin_t * lsap_hashbin, | |||
256 | static void irlmp_state_standby(struct lap_cb *self, IRLMP_EVENT event, | 256 | static void irlmp_state_standby(struct lap_cb *self, IRLMP_EVENT event, |
257 | struct sk_buff *skb) | 257 | struct sk_buff *skb) |
258 | { | 258 | { |
259 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__); | 259 | IRDA_DEBUG(4, "%s()\n", __func__); |
260 | IRDA_ASSERT(self->irlap != NULL, return;); | 260 | IRDA_ASSERT(self->irlap != NULL, return;); |
261 | 261 | ||
262 | switch (event) { | 262 | switch (event) { |
@@ -276,7 +276,7 @@ static void irlmp_state_standby(struct lap_cb *self, IRLMP_EVENT event, | |||
276 | irlap_connect_response(self->irlap, skb); | 276 | irlap_connect_response(self->irlap, skb); |
277 | break; | 277 | break; |
278 | case LM_LAP_CONNECT_REQUEST: | 278 | case LM_LAP_CONNECT_REQUEST: |
279 | IRDA_DEBUG(4, "%s() LS_CONNECT_REQUEST\n", __FUNCTION__); | 279 | IRDA_DEBUG(4, "%s() LS_CONNECT_REQUEST\n", __func__); |
280 | 280 | ||
281 | irlmp_next_lap_state(self, LAP_U_CONNECT); | 281 | irlmp_next_lap_state(self, LAP_U_CONNECT); |
282 | 282 | ||
@@ -285,13 +285,13 @@ static void irlmp_state_standby(struct lap_cb *self, IRLMP_EVENT event, | |||
285 | break; | 285 | break; |
286 | case LM_LAP_DISCONNECT_INDICATION: | 286 | case LM_LAP_DISCONNECT_INDICATION: |
287 | IRDA_DEBUG(4, "%s(), Error LM_LAP_DISCONNECT_INDICATION\n", | 287 | IRDA_DEBUG(4, "%s(), Error LM_LAP_DISCONNECT_INDICATION\n", |
288 | __FUNCTION__); | 288 | __func__); |
289 | 289 | ||
290 | irlmp_next_lap_state(self, LAP_STANDBY); | 290 | irlmp_next_lap_state(self, LAP_STANDBY); |
291 | break; | 291 | break; |
292 | default: | 292 | default: |
293 | IRDA_DEBUG(0, "%s(), Unknown event %s\n", | 293 | IRDA_DEBUG(0, "%s(), Unknown event %s\n", |
294 | __FUNCTION__, irlmp_event[event]); | 294 | __func__, irlmp_event[event]); |
295 | break; | 295 | break; |
296 | } | 296 | } |
297 | } | 297 | } |
@@ -306,7 +306,7 @@ static void irlmp_state_standby(struct lap_cb *self, IRLMP_EVENT event, | |||
306 | static void irlmp_state_u_connect(struct lap_cb *self, IRLMP_EVENT event, | 306 | static void irlmp_state_u_connect(struct lap_cb *self, IRLMP_EVENT event, |
307 | struct sk_buff *skb) | 307 | struct sk_buff *skb) |
308 | { | 308 | { |
309 | IRDA_DEBUG(2, "%s(), event=%s\n", __FUNCTION__, irlmp_event[event]); | 309 | IRDA_DEBUG(2, "%s(), event=%s\n", __func__, irlmp_event[event]); |
310 | 310 | ||
311 | switch (event) { | 311 | switch (event) { |
312 | case LM_LAP_CONNECT_INDICATION: | 312 | case LM_LAP_CONNECT_INDICATION: |
@@ -326,7 +326,7 @@ static void irlmp_state_u_connect(struct lap_cb *self, IRLMP_EVENT event, | |||
326 | * the lsaps may already have gone. This avoid getting stuck | 326 | * the lsaps may already have gone. This avoid getting stuck |
327 | * forever in LAP_ACTIVE state - Jean II */ | 327 | * forever in LAP_ACTIVE state - Jean II */ |
328 | if (HASHBIN_GET_SIZE(self->lsaps) == 0) { | 328 | if (HASHBIN_GET_SIZE(self->lsaps) == 0) { |
329 | IRDA_DEBUG(0, "%s() NO LSAPs !\n", __FUNCTION__); | 329 | IRDA_DEBUG(0, "%s() NO LSAPs !\n", __func__); |
330 | irlmp_start_idle_timer(self, LM_IDLE_TIMEOUT); | 330 | irlmp_start_idle_timer(self, LM_IDLE_TIMEOUT); |
331 | } | 331 | } |
332 | break; | 332 | break; |
@@ -344,12 +344,12 @@ static void irlmp_state_u_connect(struct lap_cb *self, IRLMP_EVENT event, | |||
344 | * the lsaps may already have gone. This avoid getting stuck | 344 | * the lsaps may already have gone. This avoid getting stuck |
345 | * forever in LAP_ACTIVE state - Jean II */ | 345 | * forever in LAP_ACTIVE state - Jean II */ |
346 | if (HASHBIN_GET_SIZE(self->lsaps) == 0) { | 346 | if (HASHBIN_GET_SIZE(self->lsaps) == 0) { |
347 | IRDA_DEBUG(0, "%s() NO LSAPs !\n", __FUNCTION__); | 347 | IRDA_DEBUG(0, "%s() NO LSAPs !\n", __func__); |
348 | irlmp_start_idle_timer(self, LM_IDLE_TIMEOUT); | 348 | irlmp_start_idle_timer(self, LM_IDLE_TIMEOUT); |
349 | } | 349 | } |
350 | break; | 350 | break; |
351 | case LM_LAP_DISCONNECT_INDICATION: | 351 | case LM_LAP_DISCONNECT_INDICATION: |
352 | IRDA_DEBUG(4, "%s(), LM_LAP_DISCONNECT_INDICATION\n", __FUNCTION__); | 352 | IRDA_DEBUG(4, "%s(), LM_LAP_DISCONNECT_INDICATION\n", __func__); |
353 | irlmp_next_lap_state(self, LAP_STANDBY); | 353 | irlmp_next_lap_state(self, LAP_STANDBY); |
354 | 354 | ||
355 | /* Send disconnect event to all LSAPs using this link */ | 355 | /* Send disconnect event to all LSAPs using this link */ |
@@ -357,7 +357,7 @@ static void irlmp_state_u_connect(struct lap_cb *self, IRLMP_EVENT event, | |||
357 | LM_LAP_DISCONNECT_INDICATION); | 357 | LM_LAP_DISCONNECT_INDICATION); |
358 | break; | 358 | break; |
359 | case LM_LAP_DISCONNECT_REQUEST: | 359 | case LM_LAP_DISCONNECT_REQUEST: |
360 | IRDA_DEBUG(4, "%s(), LM_LAP_DISCONNECT_REQUEST\n", __FUNCTION__); | 360 | IRDA_DEBUG(4, "%s(), LM_LAP_DISCONNECT_REQUEST\n", __func__); |
361 | 361 | ||
362 | /* One of the LSAP did timeout or was closed, if it was | 362 | /* One of the LSAP did timeout or was closed, if it was |
363 | * the last one, try to get out of here - Jean II */ | 363 | * the last one, try to get out of here - Jean II */ |
@@ -367,7 +367,7 @@ static void irlmp_state_u_connect(struct lap_cb *self, IRLMP_EVENT event, | |||
367 | break; | 367 | break; |
368 | default: | 368 | default: |
369 | IRDA_DEBUG(0, "%s(), Unknown event %s\n", | 369 | IRDA_DEBUG(0, "%s(), Unknown event %s\n", |
370 | __FUNCTION__, irlmp_event[event]); | 370 | __func__, irlmp_event[event]); |
371 | break; | 371 | break; |
372 | } | 372 | } |
373 | } | 373 | } |
@@ -381,11 +381,11 @@ static void irlmp_state_u_connect(struct lap_cb *self, IRLMP_EVENT event, | |||
381 | static void irlmp_state_active(struct lap_cb *self, IRLMP_EVENT event, | 381 | static void irlmp_state_active(struct lap_cb *self, IRLMP_EVENT event, |
382 | struct sk_buff *skb) | 382 | struct sk_buff *skb) |
383 | { | 383 | { |
384 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__); | 384 | IRDA_DEBUG(4, "%s()\n", __func__); |
385 | 385 | ||
386 | switch (event) { | 386 | switch (event) { |
387 | case LM_LAP_CONNECT_REQUEST: | 387 | case LM_LAP_CONNECT_REQUEST: |
388 | IRDA_DEBUG(4, "%s(), LS_CONNECT_REQUEST\n", __FUNCTION__); | 388 | IRDA_DEBUG(4, "%s(), LS_CONNECT_REQUEST\n", __func__); |
389 | 389 | ||
390 | /* | 390 | /* |
391 | * IrLAP may have a pending disconnect. We tried to close | 391 | * IrLAP may have a pending disconnect. We tried to close |
@@ -468,7 +468,7 @@ static void irlmp_state_active(struct lap_cb *self, IRLMP_EVENT event, | |||
468 | break; | 468 | break; |
469 | default: | 469 | default: |
470 | IRDA_DEBUG(0, "%s(), Unknown event %s\n", | 470 | IRDA_DEBUG(0, "%s(), Unknown event %s\n", |
471 | __FUNCTION__, irlmp_event[event]); | 471 | __func__, irlmp_event[event]); |
472 | break; | 472 | break; |
473 | } | 473 | } |
474 | } | 474 | } |
@@ -490,7 +490,7 @@ static int irlmp_state_disconnected(struct lsap_cb *self, IRLMP_EVENT event, | |||
490 | { | 490 | { |
491 | int ret = 0; | 491 | int ret = 0; |
492 | 492 | ||
493 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__); | 493 | IRDA_DEBUG(4, "%s()\n", __func__); |
494 | 494 | ||
495 | IRDA_ASSERT(self != NULL, return -1;); | 495 | IRDA_ASSERT(self != NULL, return -1;); |
496 | IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return -1;); | 496 | IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return -1;); |
@@ -505,11 +505,11 @@ static int irlmp_state_disconnected(struct lsap_cb *self, IRLMP_EVENT event, | |||
505 | break; | 505 | break; |
506 | #endif /* CONFIG_IRDA_ULTRA */ | 506 | #endif /* CONFIG_IRDA_ULTRA */ |
507 | case LM_CONNECT_REQUEST: | 507 | case LM_CONNECT_REQUEST: |
508 | IRDA_DEBUG(4, "%s(), LM_CONNECT_REQUEST\n", __FUNCTION__); | 508 | IRDA_DEBUG(4, "%s(), LM_CONNECT_REQUEST\n", __func__); |
509 | 509 | ||
510 | if (self->conn_skb) { | 510 | if (self->conn_skb) { |
511 | IRDA_WARNING("%s: busy with another request!\n", | 511 | IRDA_WARNING("%s: busy with another request!\n", |
512 | __FUNCTION__); | 512 | __func__); |
513 | return -EBUSY; | 513 | return -EBUSY; |
514 | } | 514 | } |
515 | /* Don't forget to refcount it (see irlmp_connect_request()) */ | 515 | /* Don't forget to refcount it (see irlmp_connect_request()) */ |
@@ -526,7 +526,7 @@ static int irlmp_state_disconnected(struct lsap_cb *self, IRLMP_EVENT event, | |||
526 | case LM_CONNECT_INDICATION: | 526 | case LM_CONNECT_INDICATION: |
527 | if (self->conn_skb) { | 527 | if (self->conn_skb) { |
528 | IRDA_WARNING("%s: busy with another request!\n", | 528 | IRDA_WARNING("%s: busy with another request!\n", |
529 | __FUNCTION__); | 529 | __func__); |
530 | return -EBUSY; | 530 | return -EBUSY; |
531 | } | 531 | } |
532 | /* Don't forget to refcount it (see irlap_driver_rcv()) */ | 532 | /* Don't forget to refcount it (see irlap_driver_rcv()) */ |
@@ -552,7 +552,7 @@ static int irlmp_state_disconnected(struct lsap_cb *self, IRLMP_EVENT event, | |||
552 | break; | 552 | break; |
553 | default: | 553 | default: |
554 | IRDA_DEBUG(1, "%s(), Unknown event %s on LSAP %#02x\n", | 554 | IRDA_DEBUG(1, "%s(), Unknown event %s on LSAP %#02x\n", |
555 | __FUNCTION__, irlmp_event[event], self->slsap_sel); | 555 | __func__, irlmp_event[event], self->slsap_sel); |
556 | break; | 556 | break; |
557 | } | 557 | } |
558 | return ret; | 558 | return ret; |
@@ -570,7 +570,7 @@ static int irlmp_state_connect(struct lsap_cb *self, IRLMP_EVENT event, | |||
570 | struct lsap_cb *lsap; | 570 | struct lsap_cb *lsap; |
571 | int ret = 0; | 571 | int ret = 0; |
572 | 572 | ||
573 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__); | 573 | IRDA_DEBUG(4, "%s()\n", __func__); |
574 | 574 | ||
575 | IRDA_ASSERT(self != NULL, return -1;); | 575 | IRDA_ASSERT(self != NULL, return -1;); |
576 | IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return -1;); | 576 | IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return -1;); |
@@ -603,7 +603,7 @@ static int irlmp_state_connect(struct lsap_cb *self, IRLMP_EVENT event, | |||
603 | case LM_WATCHDOG_TIMEOUT: | 603 | case LM_WATCHDOG_TIMEOUT: |
604 | /* May happen, who knows... | 604 | /* May happen, who knows... |
605 | * Jean II */ | 605 | * Jean II */ |
606 | IRDA_DEBUG(0, "%s() WATCHDOG_TIMEOUT!\n", __FUNCTION__); | 606 | IRDA_DEBUG(0, "%s() WATCHDOG_TIMEOUT!\n", __func__); |
607 | 607 | ||
608 | /* Disconnect, get out... - Jean II */ | 608 | /* Disconnect, get out... - Jean II */ |
609 | self->lap = NULL; | 609 | self->lap = NULL; |
@@ -614,7 +614,7 @@ static int irlmp_state_connect(struct lsap_cb *self, IRLMP_EVENT event, | |||
614 | /* LM_LAP_DISCONNECT_INDICATION : Should never happen, we | 614 | /* LM_LAP_DISCONNECT_INDICATION : Should never happen, we |
615 | * are *not* yet bound to the IrLAP link. Jean II */ | 615 | * are *not* yet bound to the IrLAP link. Jean II */ |
616 | IRDA_DEBUG(0, "%s(), Unknown event %s on LSAP %#02x\n", | 616 | IRDA_DEBUG(0, "%s(), Unknown event %s on LSAP %#02x\n", |
617 | __FUNCTION__, irlmp_event[event], self->slsap_sel); | 617 | __func__, irlmp_event[event], self->slsap_sel); |
618 | break; | 618 | break; |
619 | } | 619 | } |
620 | return ret; | 620 | return ret; |
@@ -632,7 +632,7 @@ static int irlmp_state_connect_pend(struct lsap_cb *self, IRLMP_EVENT event, | |||
632 | struct sk_buff *tx_skb; | 632 | struct sk_buff *tx_skb; |
633 | int ret = 0; | 633 | int ret = 0; |
634 | 634 | ||
635 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__); | 635 | IRDA_DEBUG(4, "%s()\n", __func__); |
636 | 636 | ||
637 | IRDA_ASSERT(self != NULL, return -1;); | 637 | IRDA_ASSERT(self != NULL, return -1;); |
638 | IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return -1;); | 638 | IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return -1;); |
@@ -643,16 +643,16 @@ static int irlmp_state_connect_pend(struct lsap_cb *self, IRLMP_EVENT event, | |||
643 | break; | 643 | break; |
644 | case LM_CONNECT_RESPONSE: | 644 | case LM_CONNECT_RESPONSE: |
645 | IRDA_DEBUG(0, "%s(), LM_CONNECT_RESPONSE, " | 645 | IRDA_DEBUG(0, "%s(), LM_CONNECT_RESPONSE, " |
646 | "no indication issued yet\n", __FUNCTION__); | 646 | "no indication issued yet\n", __func__); |
647 | /* Keep state */ | 647 | /* Keep state */ |
648 | break; | 648 | break; |
649 | case LM_DISCONNECT_REQUEST: | 649 | case LM_DISCONNECT_REQUEST: |
650 | IRDA_DEBUG(0, "%s(), LM_DISCONNECT_REQUEST, " | 650 | IRDA_DEBUG(0, "%s(), LM_DISCONNECT_REQUEST, " |
651 | "not yet bound to IrLAP connection\n", __FUNCTION__); | 651 | "not yet bound to IrLAP connection\n", __func__); |
652 | /* Keep state */ | 652 | /* Keep state */ |
653 | break; | 653 | break; |
654 | case LM_LAP_CONNECT_CONFIRM: | 654 | case LM_LAP_CONNECT_CONFIRM: |
655 | IRDA_DEBUG(4, "%s(), LS_CONNECT_CONFIRM\n", __FUNCTION__); | 655 | IRDA_DEBUG(4, "%s(), LS_CONNECT_CONFIRM\n", __func__); |
656 | irlmp_next_lsap_state(self, LSAP_CONNECT); | 656 | irlmp_next_lsap_state(self, LSAP_CONNECT); |
657 | 657 | ||
658 | tx_skb = self->conn_skb; | 658 | tx_skb = self->conn_skb; |
@@ -666,7 +666,7 @@ static int irlmp_state_connect_pend(struct lsap_cb *self, IRLMP_EVENT event, | |||
666 | /* Will happen in some rare cases because of a race condition. | 666 | /* Will happen in some rare cases because of a race condition. |
667 | * Just make sure we don't stay there forever... | 667 | * Just make sure we don't stay there forever... |
668 | * Jean II */ | 668 | * Jean II */ |
669 | IRDA_DEBUG(0, "%s() WATCHDOG_TIMEOUT!\n", __FUNCTION__); | 669 | IRDA_DEBUG(0, "%s() WATCHDOG_TIMEOUT!\n", __func__); |
670 | 670 | ||
671 | /* Go back to disconnected mode, keep the socket waiting */ | 671 | /* Go back to disconnected mode, keep the socket waiting */ |
672 | self->lap = NULL; | 672 | self->lap = NULL; |
@@ -680,7 +680,7 @@ static int irlmp_state_connect_pend(struct lsap_cb *self, IRLMP_EVENT event, | |||
680 | /* LM_LAP_DISCONNECT_INDICATION : Should never happen, we | 680 | /* LM_LAP_DISCONNECT_INDICATION : Should never happen, we |
681 | * are *not* yet bound to the IrLAP link. Jean II */ | 681 | * are *not* yet bound to the IrLAP link. Jean II */ |
682 | IRDA_DEBUG(0, "%s(), Unknown event %s on LSAP %#02x\n", | 682 | IRDA_DEBUG(0, "%s(), Unknown event %s on LSAP %#02x\n", |
683 | __FUNCTION__, irlmp_event[event], self->slsap_sel); | 683 | __func__, irlmp_event[event], self->slsap_sel); |
684 | break; | 684 | break; |
685 | } | 685 | } |
686 | return ret; | 686 | return ret; |
@@ -698,7 +698,7 @@ static int irlmp_state_dtr(struct lsap_cb *self, IRLMP_EVENT event, | |||
698 | LM_REASON reason; | 698 | LM_REASON reason; |
699 | int ret = 0; | 699 | int ret = 0; |
700 | 700 | ||
701 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__); | 701 | IRDA_DEBUG(4, "%s()\n", __func__); |
702 | 702 | ||
703 | IRDA_ASSERT(self != NULL, return -1;); | 703 | IRDA_ASSERT(self != NULL, return -1;); |
704 | IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return -1;); | 704 | IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return -1;); |
@@ -722,12 +722,12 @@ static int irlmp_state_dtr(struct lsap_cb *self, IRLMP_EVENT event, | |||
722 | break; | 722 | break; |
723 | case LM_CONNECT_REQUEST: | 723 | case LM_CONNECT_REQUEST: |
724 | IRDA_DEBUG(0, "%s(), LM_CONNECT_REQUEST, " | 724 | IRDA_DEBUG(0, "%s(), LM_CONNECT_REQUEST, " |
725 | "error, LSAP already connected\n", __FUNCTION__); | 725 | "error, LSAP already connected\n", __func__); |
726 | /* Keep state */ | 726 | /* Keep state */ |
727 | break; | 727 | break; |
728 | case LM_CONNECT_RESPONSE: | 728 | case LM_CONNECT_RESPONSE: |
729 | IRDA_DEBUG(0, "%s(), LM_CONNECT_RESPONSE, " | 729 | IRDA_DEBUG(0, "%s(), LM_CONNECT_RESPONSE, " |
730 | "error, LSAP already connected\n", __FUNCTION__); | 730 | "error, LSAP already connected\n", __func__); |
731 | /* Keep state */ | 731 | /* Keep state */ |
732 | break; | 732 | break; |
733 | case LM_DISCONNECT_REQUEST: | 733 | case LM_DISCONNECT_REQUEST: |
@@ -740,7 +740,7 @@ static int irlmp_state_dtr(struct lsap_cb *self, IRLMP_EVENT event, | |||
740 | /* Try to close the LAP connection if its still there */ | 740 | /* Try to close the LAP connection if its still there */ |
741 | if (self->lap) { | 741 | if (self->lap) { |
742 | IRDA_DEBUG(4, "%s(), trying to close IrLAP\n", | 742 | IRDA_DEBUG(4, "%s(), trying to close IrLAP\n", |
743 | __FUNCTION__); | 743 | __func__); |
744 | irlmp_do_lap_event(self->lap, | 744 | irlmp_do_lap_event(self->lap, |
745 | LM_LAP_DISCONNECT_REQUEST, | 745 | LM_LAP_DISCONNECT_REQUEST, |
746 | NULL); | 746 | NULL); |
@@ -764,14 +764,14 @@ static int irlmp_state_dtr(struct lsap_cb *self, IRLMP_EVENT event, | |||
764 | reason = skb->data[3]; | 764 | reason = skb->data[3]; |
765 | 765 | ||
766 | /* Try to close the LAP connection */ | 766 | /* Try to close the LAP connection */ |
767 | IRDA_DEBUG(4, "%s(), trying to close IrLAP\n", __FUNCTION__); | 767 | IRDA_DEBUG(4, "%s(), trying to close IrLAP\n", __func__); |
768 | irlmp_do_lap_event(self->lap, LM_LAP_DISCONNECT_REQUEST, NULL); | 768 | irlmp_do_lap_event(self->lap, LM_LAP_DISCONNECT_REQUEST, NULL); |
769 | 769 | ||
770 | irlmp_disconnect_indication(self, reason, skb); | 770 | irlmp_disconnect_indication(self, reason, skb); |
771 | break; | 771 | break; |
772 | default: | 772 | default: |
773 | IRDA_DEBUG(0, "%s(), Unknown event %s on LSAP %#02x\n", | 773 | IRDA_DEBUG(0, "%s(), Unknown event %s on LSAP %#02x\n", |
774 | __FUNCTION__, irlmp_event[event], self->slsap_sel); | 774 | __func__, irlmp_event[event], self->slsap_sel); |
775 | break; | 775 | break; |
776 | } | 776 | } |
777 | return ret; | 777 | return ret; |
@@ -793,7 +793,7 @@ static int irlmp_state_setup(struct lsap_cb *self, IRLMP_EVENT event, | |||
793 | IRDA_ASSERT(self != NULL, return -1;); | 793 | IRDA_ASSERT(self != NULL, return -1;); |
794 | IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return -1;); | 794 | IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return -1;); |
795 | 795 | ||
796 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__); | 796 | IRDA_DEBUG(4, "%s()\n", __func__); |
797 | 797 | ||
798 | switch (event) { | 798 | switch (event) { |
799 | case LM_CONNECT_CONFIRM: | 799 | case LM_CONNECT_CONFIRM: |
@@ -814,7 +814,7 @@ static int irlmp_state_setup(struct lsap_cb *self, IRLMP_EVENT event, | |||
814 | reason = skb->data[3]; | 814 | reason = skb->data[3]; |
815 | 815 | ||
816 | /* Try to close the LAP connection */ | 816 | /* Try to close the LAP connection */ |
817 | IRDA_DEBUG(4, "%s(), trying to close IrLAP\n", __FUNCTION__); | 817 | IRDA_DEBUG(4, "%s(), trying to close IrLAP\n", __func__); |
818 | irlmp_do_lap_event(self->lap, LM_LAP_DISCONNECT_REQUEST, NULL); | 818 | irlmp_do_lap_event(self->lap, LM_LAP_DISCONNECT_REQUEST, NULL); |
819 | 819 | ||
820 | irlmp_disconnect_indication(self, reason, skb); | 820 | irlmp_disconnect_indication(self, reason, skb); |
@@ -832,7 +832,7 @@ static int irlmp_state_setup(struct lsap_cb *self, IRLMP_EVENT event, | |||
832 | irlmp_disconnect_indication(self, reason, skb); | 832 | irlmp_disconnect_indication(self, reason, skb); |
833 | break; | 833 | break; |
834 | case LM_WATCHDOG_TIMEOUT: | 834 | case LM_WATCHDOG_TIMEOUT: |
835 | IRDA_DEBUG(0, "%s() WATCHDOG_TIMEOUT!\n", __FUNCTION__); | 835 | IRDA_DEBUG(0, "%s() WATCHDOG_TIMEOUT!\n", __func__); |
836 | 836 | ||
837 | IRDA_ASSERT(self->lap != NULL, return -1;); | 837 | IRDA_ASSERT(self->lap != NULL, return -1;); |
838 | irlmp_do_lap_event(self->lap, LM_LAP_DISCONNECT_REQUEST, NULL); | 838 | irlmp_do_lap_event(self->lap, LM_LAP_DISCONNECT_REQUEST, NULL); |
@@ -842,7 +842,7 @@ static int irlmp_state_setup(struct lsap_cb *self, IRLMP_EVENT event, | |||
842 | break; | 842 | break; |
843 | default: | 843 | default: |
844 | IRDA_DEBUG(0, "%s(), Unknown event %s on LSAP %#02x\n", | 844 | IRDA_DEBUG(0, "%s(), Unknown event %s on LSAP %#02x\n", |
845 | __FUNCTION__, irlmp_event[event], self->slsap_sel); | 845 | __func__, irlmp_event[event], self->slsap_sel); |
846 | break; | 846 | break; |
847 | } | 847 | } |
848 | return ret; | 848 | return ret; |
@@ -863,7 +863,7 @@ static int irlmp_state_setup_pend(struct lsap_cb *self, IRLMP_EVENT event, | |||
863 | LM_REASON reason; | 863 | LM_REASON reason; |
864 | int ret = 0; | 864 | int ret = 0; |
865 | 865 | ||
866 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__); | 866 | IRDA_DEBUG(4, "%s()\n", __func__); |
867 | 867 | ||
868 | IRDA_ASSERT(self != NULL, return -1;); | 868 | IRDA_ASSERT(self != NULL, return -1;); |
869 | IRDA_ASSERT(irlmp != NULL, return -1;); | 869 | IRDA_ASSERT(irlmp != NULL, return -1;); |
@@ -883,7 +883,7 @@ static int irlmp_state_setup_pend(struct lsap_cb *self, IRLMP_EVENT event, | |||
883 | irlmp_next_lsap_state(self, LSAP_SETUP); | 883 | irlmp_next_lsap_state(self, LSAP_SETUP); |
884 | break; | 884 | break; |
885 | case LM_WATCHDOG_TIMEOUT: | 885 | case LM_WATCHDOG_TIMEOUT: |
886 | IRDA_DEBUG(0, "%s() : WATCHDOG_TIMEOUT !\n", __FUNCTION__); | 886 | IRDA_DEBUG(0, "%s() : WATCHDOG_TIMEOUT !\n", __func__); |
887 | 887 | ||
888 | IRDA_ASSERT(self->lap != NULL, return -1;); | 888 | IRDA_ASSERT(self->lap != NULL, return -1;); |
889 | irlmp_do_lap_event(self->lap, LM_LAP_DISCONNECT_REQUEST, NULL); | 889 | irlmp_do_lap_event(self->lap, LM_LAP_DISCONNECT_REQUEST, NULL); |
@@ -902,7 +902,7 @@ static int irlmp_state_setup_pend(struct lsap_cb *self, IRLMP_EVENT event, | |||
902 | break; | 902 | break; |
903 | default: | 903 | default: |
904 | IRDA_DEBUG(0, "%s(), Unknown event %s on LSAP %#02x\n", | 904 | IRDA_DEBUG(0, "%s(), Unknown event %s on LSAP %#02x\n", |
905 | __FUNCTION__, irlmp_event[event], self->slsap_sel); | 905 | __func__, irlmp_event[event], self->slsap_sel); |
906 | break; | 906 | break; |
907 | } | 907 | } |
908 | return ret; | 908 | return ret; |
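One subtlety worth spelling out, since the hunks above touch both plain functions and static inline helpers: __func__ always names the lexically enclosing function. Inside a macro it therefore reports the function the macro expands in, while inside a called helper (inline or not) it reports the helper itself. A small illustrative sketch with made-up names:

#include <stdio.h>

/* Expands in the caller's body, so __func__ names the caller. */
#define TRACE_HERE()	printf("macro sees:  %s\n", __func__)

/* __func__ here names trace_helper, no matter where it is inlined. */
static inline void trace_helper(void)
{
	printf("helper sees: %s\n", __func__);
}

static void some_caller(void)
{
	TRACE_HERE();		/* prints "macro sees:  some_caller"  */
	trace_helper();		/* prints "helper sees: trace_helper" */
}

int main(void)
{
	some_caller();
	return 0;
}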
diff --git a/net/irda/irlmp_frame.c b/net/irda/irlmp_frame.c index 0a79d9aeb08c..3750884094da 100644 --- a/net/irda/irlmp_frame.c +++ b/net/irda/irlmp_frame.c | |||
@@ -44,7 +44,7 @@ inline void irlmp_send_data_pdu(struct lap_cb *self, __u8 dlsap, __u8 slsap, | |||
44 | skb->data[1] = slsap; | 44 | skb->data[1] = slsap; |
45 | 45 | ||
46 | if (expedited) { | 46 | if (expedited) { |
47 | IRDA_DEBUG(4, "%s(), sending expedited data\n", __FUNCTION__); | 47 | IRDA_DEBUG(4, "%s(), sending expedited data\n", __func__); |
48 | irlap_data_request(self->irlap, skb, TRUE); | 48 | irlap_data_request(self->irlap, skb, TRUE); |
49 | } else | 49 | } else |
50 | irlap_data_request(self->irlap, skb, FALSE); | 50 | irlap_data_request(self->irlap, skb, FALSE); |
@@ -60,7 +60,7 @@ void irlmp_send_lcf_pdu(struct lap_cb *self, __u8 dlsap, __u8 slsap, | |||
60 | { | 60 | { |
61 | __u8 *frame; | 61 | __u8 *frame; |
62 | 62 | ||
63 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__); | 63 | IRDA_DEBUG(2, "%s()\n", __func__); |
64 | 64 | ||
65 | IRDA_ASSERT(self != NULL, return;); | 65 | IRDA_ASSERT(self != NULL, return;); |
66 | IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;); | 66 | IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;); |
@@ -95,7 +95,7 @@ void irlmp_link_data_indication(struct lap_cb *self, struct sk_buff *skb, | |||
95 | __u8 dlsap_sel; /* Destination LSAP address */ | 95 | __u8 dlsap_sel; /* Destination LSAP address */ |
96 | __u8 *fp; | 96 | __u8 *fp; |
97 | 97 | ||
98 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__); | 98 | IRDA_DEBUG(4, "%s()\n", __func__); |
99 | 99 | ||
100 | IRDA_ASSERT(self != NULL, return;); | 100 | IRDA_ASSERT(self != NULL, return;); |
101 | IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;); | 101 | IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;); |
@@ -117,7 +117,7 @@ void irlmp_link_data_indication(struct lap_cb *self, struct sk_buff *skb, | |||
117 | if ((fp[0] & CONTROL_BIT) && (fp[2] == CONNECT_CMD)) { | 117 | if ((fp[0] & CONTROL_BIT) && (fp[2] == CONNECT_CMD)) { |
118 | IRDA_DEBUG(3, "%s(), incoming connection, " | 118 | IRDA_DEBUG(3, "%s(), incoming connection, " |
119 | "source LSAP=%d, dest LSAP=%d\n", | 119 | "source LSAP=%d, dest LSAP=%d\n", |
120 | __FUNCTION__, slsap_sel, dlsap_sel); | 120 | __func__, slsap_sel, dlsap_sel); |
121 | 121 | ||
122 | /* Try to find LSAP among the unconnected LSAPs */ | 122 | /* Try to find LSAP among the unconnected LSAPs */ |
123 | lsap = irlmp_find_lsap(self, dlsap_sel, slsap_sel, CONNECT_CMD, | 123 | lsap = irlmp_find_lsap(self, dlsap_sel, slsap_sel, CONNECT_CMD, |
@@ -125,7 +125,7 @@ void irlmp_link_data_indication(struct lap_cb *self, struct sk_buff *skb, | |||
125 | 125 | ||
126 | /* Maybe LSAP was already connected, so try one more time */ | 126 | /* Maybe LSAP was already connected, so try one more time */ |
127 | if (!lsap) { | 127 | if (!lsap) { |
128 | IRDA_DEBUG(1, "%s(), incoming connection for LSAP already connected\n", __FUNCTION__); | 128 | IRDA_DEBUG(1, "%s(), incoming connection for LSAP already connected\n", __func__); |
129 | lsap = irlmp_find_lsap(self, dlsap_sel, slsap_sel, 0, | 129 | lsap = irlmp_find_lsap(self, dlsap_sel, slsap_sel, 0, |
130 | self->lsaps); | 130 | self->lsaps); |
131 | } | 131 | } |
@@ -136,12 +136,12 @@ void irlmp_link_data_indication(struct lap_cb *self, struct sk_buff *skb, | |||
136 | if (lsap == NULL) { | 136 | if (lsap == NULL) { |
137 | IRDA_DEBUG(2, "IrLMP, Sorry, no LSAP for received frame!\n"); | 137 | IRDA_DEBUG(2, "IrLMP, Sorry, no LSAP for received frame!\n"); |
138 | IRDA_DEBUG(2, "%s(), slsap_sel = %02x, dlsap_sel = %02x\n", | 138 | IRDA_DEBUG(2, "%s(), slsap_sel = %02x, dlsap_sel = %02x\n", |
139 | __FUNCTION__, slsap_sel, dlsap_sel); | 139 | __func__, slsap_sel, dlsap_sel); |
140 | if (fp[0] & CONTROL_BIT) { | 140 | if (fp[0] & CONTROL_BIT) { |
141 | IRDA_DEBUG(2, "%s(), received control frame %02x\n", | 141 | IRDA_DEBUG(2, "%s(), received control frame %02x\n", |
142 | __FUNCTION__, fp[2]); | 142 | __func__, fp[2]); |
143 | } else { | 143 | } else { |
144 | IRDA_DEBUG(2, "%s(), received data frame\n", __FUNCTION__); | 144 | IRDA_DEBUG(2, "%s(), received data frame\n", __func__); |
145 | } | 145 | } |
146 | return; | 146 | return; |
147 | } | 147 | } |
@@ -160,7 +160,7 @@ void irlmp_link_data_indication(struct lap_cb *self, struct sk_buff *skb, | |||
160 | break; | 160 | break; |
161 | case DISCONNECT: | 161 | case DISCONNECT: |
162 | IRDA_DEBUG(4, "%s(), Disconnect indication!\n", | 162 | IRDA_DEBUG(4, "%s(), Disconnect indication!\n", |
163 | __FUNCTION__); | 163 | __func__); |
164 | irlmp_do_lsap_event(lsap, LM_DISCONNECT_INDICATION, | 164 | irlmp_do_lsap_event(lsap, LM_DISCONNECT_INDICATION, |
165 | skb); | 165 | skb); |
166 | break; | 166 | break; |
@@ -172,7 +172,7 @@ void irlmp_link_data_indication(struct lap_cb *self, struct sk_buff *skb, | |||
172 | break; | 172 | break; |
173 | default: | 173 | default: |
174 | IRDA_DEBUG(0, "%s(), Unknown control frame %02x\n", | 174 | IRDA_DEBUG(0, "%s(), Unknown control frame %02x\n", |
175 | __FUNCTION__, fp[2]); | 175 | __func__, fp[2]); |
176 | break; | 176 | break; |
177 | } | 177 | } |
178 | } else if (unreliable) { | 178 | } else if (unreliable) { |
@@ -206,7 +206,7 @@ void irlmp_link_unitdata_indication(struct lap_cb *self, struct sk_buff *skb) | |||
206 | __u8 *fp; | 206 | __u8 *fp; |
207 | unsigned long flags; | 207 | unsigned long flags; |
208 | 208 | ||
209 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__); | 209 | IRDA_DEBUG(4, "%s()\n", __func__); |
210 | 210 | ||
211 | IRDA_ASSERT(self != NULL, return;); | 211 | IRDA_ASSERT(self != NULL, return;); |
212 | IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;); | 212 | IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;); |
@@ -224,13 +224,13 @@ void irlmp_link_unitdata_indication(struct lap_cb *self, struct sk_buff *skb) | |||
224 | 224 | ||
225 | if (pid & 0x80) { | 225 | if (pid & 0x80) { |
226 | IRDA_DEBUG(0, "%s(), extension in PID not supp!\n", | 226 | IRDA_DEBUG(0, "%s(), extension in PID not supp!\n", |
227 | __FUNCTION__); | 227 | __func__); |
228 | return; | 228 | return; |
229 | } | 229 | } |
230 | 230 | ||
231 | /* Check if frame is addressed to the connectionless LSAP */ | 231 | /* Check if frame is addressed to the connectionless LSAP */ |
232 | if ((slsap_sel != LSAP_CONNLESS) || (dlsap_sel != LSAP_CONNLESS)) { | 232 | if ((slsap_sel != LSAP_CONNLESS) || (dlsap_sel != LSAP_CONNLESS)) { |
233 | IRDA_DEBUG(0, "%s(), dropping frame!\n", __FUNCTION__); | 233 | IRDA_DEBUG(0, "%s(), dropping frame!\n", __func__); |
234 | return; | 234 | return; |
235 | } | 235 | } |
236 | 236 | ||
@@ -254,7 +254,7 @@ void irlmp_link_unitdata_indication(struct lap_cb *self, struct sk_buff *skb) | |||
254 | if (lsap) | 254 | if (lsap) |
255 | irlmp_connless_data_indication(lsap, skb); | 255 | irlmp_connless_data_indication(lsap, skb); |
256 | else { | 256 | else { |
257 | IRDA_DEBUG(0, "%s(), found no matching LSAP!\n", __FUNCTION__); | 257 | IRDA_DEBUG(0, "%s(), found no matching LSAP!\n", __func__); |
258 | } | 258 | } |
259 | } | 259 | } |
260 | #endif /* CONFIG_IRDA_ULTRA */ | 260 | #endif /* CONFIG_IRDA_ULTRA */ |
@@ -270,7 +270,7 @@ void irlmp_link_disconnect_indication(struct lap_cb *lap, | |||
270 | LAP_REASON reason, | 270 | LAP_REASON reason, |
271 | struct sk_buff *skb) | 271 | struct sk_buff *skb) |
272 | { | 272 | { |
273 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__); | 273 | IRDA_DEBUG(2, "%s()\n", __func__); |
274 | 274 | ||
275 | IRDA_ASSERT(lap != NULL, return;); | 275 | IRDA_ASSERT(lap != NULL, return;); |
276 | IRDA_ASSERT(lap->magic == LMP_LAP_MAGIC, return;); | 276 | IRDA_ASSERT(lap->magic == LMP_LAP_MAGIC, return;); |
@@ -296,7 +296,7 @@ void irlmp_link_connect_indication(struct lap_cb *self, __u32 saddr, | |||
296 | __u32 daddr, struct qos_info *qos, | 296 | __u32 daddr, struct qos_info *qos, |
297 | struct sk_buff *skb) | 297 | struct sk_buff *skb) |
298 | { | 298 | { |
299 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__); | 299 | IRDA_DEBUG(4, "%s()\n", __func__); |
300 | 300 | ||
301 | /* Copy QoS settings for this session */ | 301 | /* Copy QoS settings for this session */ |
302 | self->qos = qos; | 302 | self->qos = qos; |
@@ -317,7 +317,7 @@ void irlmp_link_connect_indication(struct lap_cb *self, __u32 saddr, | |||
317 | void irlmp_link_connect_confirm(struct lap_cb *self, struct qos_info *qos, | 317 | void irlmp_link_connect_confirm(struct lap_cb *self, struct qos_info *qos, |
318 | struct sk_buff *skb) | 318 | struct sk_buff *skb) |
319 | { | 319 | { |
320 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__); | 320 | IRDA_DEBUG(4, "%s()\n", __func__); |
321 | 321 | ||
322 | IRDA_ASSERT(self != NULL, return;); | 322 | IRDA_ASSERT(self != NULL, return;); |
323 | IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;); | 323 | IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;); |
@@ -383,7 +383,7 @@ void irlmp_link_discovery_indication(struct lap_cb *self, | |||
383 | */ | 383 | */ |
384 | void irlmp_link_discovery_confirm(struct lap_cb *self, hashbin_t *log) | 384 | void irlmp_link_discovery_confirm(struct lap_cb *self, hashbin_t *log) |
385 | { | 385 | { |
386 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__); | 386 | IRDA_DEBUG(4, "%s()\n", __func__); |
387 | 387 | ||
388 | IRDA_ASSERT(self != NULL, return;); | 388 | IRDA_ASSERT(self != NULL, return;); |
389 | IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;); | 389 | IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;); |
diff --git a/net/irda/irmod.c b/net/irda/irmod.c index 01554b996b9b..4c487a883725 100644 --- a/net/irda/irmod.c +++ b/net/irda/irmod.c | |||
@@ -90,7 +90,7 @@ static int __init irda_init(void) | |||
90 | { | 90 | { |
91 | int ret = 0; | 91 | int ret = 0; |
92 | 92 | ||
93 | IRDA_DEBUG(0, "%s()\n", __FUNCTION__); | 93 | IRDA_DEBUG(0, "%s()\n", __func__); |
94 | 94 | ||
95 | /* Lower layer of the stack */ | 95 | /* Lower layer of the stack */ |
96 | irlmp_init(); | 96 | irlmp_init(); |
diff --git a/net/irda/irnet/irnet.h b/net/irda/irnet/irnet.h index bc2e15ce7004..a00e422c822e 100644 --- a/net/irda/irnet/irnet.h +++ b/net/irda/irnet/irnet.h | |||
@@ -337,27 +337,27 @@ | |||
337 | /* All error messages (will show up in the normal logs) */ | 337 | /* All error messages (will show up in the normal logs) */ |
338 | #define DERROR(dbg, format, args...) \ | 338 | #define DERROR(dbg, format, args...) \ |
339 | {if(DEBUG_##dbg) \ | 339 | {if(DEBUG_##dbg) \ |
340 | printk(KERN_INFO "irnet: %s(): " format, __FUNCTION__ , ##args);} | 340 | printk(KERN_INFO "irnet: %s(): " format, __func__ , ##args);} |
341 | 341 | ||
342 | /* Normal debug message (will show up in /var/log/debug) */ | 342 | /* Normal debug message (will show up in /var/log/debug) */ |
343 | #define DEBUG(dbg, format, args...) \ | 343 | #define DEBUG(dbg, format, args...) \ |
344 | {if(DEBUG_##dbg) \ | 344 | {if(DEBUG_##dbg) \ |
345 | printk(KERN_DEBUG "irnet: %s(): " format, __FUNCTION__ , ##args);} | 345 | printk(KERN_DEBUG "irnet: %s(): " format, __func__ , ##args);} |
346 | 346 | ||
347 | /* Entering a function (trace) */ | 347 | /* Entering a function (trace) */ |
348 | #define DENTER(dbg, format, args...) \ | 348 | #define DENTER(dbg, format, args...) \ |
349 | {if(DEBUG_##dbg) \ | 349 | {if(DEBUG_##dbg) \ |
350 | printk(KERN_DEBUG "irnet: -> %s" format, __FUNCTION__ , ##args);} | 350 | printk(KERN_DEBUG "irnet: -> %s" format, __func__ , ##args);} |
351 | 351 | ||
352 | /* Entering and exiting a function in one go (trace) */ | 352 | /* Entering and exiting a function in one go (trace) */ |
353 | #define DPASS(dbg, format, args...) \ | 353 | #define DPASS(dbg, format, args...) \ |
354 | {if(DEBUG_##dbg) \ | 354 | {if(DEBUG_##dbg) \ |
355 | printk(KERN_DEBUG "irnet: <>%s" format, __FUNCTION__ , ##args);} | 355 | printk(KERN_DEBUG "irnet: <>%s" format, __func__ , ##args);} |
356 | 356 | ||
357 | /* Exiting a function (trace) */ | 357 | /* Exiting a function (trace) */ |
358 | #define DEXIT(dbg, format, args...) \ | 358 | #define DEXIT(dbg, format, args...) \ |
359 | {if(DEBUG_##dbg) \ | 359 | {if(DEBUG_##dbg) \ |
360 | printk(KERN_DEBUG "irnet: <-%s()" format, __FUNCTION__ , ##args);} | 360 | printk(KERN_DEBUG "irnet: <-%s()" format, __func__ , ##args);} |
361 | 361 | ||
362 | /* Exit a function with debug */ | 362 | /* Exit a function with debug */ |
363 | #define DRETURN(ret, dbg, args...) \ | 363 | #define DRETURN(ret, dbg, args...) \ |
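The irnet.h block above rewrites a small family of GNU-style variadic trace macros (DERROR, DEBUG, DENTER, DPASS, DEXIT). A hedged, standalone sketch of the same macro shape (the name TRACE and the do/while wrapper are illustrative, not the kernel's), showing why __func__ has to stay a separate "%s" argument instead of being pasted into the format string:

#include <stdio.h>

/* Compile-time switch, mirroring the DEBUG_##dbg idea in irnet.h. */
#define DEBUG_TRACE 1

/*
 * "args..." and "##args" are the GNU variadic-macro extensions the
 * kernel uses; build this with gcc or clang. Because __func__ is an
 * identifier and not a string literal, it cannot be concatenated with
 * "irnet: " at compile time, so it is passed as the first "%s" value.
 */
#define TRACE(fmt, args...)						\
	do {								\
		if (DEBUG_TRACE)					\
			printf("irnet: %s(): " fmt, __func__ , ##args);	\
	} while (0)

static void connect_peer(int id)
{
	TRACE("connecting peer %d\n", id);	/* irnet: connect_peer(): connecting peer 42 */
}

int main(void)
{
	connect_peer(42);
	return 0;
}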
diff --git a/net/irda/irnetlink.c b/net/irda/irnetlink.c index cd9ff176ecde..9e1fb82e3220 100644 --- a/net/irda/irnetlink.c +++ b/net/irda/irnetlink.c | |||
@@ -40,7 +40,7 @@ static struct net_device * ifname_to_netdev(struct net *net, struct genl_info *i | |||
40 | 40 | ||
41 | ifname = nla_data(info->attrs[IRDA_NL_ATTR_IFNAME]); | 41 | ifname = nla_data(info->attrs[IRDA_NL_ATTR_IFNAME]); |
42 | 42 | ||
43 | IRDA_DEBUG(5, "%s(): Looking for %s\n", __FUNCTION__, ifname); | 43 | IRDA_DEBUG(5, "%s(): Looking for %s\n", __func__, ifname); |
44 | 44 | ||
45 | return dev_get_by_name(net, ifname); | 45 | return dev_get_by_name(net, ifname); |
46 | } | 46 | } |
@@ -56,7 +56,7 @@ static int irda_nl_set_mode(struct sk_buff *skb, struct genl_info *info) | |||
56 | 56 | ||
57 | mode = nla_get_u32(info->attrs[IRDA_NL_ATTR_MODE]); | 57 | mode = nla_get_u32(info->attrs[IRDA_NL_ATTR_MODE]); |
58 | 58 | ||
59 | IRDA_DEBUG(5, "%s(): Switching to mode: %d\n", __FUNCTION__, mode); | 59 | IRDA_DEBUG(5, "%s(): Switching to mode: %d\n", __func__, mode); |
60 | 60 | ||
61 | dev = ifname_to_netdev(&init_net, info); | 61 | dev = ifname_to_netdev(&init_net, info); |
62 | if (!dev) | 62 | if (!dev) |
diff --git a/net/irda/irqueue.c b/net/irda/irqueue.c index 40c28efaed95..ba01938becb5 100644 --- a/net/irda/irqueue.c +++ b/net/irda/irqueue.c | |||
@@ -232,7 +232,7 @@ static __u32 hash( const char* name) | |||
232 | static void enqueue_first(irda_queue_t **queue, irda_queue_t* element) | 232 | static void enqueue_first(irda_queue_t **queue, irda_queue_t* element) |
233 | { | 233 | { |
234 | 234 | ||
235 | IRDA_DEBUG( 4, "%s()\n", __FUNCTION__); | 235 | IRDA_DEBUG( 4, "%s()\n", __func__); |
236 | 236 | ||
237 | /* | 237 | /* |
238 | * Check if queue is empty. | 238 | * Check if queue is empty. |
@@ -451,7 +451,7 @@ void hashbin_insert(hashbin_t* hashbin, irda_queue_t* entry, long hashv, | |||
451 | unsigned long flags = 0; | 451 | unsigned long flags = 0; |
452 | int bin; | 452 | int bin; |
453 | 453 | ||
454 | IRDA_DEBUG( 4, "%s()\n", __FUNCTION__); | 454 | IRDA_DEBUG( 4, "%s()\n", __func__); |
455 | 455 | ||
456 | IRDA_ASSERT( hashbin != NULL, return;); | 456 | IRDA_ASSERT( hashbin != NULL, return;); |
457 | IRDA_ASSERT( hashbin->magic == HB_MAGIC, return;); | 457 | IRDA_ASSERT( hashbin->magic == HB_MAGIC, return;); |
@@ -564,7 +564,7 @@ void* hashbin_remove( hashbin_t* hashbin, long hashv, const char* name) | |||
564 | unsigned long flags = 0; | 564 | unsigned long flags = 0; |
565 | irda_queue_t* entry; | 565 | irda_queue_t* entry; |
566 | 566 | ||
567 | IRDA_DEBUG( 4, "%s()\n", __FUNCTION__); | 567 | IRDA_DEBUG( 4, "%s()\n", __func__); |
568 | 568 | ||
569 | IRDA_ASSERT( hashbin != NULL, return NULL;); | 569 | IRDA_ASSERT( hashbin != NULL, return NULL;); |
570 | IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;); | 570 | IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;); |
@@ -657,7 +657,7 @@ void* hashbin_remove_this( hashbin_t* hashbin, irda_queue_t* entry) | |||
657 | int bin; | 657 | int bin; |
658 | long hashv; | 658 | long hashv; |
659 | 659 | ||
660 | IRDA_DEBUG( 4, "%s()\n", __FUNCTION__); | 660 | IRDA_DEBUG( 4, "%s()\n", __func__); |
661 | 661 | ||
662 | IRDA_ASSERT( hashbin != NULL, return NULL;); | 662 | IRDA_ASSERT( hashbin != NULL, return NULL;); |
663 | IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;); | 663 | IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;); |
diff --git a/net/irda/irttp.c b/net/irda/irttp.c index 97db158c9274..74e439e80823 100644 --- a/net/irda/irttp.c +++ b/net/irda/irttp.c | |||
@@ -95,7 +95,7 @@ int __init irttp_init(void) | |||
95 | irttp->tsaps = hashbin_new(HB_LOCK); | 95 | irttp->tsaps = hashbin_new(HB_LOCK); |
96 | if (!irttp->tsaps) { | 96 | if (!irttp->tsaps) { |
97 | IRDA_ERROR("%s: can't allocate IrTTP hashbin!\n", | 97 | IRDA_ERROR("%s: can't allocate IrTTP hashbin!\n", |
98 | __FUNCTION__); | 98 | __func__); |
99 | kfree(irttp); | 99 | kfree(irttp); |
100 | return -ENOMEM; | 100 | return -ENOMEM; |
101 | } | 101 | } |
@@ -164,7 +164,7 @@ static void irttp_todo_expired(unsigned long data) | |||
164 | if (!self || self->magic != TTP_TSAP_MAGIC) | 164 | if (!self || self->magic != TTP_TSAP_MAGIC) |
165 | return; | 165 | return; |
166 | 166 | ||
167 | IRDA_DEBUG(4, "%s(instance=%p)\n", __FUNCTION__, self); | 167 | IRDA_DEBUG(4, "%s(instance=%p)\n", __func__, self); |
168 | 168 | ||
169 | /* Try to make some progress, especially on Tx side - Jean II */ | 169 | /* Try to make some progress, especially on Tx side - Jean II */ |
170 | irttp_run_rx_queue(self); | 170 | irttp_run_rx_queue(self); |
@@ -205,7 +205,7 @@ void irttp_flush_queues(struct tsap_cb *self) | |||
205 | { | 205 | { |
206 | struct sk_buff* skb; | 206 | struct sk_buff* skb; |
207 | 207 | ||
208 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__); | 208 | IRDA_DEBUG(4, "%s()\n", __func__); |
209 | 209 | ||
210 | IRDA_ASSERT(self != NULL, return;); | 210 | IRDA_ASSERT(self != NULL, return;); |
211 | IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;); | 211 | IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;); |
@@ -238,7 +238,7 @@ static struct sk_buff *irttp_reassemble_skb(struct tsap_cb *self) | |||
238 | IRDA_ASSERT(self != NULL, return NULL;); | 238 | IRDA_ASSERT(self != NULL, return NULL;); |
239 | IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return NULL;); | 239 | IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return NULL;); |
240 | 240 | ||
241 | IRDA_DEBUG(2, "%s(), self->rx_sdu_size=%d\n", __FUNCTION__, | 241 | IRDA_DEBUG(2, "%s(), self->rx_sdu_size=%d\n", __func__, |
242 | self->rx_sdu_size); | 242 | self->rx_sdu_size); |
243 | 243 | ||
244 | skb = dev_alloc_skb(TTP_HEADER + self->rx_sdu_size); | 244 | skb = dev_alloc_skb(TTP_HEADER + self->rx_sdu_size); |
@@ -264,7 +264,7 @@ static struct sk_buff *irttp_reassemble_skb(struct tsap_cb *self) | |||
264 | 264 | ||
265 | IRDA_DEBUG(2, | 265 | IRDA_DEBUG(2, |
266 | "%s(), frame len=%d, rx_sdu_size=%d, rx_max_sdu_size=%d\n", | 266 | "%s(), frame len=%d, rx_sdu_size=%d, rx_max_sdu_size=%d\n", |
267 | __FUNCTION__, n, self->rx_sdu_size, self->rx_max_sdu_size); | 267 | __func__, n, self->rx_sdu_size, self->rx_max_sdu_size); |
268 | /* Note : irttp_run_rx_queue() calculate self->rx_sdu_size | 268 | /* Note : irttp_run_rx_queue() calculate self->rx_sdu_size |
269 | * by summing the size of all fragments, so we should always | 269 | * by summing the size of all fragments, so we should always |
270 | * have n == self->rx_sdu_size, except in cases where we | 270 | * have n == self->rx_sdu_size, except in cases where we |
@@ -293,7 +293,7 @@ static inline void irttp_fragment_skb(struct tsap_cb *self, | |||
293 | struct sk_buff *frag; | 293 | struct sk_buff *frag; |
294 | __u8 *frame; | 294 | __u8 *frame; |
295 | 295 | ||
296 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__); | 296 | IRDA_DEBUG(2, "%s()\n", __func__); |
297 | 297 | ||
298 | IRDA_ASSERT(self != NULL, return;); | 298 | IRDA_ASSERT(self != NULL, return;); |
299 | IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;); | 299 | IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;); |
@@ -303,7 +303,7 @@ static inline void irttp_fragment_skb(struct tsap_cb *self, | |||
303 | * Split frame into a number of segments | 303 | * Split frame into a number of segments |
304 | */ | 304 | */ |
305 | while (skb->len > self->max_seg_size) { | 305 | while (skb->len > self->max_seg_size) { |
306 | IRDA_DEBUG(2, "%s(), fragmenting ...\n", __FUNCTION__); | 306 | IRDA_DEBUG(2, "%s(), fragmenting ...\n", __func__); |
307 | 307 | ||
308 | /* Make new segment */ | 308 | /* Make new segment */ |
309 | frag = alloc_skb(self->max_seg_size+self->max_header_size, | 309 | frag = alloc_skb(self->max_seg_size+self->max_header_size, |
@@ -328,7 +328,7 @@ static inline void irttp_fragment_skb(struct tsap_cb *self, | |||
328 | skb_queue_tail(&self->tx_queue, frag); | 328 | skb_queue_tail(&self->tx_queue, frag); |
329 | } | 329 | } |
330 | /* Queue what is left of the original skb */ | 330 | /* Queue what is left of the original skb */ |
331 | IRDA_DEBUG(2, "%s(), queuing last segment\n", __FUNCTION__); | 331 | IRDA_DEBUG(2, "%s(), queuing last segment\n", __func__); |
332 | 332 | ||
333 | frame = skb_push(skb, TTP_HEADER); | 333 | frame = skb_push(skb, TTP_HEADER); |
334 | frame[0] = 0x00; /* Clear more bit */ | 334 | frame[0] = 0x00; /* Clear more bit */ |
@@ -359,7 +359,7 @@ static int irttp_param_max_sdu_size(void *instance, irda_param_t *param, | |||
359 | else | 359 | else |
360 | self->tx_max_sdu_size = param->pv.i; | 360 | self->tx_max_sdu_size = param->pv.i; |
361 | 361 | ||
362 | IRDA_DEBUG(1, "%s(), MaxSduSize=%d\n", __FUNCTION__, param->pv.i); | 362 | IRDA_DEBUG(1, "%s(), MaxSduSize=%d\n", __func__, param->pv.i); |
363 | 363 | ||
364 | return 0; | 364 | return 0; |
365 | } | 365 | } |
@@ -400,13 +400,13 @@ struct tsap_cb *irttp_open_tsap(__u8 stsap_sel, int credit, notify_t *notify) | |||
400 | * JeanII */ | 400 | * JeanII */ |
401 | if((stsap_sel != LSAP_ANY) && | 401 | if((stsap_sel != LSAP_ANY) && |
402 | ((stsap_sel < 0x01) || (stsap_sel >= 0x70))) { | 402 | ((stsap_sel < 0x01) || (stsap_sel >= 0x70))) { |
403 | IRDA_DEBUG(0, "%s(), invalid tsap!\n", __FUNCTION__); | 403 | IRDA_DEBUG(0, "%s(), invalid tsap!\n", __func__); |
404 | return NULL; | 404 | return NULL; |
405 | } | 405 | } |
406 | 406 | ||
407 | self = kzalloc(sizeof(struct tsap_cb), GFP_ATOMIC); | 407 | self = kzalloc(sizeof(struct tsap_cb), GFP_ATOMIC); |
408 | if (self == NULL) { | 408 | if (self == NULL) { |
409 | IRDA_DEBUG(0, "%s(), unable to kmalloc!\n", __FUNCTION__); | 409 | IRDA_DEBUG(0, "%s(), unable to kmalloc!\n", __func__); |
410 | return NULL; | 410 | return NULL; |
411 | } | 411 | } |
412 | 412 | ||
@@ -438,7 +438,7 @@ struct tsap_cb *irttp_open_tsap(__u8 stsap_sel, int credit, notify_t *notify) | |||
438 | */ | 438 | */ |
439 | lsap = irlmp_open_lsap(stsap_sel, &ttp_notify, 0); | 439 | lsap = irlmp_open_lsap(stsap_sel, &ttp_notify, 0); |
440 | if (lsap == NULL) { | 440 | if (lsap == NULL) { |
441 | IRDA_WARNING("%s: unable to allocate LSAP!!\n", __FUNCTION__); | 441 | IRDA_WARNING("%s: unable to allocate LSAP!!\n", __func__); |
442 | return NULL; | 442 | return NULL; |
443 | } | 443 | } |
444 | 444 | ||
@@ -448,7 +448,7 @@ struct tsap_cb *irttp_open_tsap(__u8 stsap_sel, int credit, notify_t *notify) | |||
448 | * the stsap_sel we have might not be valid anymore | 448 | * the stsap_sel we have might not be valid anymore |
449 | */ | 449 | */ |
450 | self->stsap_sel = lsap->slsap_sel; | 450 | self->stsap_sel = lsap->slsap_sel; |
451 | IRDA_DEBUG(4, "%s(), stsap_sel=%02x\n", __FUNCTION__, self->stsap_sel); | 451 | IRDA_DEBUG(4, "%s(), stsap_sel=%02x\n", __func__, self->stsap_sel); |
452 | 452 | ||
453 | self->notify = *notify; | 453 | self->notify = *notify; |
454 | self->lsap = lsap; | 454 | self->lsap = lsap; |
@@ -506,7 +506,7 @@ int irttp_close_tsap(struct tsap_cb *self) | |||
506 | { | 506 | { |
507 | struct tsap_cb *tsap; | 507 | struct tsap_cb *tsap; |
508 | 508 | ||
509 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__); | 509 | IRDA_DEBUG(4, "%s()\n", __func__); |
510 | 510 | ||
511 | IRDA_ASSERT(self != NULL, return -1;); | 511 | IRDA_ASSERT(self != NULL, return -1;); |
512 | IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;); | 512 | IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;); |
@@ -516,7 +516,7 @@ int irttp_close_tsap(struct tsap_cb *self) | |||
516 | /* Check if disconnect is not pending */ | 516 | /* Check if disconnect is not pending */ |
517 | if (!test_bit(0, &self->disconnect_pend)) { | 517 | if (!test_bit(0, &self->disconnect_pend)) { |
518 | IRDA_WARNING("%s: TSAP still connected!\n", | 518 | IRDA_WARNING("%s: TSAP still connected!\n", |
519 | __FUNCTION__); | 519 | __func__); |
520 | irttp_disconnect_request(self, NULL, P_NORMAL); | 520 | irttp_disconnect_request(self, NULL, P_NORMAL); |
521 | } | 521 | } |
522 | self->close_pend = TRUE; | 522 | self->close_pend = TRUE; |
@@ -553,18 +553,18 @@ int irttp_udata_request(struct tsap_cb *self, struct sk_buff *skb) | |||
553 | IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;); | 553 | IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;); |
554 | IRDA_ASSERT(skb != NULL, return -1;); | 554 | IRDA_ASSERT(skb != NULL, return -1;); |
555 | 555 | ||
556 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__); | 556 | IRDA_DEBUG(4, "%s()\n", __func__); |
557 | 557 | ||
558 | /* Check that nothing bad happens */ | 558 | /* Check that nothing bad happens */ |
559 | if ((skb->len == 0) || (!self->connected)) { | 559 | if ((skb->len == 0) || (!self->connected)) { |
560 | IRDA_DEBUG(1, "%s(), No data, or not connected\n", | 560 | IRDA_DEBUG(1, "%s(), No data, or not connected\n", |
561 | __FUNCTION__); | 561 | __func__); |
562 | goto err; | 562 | goto err; |
563 | } | 563 | } |
564 | 564 | ||
565 | if (skb->len > self->max_seg_size) { | 565 | if (skb->len > self->max_seg_size) { |
566 | IRDA_DEBUG(1, "%s(), UData is too large for IrLAP!\n", | 566 | IRDA_DEBUG(1, "%s(), UData is too large for IrLAP!\n", |
567 | __FUNCTION__); | 567 | __func__); |
568 | goto err; | 568 | goto err; |
569 | } | 569 | } |
570 | 570 | ||
@@ -595,12 +595,12 @@ int irttp_data_request(struct tsap_cb *self, struct sk_buff *skb) | |||
595 | IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;); | 595 | IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;); |
596 | IRDA_ASSERT(skb != NULL, return -1;); | 596 | IRDA_ASSERT(skb != NULL, return -1;); |
597 | 597 | ||
598 | IRDA_DEBUG(2, "%s() : queue len = %d\n", __FUNCTION__, | 598 | IRDA_DEBUG(2, "%s() : queue len = %d\n", __func__, |
599 | skb_queue_len(&self->tx_queue)); | 599 | skb_queue_len(&self->tx_queue)); |
600 | 600 | ||
601 | /* Check that nothing bad happens */ | 601 | /* Check that nothing bad happens */ |
602 | if ((skb->len == 0) || (!self->connected)) { | 602 | if ((skb->len == 0) || (!self->connected)) { |
603 | IRDA_WARNING("%s: No data, or not connected\n", __FUNCTION__); | 603 | IRDA_WARNING("%s: No data, or not connected\n", __func__); |
604 | ret = -ENOTCONN; | 604 | ret = -ENOTCONN; |
605 | goto err; | 605 | goto err; |
606 | } | 606 | } |
@@ -611,7 +611,7 @@ int irttp_data_request(struct tsap_cb *self, struct sk_buff *skb) | |||
611 | */ | 611 | */ |
612 | if ((self->tx_max_sdu_size == 0) && (skb->len > self->max_seg_size)) { | 612 | if ((self->tx_max_sdu_size == 0) && (skb->len > self->max_seg_size)) { |
613 | IRDA_ERROR("%s: SAR disabled, and data is too large for IrLAP!\n", | 613 | IRDA_ERROR("%s: SAR disabled, and data is too large for IrLAP!\n", |
614 | __FUNCTION__); | 614 | __func__); |
615 | ret = -EMSGSIZE; | 615 | ret = -EMSGSIZE; |
616 | goto err; | 616 | goto err; |
617 | } | 617 | } |
@@ -625,7 +625,7 @@ int irttp_data_request(struct tsap_cb *self, struct sk_buff *skb) | |||
625 | (skb->len > self->tx_max_sdu_size)) | 625 | (skb->len > self->tx_max_sdu_size)) |
626 | { | 626 | { |
627 | IRDA_ERROR("%s: SAR enabled, but data is larger than TxMaxSduSize!\n", | 627 | IRDA_ERROR("%s: SAR enabled, but data is larger than TxMaxSduSize!\n", |
628 | __FUNCTION__); | 628 | __func__); |
629 | ret = -EMSGSIZE; | 629 | ret = -EMSGSIZE; |
630 | goto err; | 630 | goto err; |
631 | } | 631 | } |
@@ -704,7 +704,7 @@ static void irttp_run_tx_queue(struct tsap_cb *self) | |||
704 | int n; | 704 | int n; |
705 | 705 | ||
706 | IRDA_DEBUG(2, "%s() : send_credit = %d, queue_len = %d\n", | 706 | IRDA_DEBUG(2, "%s() : send_credit = %d, queue_len = %d\n", |
707 | __FUNCTION__, | 707 | __func__, |
708 | self->send_credit, skb_queue_len(&self->tx_queue)); | 708 | self->send_credit, skb_queue_len(&self->tx_queue)); |
709 | 709 | ||
710 | /* Get exclusive access to the tx queue, otherwise don't touch it */ | 710 | /* Get exclusive access to the tx queue, otherwise don't touch it */ |
@@ -813,7 +813,7 @@ static inline void irttp_give_credit(struct tsap_cb *self) | |||
813 | IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;); | 813 | IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;); |
814 | 814 | ||
815 | IRDA_DEBUG(4, "%s() send=%d,avail=%d,remote=%d\n", | 815 | IRDA_DEBUG(4, "%s() send=%d,avail=%d,remote=%d\n", |
816 | __FUNCTION__, | 816 | __func__, |
817 | self->send_credit, self->avail_credit, self->remote_credit); | 817 | self->send_credit, self->avail_credit, self->remote_credit); |
818 | 818 | ||
819 | /* Give credit to peer */ | 819 | /* Give credit to peer */ |
@@ -862,7 +862,7 @@ static int irttp_udata_indication(void *instance, void *sap, | |||
862 | struct tsap_cb *self; | 862 | struct tsap_cb *self; |
863 | int err; | 863 | int err; |
864 | 864 | ||
865 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__); | 865 | IRDA_DEBUG(4, "%s()\n", __func__); |
866 | 866 | ||
867 | self = (struct tsap_cb *) instance; | 867 | self = (struct tsap_cb *) instance; |
868 | 868 | ||
@@ -979,7 +979,7 @@ static void irttp_status_indication(void *instance, | |||
979 | { | 979 | { |
980 | struct tsap_cb *self; | 980 | struct tsap_cb *self; |
981 | 981 | ||
982 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__); | 982 | IRDA_DEBUG(4, "%s()\n", __func__); |
983 | 983 | ||
984 | self = (struct tsap_cb *) instance; | 984 | self = (struct tsap_cb *) instance; |
985 | 985 | ||
@@ -997,7 +997,7 @@ static void irttp_status_indication(void *instance, | |||
997 | self->notify.status_indication(self->notify.instance, | 997 | self->notify.status_indication(self->notify.instance, |
998 | link, lock); | 998 | link, lock); |
999 | else | 999 | else |
1000 | IRDA_DEBUG(2, "%s(), no handler\n", __FUNCTION__); | 1000 | IRDA_DEBUG(2, "%s(), no handler\n", __func__); |
1001 | } | 1001 | } |
1002 | 1002 | ||
1003 | /* | 1003 | /* |
@@ -1015,7 +1015,7 @@ static void irttp_flow_indication(void *instance, void *sap, LOCAL_FLOW flow) | |||
1015 | IRDA_ASSERT(self != NULL, return;); | 1015 | IRDA_ASSERT(self != NULL, return;); |
1016 | IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;); | 1016 | IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;); |
1017 | 1017 | ||
1018 | IRDA_DEBUG(4, "%s(instance=%p)\n", __FUNCTION__, self); | 1018 | IRDA_DEBUG(4, "%s(instance=%p)\n", __func__, self); |
1019 | 1019 | ||
1020 | /* We are "polled" directly from LAP, and the LAP want to fill | 1020 | /* We are "polled" directly from LAP, and the LAP want to fill |
1021 | * its Tx window. We want to do our best to send it data, so that | 1021 | * its Tx window. We want to do our best to send it data, so that |
@@ -1053,18 +1053,18 @@ static void irttp_flow_indication(void *instance, void *sap, LOCAL_FLOW flow) | |||
1053 | */ | 1053 | */ |
1054 | void irttp_flow_request(struct tsap_cb *self, LOCAL_FLOW flow) | 1054 | void irttp_flow_request(struct tsap_cb *self, LOCAL_FLOW flow) |
1055 | { | 1055 | { |
1056 | IRDA_DEBUG(1, "%s()\n", __FUNCTION__); | 1056 | IRDA_DEBUG(1, "%s()\n", __func__); |
1057 | 1057 | ||
1058 | IRDA_ASSERT(self != NULL, return;); | 1058 | IRDA_ASSERT(self != NULL, return;); |
1059 | IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;); | 1059 | IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;); |
1060 | 1060 | ||
1061 | switch (flow) { | 1061 | switch (flow) { |
1062 | case FLOW_STOP: | 1062 | case FLOW_STOP: |
1063 | IRDA_DEBUG(1, "%s(), flow stop\n", __FUNCTION__); | 1063 | IRDA_DEBUG(1, "%s(), flow stop\n", __func__); |
1064 | self->rx_sdu_busy = TRUE; | 1064 | self->rx_sdu_busy = TRUE; |
1065 | break; | 1065 | break; |
1066 | case FLOW_START: | 1066 | case FLOW_START: |
1067 | IRDA_DEBUG(1, "%s(), flow start\n", __FUNCTION__); | 1067 | IRDA_DEBUG(1, "%s(), flow start\n", __func__); |
1068 | self->rx_sdu_busy = FALSE; | 1068 | self->rx_sdu_busy = FALSE; |
1069 | 1069 | ||
1070 | /* Client say he can accept more data, try to free our | 1070 | /* Client say he can accept more data, try to free our |
@@ -1073,7 +1073,7 @@ void irttp_flow_request(struct tsap_cb *self, LOCAL_FLOW flow) | |||
1073 | 1073 | ||
1074 | break; | 1074 | break; |
1075 | default: | 1075 | default: |
1076 | IRDA_DEBUG(1, "%s(), Unknown flow command!\n", __FUNCTION__); | 1076 | IRDA_DEBUG(1, "%s(), Unknown flow command!\n", __func__); |
1077 | } | 1077 | } |
1078 | } | 1078 | } |
1079 | EXPORT_SYMBOL(irttp_flow_request); | 1079 | EXPORT_SYMBOL(irttp_flow_request); |
@@ -1093,7 +1093,7 @@ int irttp_connect_request(struct tsap_cb *self, __u8 dtsap_sel, | |||
1093 | __u8 *frame; | 1093 | __u8 *frame; |
1094 | __u8 n; | 1094 | __u8 n; |
1095 | 1095 | ||
1096 | IRDA_DEBUG(4, "%s(), max_sdu_size=%d\n", __FUNCTION__, max_sdu_size); | 1096 | IRDA_DEBUG(4, "%s(), max_sdu_size=%d\n", __func__, max_sdu_size); |
1097 | 1097 | ||
1098 | IRDA_ASSERT(self != NULL, return -EBADR;); | 1098 | IRDA_ASSERT(self != NULL, return -EBADR;); |
1099 | IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -EBADR;); | 1099 | IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -EBADR;); |
@@ -1191,7 +1191,7 @@ static void irttp_connect_confirm(void *instance, void *sap, | |||
1191 | __u8 plen; | 1191 | __u8 plen; |
1192 | __u8 n; | 1192 | __u8 n; |
1193 | 1193 | ||
1194 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__); | 1194 | IRDA_DEBUG(4, "%s()\n", __func__); |
1195 | 1195 | ||
1196 | self = (struct tsap_cb *) instance; | 1196 | self = (struct tsap_cb *) instance; |
1197 | 1197 | ||
@@ -1215,7 +1215,7 @@ static void irttp_connect_confirm(void *instance, void *sap, | |||
1215 | 1215 | ||
1216 | n = skb->data[0] & 0x7f; | 1216 | n = skb->data[0] & 0x7f; |
1217 | 1217 | ||
1218 | IRDA_DEBUG(4, "%s(), Initial send_credit=%d\n", __FUNCTION__, n); | 1218 | IRDA_DEBUG(4, "%s(), Initial send_credit=%d\n", __func__, n); |
1219 | 1219 | ||
1220 | self->send_credit = n; | 1220 | self->send_credit = n; |
1221 | self->tx_max_sdu_size = 0; | 1221 | self->tx_max_sdu_size = 0; |
@@ -1236,7 +1236,7 @@ static void irttp_connect_confirm(void *instance, void *sap, | |||
1236 | /* Any errors in the parameter list? */ | 1236 | /* Any errors in the parameter list? */ |
1237 | if (ret < 0) { | 1237 | if (ret < 0) { |
1238 | IRDA_WARNING("%s: error extracting parameters\n", | 1238 | IRDA_WARNING("%s: error extracting parameters\n", |
1239 | __FUNCTION__); | 1239 | __func__); |
1240 | dev_kfree_skb(skb); | 1240 | dev_kfree_skb(skb); |
1241 | 1241 | ||
1242 | /* Do not accept this connection attempt */ | 1242 | /* Do not accept this connection attempt */ |
@@ -1246,10 +1246,10 @@ static void irttp_connect_confirm(void *instance, void *sap, | |||
1246 | skb_pull(skb, IRDA_MIN(skb->len, plen+1)); | 1246 | skb_pull(skb, IRDA_MIN(skb->len, plen+1)); |
1247 | } | 1247 | } |
1248 | 1248 | ||
1249 | IRDA_DEBUG(4, "%s() send=%d,avail=%d,remote=%d\n", __FUNCTION__, | 1249 | IRDA_DEBUG(4, "%s() send=%d,avail=%d,remote=%d\n", __func__, |
1250 | self->send_credit, self->avail_credit, self->remote_credit); | 1250 | self->send_credit, self->avail_credit, self->remote_credit); |
1251 | 1251 | ||
1252 | IRDA_DEBUG(2, "%s(), MaxSduSize=%d\n", __FUNCTION__, | 1252 | IRDA_DEBUG(2, "%s(), MaxSduSize=%d\n", __func__, |
1253 | self->tx_max_sdu_size); | 1253 | self->tx_max_sdu_size); |
1254 | 1254 | ||
1255 | if (self->notify.connect_confirm) { | 1255 | if (self->notify.connect_confirm) { |
@@ -1288,7 +1288,7 @@ void irttp_connect_indication(void *instance, void *sap, struct qos_info *qos, | |||
1288 | self->max_seg_size = max_seg_size - TTP_HEADER; | 1288 | self->max_seg_size = max_seg_size - TTP_HEADER; |
1289 | self->max_header_size = max_header_size+TTP_HEADER; | 1289 | self->max_header_size = max_header_size+TTP_HEADER; |
1290 | 1290 | ||
1291 | IRDA_DEBUG(4, "%s(), TSAP sel=%02x\n", __FUNCTION__, self->stsap_sel); | 1291 | IRDA_DEBUG(4, "%s(), TSAP sel=%02x\n", __func__, self->stsap_sel); |
1292 | 1292 | ||
1293 | /* Need to update dtsap_sel if its equal to LSAP_ANY */ | 1293 | /* Need to update dtsap_sel if its equal to LSAP_ANY */ |
1294 | self->dtsap_sel = lsap->dlsap_sel; | 1294 | self->dtsap_sel = lsap->dlsap_sel; |
@@ -1313,7 +1313,7 @@ void irttp_connect_indication(void *instance, void *sap, struct qos_info *qos, | |||
1313 | /* Any errors in the parameter list? */ | 1313 | /* Any errors in the parameter list? */ |
1314 | if (ret < 0) { | 1314 | if (ret < 0) { |
1315 | IRDA_WARNING("%s: error extracting parameters\n", | 1315 | IRDA_WARNING("%s: error extracting parameters\n", |
1316 | __FUNCTION__); | 1316 | __func__); |
1317 | dev_kfree_skb(skb); | 1317 | dev_kfree_skb(skb); |
1318 | 1318 | ||
1319 | /* Do not accept this connection attempt */ | 1319 | /* Do not accept this connection attempt */ |
@@ -1350,7 +1350,7 @@ int irttp_connect_response(struct tsap_cb *self, __u32 max_sdu_size, | |||
1350 | IRDA_ASSERT(self != NULL, return -1;); | 1350 | IRDA_ASSERT(self != NULL, return -1;); |
1351 | IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;); | 1351 | IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;); |
1352 | 1352 | ||
1353 | IRDA_DEBUG(4, "%s(), Source TSAP selector=%02x\n", __FUNCTION__, | 1353 | IRDA_DEBUG(4, "%s(), Source TSAP selector=%02x\n", __func__, |
1354 | self->stsap_sel); | 1354 | self->stsap_sel); |
1355 | 1355 | ||
1356 | /* Any userdata supplied? */ | 1356 | /* Any userdata supplied? */ |
@@ -1432,14 +1432,14 @@ struct tsap_cb *irttp_dup(struct tsap_cb *orig, void *instance) | |||
1432 | struct tsap_cb *new; | 1432 | struct tsap_cb *new; |
1433 | unsigned long flags; | 1433 | unsigned long flags; |
1434 | 1434 | ||
1435 | IRDA_DEBUG(1, "%s()\n", __FUNCTION__); | 1435 | IRDA_DEBUG(1, "%s()\n", __func__); |
1436 | 1436 | ||
1437 | /* Protect our access to the old tsap instance */ | 1437 | /* Protect our access to the old tsap instance */ |
1438 | spin_lock_irqsave(&irttp->tsaps->hb_spinlock, flags); | 1438 | spin_lock_irqsave(&irttp->tsaps->hb_spinlock, flags); |
1439 | 1439 | ||
1440 | /* Find the old instance */ | 1440 | /* Find the old instance */ |
1441 | if (!hashbin_find(irttp->tsaps, (long) orig, NULL)) { | 1441 | if (!hashbin_find(irttp->tsaps, (long) orig, NULL)) { |
1442 | IRDA_DEBUG(0, "%s(), unable to find TSAP\n", __FUNCTION__); | 1442 | IRDA_DEBUG(0, "%s(), unable to find TSAP\n", __func__); |
1443 | spin_unlock_irqrestore(&irttp->tsaps->hb_spinlock, flags); | 1443 | spin_unlock_irqrestore(&irttp->tsaps->hb_spinlock, flags); |
1444 | return NULL; | 1444 | return NULL; |
1445 | } | 1445 | } |
@@ -1447,7 +1447,7 @@ struct tsap_cb *irttp_dup(struct tsap_cb *orig, void *instance) | |||
1447 | /* Allocate a new instance */ | 1447 | /* Allocate a new instance */ |
1448 | new = kmalloc(sizeof(struct tsap_cb), GFP_ATOMIC); | 1448 | new = kmalloc(sizeof(struct tsap_cb), GFP_ATOMIC); |
1449 | if (!new) { | 1449 | if (!new) { |
1450 | IRDA_DEBUG(0, "%s(), unable to kmalloc\n", __FUNCTION__); | 1450 | IRDA_DEBUG(0, "%s(), unable to kmalloc\n", __func__); |
1451 | spin_unlock_irqrestore(&irttp->tsaps->hb_spinlock, flags); | 1451 | spin_unlock_irqrestore(&irttp->tsaps->hb_spinlock, flags); |
1452 | return NULL; | 1452 | return NULL; |
1453 | } | 1453 | } |
@@ -1460,7 +1460,7 @@ struct tsap_cb *irttp_dup(struct tsap_cb *orig, void *instance) | |||
1460 | /* Try to dup the LSAP (may fail if we were too slow) */ | 1460 | /* Try to dup the LSAP (may fail if we were too slow) */ |
1461 | new->lsap = irlmp_dup(orig->lsap, new); | 1461 | new->lsap = irlmp_dup(orig->lsap, new); |
1462 | if (!new->lsap) { | 1462 | if (!new->lsap) { |
1463 | IRDA_DEBUG(0, "%s(), dup failed!\n", __FUNCTION__); | 1463 | IRDA_DEBUG(0, "%s(), dup failed!\n", __func__); |
1464 | kfree(new); | 1464 | kfree(new); |
1465 | return NULL; | 1465 | return NULL; |
1466 | } | 1466 | } |
@@ -1495,7 +1495,7 @@ int irttp_disconnect_request(struct tsap_cb *self, struct sk_buff *userdata, | |||
1495 | 1495 | ||
1496 | /* Already disconnected? */ | 1496 | /* Already disconnected? */ |
1497 | if (!self->connected) { | 1497 | if (!self->connected) { |
1498 | IRDA_DEBUG(4, "%s(), already disconnected!\n", __FUNCTION__); | 1498 | IRDA_DEBUG(4, "%s(), already disconnected!\n", __func__); |
1499 | if (userdata) | 1499 | if (userdata) |
1500 | dev_kfree_skb(userdata); | 1500 | dev_kfree_skb(userdata); |
1501 | return -1; | 1501 | return -1; |
@@ -1508,7 +1508,7 @@ int irttp_disconnect_request(struct tsap_cb *self, struct sk_buff *userdata, | |||
1508 | * Jean II */ | 1508 | * Jean II */ |
1509 | if(test_and_set_bit(0, &self->disconnect_pend)) { | 1509 | if(test_and_set_bit(0, &self->disconnect_pend)) { |
1510 | IRDA_DEBUG(0, "%s(), disconnect already pending\n", | 1510 | IRDA_DEBUG(0, "%s(), disconnect already pending\n", |
1511 | __FUNCTION__); | 1511 | __func__); |
1512 | if (userdata) | 1512 | if (userdata) |
1513 | dev_kfree_skb(userdata); | 1513 | dev_kfree_skb(userdata); |
1514 | 1514 | ||
@@ -1527,7 +1527,7 @@ int irttp_disconnect_request(struct tsap_cb *self, struct sk_buff *userdata, | |||
1527 | * disconnecting right now since the data will | 1527 | * disconnecting right now since the data will |
1528 | * not have any usable connection to be sent on | 1528 | * not have any usable connection to be sent on |
1529 | */ | 1529 | */ |
1530 | IRDA_DEBUG(1, "%s(): High priority!!()\n", __FUNCTION__); | 1530 | IRDA_DEBUG(1, "%s(): High priority!!()\n", __func__); |
1531 | irttp_flush_queues(self); | 1531 | irttp_flush_queues(self); |
1532 | } else if (priority == P_NORMAL) { | 1532 | } else if (priority == P_NORMAL) { |
1533 | /* | 1533 | /* |
@@ -1548,7 +1548,7 @@ int irttp_disconnect_request(struct tsap_cb *self, struct sk_buff *userdata, | |||
1548 | * be sent at the LMP level (so even if the peer has its Tx queue | 1548 | * be sent at the LMP level (so even if the peer has its Tx queue |
1549 | * full of data). - Jean II */ | 1549 | * full of data). - Jean II */ |
1550 | 1550 | ||
1551 | IRDA_DEBUG(1, "%s(), Disconnecting ...\n", __FUNCTION__); | 1551 | IRDA_DEBUG(1, "%s(), Disconnecting ...\n", __func__); |
1552 | self->connected = FALSE; | 1552 | self->connected = FALSE; |
1553 | 1553 | ||
1554 | if (!userdata) { | 1554 | if (!userdata) { |
@@ -1584,7 +1584,7 @@ void irttp_disconnect_indication(void *instance, void *sap, LM_REASON reason, | |||
1584 | { | 1584 | { |
1585 | struct tsap_cb *self; | 1585 | struct tsap_cb *self; |
1586 | 1586 | ||
1587 | IRDA_DEBUG(4, "%s()\n", __FUNCTION__); | 1587 | IRDA_DEBUG(4, "%s()\n", __func__); |
1588 | 1588 | ||
1589 | self = (struct tsap_cb *) instance; | 1589 | self = (struct tsap_cb *) instance; |
1590 | 1590 | ||
@@ -1644,7 +1644,7 @@ static void irttp_do_data_indication(struct tsap_cb *self, struct sk_buff *skb) | |||
1644 | * give an error back | 1644 | * give an error back |
1645 | */ | 1645 | */ |
1646 | if (err) { | 1646 | if (err) { |
1647 | IRDA_DEBUG(0, "%s() requeueing skb!\n", __FUNCTION__); | 1647 | IRDA_DEBUG(0, "%s() requeueing skb!\n", __func__); |
1648 | 1648 | ||
1649 | /* Make sure we take a break */ | 1649 | /* Make sure we take a break */ |
1650 | self->rx_sdu_busy = TRUE; | 1650 | self->rx_sdu_busy = TRUE; |
@@ -1669,7 +1669,7 @@ void irttp_run_rx_queue(struct tsap_cb *self) | |||
1669 | struct sk_buff *skb; | 1669 | struct sk_buff *skb; |
1670 | int more = 0; | 1670 | int more = 0; |
1671 | 1671 | ||
1672 | IRDA_DEBUG(2, "%s() send=%d,avail=%d,remote=%d\n", __FUNCTION__, | 1672 | IRDA_DEBUG(2, "%s() send=%d,avail=%d,remote=%d\n", __func__, |
1673 | self->send_credit, self->avail_credit, self->remote_credit); | 1673 | self->send_credit, self->avail_credit, self->remote_credit); |
1674 | 1674 | ||
1675 | /* Get exclusive access to the rx queue, otherwise don't touch it */ | 1675 | /* Get exclusive access to the rx queue, otherwise don't touch it */ |
@@ -1710,7 +1710,7 @@ void irttp_run_rx_queue(struct tsap_cb *self) | |||
1710 | */ | 1710 | */ |
1711 | if (self->rx_sdu_size <= self->rx_max_sdu_size) { | 1711 | if (self->rx_sdu_size <= self->rx_max_sdu_size) { |
1712 | IRDA_DEBUG(4, "%s(), queueing frag\n", | 1712 | IRDA_DEBUG(4, "%s(), queueing frag\n", |
1713 | __FUNCTION__); | 1713 | __func__); |
1714 | skb_queue_tail(&self->rx_fragments, skb); | 1714 | skb_queue_tail(&self->rx_fragments, skb); |
1715 | } else { | 1715 | } else { |
1716 | /* Free the part of the SDU that is too big */ | 1716 | /* Free the part of the SDU that is too big */ |
@@ -1740,7 +1740,7 @@ void irttp_run_rx_queue(struct tsap_cb *self) | |||
1740 | /* Now we can deliver the reassembled skb */ | 1740 | /* Now we can deliver the reassembled skb */ |
1741 | irttp_do_data_indication(self, skb); | 1741 | irttp_do_data_indication(self, skb); |
1742 | } else { | 1742 | } else { |
1743 | IRDA_DEBUG(1, "%s(), Truncated frame\n", __FUNCTION__); | 1743 | IRDA_DEBUG(1, "%s(), Truncated frame\n", __func__); |
1744 | 1744 | ||
1745 | /* Free the part of the SDU that is too big */ | 1745 | /* Free the part of the SDU that is too big */ |
1746 | dev_kfree_skb(skb); | 1746 | dev_kfree_skb(skb); |
diff --git a/net/irda/parameters.c b/net/irda/parameters.c index 722bbe044d9c..fc1a20565e2d 100644 --- a/net/irda/parameters.c +++ b/net/irda/parameters.c | |||
@@ -148,23 +148,23 @@ static int irda_insert_integer(void *self, __u8 *buf, int len, __u8 pi, | |||
148 | */ | 148 | */ |
149 | if (p.pl == 0) { | 149 | if (p.pl == 0) { |
150 | if (p.pv.i < 0xff) { | 150 | if (p.pv.i < 0xff) { |
151 | IRDA_DEBUG(2, "%s(), using 1 byte\n", __FUNCTION__); | 151 | IRDA_DEBUG(2, "%s(), using 1 byte\n", __func__); |
152 | p.pl = 1; | 152 | p.pl = 1; |
153 | } else if (p.pv.i < 0xffff) { | 153 | } else if (p.pv.i < 0xffff) { |
154 | IRDA_DEBUG(2, "%s(), using 2 bytes\n", __FUNCTION__); | 154 | IRDA_DEBUG(2, "%s(), using 2 bytes\n", __func__); |
155 | p.pl = 2; | 155 | p.pl = 2; |
156 | } else { | 156 | } else { |
157 | IRDA_DEBUG(2, "%s(), using 4 bytes\n", __FUNCTION__); | 157 | IRDA_DEBUG(2, "%s(), using 4 bytes\n", __func__); |
158 | p.pl = 4; /* Default length */ | 158 | p.pl = 4; /* Default length */ |
159 | } | 159 | } |
160 | } | 160 | } |
161 | /* Check if buffer is long enough for insertion */ | 161 | /* Check if buffer is long enough for insertion */ |
162 | if (len < (2+p.pl)) { | 162 | if (len < (2+p.pl)) { |
163 | IRDA_WARNING("%s: buffer too short for insertion!\n", | 163 | IRDA_WARNING("%s: buffer too short for insertion!\n", |
164 | __FUNCTION__); | 164 | __func__); |
165 | return -1; | 165 | return -1; |
166 | } | 166 | } |
167 | IRDA_DEBUG(2, "%s(), pi=%#x, pl=%d, pi=%d\n", __FUNCTION__, | 167 | IRDA_DEBUG(2, "%s(), pi=%#x, pl=%d, pi=%d\n", __func__, |
168 | p.pi, p.pl, p.pv.i); | 168 | p.pi, p.pl, p.pv.i); |
169 | switch (p.pl) { | 169 | switch (p.pl) { |
170 | case 1: | 170 | case 1: |
@@ -187,7 +187,7 @@ static int irda_insert_integer(void *self, __u8 *buf, int len, __u8 pi, | |||
187 | break; | 187 | break; |
188 | default: | 188 | default: |
189 | IRDA_WARNING("%s: length %d not supported\n", | 189 | IRDA_WARNING("%s: length %d not supported\n", |
190 | __FUNCTION__, p.pl); | 190 | __func__, p.pl); |
191 | /* Skip parameter */ | 191 | /* Skip parameter */ |
192 | return -1; | 192 | return -1; |
193 | } | 193 | } |
@@ -218,7 +218,7 @@ static int irda_extract_integer(void *self, __u8 *buf, int len, __u8 pi, | |||
218 | if (len < (2+p.pl)) { | 218 | if (len < (2+p.pl)) { |
219 | IRDA_WARNING("%s: buffer too short for parsing! " | 219 | IRDA_WARNING("%s: buffer too short for parsing! " |
220 | "Need %d bytes, but len is only %d\n", | 220 | "Need %d bytes, but len is only %d\n", |
221 | __FUNCTION__, p.pl, len); | 221 | __func__, p.pl, len); |
222 | return -1; | 222 | return -1; |
223 | } | 223 | } |
224 | 224 | ||
@@ -230,7 +230,7 @@ static int irda_extract_integer(void *self, __u8 *buf, int len, __u8 pi, | |||
230 | if (((type & PV_MASK) != PV_INTEGER) && ((type & PV_MASK) != p.pl)) { | 230 | if (((type & PV_MASK) != PV_INTEGER) && ((type & PV_MASK) != p.pl)) { |
231 | IRDA_ERROR("%s: invalid parameter length! " | 231 | IRDA_ERROR("%s: invalid parameter length! " |
232 | "Expected %d bytes, but value had %d bytes!\n", | 232 | "Expected %d bytes, but value had %d bytes!\n", |
233 | __FUNCTION__, type & PV_MASK, p.pl); | 233 | __func__, type & PV_MASK, p.pl); |
234 | 234 | ||
235 | /* Most parameters are bit/byte fields or little endian, | 235 | /* Most parameters are bit/byte fields or little endian, |
236 | * so it's ok to only extract a subset of it (the subset | 236 | * so it's ok to only extract a subset of it (the subset |
@@ -268,13 +268,13 @@ static int irda_extract_integer(void *self, __u8 *buf, int len, __u8 pi, | |||
268 | break; | 268 | break; |
269 | default: | 269 | default: |
270 | IRDA_WARNING("%s: length %d not supported\n", | 270 | IRDA_WARNING("%s: length %d not supported\n", |
271 | __FUNCTION__, p.pl); | 271 | __func__, p.pl); |
272 | 272 | ||
273 | /* Skip parameter */ | 273 | /* Skip parameter */ |
274 | return p.pl+2; | 274 | return p.pl+2; |
275 | } | 275 | } |
276 | 276 | ||
277 | IRDA_DEBUG(2, "%s(), pi=%#x, pl=%d, pi=%d\n", __FUNCTION__, | 277 | IRDA_DEBUG(2, "%s(), pi=%#x, pl=%d, pi=%d\n", __func__, |
278 | p.pi, p.pl, p.pv.i); | 278 | p.pi, p.pl, p.pv.i); |
279 | /* Call handler for this parameter */ | 279 | /* Call handler for this parameter */ |
280 | err = (*func)(self, &p, PV_PUT); | 280 | err = (*func)(self, &p, PV_PUT); |
@@ -294,19 +294,19 @@ static int irda_extract_string(void *self, __u8 *buf, int len, __u8 pi, | |||
294 | irda_param_t p; | 294 | irda_param_t p; |
295 | int err; | 295 | int err; |
296 | 296 | ||
297 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__); | 297 | IRDA_DEBUG(2, "%s()\n", __func__); |
298 | 298 | ||
299 | p.pi = pi; /* In case handler needs to know */ | 299 | p.pi = pi; /* In case handler needs to know */ |
300 | p.pl = buf[1]; /* Extract length of value */ | 300 | p.pl = buf[1]; /* Extract length of value */ |
301 | 301 | ||
302 | IRDA_DEBUG(2, "%s(), pi=%#x, pl=%d\n", __FUNCTION__, | 302 | IRDA_DEBUG(2, "%s(), pi=%#x, pl=%d\n", __func__, |
303 | p.pi, p.pl); | 303 | p.pi, p.pl); |
304 | 304 | ||
305 | /* Check if buffer is long enough for parsing */ | 305 | /* Check if buffer is long enough for parsing */ |
306 | if (len < (2+p.pl)) { | 306 | if (len < (2+p.pl)) { |
307 | IRDA_WARNING("%s: buffer too short for parsing! " | 307 | IRDA_WARNING("%s: buffer too short for parsing! " |
308 | "Need %d bytes, but len is only %d\n", | 308 | "Need %d bytes, but len is only %d\n", |
309 | __FUNCTION__, p.pl, len); | 309 | __func__, p.pl, len); |
310 | return -1; | 310 | return -1; |
311 | } | 311 | } |
312 | 312 | ||
@@ -314,7 +314,7 @@ static int irda_extract_string(void *self, __u8 *buf, int len, __u8 pi, | |||
314 | * checked that the buffer is long enough */ | 314 | * checked that the buffer is long enough */ |
315 | strncpy(str, buf+2, p.pl); | 315 | strncpy(str, buf+2, p.pl); |
316 | 316 | ||
317 | IRDA_DEBUG(2, "%s(), str=0x%02x 0x%02x\n", __FUNCTION__, | 317 | IRDA_DEBUG(2, "%s(), str=0x%02x 0x%02x\n", __func__, |
318 | (__u8) str[0], (__u8) str[1]); | 318 | (__u8) str[0], (__u8) str[1]); |
319 | 319 | ||
320 | /* Null terminate string */ | 320 | /* Null terminate string */ |
@@ -345,11 +345,11 @@ static int irda_extract_octseq(void *self, __u8 *buf, int len, __u8 pi, | |||
345 | if (len < (2+p.pl)) { | 345 | if (len < (2+p.pl)) { |
346 | IRDA_WARNING("%s: buffer too short for parsing! " | 346 | IRDA_WARNING("%s: buffer too short for parsing! " |
347 | "Need %d bytes, but len is only %d\n", | 347 | "Need %d bytes, but len is only %d\n", |
348 | __FUNCTION__, p.pl, len); | 348 | __func__, p.pl, len); |
349 | return -1; | 349 | return -1; |
350 | } | 350 | } |
351 | 351 | ||
352 | IRDA_DEBUG(0, "%s(), not impl\n", __FUNCTION__); | 352 | IRDA_DEBUG(0, "%s(), not impl\n", __func__); |
353 | 353 | ||
354 | return p.pl+2; /* Extracted pl+2 bytes */ | 354 | return p.pl+2; /* Extracted pl+2 bytes */ |
355 | } | 355 | } |
@@ -473,7 +473,7 @@ int irda_param_insert(void *self, __u8 pi, __u8 *buf, int len, | |||
473 | (pi_minor > info->tables[pi_major].len-1)) | 473 | (pi_minor > info->tables[pi_major].len-1)) |
474 | { | 474 | { |
475 | IRDA_DEBUG(0, "%s(), no handler for parameter=0x%02x\n", | 475 | IRDA_DEBUG(0, "%s(), no handler for parameter=0x%02x\n", |
476 | __FUNCTION__, pi); | 476 | __func__, pi); |
477 | 477 | ||
478 | /* Skip this parameter */ | 478 | /* Skip this parameter */ |
479 | return -1; | 479 | return -1; |
@@ -487,7 +487,7 @@ int irda_param_insert(void *self, __u8 pi, __u8 *buf, int len, | |||
487 | 487 | ||
488 | /* Check if handler has been implemented */ | 488 | /* Check if handler has been implemented */ |
489 | if (!pi_minor_info->func) { | 489 | if (!pi_minor_info->func) { |
490 | IRDA_MESSAGE("%s: no handler for pi=%#x\n", __FUNCTION__, pi); | 490 | IRDA_MESSAGE("%s: no handler for pi=%#x\n", __func__, pi); |
491 | /* Skip this parameter */ | 491 | /* Skip this parameter */ |
492 | return -1; | 492 | return -1; |
493 | } | 493 | } |
@@ -527,7 +527,7 @@ static int irda_param_extract(void *self, __u8 *buf, int len, | |||
527 | (pi_minor > info->tables[pi_major].len-1)) | 527 | (pi_minor > info->tables[pi_major].len-1)) |
528 | { | 528 | { |
529 | IRDA_DEBUG(0, "%s(), no handler for parameter=0x%02x\n", | 529 | IRDA_DEBUG(0, "%s(), no handler for parameter=0x%02x\n", |
530 | __FUNCTION__, buf[0]); | 530 | __func__, buf[0]); |
531 | 531 | ||
532 | /* Skip this parameter */ | 532 | /* Skip this parameter */ |
533 | return 2 + buf[n + 1]; /* Continue */ | 533 | return 2 + buf[n + 1]; /* Continue */ |
@@ -539,13 +539,13 @@ static int irda_param_extract(void *self, __u8 *buf, int len, | |||
539 | /* Find expected data type for this parameter identifier (pi)*/ | 539 | /* Find expected data type for this parameter identifier (pi)*/ |
540 | type = pi_minor_info->type; | 540 | type = pi_minor_info->type; |
541 | 541 | ||
542 | IRDA_DEBUG(3, "%s(), pi=[%d,%d], type=%d\n", __FUNCTION__, | 542 | IRDA_DEBUG(3, "%s(), pi=[%d,%d], type=%d\n", __func__, |
543 | pi_major, pi_minor, type); | 543 | pi_major, pi_minor, type); |
544 | 544 | ||
545 | /* Check if handler has been implemented */ | 545 | /* Check if handler has been implemented */ |
546 | if (!pi_minor_info->func) { | 546 | if (!pi_minor_info->func) { |
547 | IRDA_MESSAGE("%s: no handler for pi=%#x\n", | 547 | IRDA_MESSAGE("%s: no handler for pi=%#x\n", |
548 | __FUNCTION__, buf[n]); | 548 | __func__, buf[n]); |
549 | /* Skip this parameter */ | 549 | /* Skip this parameter */ |
550 | return 2 + buf[n + 1]; /* Continue */ | 550 | return 2 + buf[n + 1]; /* Continue */ |
551 | } | 551 | } |
diff --git a/net/irda/qos.c b/net/irda/qos.c index aeb18cf1dcae..2b00974e5bae 100644 --- a/net/irda/qos.c +++ b/net/irda/qos.c | |||
@@ -201,7 +201,7 @@ static int msb_index (__u16 word) | |||
201 | * it's very likely the peer. - Jean II */ | 201 | * it's very likely the peer. - Jean II */ |
202 | if (word == 0) { | 202 | if (word == 0) { |
203 | IRDA_WARNING("%s(), Detected buggy peer, adjust null PV to 0x1!\n", | 203 | IRDA_WARNING("%s(), Detected buggy peer, adjust null PV to 0x1!\n", |
204 | __FUNCTION__); | 204 | __func__); |
205 | /* The only safe choice (we don't know the array size) */ | 205 | /* The only safe choice (we don't know the array size) */ |
206 | word = 0x1; | 206 | word = 0x1; |
207 | } | 207 | } |
@@ -342,7 +342,7 @@ static void irlap_adjust_qos_settings(struct qos_info *qos) | |||
342 | __u32 line_capacity; | 342 | __u32 line_capacity; |
343 | int index; | 343 | int index; |
344 | 344 | ||
345 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__); | 345 | IRDA_DEBUG(2, "%s()\n", __func__); |
346 | 346 | ||
347 | /* | 347 | /* |
348 | * Make sure the mintt is sensible. | 348 | * Make sure the mintt is sensible. |
@@ -352,7 +352,7 @@ static void irlap_adjust_qos_settings(struct qos_info *qos) | |||
352 | int i; | 352 | int i; |
353 | 353 | ||
354 | IRDA_WARNING("%s(), Detected buggy peer, adjust mtt to %dus!\n", | 354 | IRDA_WARNING("%s(), Detected buggy peer, adjust mtt to %dus!\n", |
355 | __FUNCTION__, sysctl_min_tx_turn_time); | 355 | __func__, sysctl_min_tx_turn_time); |
356 | 356 | ||
357 | /* We don't really need bits, but easier this way */ | 357 | /* We don't really need bits, but easier this way */ |
358 | i = value_highest_bit(sysctl_min_tx_turn_time, min_turn_times, | 358 | i = value_highest_bit(sysctl_min_tx_turn_time, min_turn_times, |
@@ -370,7 +370,7 @@ static void irlap_adjust_qos_settings(struct qos_info *qos) | |||
370 | { | 370 | { |
371 | IRDA_DEBUG(0, | 371 | IRDA_DEBUG(0, |
372 | "%s(), adjusting max turn time from %d to 500 ms\n", | 372 | "%s(), adjusting max turn time from %d to 500 ms\n", |
373 | __FUNCTION__, qos->max_turn_time.value); | 373 | __func__, qos->max_turn_time.value); |
374 | qos->max_turn_time.value = 500; | 374 | qos->max_turn_time.value = 500; |
375 | } | 375 | } |
376 | 376 | ||
@@ -386,7 +386,7 @@ static void irlap_adjust_qos_settings(struct qos_info *qos) | |||
386 | while ((qos->data_size.value > line_capacity) && (index > 0)) { | 386 | while ((qos->data_size.value > line_capacity) && (index > 0)) { |
387 | qos->data_size.value = data_sizes[index--]; | 387 | qos->data_size.value = data_sizes[index--]; |
388 | IRDA_DEBUG(2, "%s(), reducing data size to %d\n", | 388 | IRDA_DEBUG(2, "%s(), reducing data size to %d\n", |
389 | __FUNCTION__, qos->data_size.value); | 389 | __func__, qos->data_size.value); |
390 | } | 390 | } |
391 | #else /* Use method described in section 6.6.11 of IrLAP */ | 391 | #else /* Use method described in section 6.6.11 of IrLAP */ |
392 | while (irlap_requested_line_capacity(qos) > line_capacity) { | 392 | while (irlap_requested_line_capacity(qos) > line_capacity) { |
@@ -396,14 +396,14 @@ static void irlap_adjust_qos_settings(struct qos_info *qos) | |||
396 | if (qos->window_size.value > 1) { | 396 | if (qos->window_size.value > 1) { |
397 | qos->window_size.value--; | 397 | qos->window_size.value--; |
398 | IRDA_DEBUG(2, "%s(), reducing window size to %d\n", | 398 | IRDA_DEBUG(2, "%s(), reducing window size to %d\n", |
399 | __FUNCTION__, qos->window_size.value); | 399 | __func__, qos->window_size.value); |
400 | } else if (index > 1) { | 400 | } else if (index > 1) { |
401 | qos->data_size.value = data_sizes[index--]; | 401 | qos->data_size.value = data_sizes[index--]; |
402 | IRDA_DEBUG(2, "%s(), reducing data size to %d\n", | 402 | IRDA_DEBUG(2, "%s(), reducing data size to %d\n", |
403 | __FUNCTION__, qos->data_size.value); | 403 | __func__, qos->data_size.value); |
404 | } else { | 404 | } else { |
405 | IRDA_WARNING("%s(), nothing more we can do!\n", | 405 | IRDA_WARNING("%s(), nothing more we can do!\n", |
406 | __FUNCTION__); | 406 | __func__); |
407 | } | 407 | } |
408 | } | 408 | } |
409 | #endif /* CONFIG_IRDA_DYNAMIC_WINDOW */ | 409 | #endif /* CONFIG_IRDA_DYNAMIC_WINDOW */ |
@@ -538,7 +538,7 @@ static int irlap_param_baud_rate(void *instance, irda_param_t *param, int get) | |||
538 | if (get) { | 538 | if (get) { |
539 | param->pv.i = self->qos_rx.baud_rate.bits; | 539 | param->pv.i = self->qos_rx.baud_rate.bits; |
540 | IRDA_DEBUG(2, "%s(), baud rate = 0x%02x\n", | 540 | IRDA_DEBUG(2, "%s(), baud rate = 0x%02x\n", |
541 | __FUNCTION__, param->pv.i); | 541 | __func__, param->pv.i); |
542 | } else { | 542 | } else { |
543 | /* | 543 | /* |
544 | * Stations must agree on baud rate, so calculate | 544 | * Stations must agree on baud rate, so calculate |
@@ -711,7 +711,7 @@ __u32 irlap_max_line_capacity(__u32 speed, __u32 max_turn_time) | |||
711 | int i,j; | 711 | int i,j; |
712 | 712 | ||
713 | IRDA_DEBUG(2, "%s(), speed=%d, max_turn_time=%d\n", | 713 | IRDA_DEBUG(2, "%s(), speed=%d, max_turn_time=%d\n", |
714 | __FUNCTION__, speed, max_turn_time); | 714 | __func__, speed, max_turn_time); |
715 | 715 | ||
716 | i = value_index(speed, baud_rates, 10); | 716 | i = value_index(speed, baud_rates, 10); |
717 | j = value_index(max_turn_time, max_turn_times, 4); | 717 | j = value_index(max_turn_time, max_turn_times, 4); |
@@ -722,7 +722,7 @@ __u32 irlap_max_line_capacity(__u32 speed, __u32 max_turn_time) | |||
722 | line_capacity = max_line_capacities[i][j]; | 722 | line_capacity = max_line_capacities[i][j]; |
723 | 723 | ||
724 | IRDA_DEBUG(2, "%s(), line capacity=%d bytes\n", | 724 | IRDA_DEBUG(2, "%s(), line capacity=%d bytes\n", |
725 | __FUNCTION__, line_capacity); | 725 | __func__, line_capacity); |
726 | 726 | ||
727 | return line_capacity; | 727 | return line_capacity; |
728 | } | 728 | } |
@@ -738,7 +738,7 @@ static __u32 irlap_requested_line_capacity(struct qos_info *qos) | |||
738 | qos->min_turn_time.value); | 738 | qos->min_turn_time.value); |
739 | 739 | ||
740 | IRDA_DEBUG(2, "%s(), requested line capacity=%d\n", | 740 | IRDA_DEBUG(2, "%s(), requested line capacity=%d\n", |
741 | __FUNCTION__, line_capacity); | 741 | __func__, line_capacity); |
742 | 742 | ||
743 | return line_capacity; | 743 | return line_capacity; |
744 | } | 744 | } |
diff --git a/net/irda/wrapper.c b/net/irda/wrapper.c index c246983308b8..fd0995b1323a 100644 --- a/net/irda/wrapper.c +++ b/net/irda/wrapper.c | |||
@@ -106,16 +106,16 @@ int async_wrap_skb(struct sk_buff *skb, __u8 *tx_buff, int buffsize) | |||
106 | * Nothing to worry about, but we set the default number of | 106 | * Nothing to worry about, but we set the default number of |
107 | * BOF's | 107 | * BOF's |
108 | */ | 108 | */ |
109 | IRDA_DEBUG(1, "%s(), wrong magic in skb!\n", __FUNCTION__); | 109 | IRDA_DEBUG(1, "%s(), wrong magic in skb!\n", __func__); |
110 | xbofs = 10; | 110 | xbofs = 10; |
111 | } else | 111 | } else |
112 | xbofs = cb->xbofs + cb->xbofs_delay; | 112 | xbofs = cb->xbofs + cb->xbofs_delay; |
113 | 113 | ||
114 | IRDA_DEBUG(4, "%s(), xbofs=%d\n", __FUNCTION__, xbofs); | 114 | IRDA_DEBUG(4, "%s(), xbofs=%d\n", __func__, xbofs); |
115 | 115 | ||
116 | /* Check that we never use more than 115 + 48 xbofs */ | 116 | /* Check that we never use more than 115 + 48 xbofs */ |
117 | if (xbofs > 163) { | 117 | if (xbofs > 163) { |
118 | IRDA_DEBUG(0, "%s(), too many xbofs (%d)\n", __FUNCTION__, | 118 | IRDA_DEBUG(0, "%s(), too many xbofs (%d)\n", __func__, |
119 | xbofs); | 119 | xbofs); |
120 | xbofs = 163; | 120 | xbofs = 163; |
121 | } | 121 | } |
@@ -135,7 +135,7 @@ int async_wrap_skb(struct sk_buff *skb, __u8 *tx_buff, int buffsize) | |||
135 | */ | 135 | */ |
136 | if(n >= (buffsize-5)) { | 136 | if(n >= (buffsize-5)) { |
137 | IRDA_ERROR("%s(), tx buffer overflow (n=%d)\n", | 137 | IRDA_ERROR("%s(), tx buffer overflow (n=%d)\n", |
138 | __FUNCTION__, n); | 138 | __func__, n); |
139 | return n; | 139 | return n; |
140 | } | 140 | } |
141 | 141 | ||
@@ -287,7 +287,7 @@ async_unwrap_bof(struct net_device *dev, | |||
287 | /* Not supposed to happen, the previous frame is not | 287 | /* Not supposed to happen, the previous frame is not |
288 | * finished - Jean II */ | 288 | * finished - Jean II */ |
289 | IRDA_DEBUG(1, "%s(), Discarding incomplete frame\n", | 289 | IRDA_DEBUG(1, "%s(), Discarding incomplete frame\n", |
290 | __FUNCTION__); | 290 | __func__); |
291 | stats->rx_errors++; | 291 | stats->rx_errors++; |
292 | stats->rx_missed_errors++; | 292 | stats->rx_missed_errors++; |
293 | irda_device_set_media_busy(dev, TRUE); | 293 | irda_device_set_media_busy(dev, TRUE); |
@@ -360,7 +360,7 @@ async_unwrap_eof(struct net_device *dev, | |||
360 | /* Wrong CRC, discard frame! */ | 360 | /* Wrong CRC, discard frame! */ |
361 | irda_device_set_media_busy(dev, TRUE); | 361 | irda_device_set_media_busy(dev, TRUE); |
362 | 362 | ||
363 | IRDA_DEBUG(1, "%s(), crc error\n", __FUNCTION__); | 363 | IRDA_DEBUG(1, "%s(), crc error\n", __func__); |
364 | stats->rx_errors++; | 364 | stats->rx_errors++; |
365 | stats->rx_crc_errors++; | 365 | stats->rx_crc_errors++; |
366 | } | 366 | } |
@@ -386,7 +386,7 @@ async_unwrap_ce(struct net_device *dev, | |||
386 | break; | 386 | break; |
387 | 387 | ||
388 | case LINK_ESCAPE: | 388 | case LINK_ESCAPE: |
389 | IRDA_WARNING("%s: state not defined\n", __FUNCTION__); | 389 | IRDA_WARNING("%s: state not defined\n", __func__); |
390 | break; | 390 | break; |
391 | 391 | ||
392 | case BEGIN_FRAME: | 392 | case BEGIN_FRAME: |
@@ -421,7 +421,7 @@ async_unwrap_other(struct net_device *dev, | |||
421 | #endif | 421 | #endif |
422 | } else { | 422 | } else { |
423 | IRDA_DEBUG(1, "%s(), Rx buffer overflow, aborting\n", | 423 | IRDA_DEBUG(1, "%s(), Rx buffer overflow, aborting\n", |
424 | __FUNCTION__); | 424 | __func__); |
425 | rx_buff->state = OUTSIDE_FRAME; | 425 | rx_buff->state = OUTSIDE_FRAME; |
426 | } | 426 | } |
427 | break; | 427 | break; |
@@ -440,7 +440,7 @@ async_unwrap_other(struct net_device *dev, | |||
440 | rx_buff->state = INSIDE_FRAME; | 440 | rx_buff->state = INSIDE_FRAME; |
441 | } else { | 441 | } else { |
442 | IRDA_DEBUG(1, "%s(), Rx buffer overflow, aborting\n", | 442 | IRDA_DEBUG(1, "%s(), Rx buffer overflow, aborting\n", |
443 | __FUNCTION__); | 443 | __func__); |
444 | rx_buff->state = OUTSIDE_FRAME; | 444 | rx_buff->state = OUTSIDE_FRAME; |
445 | } | 445 | } |
446 | break; | 446 | break; |
diff --git a/net/key/af_key.c b/net/key/af_key.c index 8b5f486ac80f..50c442fc99ce 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c | |||
@@ -48,6 +48,17 @@ struct pfkey_sock { | |||
48 | struct sock sk; | 48 | struct sock sk; |
49 | int registered; | 49 | int registered; |
50 | int promisc; | 50 | int promisc; |
51 | |||
52 | struct { | ||
53 | uint8_t msg_version; | ||
54 | uint32_t msg_pid; | ||
55 | int (*dump)(struct pfkey_sock *sk); | ||
56 | void (*done)(struct pfkey_sock *sk); | ||
57 | union { | ||
58 | struct xfrm_policy_walk policy; | ||
59 | struct xfrm_state_walk state; | ||
60 | } u; | ||
61 | } dump; | ||
51 | }; | 62 | }; |
52 | 63 | ||
53 | static inline struct pfkey_sock *pfkey_sk(struct sock *sk) | 64 | static inline struct pfkey_sock *pfkey_sk(struct sock *sk) |
@@ -55,6 +66,27 @@ static inline struct pfkey_sock *pfkey_sk(struct sock *sk) | |||
55 | return (struct pfkey_sock *)sk; | 66 | return (struct pfkey_sock *)sk; |
56 | } | 67 | } |
57 | 68 | ||
69 | static int pfkey_can_dump(struct sock *sk) | ||
70 | { | ||
71 | if (3 * atomic_read(&sk->sk_rmem_alloc) <= 2 * sk->sk_rcvbuf) | ||
72 | return 1; | ||
73 | return 0; | ||
74 | } | ||
75 | |||
76 | static int pfkey_do_dump(struct pfkey_sock *pfk) | ||
77 | { | ||
78 | int rc; | ||
79 | |||
80 | rc = pfk->dump.dump(pfk); | ||
81 | if (rc == -ENOBUFS) | ||
82 | return 0; | ||
83 | |||
84 | pfk->dump.done(pfk); | ||
85 | pfk->dump.dump = NULL; | ||
86 | pfk->dump.done = NULL; | ||
87 | return rc; | ||
88 | } | ||
89 | |||
58 | static void pfkey_sock_destruct(struct sock *sk) | 90 | static void pfkey_sock_destruct(struct sock *sk) |
59 | { | 91 | { |
60 | skb_queue_purge(&sk->sk_receive_queue); | 92 | skb_queue_purge(&sk->sk_receive_queue); |
@@ -1709,45 +1741,60 @@ static int pfkey_flush(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hd | |||
1709 | return 0; | 1741 | return 0; |
1710 | } | 1742 | } |
1711 | 1743 | ||
1712 | struct pfkey_dump_data | ||
1713 | { | ||
1714 | struct sk_buff *skb; | ||
1715 | struct sadb_msg *hdr; | ||
1716 | struct sock *sk; | ||
1717 | }; | ||
1718 | |||
1719 | static int dump_sa(struct xfrm_state *x, int count, void *ptr) | 1744 | static int dump_sa(struct xfrm_state *x, int count, void *ptr) |
1720 | { | 1745 | { |
1721 | struct pfkey_dump_data *data = ptr; | 1746 | struct pfkey_sock *pfk = ptr; |
1722 | struct sk_buff *out_skb; | 1747 | struct sk_buff *out_skb; |
1723 | struct sadb_msg *out_hdr; | 1748 | struct sadb_msg *out_hdr; |
1724 | 1749 | ||
1750 | if (!pfkey_can_dump(&pfk->sk)) | ||
1751 | return -ENOBUFS; | ||
1752 | |||
1725 | out_skb = pfkey_xfrm_state2msg(x); | 1753 | out_skb = pfkey_xfrm_state2msg(x); |
1726 | if (IS_ERR(out_skb)) | 1754 | if (IS_ERR(out_skb)) |
1727 | return PTR_ERR(out_skb); | 1755 | return PTR_ERR(out_skb); |
1728 | 1756 | ||
1729 | out_hdr = (struct sadb_msg *) out_skb->data; | 1757 | out_hdr = (struct sadb_msg *) out_skb->data; |
1730 | out_hdr->sadb_msg_version = data->hdr->sadb_msg_version; | 1758 | out_hdr->sadb_msg_version = pfk->dump.msg_version; |
1731 | out_hdr->sadb_msg_type = SADB_DUMP; | 1759 | out_hdr->sadb_msg_type = SADB_DUMP; |
1732 | out_hdr->sadb_msg_satype = pfkey_proto2satype(x->id.proto); | 1760 | out_hdr->sadb_msg_satype = pfkey_proto2satype(x->id.proto); |
1733 | out_hdr->sadb_msg_errno = 0; | 1761 | out_hdr->sadb_msg_errno = 0; |
1734 | out_hdr->sadb_msg_reserved = 0; | 1762 | out_hdr->sadb_msg_reserved = 0; |
1735 | out_hdr->sadb_msg_seq = count; | 1763 | out_hdr->sadb_msg_seq = count; |
1736 | out_hdr->sadb_msg_pid = data->hdr->sadb_msg_pid; | 1764 | out_hdr->sadb_msg_pid = pfk->dump.msg_pid; |
1737 | pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, data->sk); | 1765 | pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, &pfk->sk); |
1738 | return 0; | 1766 | return 0; |
1739 | } | 1767 | } |
1740 | 1768 | ||
1769 | static int pfkey_dump_sa(struct pfkey_sock *pfk) | ||
1770 | { | ||
1771 | return xfrm_state_walk(&pfk->dump.u.state, dump_sa, (void *) pfk); | ||
1772 | } | ||
1773 | |||
1774 | static void pfkey_dump_sa_done(struct pfkey_sock *pfk) | ||
1775 | { | ||
1776 | xfrm_state_walk_done(&pfk->dump.u.state); | ||
1777 | } | ||
1778 | |||
1741 | static int pfkey_dump(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs) | 1779 | static int pfkey_dump(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs) |
1742 | { | 1780 | { |
1743 | u8 proto; | 1781 | u8 proto; |
1744 | struct pfkey_dump_data data = { .skb = skb, .hdr = hdr, .sk = sk }; | 1782 | struct pfkey_sock *pfk = pfkey_sk(sk); |
1783 | |||
1784 | if (pfk->dump.dump != NULL) | ||
1785 | return -EBUSY; | ||
1745 | 1786 | ||
1746 | proto = pfkey_satype2proto(hdr->sadb_msg_satype); | 1787 | proto = pfkey_satype2proto(hdr->sadb_msg_satype); |
1747 | if (proto == 0) | 1788 | if (proto == 0) |
1748 | return -EINVAL; | 1789 | return -EINVAL; |
1749 | 1790 | ||
1750 | return xfrm_state_walk(proto, dump_sa, &data); | 1791 | pfk->dump.msg_version = hdr->sadb_msg_version; |
1792 | pfk->dump.msg_pid = hdr->sadb_msg_pid; | ||
1793 | pfk->dump.dump = pfkey_dump_sa; | ||
1794 | pfk->dump.done = pfkey_dump_sa_done; | ||
1795 | xfrm_state_walk_init(&pfk->dump.u.state, proto); | ||
1796 | |||
1797 | return pfkey_do_dump(pfk); | ||
1751 | } | 1798 | } |
1752 | 1799 | ||
1753 | static int pfkey_promisc(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs) | 1800 | static int pfkey_promisc(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs) |
@@ -1780,7 +1827,9 @@ static int check_reqid(struct xfrm_policy *xp, int dir, int count, void *ptr) | |||
1780 | 1827 | ||
1781 | static u32 gen_reqid(void) | 1828 | static u32 gen_reqid(void) |
1782 | { | 1829 | { |
1830 | struct xfrm_policy_walk walk; | ||
1783 | u32 start; | 1831 | u32 start; |
1832 | int rc; | ||
1784 | static u32 reqid = IPSEC_MANUAL_REQID_MAX; | 1833 | static u32 reqid = IPSEC_MANUAL_REQID_MAX; |
1785 | 1834 | ||
1786 | start = reqid; | 1835 | start = reqid; |
@@ -1788,8 +1837,10 @@ static u32 gen_reqid(void) | |||
1788 | ++reqid; | 1837 | ++reqid; |
1789 | if (reqid == 0) | 1838 | if (reqid == 0) |
1790 | reqid = IPSEC_MANUAL_REQID_MAX+1; | 1839 | reqid = IPSEC_MANUAL_REQID_MAX+1; |
1791 | if (xfrm_policy_walk(XFRM_POLICY_TYPE_MAIN, check_reqid, | 1840 | xfrm_policy_walk_init(&walk, XFRM_POLICY_TYPE_MAIN); |
1792 | (void*)&reqid) != -EEXIST) | 1841 | rc = xfrm_policy_walk(&walk, check_reqid, (void*)&reqid); |
1842 | xfrm_policy_walk_done(&walk); | ||
1843 | if (rc != -EEXIST) | ||
1793 | return reqid; | 1844 | return reqid; |
1794 | } while (reqid != start); | 1845 | } while (reqid != start); |
1795 | return 0; | 1846 | return 0; |
@@ -2638,11 +2689,14 @@ out: | |||
2638 | 2689 | ||
2639 | static int dump_sp(struct xfrm_policy *xp, int dir, int count, void *ptr) | 2690 | static int dump_sp(struct xfrm_policy *xp, int dir, int count, void *ptr) |
2640 | { | 2691 | { |
2641 | struct pfkey_dump_data *data = ptr; | 2692 | struct pfkey_sock *pfk = ptr; |
2642 | struct sk_buff *out_skb; | 2693 | struct sk_buff *out_skb; |
2643 | struct sadb_msg *out_hdr; | 2694 | struct sadb_msg *out_hdr; |
2644 | int err; | 2695 | int err; |
2645 | 2696 | ||
2697 | if (!pfkey_can_dump(&pfk->sk)) | ||
2698 | return -ENOBUFS; | ||
2699 | |||
2646 | out_skb = pfkey_xfrm_policy2msg_prep(xp); | 2700 | out_skb = pfkey_xfrm_policy2msg_prep(xp); |
2647 | if (IS_ERR(out_skb)) | 2701 | if (IS_ERR(out_skb)) |
2648 | return PTR_ERR(out_skb); | 2702 | return PTR_ERR(out_skb); |
@@ -2652,21 +2706,40 @@ static int dump_sp(struct xfrm_policy *xp, int dir, int count, void *ptr) | |||
2652 | return err; | 2706 | return err; |
2653 | 2707 | ||
2654 | out_hdr = (struct sadb_msg *) out_skb->data; | 2708 | out_hdr = (struct sadb_msg *) out_skb->data; |
2655 | out_hdr->sadb_msg_version = data->hdr->sadb_msg_version; | 2709 | out_hdr->sadb_msg_version = pfk->dump.msg_version; |
2656 | out_hdr->sadb_msg_type = SADB_X_SPDDUMP; | 2710 | out_hdr->sadb_msg_type = SADB_X_SPDDUMP; |
2657 | out_hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC; | 2711 | out_hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC; |
2658 | out_hdr->sadb_msg_errno = 0; | 2712 | out_hdr->sadb_msg_errno = 0; |
2659 | out_hdr->sadb_msg_seq = count; | 2713 | out_hdr->sadb_msg_seq = count; |
2660 | out_hdr->sadb_msg_pid = data->hdr->sadb_msg_pid; | 2714 | out_hdr->sadb_msg_pid = pfk->dump.msg_pid; |
2661 | pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, data->sk); | 2715 | pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, &pfk->sk); |
2662 | return 0; | 2716 | return 0; |
2663 | } | 2717 | } |
2664 | 2718 | ||
2719 | static int pfkey_dump_sp(struct pfkey_sock *pfk) | ||
2720 | { | ||
2721 | return xfrm_policy_walk(&pfk->dump.u.policy, dump_sp, (void *) pfk); | ||
2722 | } | ||
2723 | |||
2724 | static void pfkey_dump_sp_done(struct pfkey_sock *pfk) | ||
2725 | { | ||
2726 | xfrm_policy_walk_done(&pfk->dump.u.policy); | ||
2727 | } | ||
2728 | |||
2665 | static int pfkey_spddump(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs) | 2729 | static int pfkey_spddump(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs) |
2666 | { | 2730 | { |
2667 | struct pfkey_dump_data data = { .skb = skb, .hdr = hdr, .sk = sk }; | 2731 | struct pfkey_sock *pfk = pfkey_sk(sk); |
2732 | |||
2733 | if (pfk->dump.dump != NULL) | ||
2734 | return -EBUSY; | ||
2735 | |||
2736 | pfk->dump.msg_version = hdr->sadb_msg_version; | ||
2737 | pfk->dump.msg_pid = hdr->sadb_msg_pid; | ||
2738 | pfk->dump.dump = pfkey_dump_sp; | ||
2739 | pfk->dump.done = pfkey_dump_sp_done; | ||
2740 | xfrm_policy_walk_init(&pfk->dump.u.policy, XFRM_POLICY_TYPE_MAIN); | ||
2668 | 2741 | ||
2669 | return xfrm_policy_walk(XFRM_POLICY_TYPE_MAIN, dump_sp, &data); | 2742 | return pfkey_do_dump(pfk); |
2670 | } | 2743 | } |
2671 | 2744 | ||
2672 | static int key_notify_policy_flush(struct km_event *c) | 2745 | static int key_notify_policy_flush(struct km_event *c) |
@@ -3671,6 +3744,7 @@ static int pfkey_recvmsg(struct kiocb *kiocb, | |||
3671 | int flags) | 3744 | int flags) |
3672 | { | 3745 | { |
3673 | struct sock *sk = sock->sk; | 3746 | struct sock *sk = sock->sk; |
3747 | struct pfkey_sock *pfk = pfkey_sk(sk); | ||
3674 | struct sk_buff *skb; | 3748 | struct sk_buff *skb; |
3675 | int copied, err; | 3749 | int copied, err; |
3676 | 3750 | ||
@@ -3698,6 +3772,10 @@ static int pfkey_recvmsg(struct kiocb *kiocb, | |||
3698 | 3772 | ||
3699 | err = (flags & MSG_TRUNC) ? skb->len : copied; | 3773 | err = (flags & MSG_TRUNC) ? skb->len : copied; |
3700 | 3774 | ||
3775 | if (pfk->dump.dump != NULL && | ||
3776 | 3 * atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) | ||
3777 | pfkey_do_dump(pfk); | ||
3778 | |||
3701 | out_free: | 3779 | out_free: |
3702 | skb_free_datagram(sk, skb); | 3780 | skb_free_datagram(sk, skb); |
3703 | out: | 3781 | out: |
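
Editorial note on the af_key.c hunks above: beyond the __FUNCTION__ to __func__ cleanups elsewhere in this section, this file switches PF_KEY SADB_DUMP / SADB_X_SPDDUMP from a one-shot walk to a resumable dump. The walk state and callbacks are stored in the pfkey socket, dump_sa()/dump_sp() bail out with -ENOBUFS once the receive queue passes two thirds of sk_rcvbuf (pfkey_can_dump()), and pfkey_recvmsg() calls pfkey_do_dump() again once the reader has drained the queue. The user-space toy program below is only a sketch of that control flow, not kernel code; every name in it (toy_sock, toy_walk, emit_entry, do_dump, RCVBUF, TOY_ENOBUFS) is invented for illustration and does not exist in the kernel tree.

/*
 * Sketch of the resumable-dump pattern introduced above (assumed
 * simplification: a 10-entry table, a fake 64-byte receive buffer).
 */
#include <stdio.h>
#include <string.h>

#define RCVBUF      64          /* pretend socket receive buffer size */
#define TOY_ENOBUFS (-105)      /* stand-in for -ENOBUFS              */

struct toy_walk { int next; };  /* resumable iterator position        */

struct toy_sock {
	int rmem;                        /* bytes queued to the reader  */
	struct toy_walk walk;            /* saved dump position         */
	int (*dump)(struct toy_sock *);  /* NULL when no dump pending   */
};

/* Mirrors pfkey_can_dump(): stop once rmem passes 2/3 of the rcvbuf. */
static int can_dump(const struct toy_sock *sk)
{
	return 3 * sk->rmem <= 2 * RCVBUF;
}

/* Mirrors dump_sa()/dump_sp(): emit one entry or ask to be resumed. */
static int emit_entry(struct toy_sock *sk, int idx)
{
	if (!can_dump(sk))
		return TOY_ENOBUFS;
	sk->rmem += 16;                  /* pretend one 16-byte message */
	printf("queued entry %d (rmem=%d)\n", idx, sk->rmem);
	return 0;
}

/* Mirrors pfkey_dump_sa(): walk the table from the saved position. */
static int toy_dump(struct toy_sock *sk)
{
	while (sk->walk.next < 10) {
		int rc = emit_entry(sk, sk->walk.next);
		if (rc)
			return rc;
		sk->walk.next++;
	}
	return 0;
}

/* Mirrors pfkey_do_dump(): -ENOBUFS keeps the dump pending. */
static int do_dump(struct toy_sock *sk)
{
	int rc = sk->dump(sk);
	if (rc == TOY_ENOBUFS)
		return 0;
	sk->dump = NULL;                 /* finished (or failed)        */
	return rc;
}

int main(void)
{
	struct toy_sock sk;

	memset(&sk, 0, sizeof(sk));
	sk.dump = toy_dump;
	do_dump(&sk);                    /* the SADB_DUMP request       */

	while (sk.dump) {                /* each recvmsg() drains rmem  */
		sk.rmem = 0;
		do_dump(&sk);            /* ...and resumes the dump     */
	}
	return 0;
}

Run in this form, the sketch queues three entries per "receive", parks the walk whenever the fake buffer is two-thirds full, and finishes after a few resume rounds; the kernel change follows the same shape, with xfrm_state_walk()/xfrm_policy_walk() playing the role of toy_dump() and pfkey_recvmsg() driving the resume.
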
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index 46cf962f7f88..f93b57618582 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c | |||
@@ -185,7 +185,7 @@ static int llc_ui_release(struct socket *sock) | |||
185 | sock_hold(sk); | 185 | sock_hold(sk); |
186 | lock_sock(sk); | 186 | lock_sock(sk); |
187 | llc = llc_sk(sk); | 187 | llc = llc_sk(sk); |
188 | dprintk("%s: closing local(%02X) remote(%02X)\n", __FUNCTION__, | 188 | dprintk("%s: closing local(%02X) remote(%02X)\n", __func__, |
189 | llc->laddr.lsap, llc->daddr.lsap); | 189 | llc->laddr.lsap, llc->daddr.lsap); |
190 | if (!llc_send_disc(sk)) | 190 | if (!llc_send_disc(sk)) |
191 | llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo); | 191 | llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo); |
@@ -295,7 +295,7 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen) | |||
295 | struct llc_sap *sap; | 295 | struct llc_sap *sap; |
296 | int rc = -EINVAL; | 296 | int rc = -EINVAL; |
297 | 297 | ||
298 | dprintk("%s: binding %02X\n", __FUNCTION__, addr->sllc_sap); | 298 | dprintk("%s: binding %02X\n", __func__, addr->sllc_sap); |
299 | if (unlikely(!sock_flag(sk, SOCK_ZAPPED) || addrlen != sizeof(*addr))) | 299 | if (unlikely(!sock_flag(sk, SOCK_ZAPPED) || addrlen != sizeof(*addr))) |
300 | goto out; | 300 | goto out; |
301 | rc = -EAFNOSUPPORT; | 301 | rc = -EAFNOSUPPORT; |
@@ -432,7 +432,7 @@ static int llc_ui_connect(struct socket *sock, struct sockaddr *uaddr, | |||
432 | rc = llc_establish_connection(sk, llc->dev->dev_addr, | 432 | rc = llc_establish_connection(sk, llc->dev->dev_addr, |
433 | addr->sllc_mac, addr->sllc_sap); | 433 | addr->sllc_mac, addr->sllc_sap); |
434 | if (rc) { | 434 | if (rc) { |
435 | dprintk("%s: llc_ui_send_conn failed :-(\n", __FUNCTION__); | 435 | dprintk("%s: llc_ui_send_conn failed :-(\n", __func__); |
436 | sock->state = SS_UNCONNECTED; | 436 | sock->state = SS_UNCONNECTED; |
437 | sk->sk_state = TCP_CLOSE; | 437 | sk->sk_state = TCP_CLOSE; |
438 | goto out; | 438 | goto out; |
@@ -604,7 +604,7 @@ static int llc_ui_accept(struct socket *sock, struct socket *newsock, int flags) | |||
604 | struct sk_buff *skb; | 604 | struct sk_buff *skb; |
605 | int rc = -EOPNOTSUPP; | 605 | int rc = -EOPNOTSUPP; |
606 | 606 | ||
607 | dprintk("%s: accepting on %02X\n", __FUNCTION__, | 607 | dprintk("%s: accepting on %02X\n", __func__, |
608 | llc_sk(sk)->laddr.lsap); | 608 | llc_sk(sk)->laddr.lsap); |
609 | lock_sock(sk); | 609 | lock_sock(sk); |
610 | if (unlikely(sk->sk_type != SOCK_STREAM)) | 610 | if (unlikely(sk->sk_type != SOCK_STREAM)) |
@@ -619,7 +619,7 @@ static int llc_ui_accept(struct socket *sock, struct socket *newsock, int flags) | |||
619 | if (rc) | 619 | if (rc) |
620 | goto out; | 620 | goto out; |
621 | } | 621 | } |
622 | dprintk("%s: got a new connection on %02X\n", __FUNCTION__, | 622 | dprintk("%s: got a new connection on %02X\n", __func__, |
623 | llc_sk(sk)->laddr.lsap); | 623 | llc_sk(sk)->laddr.lsap); |
624 | skb = skb_dequeue(&sk->sk_receive_queue); | 624 | skb = skb_dequeue(&sk->sk_receive_queue); |
625 | rc = -EINVAL; | 625 | rc = -EINVAL; |
@@ -640,7 +640,7 @@ static int llc_ui_accept(struct socket *sock, struct socket *newsock, int flags) | |||
640 | /* put original socket back into a clean listen state. */ | 640 | /* put original socket back into a clean listen state. */ |
641 | sk->sk_state = TCP_LISTEN; | 641 | sk->sk_state = TCP_LISTEN; |
642 | sk->sk_ack_backlog--; | 642 | sk->sk_ack_backlog--; |
643 | dprintk("%s: ok success on %02X, client on %02X\n", __FUNCTION__, | 643 | dprintk("%s: ok success on %02X, client on %02X\n", __func__, |
644 | llc_sk(sk)->addr.sllc_sap, newllc->daddr.lsap); | 644 | llc_sk(sk)->addr.sllc_sap, newllc->daddr.lsap); |
645 | frees: | 645 | frees: |
646 | kfree_skb(skb); | 646 | kfree_skb(skb); |
@@ -833,7 +833,7 @@ static int llc_ui_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
833 | size_t size = 0; | 833 | size_t size = 0; |
834 | int rc = -EINVAL, copied = 0, hdrlen; | 834 | int rc = -EINVAL, copied = 0, hdrlen; |
835 | 835 | ||
836 | dprintk("%s: sending from %02X to %02X\n", __FUNCTION__, | 836 | dprintk("%s: sending from %02X to %02X\n", __func__, |
837 | llc->laddr.lsap, llc->daddr.lsap); | 837 | llc->laddr.lsap, llc->daddr.lsap); |
838 | lock_sock(sk); | 838 | lock_sock(sk); |
839 | if (addr) { | 839 | if (addr) { |
@@ -891,7 +891,7 @@ out: | |||
891 | kfree_skb(skb); | 891 | kfree_skb(skb); |
892 | release: | 892 | release: |
893 | dprintk("%s: failed sending from %02X to %02X: %d\n", | 893 | dprintk("%s: failed sending from %02X to %02X: %d\n", |
894 | __FUNCTION__, llc->laddr.lsap, llc->daddr.lsap, rc); | 894 | __func__, llc->laddr.lsap, llc->daddr.lsap, rc); |
895 | } | 895 | } |
896 | release_sock(sk); | 896 | release_sock(sk); |
897 | return rc ? : copied; | 897 | return rc ? : copied; |
diff --git a/net/llc/llc_c_ac.c b/net/llc/llc_c_ac.c index 860140caa6e0..f728ffe288aa 100644 --- a/net/llc/llc_c_ac.c +++ b/net/llc/llc_c_ac.c | |||
@@ -1427,7 +1427,7 @@ static void llc_process_tmr_ev(struct sock *sk, struct sk_buff *skb) | |||
1427 | { | 1427 | { |
1428 | if (llc_sk(sk)->state == LLC_CONN_OUT_OF_SVC) { | 1428 | if (llc_sk(sk)->state == LLC_CONN_OUT_OF_SVC) { |
1429 | printk(KERN_WARNING "%s: timer called on closed connection\n", | 1429 | printk(KERN_WARNING "%s: timer called on closed connection\n", |
1430 | __FUNCTION__); | 1430 | __func__); |
1431 | kfree_skb(skb); | 1431 | kfree_skb(skb); |
1432 | } else { | 1432 | } else { |
1433 | if (!sock_owned_by_user(sk)) | 1433 | if (!sock_owned_by_user(sk)) |
diff --git a/net/llc/llc_c_ev.c b/net/llc/llc_c_ev.c index c5deda246614..523fdd1cf781 100644 --- a/net/llc/llc_c_ev.c +++ b/net/llc/llc_c_ev.c | |||
@@ -228,7 +228,7 @@ int llc_conn_ev_rx_i_cmd_pbit_set_x_inval_ns(struct sock *sk, | |||
228 | llc_util_ns_inside_rx_window(ns, vr, llc_sk(sk)->rw) ? 0 : 1; | 228 | llc_util_ns_inside_rx_window(ns, vr, llc_sk(sk)->rw) ? 0 : 1; |
229 | if (!rc) | 229 | if (!rc) |
230 | dprintk("%s: matched, state=%d, ns=%d, vr=%d\n", | 230 | dprintk("%s: matched, state=%d, ns=%d, vr=%d\n", |
231 | __FUNCTION__, llc_sk(sk)->state, ns, vr); | 231 | __func__, llc_sk(sk)->state, ns, vr); |
232 | return rc; | 232 | return rc; |
233 | } | 233 | } |
234 | 234 | ||
@@ -306,7 +306,7 @@ int llc_conn_ev_rx_i_rsp_fbit_set_x_inval_ns(struct sock *sk, | |||
306 | llc_util_ns_inside_rx_window(ns, vr, llc_sk(sk)->rw) ? 0 : 1; | 306 | llc_util_ns_inside_rx_window(ns, vr, llc_sk(sk)->rw) ? 0 : 1; |
307 | if (!rc) | 307 | if (!rc) |
308 | dprintk("%s: matched, state=%d, ns=%d, vr=%d\n", | 308 | dprintk("%s: matched, state=%d, ns=%d, vr=%d\n", |
309 | __FUNCTION__, llc_sk(sk)->state, ns, vr); | 309 | __func__, llc_sk(sk)->state, ns, vr); |
310 | return rc; | 310 | return rc; |
311 | } | 311 | } |
312 | 312 | ||
@@ -511,7 +511,7 @@ int llc_conn_ev_rx_zzz_cmd_pbit_set_x_inval_nr(struct sock *sk, | |||
511 | (LLC_PDU_TYPE_IS_I(pdu) || LLC_PDU_TYPE_IS_S(pdu)) && | 511 | (LLC_PDU_TYPE_IS_I(pdu) || LLC_PDU_TYPE_IS_S(pdu)) && |
512 | nr != vs && llc_util_nr_inside_tx_window(sk, nr)) { | 512 | nr != vs && llc_util_nr_inside_tx_window(sk, nr)) { |
513 | dprintk("%s: matched, state=%d, vs=%d, nr=%d\n", | 513 | dprintk("%s: matched, state=%d, vs=%d, nr=%d\n", |
514 | __FUNCTION__, llc_sk(sk)->state, vs, nr); | 514 | __func__, llc_sk(sk)->state, vs, nr); |
515 | rc = 0; | 515 | rc = 0; |
516 | } | 516 | } |
517 | return rc; | 517 | return rc; |
@@ -530,7 +530,7 @@ int llc_conn_ev_rx_zzz_rsp_fbit_set_x_inval_nr(struct sock *sk, | |||
530 | nr != vs && llc_util_nr_inside_tx_window(sk, nr)) { | 530 | nr != vs && llc_util_nr_inside_tx_window(sk, nr)) { |
531 | rc = 0; | 531 | rc = 0; |
532 | dprintk("%s: matched, state=%d, vs=%d, nr=%d\n", | 532 | dprintk("%s: matched, state=%d, vs=%d, nr=%d\n", |
533 | __FUNCTION__, llc_sk(sk)->state, vs, nr); | 533 | __func__, llc_sk(sk)->state, vs, nr); |
534 | } | 534 | } |
535 | return rc; | 535 | return rc; |
536 | } | 536 | } |
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c index 441bc18f996d..5ebfd93ff5e7 100644 --- a/net/llc/llc_conn.c +++ b/net/llc/llc_conn.c | |||
@@ -73,7 +73,7 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb) | |||
73 | */ | 73 | */ |
74 | rc = llc_conn_service(skb->sk, skb); | 74 | rc = llc_conn_service(skb->sk, skb); |
75 | if (unlikely(rc != 0)) { | 75 | if (unlikely(rc != 0)) { |
76 | printk(KERN_ERR "%s: llc_conn_service failed\n", __FUNCTION__); | 76 | printk(KERN_ERR "%s: llc_conn_service failed\n", __func__); |
77 | goto out_kfree_skb; | 77 | goto out_kfree_skb; |
78 | } | 78 | } |
79 | 79 | ||
@@ -99,7 +99,7 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb) | |||
99 | * shouldn't happen | 99 | * shouldn't happen |
100 | */ | 100 | */ |
101 | printk(KERN_ERR "%s: sock_queue_rcv_skb failed!\n", | 101 | printk(KERN_ERR "%s: sock_queue_rcv_skb failed!\n", |
102 | __FUNCTION__); | 102 | __func__); |
103 | kfree_skb(skb); | 103 | kfree_skb(skb); |
104 | } | 104 | } |
105 | break; | 105 | break; |
@@ -132,13 +132,13 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb) | |||
132 | * FIXME: | 132 | * FIXME: |
133 | * RESET is not being notified to upper layers for now | 133 | * RESET is not being notified to upper layers for now |
134 | */ | 134 | */ |
135 | printk(KERN_INFO "%s: received a reset ind!\n", __FUNCTION__); | 135 | printk(KERN_INFO "%s: received a reset ind!\n", __func__); |
136 | kfree_skb(skb); | 136 | kfree_skb(skb); |
137 | break; | 137 | break; |
138 | default: | 138 | default: |
139 | if (ev->ind_prim) { | 139 | if (ev->ind_prim) { |
140 | printk(KERN_INFO "%s: received unknown %d prim!\n", | 140 | printk(KERN_INFO "%s: received unknown %d prim!\n", |
141 | __FUNCTION__, ev->ind_prim); | 141 | __func__, ev->ind_prim); |
142 | kfree_skb(skb); | 142 | kfree_skb(skb); |
143 | } | 143 | } |
144 | /* No indication */ | 144 | /* No indication */ |
@@ -179,12 +179,12 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb) | |||
179 | * FIXME: | 179 | * FIXME: |
180 | * RESET is not being notified to upper layers for now | 180 | * RESET is not being notified to upper layers for now |
181 | */ | 181 | */ |
182 | printk(KERN_INFO "%s: received a reset conf!\n", __FUNCTION__); | 182 | printk(KERN_INFO "%s: received a reset conf!\n", __func__); |
183 | break; | 183 | break; |
184 | default: | 184 | default: |
185 | if (ev->cfm_prim) { | 185 | if (ev->cfm_prim) { |
186 | printk(KERN_INFO "%s: received unknown %d prim!\n", | 186 | printk(KERN_INFO "%s: received unknown %d prim!\n", |
187 | __FUNCTION__, ev->cfm_prim); | 187 | __func__, ev->cfm_prim); |
188 | break; | 188 | break; |
189 | } | 189 | } |
190 | goto out_skb_put; /* No confirmation */ | 190 | goto out_skb_put; /* No confirmation */ |
@@ -759,7 +759,7 @@ void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb) | |||
759 | if (!sock_owned_by_user(sk)) | 759 | if (!sock_owned_by_user(sk)) |
760 | llc_conn_rcv(sk, skb); | 760 | llc_conn_rcv(sk, skb); |
761 | else { | 761 | else { |
762 | dprintk("%s: adding to backlog...\n", __FUNCTION__); | 762 | dprintk("%s: adding to backlog...\n", __func__); |
763 | llc_set_backlog_type(skb, LLC_PACKET); | 763 | llc_set_backlog_type(skb, LLC_PACKET); |
764 | sk_add_backlog(sk, skb); | 764 | sk_add_backlog(sk, skb); |
765 | } | 765 | } |
@@ -807,7 +807,7 @@ static int llc_backlog_rcv(struct sock *sk, struct sk_buff *skb) | |||
807 | else | 807 | else |
808 | goto out_kfree_skb; | 808 | goto out_kfree_skb; |
809 | } else { | 809 | } else { |
810 | printk(KERN_ERR "%s: invalid skb in backlog\n", __FUNCTION__); | 810 | printk(KERN_ERR "%s: invalid skb in backlog\n", __func__); |
811 | goto out_kfree_skb; | 811 | goto out_kfree_skb; |
812 | } | 812 | } |
813 | out: | 813 | out: |
@@ -874,7 +874,7 @@ struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority, struct pr | |||
874 | #ifdef LLC_REFCNT_DEBUG | 874 | #ifdef LLC_REFCNT_DEBUG |
875 | atomic_inc(&llc_sock_nr); | 875 | atomic_inc(&llc_sock_nr); |
876 | printk(KERN_DEBUG "LLC socket %p created in %s, now we have %d alive\n", sk, | 876 | printk(KERN_DEBUG "LLC socket %p created in %s, now we have %d alive\n", sk, |
877 | __FUNCTION__, atomic_read(&llc_sock_nr)); | 877 | __func__, atomic_read(&llc_sock_nr)); |
878 | #endif | 878 | #endif |
879 | out: | 879 | out: |
880 | return sk; | 880 | return sk; |
@@ -894,7 +894,7 @@ void llc_sk_free(struct sock *sk) | |||
894 | /* Stop all (possibly) running timers */ | 894 | /* Stop all (possibly) running timers */ |
895 | llc_conn_ac_stop_all_timers(sk, NULL); | 895 | llc_conn_ac_stop_all_timers(sk, NULL); |
896 | #ifdef DEBUG_LLC_CONN_ALLOC | 896 | #ifdef DEBUG_LLC_CONN_ALLOC |
897 | printk(KERN_INFO "%s: unackq=%d, txq=%d\n", __FUNCTION__, | 897 | printk(KERN_INFO "%s: unackq=%d, txq=%d\n", __func__, |
898 | skb_queue_len(&llc->pdu_unack_q), | 898 | skb_queue_len(&llc->pdu_unack_q), |
899 | skb_queue_len(&sk->sk_write_queue)); | 899 | skb_queue_len(&sk->sk_write_queue)); |
900 | #endif | 900 | #endif |
@@ -904,13 +904,13 @@ void llc_sk_free(struct sock *sk) | |||
904 | #ifdef LLC_REFCNT_DEBUG | 904 | #ifdef LLC_REFCNT_DEBUG |
905 | if (atomic_read(&sk->sk_refcnt) != 1) { | 905 | if (atomic_read(&sk->sk_refcnt) != 1) { |
906 | printk(KERN_DEBUG "Destruction of LLC sock %p delayed in %s, cnt=%d\n", | 906 | printk(KERN_DEBUG "Destruction of LLC sock %p delayed in %s, cnt=%d\n", |
907 | sk, __FUNCTION__, atomic_read(&sk->sk_refcnt)); | 907 | sk, __func__, atomic_read(&sk->sk_refcnt)); |
908 | printk(KERN_DEBUG "%d LLC sockets are still alive\n", | 908 | printk(KERN_DEBUG "%d LLC sockets are still alive\n", |
909 | atomic_read(&llc_sock_nr)); | 909 | atomic_read(&llc_sock_nr)); |
910 | } else { | 910 | } else { |
911 | atomic_dec(&llc_sock_nr); | 911 | atomic_dec(&llc_sock_nr); |
912 | printk(KERN_DEBUG "LLC socket %p released in %s, %d are still alive\n", sk, | 912 | printk(KERN_DEBUG "LLC socket %p released in %s, %d are still alive\n", sk, |
913 | __FUNCTION__, atomic_read(&llc_sock_nr)); | 913 | __func__, atomic_read(&llc_sock_nr)); |
914 | } | 914 | } |
915 | #endif | 915 | #endif |
916 | sock_put(sk); | 916 | sock_put(sk); |
diff --git a/net/llc/llc_input.c b/net/llc/llc_input.c index c40c9b2a345a..b9143d2a04e1 100644 --- a/net/llc/llc_input.c +++ b/net/llc/llc_input.c | |||
@@ -154,7 +154,7 @@ int llc_rcv(struct sk_buff *skb, struct net_device *dev, | |||
154 | * receives, do not try to analyse it. | 154 | * receives, do not try to analyse it. |
155 | */ | 155 | */ |
156 | if (unlikely(skb->pkt_type == PACKET_OTHERHOST)) { | 156 | if (unlikely(skb->pkt_type == PACKET_OTHERHOST)) { |
157 | dprintk("%s: PACKET_OTHERHOST\n", __FUNCTION__); | 157 | dprintk("%s: PACKET_OTHERHOST\n", __func__); |
158 | goto drop; | 158 | goto drop; |
159 | } | 159 | } |
160 | skb = skb_share_check(skb, GFP_ATOMIC); | 160 | skb = skb_share_check(skb, GFP_ATOMIC); |
@@ -167,7 +167,7 @@ int llc_rcv(struct sk_buff *skb, struct net_device *dev, | |||
167 | goto handle_station; | 167 | goto handle_station; |
168 | sap = llc_sap_find(pdu->dsap); | 168 | sap = llc_sap_find(pdu->dsap); |
169 | if (unlikely(!sap)) {/* unknown SAP */ | 169 | if (unlikely(!sap)) {/* unknown SAP */ |
170 | dprintk("%s: llc_sap_find(%02X) failed!\n", __FUNCTION__, | 170 | dprintk("%s: llc_sap_find(%02X) failed!\n", __func__, |
171 | pdu->dsap); | 171 | pdu->dsap); |
172 | goto drop; | 172 | goto drop; |
173 | } | 173 | } |
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig index 45c7c0c3875e..520a5180a4f6 100644 --- a/net/mac80211/Kconfig +++ b/net/mac80211/Kconfig | |||
@@ -32,15 +32,6 @@ config MAC80211_RC_DEFAULT_PID | |||
32 | default rate control algorithm. You should choose | 32 | default rate control algorithm. You should choose |
33 | this unless you know what you are doing. | 33 | this unless you know what you are doing. |
34 | 34 | ||
35 | config MAC80211_RC_DEFAULT_SIMPLE | ||
36 | bool "Simple rate control algorithm" | ||
37 | select MAC80211_RC_SIMPLE | ||
38 | ---help--- | ||
39 | Select the simple rate control as the default rate | ||
40 | control algorithm. Note that this is a non-responsive, | ||
41 | dumb algorithm. You should choose the PID rate control | ||
42 | instead. | ||
43 | |||
44 | config MAC80211_RC_DEFAULT_NONE | 35 | config MAC80211_RC_DEFAULT_NONE |
45 | bool "No default algorithm" | 36 | bool "No default algorithm" |
46 | depends on EMBEDDED | 37 | depends on EMBEDDED |
@@ -57,7 +48,6 @@ comment "build the algorithm into mac80211." | |||
57 | config MAC80211_RC_DEFAULT | 48 | config MAC80211_RC_DEFAULT |
58 | string | 49 | string |
59 | default "pid" if MAC80211_RC_DEFAULT_PID | 50 | default "pid" if MAC80211_RC_DEFAULT_PID |
60 | default "simple" if MAC80211_RC_DEFAULT_SIMPLE | ||
61 | default "" | 51 | default "" |
62 | 52 | ||
63 | config MAC80211_RC_PID | 53 | config MAC80211_RC_PID |
@@ -70,16 +60,16 @@ config MAC80211_RC_PID | |||
70 | Say Y or M unless you're sure you want to use a | 60 | Say Y or M unless you're sure you want to use a |
71 | different rate control algorithm. | 61 | different rate control algorithm. |
72 | 62 | ||
73 | config MAC80211_RC_SIMPLE | 63 | endmenu |
74 | tristate "Simple rate control algorithm (DEPRECATED)" | 64 | |
65 | config MAC80211_MESH | ||
66 | bool "Enable mac80211 mesh networking (pre-802.11s) support" | ||
67 | depends on MAC80211 && EXPERIMENTAL | ||
75 | ---help--- | 68 | ---help--- |
76 | This option enables a very simple, non-responsive TX | 69 | This option enables support of Draft 802.11s mesh networking. |
77 | rate control algorithm. This algorithm is deprecated | 70 | The implementation is based on Draft 1.08 of the Mesh Networking |
78 | and will be removed from the kernel in the near future. | 71 | amendment. For more information visit http://o11s.org/. |
79 | It has been replaced by the PID algorithm. | ||
80 | 72 | ||
81 | Say N unless you know what you are doing. | ||
82 | endmenu | ||
83 | 73 | ||
84 | config MAC80211_LEDS | 74 | config MAC80211_LEDS |
85 | bool "Enable LED triggers" | 75 | bool "Enable LED triggers" |
@@ -166,3 +156,10 @@ config MAC80211_VERBOSE_PS_DEBUG | |||
166 | ---help--- | 156 | ---help--- |
167 | Say Y here to print out verbose powersave | 157 | Say Y here to print out verbose powersave |
168 | mode debug messages. | 158 | mode debug messages. |
159 | |||
160 | config MAC80211_VERBOSE_MPL_DEBUG | ||
161 | bool "Verbose mesh peer link debugging" | ||
162 | depends on MAC80211_DEBUG && MAC80211_MESH | ||
163 | ---help--- | ||
164 | Say Y here to print out verbose mesh peer link | ||
165 | debug messages. | ||
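MAC80211_MESH is a bool option, so enabling it defines CONFIG_MAC80211_MESH for the C code, and the mesh-only paths added later in this series are wrapped in that symbol. A standalone sketch of the guard pattern (mesh_supported() is invented; the real code simply compiles the mesh files and #ifdef blocks in or out):

    #include <stdio.h>

    /* When the Kconfig bool is set to y, the kernel build defines
     * CONFIG_MAC80211_MESH; code built without it falls back to a stub.
     * This mirrors the #ifdef guards in the cfg.c hunks below. */
    #ifdef CONFIG_MAC80211_MESH
    static int mesh_supported(void) { return 1; }
    #else
    static int mesh_supported(void) { return 0; }
    #endif

    int main(void)
    {
            printf("mesh support compiled in: %d\n", mesh_supported());
            return 0;
    }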
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile index 54f46bc80cfe..70f4b26c2d87 100644 --- a/net/mac80211/Makefile +++ b/net/mac80211/Makefile | |||
@@ -19,7 +19,6 @@ mac80211-y := \ | |||
19 | ieee80211_iface.o \ | 19 | ieee80211_iface.o \ |
20 | ieee80211_rate.o \ | 20 | ieee80211_rate.o \ |
21 | michael.o \ | 21 | michael.o \ |
22 | regdomain.o \ | ||
23 | tkip.o \ | 22 | tkip.o \ |
24 | aes_ccm.o \ | 23 | aes_ccm.o \ |
25 | cfg.o \ | 24 | cfg.o \ |
@@ -37,11 +36,15 @@ mac80211-$(CONFIG_MAC80211_DEBUGFS) += \ | |||
37 | debugfs_netdev.o \ | 36 | debugfs_netdev.o \ |
38 | debugfs_key.o | 37 | debugfs_key.o |
39 | 38 | ||
39 | mac80211-$(CONFIG_MAC80211_MESH) += \ | ||
40 | mesh.o \ | ||
41 | mesh_pathtbl.o \ | ||
42 | mesh_plink.o \ | ||
43 | mesh_hwmp.o | ||
44 | |||
40 | 45 | ||
41 | # Build rate control algorithm(s) | 46 | # Build rate control algorithm(s) |
42 | CFLAGS_rc80211_simple.o += -DRC80211_SIMPLE_COMPILE | ||
43 | CFLAGS_rc80211_pid_algo.o += -DRC80211_PID_COMPILE | 47 | CFLAGS_rc80211_pid_algo.o += -DRC80211_PID_COMPILE |
44 | mac80211-$(CONFIG_MAC80211_RC_SIMPLE) += rc80211_simple.o | ||
45 | mac80211-$(CONFIG_MAC80211_RC_PID) += $(rc-pid-$(CONFIG_MAC80211_RC_PID)) | 48 | mac80211-$(CONFIG_MAC80211_RC_PID) += $(rc-pid-$(CONFIG_MAC80211_RC_PID)) |
46 | 49 | ||
47 | # Modular rate algorithms are assigned to mac80211-m - make separate modules | 50 | # Modular rate algorithms are assigned to mac80211-m - make separate modules |
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 22c9619ba776..6b183a3526b0 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include "ieee80211_i.h" | 15 | #include "ieee80211_i.h" |
16 | #include "cfg.h" | 16 | #include "cfg.h" |
17 | #include "ieee80211_rate.h" | 17 | #include "ieee80211_rate.h" |
18 | #include "mesh.h" | ||
18 | 19 | ||
19 | static enum ieee80211_if_types | 20 | static enum ieee80211_if_types |
20 | nl80211_type_to_mac80211_type(enum nl80211_iftype type) | 21 | nl80211_type_to_mac80211_type(enum nl80211_iftype type) |
@@ -28,16 +29,24 @@ nl80211_type_to_mac80211_type(enum nl80211_iftype type) | |||
28 | return IEEE80211_IF_TYPE_STA; | 29 | return IEEE80211_IF_TYPE_STA; |
29 | case NL80211_IFTYPE_MONITOR: | 30 | case NL80211_IFTYPE_MONITOR: |
30 | return IEEE80211_IF_TYPE_MNTR; | 31 | return IEEE80211_IF_TYPE_MNTR; |
32 | #ifdef CONFIG_MAC80211_MESH | ||
33 | case NL80211_IFTYPE_MESH_POINT: | ||
34 | return IEEE80211_IF_TYPE_MESH_POINT; | ||
35 | #endif | ||
31 | default: | 36 | default: |
32 | return IEEE80211_IF_TYPE_INVALID; | 37 | return IEEE80211_IF_TYPE_INVALID; |
33 | } | 38 | } |
34 | } | 39 | } |
35 | 40 | ||
36 | static int ieee80211_add_iface(struct wiphy *wiphy, char *name, | 41 | static int ieee80211_add_iface(struct wiphy *wiphy, char *name, |
37 | enum nl80211_iftype type) | 42 | enum nl80211_iftype type, u32 *flags, |
43 | struct vif_params *params) | ||
38 | { | 44 | { |
39 | struct ieee80211_local *local = wiphy_priv(wiphy); | 45 | struct ieee80211_local *local = wiphy_priv(wiphy); |
40 | enum ieee80211_if_types itype; | 46 | enum ieee80211_if_types itype; |
47 | struct net_device *dev; | ||
48 | struct ieee80211_sub_if_data *sdata; | ||
49 | int err; | ||
41 | 50 | ||
42 | if (unlikely(local->reg_state != IEEE80211_DEV_REGISTERED)) | 51 | if (unlikely(local->reg_state != IEEE80211_DEV_REGISTERED)) |
43 | return -ENODEV; | 52 | return -ENODEV; |
@@ -46,7 +55,13 @@ static int ieee80211_add_iface(struct wiphy *wiphy, char *name, | |||
46 | if (itype == IEEE80211_IF_TYPE_INVALID) | 55 | if (itype == IEEE80211_IF_TYPE_INVALID) |
47 | return -EINVAL; | 56 | return -EINVAL; |
48 | 57 | ||
49 | return ieee80211_if_add(local->mdev, name, NULL, itype); | 58 | err = ieee80211_if_add(local->mdev, name, &dev, itype, params); |
59 | if (err || itype != IEEE80211_IF_TYPE_MNTR || !flags) | ||
60 | return err; | ||
61 | |||
62 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
63 | sdata->u.mntr_flags = *flags; | ||
64 | return 0; | ||
50 | } | 65 | } |
51 | 66 | ||
52 | static int ieee80211_del_iface(struct wiphy *wiphy, int ifindex) | 67 | static int ieee80211_del_iface(struct wiphy *wiphy, int ifindex) |
@@ -69,7 +84,8 @@ static int ieee80211_del_iface(struct wiphy *wiphy, int ifindex) | |||
69 | } | 84 | } |
70 | 85 | ||
71 | static int ieee80211_change_iface(struct wiphy *wiphy, int ifindex, | 86 | static int ieee80211_change_iface(struct wiphy *wiphy, int ifindex, |
72 | enum nl80211_iftype type) | 87 | enum nl80211_iftype type, u32 *flags, |
88 | struct vif_params *params) | ||
73 | { | 89 | { |
74 | struct ieee80211_local *local = wiphy_priv(wiphy); | 90 | struct ieee80211_local *local = wiphy_priv(wiphy); |
75 | struct net_device *dev; | 91 | struct net_device *dev; |
@@ -99,6 +115,15 @@ static int ieee80211_change_iface(struct wiphy *wiphy, int ifindex, | |||
99 | ieee80211_if_reinit(dev); | 115 | ieee80211_if_reinit(dev); |
100 | ieee80211_if_set_type(dev, itype); | 116 | ieee80211_if_set_type(dev, itype); |
101 | 117 | ||
118 | if (ieee80211_vif_is_mesh(&sdata->vif) && params->mesh_id_len) | ||
119 | ieee80211_if_sta_set_mesh_id(&sdata->u.sta, | ||
120 | params->mesh_id_len, | ||
121 | params->mesh_id); | ||
122 | |||
123 | if (sdata->vif.type != IEEE80211_IF_TYPE_MNTR || !flags) | ||
124 | return 0; | ||
125 | |||
126 | sdata->u.mntr_flags = *flags; | ||
102 | return 0; | 127 | return 0; |
103 | } | 128 | } |
104 | 129 | ||
@@ -109,7 +134,7 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev, | |||
109 | struct ieee80211_sub_if_data *sdata; | 134 | struct ieee80211_sub_if_data *sdata; |
110 | struct sta_info *sta = NULL; | 135 | struct sta_info *sta = NULL; |
111 | enum ieee80211_key_alg alg; | 136 | enum ieee80211_key_alg alg; |
112 | int ret; | 137 | struct ieee80211_key *key; |
113 | 138 | ||
114 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 139 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
115 | 140 | ||
@@ -128,21 +153,21 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev, | |||
128 | return -EINVAL; | 153 | return -EINVAL; |
129 | } | 154 | } |
130 | 155 | ||
156 | key = ieee80211_key_alloc(alg, key_idx, params->key_len, params->key); | ||
157 | if (!key) | ||
158 | return -ENOMEM; | ||
159 | |||
131 | if (mac_addr) { | 160 | if (mac_addr) { |
132 | sta = sta_info_get(sdata->local, mac_addr); | 161 | sta = sta_info_get(sdata->local, mac_addr); |
133 | if (!sta) | 162 | if (!sta) { |
163 | ieee80211_key_free(key); | ||
134 | return -ENOENT; | 164 | return -ENOENT; |
165 | } | ||
135 | } | 166 | } |
136 | 167 | ||
137 | ret = 0; | 168 | ieee80211_key_link(key, sdata, sta); |
138 | if (!ieee80211_key_alloc(sdata, sta, alg, key_idx, | ||
139 | params->key_len, params->key)) | ||
140 | ret = -ENOMEM; | ||
141 | |||
142 | if (sta) | ||
143 | sta_info_put(sta); | ||
144 | 169 | ||
145 | return ret; | 170 | return 0; |
146 | } | 171 | } |
147 | 172 | ||
148 | static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev, | 173 | static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev, |
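The add_key rework above allocates the key first (ieee80211_key_alloc()) and only then resolves the optional station, so the failure path has to release the allocation before returning; the final ieee80211_key_link() call replaces the old combined allocate-and-attach helper. A standalone, hypothetical analogue of that unwind ordering (key_alloc(), sta_exists() and add_key() are invented for the example):

    #include <stdio.h>
    #include <stdlib.h>
    #include <errno.h>

    struct key { int idx; };

    static struct key *key_alloc(int idx)
    {
            struct key *k = malloc(sizeof(*k));

            if (k)
                    k->idx = idx;
            return k;
    }

    /* Stand-in for the station lookup; the real code uses sta_info_get(). */
    static int sta_exists(const char *mac) { return mac && mac[0] != '\0'; }

    static int add_key(const char *mac, int idx)
    {
            struct key *key = key_alloc(idx);

            if (!key)
                    return -ENOMEM;
            if (mac && !sta_exists(mac)) {
                    free(key);              /* undo the allocation on failure */
                    return -ENOENT;
            }
            /* ...the key would be linked to its owner here... */
            free(key);                      /* example only: nothing retains it */
            return 0;
    }

    int main(void)
    {
            printf("%d %d\n", add_key("00:11:22:33:44:55", 0), add_key("", 0));
            return 0;
    }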
@@ -160,12 +185,12 @@ static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev, | |||
160 | return -ENOENT; | 185 | return -ENOENT; |
161 | 186 | ||
162 | ret = 0; | 187 | ret = 0; |
163 | if (sta->key) | 188 | if (sta->key) { |
164 | ieee80211_key_free(sta->key); | 189 | ieee80211_key_free(sta->key); |
165 | else | 190 | WARN_ON(sta->key); |
191 | } else | ||
166 | ret = -ENOENT; | 192 | ret = -ENOENT; |
167 | 193 | ||
168 | sta_info_put(sta); | ||
169 | return ret; | 194 | return ret; |
170 | } | 195 | } |
171 | 196 | ||
@@ -173,6 +198,7 @@ static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev, | |||
173 | return -ENOENT; | 198 | return -ENOENT; |
174 | 199 | ||
175 | ieee80211_key_free(sdata->keys[key_idx]); | 200 | ieee80211_key_free(sdata->keys[key_idx]); |
201 | WARN_ON(sdata->keys[key_idx]); | ||
176 | 202 | ||
177 | return 0; | 203 | return 0; |
178 | } | 204 | } |
@@ -254,8 +280,6 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev, | |||
254 | err = 0; | 280 | err = 0; |
255 | 281 | ||
256 | out: | 282 | out: |
257 | if (sta) | ||
258 | sta_info_put(sta); | ||
259 | return err; | 283 | return err; |
260 | } | 284 | } |
261 | 285 | ||
@@ -271,29 +295,73 @@ static int ieee80211_config_default_key(struct wiphy *wiphy, | |||
271 | return 0; | 295 | return 0; |
272 | } | 296 | } |
273 | 297 | ||
298 | static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) | ||
299 | { | ||
300 | struct ieee80211_sub_if_data *sdata = sta->sdata; | ||
301 | |||
302 | sinfo->filled = STATION_INFO_INACTIVE_TIME | | ||
303 | STATION_INFO_RX_BYTES | | ||
304 | STATION_INFO_TX_BYTES; | ||
305 | |||
306 | sinfo->inactive_time = jiffies_to_msecs(jiffies - sta->last_rx); | ||
307 | sinfo->rx_bytes = sta->rx_bytes; | ||
308 | sinfo->tx_bytes = sta->tx_bytes; | ||
309 | |||
310 | if (ieee80211_vif_is_mesh(&sdata->vif)) { | ||
311 | #ifdef CONFIG_MAC80211_MESH | ||
312 | sinfo->filled |= STATION_INFO_LLID | | ||
313 | STATION_INFO_PLID | | ||
314 | STATION_INFO_PLINK_STATE; | ||
315 | |||
316 | sinfo->llid = le16_to_cpu(sta->llid); | ||
317 | sinfo->plid = le16_to_cpu(sta->plid); | ||
318 | sinfo->plink_state = sta->plink_state; | ||
319 | #endif | ||
320 | } | ||
321 | } | ||
322 | |||
323 | |||
324 | static int ieee80211_dump_station(struct wiphy *wiphy, struct net_device *dev, | ||
325 | int idx, u8 *mac, struct station_info *sinfo) | ||
326 | { | ||
327 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
328 | struct sta_info *sta; | ||
329 | int ret = -ENOENT; | ||
330 | |||
331 | rcu_read_lock(); | ||
332 | |||
333 | sta = sta_info_get_by_idx(local, idx, dev); | ||
334 | if (sta) { | ||
335 | ret = 0; | ||
336 | memcpy(mac, sta->addr, ETH_ALEN); | ||
337 | sta_set_sinfo(sta, sinfo); | ||
338 | } | ||
339 | |||
340 | rcu_read_unlock(); | ||
341 | |||
342 | return ret; | ||
343 | } | ||
344 | |||
274 | static int ieee80211_get_station(struct wiphy *wiphy, struct net_device *dev, | 345 | static int ieee80211_get_station(struct wiphy *wiphy, struct net_device *dev, |
275 | u8 *mac, struct station_stats *stats) | 346 | u8 *mac, struct station_info *sinfo) |
276 | { | 347 | { |
277 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 348 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); |
278 | struct sta_info *sta; | 349 | struct sta_info *sta; |
350 | int ret = -ENOENT; | ||
279 | 351 | ||
280 | sta = sta_info_get(local, mac); | 352 | rcu_read_lock(); |
281 | if (!sta) | ||
282 | return -ENOENT; | ||
283 | 353 | ||
284 | /* XXX: verify sta->dev == dev */ | 354 | /* XXX: verify sta->dev == dev */ |
285 | 355 | ||
286 | stats->filled = STATION_STAT_INACTIVE_TIME | | 356 | sta = sta_info_get(local, mac); |
287 | STATION_STAT_RX_BYTES | | 357 | if (sta) { |
288 | STATION_STAT_TX_BYTES; | 358 | ret = 0; |
289 | 359 | sta_set_sinfo(sta, sinfo); | |
290 | stats->inactive_time = jiffies_to_msecs(jiffies - sta->last_rx); | 360 | } |
291 | stats->rx_bytes = sta->rx_bytes; | ||
292 | stats->tx_bytes = sta->tx_bytes; | ||
293 | 361 | ||
294 | sta_info_put(sta); | 362 | rcu_read_unlock(); |
295 | 363 | ||
296 | return 0; | 364 | return ret; |
297 | } | 365 | } |
298 | 366 | ||
299 | /* | 367 | /* |
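The get_station/dump_station rework above drops the reference-count dance (sta_info_get()/sta_info_put()) in favour of an RCU read-side critical section: the station pointer is only valid between rcu_read_lock() and rcu_read_unlock(), so everything the caller needs is copied out via sta_set_sinfo() before the unlock. A hedged fragment of the pattern as used by ieee80211_get_station():

    int ret = -ENOENT;

    rcu_read_lock();                    /* pins the RCU-protected sta list */
    sta = sta_info_get(local, mac);     /* pointer valid only under the lock */
    if (sta) {
            sta_set_sinfo(sta, sinfo);  /* copy fields out while protected */
            ret = 0;
    }
    rcu_read_unlock();                  /* sta must not be touched after this */
    return ret;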
@@ -486,8 +554,8 @@ static void ieee80211_send_layer2_update(struct sta_info *sta) | |||
486 | msg->xid_info[1] = 1; /* LLC types/classes: Type 1 LLC */ | 554 | msg->xid_info[1] = 1; /* LLC types/classes: Type 1 LLC */ |
487 | msg->xid_info[2] = 0; /* XID sender's receive window size (RW) */ | 555 | msg->xid_info[2] = 0; /* XID sender's receive window size (RW) */ |
488 | 556 | ||
489 | skb->dev = sta->dev; | 557 | skb->dev = sta->sdata->dev; |
490 | skb->protocol = eth_type_trans(skb, sta->dev); | 558 | skb->protocol = eth_type_trans(skb, sta->sdata->dev); |
491 | memset(skb->cb, 0, sizeof(skb->cb)); | 559 | memset(skb->cb, 0, sizeof(skb->cb)); |
492 | netif_rx(skb); | 560 | netif_rx(skb); |
493 | } | 561 | } |
@@ -498,7 +566,14 @@ static void sta_apply_parameters(struct ieee80211_local *local, | |||
498 | { | 566 | { |
499 | u32 rates; | 567 | u32 rates; |
500 | int i, j; | 568 | int i, j; |
501 | struct ieee80211_hw_mode *mode; | 569 | struct ieee80211_supported_band *sband; |
570 | struct ieee80211_sub_if_data *sdata = sta->sdata; | ||
571 | |||
572 | /* | ||
573 | * FIXME: updating the flags is racy when this function is | ||
574 | * called from ieee80211_change_station(), this will | ||
575 | * be resolved in a future patch. | ||
576 | */ | ||
502 | 577 | ||
503 | if (params->station_flags & STATION_FLAG_CHANGED) { | 578 | if (params->station_flags & STATION_FLAG_CHANGED) { |
504 | sta->flags &= ~WLAN_STA_AUTHORIZED; | 579 | sta->flags &= ~WLAN_STA_AUTHORIZED; |
@@ -514,6 +589,13 @@ static void sta_apply_parameters(struct ieee80211_local *local, | |||
514 | sta->flags |= WLAN_STA_WME; | 589 | sta->flags |= WLAN_STA_WME; |
515 | } | 590 | } |
516 | 591 | ||
592 | /* | ||
593 | * FIXME: updating the following information is racy when this | ||
594 | * function is called from ieee80211_change_station(). | ||
595 | * However, all this information should be static so | ||
596 | * maybe we should just reject attemps to change it. | ||
597 | */ | ||
598 | |||
517 | if (params->aid) { | 599 | if (params->aid) { |
518 | sta->aid = params->aid; | 600 | sta->aid = params->aid; |
519 | if (sta->aid > IEEE80211_MAX_AID) | 601 | if (sta->aid > IEEE80211_MAX_AID) |
@@ -525,15 +607,27 @@ static void sta_apply_parameters(struct ieee80211_local *local, | |||
525 | 607 | ||
526 | if (params->supported_rates) { | 608 | if (params->supported_rates) { |
527 | rates = 0; | 609 | rates = 0; |
528 | mode = local->oper_hw_mode; | 610 | sband = local->hw.wiphy->bands[local->oper_channel->band]; |
611 | |||
529 | for (i = 0; i < params->supported_rates_len; i++) { | 612 | for (i = 0; i < params->supported_rates_len; i++) { |
530 | int rate = (params->supported_rates[i] & 0x7f) * 5; | 613 | int rate = (params->supported_rates[i] & 0x7f) * 5; |
531 | for (j = 0; j < mode->num_rates; j++) { | 614 | for (j = 0; j < sband->n_bitrates; j++) { |
532 | if (mode->rates[j].rate == rate) | 615 | if (sband->bitrates[j].bitrate == rate) |
533 | rates |= BIT(j); | 616 | rates |= BIT(j); |
534 | } | 617 | } |
535 | } | 618 | } |
536 | sta->supp_rates = rates; | 619 | sta->supp_rates[local->oper_channel->band] = rates; |
620 | } | ||
621 | |||
622 | if (ieee80211_vif_is_mesh(&sdata->vif) && params->plink_action) { | ||
623 | switch (params->plink_action) { | ||
624 | case PLINK_ACTION_OPEN: | ||
625 | mesh_plink_open(sta); | ||
626 | break; | ||
627 | case PLINK_ACTION_BLOCK: | ||
628 | mesh_plink_block(sta); | ||
629 | break; | ||
630 | } | ||
537 | } | 631 | } |
538 | } | 632 | } |
539 | 633 | ||
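sta_apply_parameters() now matches the user-supplied rate set against the per-band bitrate table instead of the removed hw_mode list; the conversion itself is unchanged: supported-rates octets are in 500 kbps units with bit 7 flagging a basic rate, so (octet & 0x7f) * 5 yields the 100 kbps units used by sband->bitrates[].bitrate. A standalone sketch of that arithmetic:

    #include <stdio.h>

    int main(void)
    {
            /* Supported-rates IE octets for 1, 2, 5.5 and 11 Mbps; the two
             * with bit 7 set are marked as basic rates. */
            unsigned char ie_rates[] = { 0x82, 0x84, 0x0b, 0x16 };
            int i;

            for (i = 0; i < 4; i++) {
                    int rate = (ie_rates[i] & 0x7f) * 5;   /* 100 kbps units */

                    printf("IE 0x%02x -> %d.%d Mbps%s\n", ie_rates[i],
                           rate / 10, rate % 10,
                           (ie_rates[i] & 0x80) ? " (basic)" : "");
            }
            return 0;
    }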
@@ -543,18 +637,12 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev, | |||
543 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 637 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); |
544 | struct sta_info *sta; | 638 | struct sta_info *sta; |
545 | struct ieee80211_sub_if_data *sdata; | 639 | struct ieee80211_sub_if_data *sdata; |
640 | int err; | ||
546 | 641 | ||
547 | /* Prevent a race with changing the rate control algorithm */ | 642 | /* Prevent a race with changing the rate control algorithm */ |
548 | if (!netif_running(dev)) | 643 | if (!netif_running(dev)) |
549 | return -ENETDOWN; | 644 | return -ENETDOWN; |
550 | 645 | ||
551 | /* XXX: get sta belonging to dev */ | ||
552 | sta = sta_info_get(local, mac); | ||
553 | if (sta) { | ||
554 | sta_info_put(sta); | ||
555 | return -EEXIST; | ||
556 | } | ||
557 | |||
558 | if (params->vlan) { | 646 | if (params->vlan) { |
559 | sdata = IEEE80211_DEV_TO_SUB_IF(params->vlan); | 647 | sdata = IEEE80211_DEV_TO_SUB_IF(params->vlan); |
560 | 648 | ||
@@ -564,22 +652,36 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev, | |||
564 | } else | 652 | } else |
565 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 653 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
566 | 654 | ||
567 | sta = sta_info_add(local, dev, mac, GFP_KERNEL); | 655 | if (compare_ether_addr(mac, dev->dev_addr) == 0) |
656 | return -EINVAL; | ||
657 | |||
658 | if (is_multicast_ether_addr(mac)) | ||
659 | return -EINVAL; | ||
660 | |||
661 | sta = sta_info_alloc(sdata, mac, GFP_KERNEL); | ||
568 | if (!sta) | 662 | if (!sta) |
569 | return -ENOMEM; | 663 | return -ENOMEM; |
570 | 664 | ||
571 | sta->dev = sdata->dev; | ||
572 | if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN || | ||
573 | sdata->vif.type == IEEE80211_IF_TYPE_AP) | ||
574 | ieee80211_send_layer2_update(sta); | ||
575 | |||
576 | sta->flags = WLAN_STA_AUTH | WLAN_STA_ASSOC; | 665 | sta->flags = WLAN_STA_AUTH | WLAN_STA_ASSOC; |
577 | 666 | ||
578 | sta_apply_parameters(local, sta, params); | 667 | sta_apply_parameters(local, sta, params); |
579 | 668 | ||
580 | rate_control_rate_init(sta, local); | 669 | rate_control_rate_init(sta, local); |
581 | 670 | ||
582 | sta_info_put(sta); | 671 | rcu_read_lock(); |
672 | |||
673 | err = sta_info_insert(sta); | ||
674 | if (err) { | ||
675 | sta_info_destroy(sta); | ||
676 | rcu_read_unlock(); | ||
677 | return err; | ||
678 | } | ||
679 | |||
680 | if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN || | ||
681 | sdata->vif.type == IEEE80211_IF_TYPE_AP) | ||
682 | ieee80211_send_layer2_update(sta); | ||
683 | |||
684 | rcu_read_unlock(); | ||
583 | 685 | ||
584 | return 0; | 686 | return 0; |
585 | } | 687 | } |
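ieee80211_add_station() now separates construction from publication: sta_info_alloc() builds a private, not-yet-visible entry, parameters and rate control state are applied while nothing else can see it, and sta_info_insert() is the publication point; if insertion fails the entry was never visible, so sta_info_destroy() may free it immediately without an RCU grace period. A hedged, condensed sketch of that flow using the helpers named in the hunk:

    sta = sta_info_alloc(sdata, mac, GFP_KERNEL);   /* private object so far */
    if (!sta)
            return -ENOMEM;

    sta_apply_parameters(local, sta, params);       /* safe: not yet visible */
    rate_control_rate_init(sta, local);

    rcu_read_lock();
    err = sta_info_insert(sta);                     /* publication point */
    if (err) {
            sta_info_destroy(sta);                  /* never visible: free now */
            rcu_read_unlock();
            return err;
    }
    /* ...the layer-2 update for AP/VLAN interfaces happens here... */
    rcu_read_unlock();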
@@ -587,7 +689,8 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev, | |||
587 | static int ieee80211_del_station(struct wiphy *wiphy, struct net_device *dev, | 689 | static int ieee80211_del_station(struct wiphy *wiphy, struct net_device *dev, |
588 | u8 *mac) | 690 | u8 *mac) |
589 | { | 691 | { |
590 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 692 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
693 | struct ieee80211_local *local = sdata->local; | ||
591 | struct sta_info *sta; | 694 | struct sta_info *sta; |
592 | 695 | ||
593 | if (mac) { | 696 | if (mac) { |
@@ -596,10 +699,14 @@ static int ieee80211_del_station(struct wiphy *wiphy, struct net_device *dev, | |||
596 | if (!sta) | 699 | if (!sta) |
597 | return -ENOENT; | 700 | return -ENOENT; |
598 | 701 | ||
599 | sta_info_free(sta); | 702 | sta_info_unlink(&sta); |
600 | sta_info_put(sta); | 703 | |
704 | if (sta) { | ||
705 | synchronize_rcu(); | ||
706 | sta_info_destroy(sta); | ||
707 | } | ||
601 | } else | 708 | } else |
602 | sta_info_flush(local, dev); | 709 | sta_info_flush(local, sdata); |
603 | 710 | ||
604 | return 0; | 711 | return 0; |
605 | } | 712 | } |
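For single-station removal the hunk above replaces the old free/put pair with an unlink-then-destroy sequence: sta_info_unlink() takes the entry out of every RCU-visible list, synchronize_rcu() waits until no reader can still be dereferencing it, and only then is sta_info_destroy() allowed to free the memory. A compressed, hedged sketch of that ordering:

    sta_info_unlink(&sta);          /* no new lookup can find the entry now */
    if (sta) {                      /* unlink may hand ownership back to us */
            synchronize_rcu();      /* wait out every in-flight RCU reader */
            sta_info_destroy(sta);  /* safe to free: nobody can reach it */
    }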
@@ -618,23 +725,190 @@ static int ieee80211_change_station(struct wiphy *wiphy, | |||
618 | if (!sta) | 725 | if (!sta) |
619 | return -ENOENT; | 726 | return -ENOENT; |
620 | 727 | ||
621 | if (params->vlan && params->vlan != sta->dev) { | 728 | if (params->vlan && params->vlan != sta->sdata->dev) { |
622 | vlansdata = IEEE80211_DEV_TO_SUB_IF(params->vlan); | 729 | vlansdata = IEEE80211_DEV_TO_SUB_IF(params->vlan); |
623 | 730 | ||
624 | if (vlansdata->vif.type != IEEE80211_IF_TYPE_VLAN || | 731 | if (vlansdata->vif.type != IEEE80211_IF_TYPE_VLAN || |
625 | vlansdata->vif.type != IEEE80211_IF_TYPE_AP) | 732 | vlansdata->vif.type != IEEE80211_IF_TYPE_AP) |
626 | return -EINVAL; | 733 | return -EINVAL; |
627 | 734 | ||
628 | sta->dev = params->vlan; | 735 | sta->sdata = IEEE80211_DEV_TO_SUB_IF(params->vlan); |
629 | ieee80211_send_layer2_update(sta); | 736 | ieee80211_send_layer2_update(sta); |
630 | } | 737 | } |
631 | 738 | ||
632 | sta_apply_parameters(local, sta, params); | 739 | sta_apply_parameters(local, sta, params); |
633 | 740 | ||
634 | sta_info_put(sta); | 741 | return 0; |
742 | } | ||
743 | |||
744 | #ifdef CONFIG_MAC80211_MESH | ||
745 | static int ieee80211_add_mpath(struct wiphy *wiphy, struct net_device *dev, | ||
746 | u8 *dst, u8 *next_hop) | ||
747 | { | ||
748 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
749 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
750 | struct mesh_path *mpath; | ||
751 | struct sta_info *sta; | ||
752 | int err; | ||
753 | |||
754 | if (!netif_running(dev)) | ||
755 | return -ENETDOWN; | ||
756 | |||
757 | if (sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT) | ||
758 | return -ENOTSUPP; | ||
759 | |||
760 | rcu_read_lock(); | ||
761 | sta = sta_info_get(local, next_hop); | ||
762 | if (!sta) { | ||
763 | rcu_read_unlock(); | ||
764 | return -ENOENT; | ||
765 | } | ||
766 | |||
767 | err = mesh_path_add(dst, dev); | ||
768 | if (err) { | ||
769 | rcu_read_unlock(); | ||
770 | return err; | ||
771 | } | ||
772 | |||
773 | mpath = mesh_path_lookup(dst, dev); | ||
774 | if (!mpath) { | ||
775 | rcu_read_unlock(); | ||
776 | return -ENXIO; | ||
777 | } | ||
778 | mesh_path_fix_nexthop(mpath, sta); | ||
779 | |||
780 | rcu_read_unlock(); | ||
781 | return 0; | ||
782 | } | ||
635 | 783 | ||
784 | static int ieee80211_del_mpath(struct wiphy *wiphy, struct net_device *dev, | ||
785 | u8 *dst) | ||
786 | { | ||
787 | if (dst) | ||
788 | return mesh_path_del(dst, dev); | ||
789 | |||
790 | mesh_path_flush(dev); | ||
791 | return 0; | ||
792 | } | ||
793 | |||
794 | static int ieee80211_change_mpath(struct wiphy *wiphy, | ||
795 | struct net_device *dev, | ||
796 | u8 *dst, u8 *next_hop) | ||
797 | { | ||
798 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
799 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
800 | struct mesh_path *mpath; | ||
801 | struct sta_info *sta; | ||
802 | |||
803 | if (!netif_running(dev)) | ||
804 | return -ENETDOWN; | ||
805 | |||
806 | if (sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT) | ||
807 | return -ENOTSUPP; | ||
808 | |||
809 | rcu_read_lock(); | ||
810 | |||
811 | sta = sta_info_get(local, next_hop); | ||
812 | if (!sta) { | ||
813 | rcu_read_unlock(); | ||
814 | return -ENOENT; | ||
815 | } | ||
816 | |||
817 | mpath = mesh_path_lookup(dst, dev); | ||
818 | if (!mpath) { | ||
819 | rcu_read_unlock(); | ||
820 | return -ENOENT; | ||
821 | } | ||
822 | |||
823 | mesh_path_fix_nexthop(mpath, sta); | ||
824 | |||
825 | rcu_read_unlock(); | ||
826 | return 0; | ||
827 | } | ||
828 | |||
829 | static void mpath_set_pinfo(struct mesh_path *mpath, u8 *next_hop, | ||
830 | struct mpath_info *pinfo) | ||
831 | { | ||
832 | if (mpath->next_hop) | ||
833 | memcpy(next_hop, mpath->next_hop->addr, ETH_ALEN); | ||
834 | else | ||
835 | memset(next_hop, 0, ETH_ALEN); | ||
836 | |||
837 | pinfo->filled = MPATH_INFO_FRAME_QLEN | | ||
838 | MPATH_INFO_DSN | | ||
839 | MPATH_INFO_METRIC | | ||
840 | MPATH_INFO_EXPTIME | | ||
841 | MPATH_INFO_DISCOVERY_TIMEOUT | | ||
842 | MPATH_INFO_DISCOVERY_RETRIES | | ||
843 | MPATH_INFO_FLAGS; | ||
844 | |||
845 | pinfo->frame_qlen = mpath->frame_queue.qlen; | ||
846 | pinfo->dsn = mpath->dsn; | ||
847 | pinfo->metric = mpath->metric; | ||
848 | if (time_before(jiffies, mpath->exp_time)) | ||
849 | pinfo->exptime = jiffies_to_msecs(mpath->exp_time - jiffies); | ||
850 | pinfo->discovery_timeout = | ||
851 | jiffies_to_msecs(mpath->discovery_timeout); | ||
852 | pinfo->discovery_retries = mpath->discovery_retries; | ||
853 | pinfo->flags = 0; | ||
854 | if (mpath->flags & MESH_PATH_ACTIVE) | ||
855 | pinfo->flags |= NL80211_MPATH_FLAG_ACTIVE; | ||
856 | if (mpath->flags & MESH_PATH_RESOLVING) | ||
857 | pinfo->flags |= NL80211_MPATH_FLAG_RESOLVING; | ||
858 | if (mpath->flags & MESH_PATH_DSN_VALID) | ||
859 | pinfo->flags |= NL80211_MPATH_FLAG_DSN_VALID; | ||
860 | if (mpath->flags & MESH_PATH_FIXED) | ||
861 | pinfo->flags |= NL80211_MPATH_FLAG_FIXED; | ||
862 | if (mpath->flags & MESH_PATH_RESOLVING) | ||
863 | pinfo->flags |= NL80211_MPATH_FLAG_RESOLVING; | ||
864 | |||
865 | pinfo->flags = mpath->flags; | ||
866 | } | ||
867 | |||
868 | static int ieee80211_get_mpath(struct wiphy *wiphy, struct net_device *dev, | ||
869 | u8 *dst, u8 *next_hop, struct mpath_info *pinfo) | ||
870 | |||
871 | { | ||
872 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
873 | struct mesh_path *mpath; | ||
874 | |||
875 | if (sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT) | ||
876 | return -ENOTSUPP; | ||
877 | |||
878 | rcu_read_lock(); | ||
879 | mpath = mesh_path_lookup(dst, dev); | ||
880 | if (!mpath) { | ||
881 | rcu_read_unlock(); | ||
882 | return -ENOENT; | ||
883 | } | ||
884 | memcpy(dst, mpath->dst, ETH_ALEN); | ||
885 | mpath_set_pinfo(mpath, next_hop, pinfo); | ||
886 | rcu_read_unlock(); | ||
887 | return 0; | ||
888 | } | ||
889 | |||
890 | static int ieee80211_dump_mpath(struct wiphy *wiphy, struct net_device *dev, | ||
891 | int idx, u8 *dst, u8 *next_hop, | ||
892 | struct mpath_info *pinfo) | ||
893 | { | ||
894 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
895 | struct mesh_path *mpath; | ||
896 | |||
897 | if (sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT) | ||
898 | return -ENOTSUPP; | ||
899 | |||
900 | rcu_read_lock(); | ||
901 | mpath = mesh_path_lookup_by_idx(idx, dev); | ||
902 | if (!mpath) { | ||
903 | rcu_read_unlock(); | ||
904 | return -ENOENT; | ||
905 | } | ||
906 | memcpy(dst, mpath->dst, ETH_ALEN); | ||
907 | mpath_set_pinfo(mpath, next_hop, pinfo); | ||
908 | rcu_read_unlock(); | ||
636 | return 0; | 909 | return 0; |
637 | } | 910 | } |
911 | #endif | ||
638 | 912 | ||
639 | struct cfg80211_ops mac80211_config_ops = { | 913 | struct cfg80211_ops mac80211_config_ops = { |
640 | .add_virtual_intf = ieee80211_add_iface, | 914 | .add_virtual_intf = ieee80211_add_iface, |
@@ -651,4 +925,12 @@ struct cfg80211_ops mac80211_config_ops = { | |||
651 | .del_station = ieee80211_del_station, | 925 | .del_station = ieee80211_del_station, |
652 | .change_station = ieee80211_change_station, | 926 | .change_station = ieee80211_change_station, |
653 | .get_station = ieee80211_get_station, | 927 | .get_station = ieee80211_get_station, |
928 | .dump_station = ieee80211_dump_station, | ||
929 | #ifdef CONFIG_MAC80211_MESH | ||
930 | .add_mpath = ieee80211_add_mpath, | ||
931 | .del_mpath = ieee80211_del_mpath, | ||
932 | .change_mpath = ieee80211_change_mpath, | ||
933 | .get_mpath = ieee80211_get_mpath, | ||
934 | .dump_mpath = ieee80211_dump_mpath, | ||
935 | #endif | ||
654 | }; | 936 | }; |
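The new mesh path callbacks are compiled and registered only under CONFIG_MAC80211_MESH, so the cfg80211 ops table advertises exactly what the build supports. A small standalone illustration of conditionally populating an ops structure with designated initializers (struct example_ops, CONFIG_EXAMPLE_MESH and both callbacks are invented for the example):

    #include <stdio.h>

    struct example_ops {
            int (*get_station)(int idx);
            int (*get_mpath)(int idx);      /* may legitimately stay NULL */
    };

    static int get_station(int idx) { return idx; }
    #ifdef CONFIG_EXAMPLE_MESH
    static int get_mpath(int idx) { return idx * 2; }
    #endif

    static const struct example_ops ops = {
            .get_station = get_station,
    #ifdef CONFIG_EXAMPLE_MESH
            .get_mpath = get_mpath,         /* only wired in when built in */
    #endif
    };

    int main(void)
    {
            printf("mesh callback present: %d\n", ops.get_mpath != NULL);
            return 0;
    }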
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c index 60514b2c97b9..4736c64937b4 100644 --- a/net/mac80211/debugfs.c +++ b/net/mac80211/debugfs.c | |||
@@ -19,41 +19,6 @@ int mac80211_open_file_generic(struct inode *inode, struct file *file) | |||
19 | return 0; | 19 | return 0; |
20 | } | 20 | } |
21 | 21 | ||
22 | static const char *ieee80211_mode_str(int mode) | ||
23 | { | ||
24 | switch (mode) { | ||
25 | case MODE_IEEE80211A: | ||
26 | return "IEEE 802.11a"; | ||
27 | case MODE_IEEE80211B: | ||
28 | return "IEEE 802.11b"; | ||
29 | case MODE_IEEE80211G: | ||
30 | return "IEEE 802.11g"; | ||
31 | default: | ||
32 | return "UNKNOWN"; | ||
33 | } | ||
34 | } | ||
35 | |||
36 | static ssize_t modes_read(struct file *file, char __user *userbuf, | ||
37 | size_t count, loff_t *ppos) | ||
38 | { | ||
39 | struct ieee80211_local *local = file->private_data; | ||
40 | struct ieee80211_hw_mode *mode; | ||
41 | char buf[150], *p = buf; | ||
42 | |||
43 | /* FIXME: locking! */ | ||
44 | list_for_each_entry(mode, &local->modes_list, list) { | ||
45 | p += scnprintf(p, sizeof(buf)+buf-p, | ||
46 | "%s\n", ieee80211_mode_str(mode->mode)); | ||
47 | } | ||
48 | |||
49 | return simple_read_from_buffer(userbuf, count, ppos, buf, p-buf); | ||
50 | } | ||
51 | |||
52 | static const struct file_operations modes_ops = { | ||
53 | .read = modes_read, | ||
54 | .open = mac80211_open_file_generic, | ||
55 | }; | ||
56 | |||
57 | #define DEBUGFS_READONLY_FILE(name, buflen, fmt, value...) \ | 22 | #define DEBUGFS_READONLY_FILE(name, buflen, fmt, value...) \ |
58 | static ssize_t name## _read(struct file *file, char __user *userbuf, \ | 23 | static ssize_t name## _read(struct file *file, char __user *userbuf, \ |
59 | size_t count, loff_t *ppos) \ | 24 | size_t count, loff_t *ppos) \ |
@@ -80,10 +45,8 @@ static const struct file_operations name## _ops = { \ | |||
80 | local->debugfs.name = NULL; | 45 | local->debugfs.name = NULL; |
81 | 46 | ||
82 | 47 | ||
83 | DEBUGFS_READONLY_FILE(channel, 20, "%d", | ||
84 | local->hw.conf.channel); | ||
85 | DEBUGFS_READONLY_FILE(frequency, 20, "%d", | 48 | DEBUGFS_READONLY_FILE(frequency, 20, "%d", |
86 | local->hw.conf.freq); | 49 | local->hw.conf.channel->center_freq); |
87 | DEBUGFS_READONLY_FILE(antenna_sel_tx, 20, "%d", | 50 | DEBUGFS_READONLY_FILE(antenna_sel_tx, 20, "%d", |
88 | local->hw.conf.antenna_sel_tx); | 51 | local->hw.conf.antenna_sel_tx); |
89 | DEBUGFS_READONLY_FILE(antenna_sel_rx, 20, "%d", | 52 | DEBUGFS_READONLY_FILE(antenna_sel_rx, 20, "%d", |
@@ -100,8 +63,6 @@ DEBUGFS_READONLY_FILE(long_retry_limit, 20, "%d", | |||
100 | local->long_retry_limit); | 63 | local->long_retry_limit); |
101 | DEBUGFS_READONLY_FILE(total_ps_buffered, 20, "%d", | 64 | DEBUGFS_READONLY_FILE(total_ps_buffered, 20, "%d", |
102 | local->total_ps_buffered); | 65 | local->total_ps_buffered); |
103 | DEBUGFS_READONLY_FILE(mode, 20, "%s", | ||
104 | ieee80211_mode_str(local->hw.conf.phymode)); | ||
105 | DEBUGFS_READONLY_FILE(wep_iv, 20, "%#06x", | 66 | DEBUGFS_READONLY_FILE(wep_iv, 20, "%#06x", |
106 | local->wep_iv & 0xffffff); | 67 | local->wep_iv & 0xffffff); |
107 | DEBUGFS_READONLY_FILE(rate_ctrl_alg, 100, "%s", | 68 | DEBUGFS_READONLY_FILE(rate_ctrl_alg, 100, "%s", |
@@ -294,7 +255,6 @@ void debugfs_hw_add(struct ieee80211_local *local) | |||
294 | local->debugfs.stations = debugfs_create_dir("stations", phyd); | 255 | local->debugfs.stations = debugfs_create_dir("stations", phyd); |
295 | local->debugfs.keys = debugfs_create_dir("keys", phyd); | 256 | local->debugfs.keys = debugfs_create_dir("keys", phyd); |
296 | 257 | ||
297 | DEBUGFS_ADD(channel); | ||
298 | DEBUGFS_ADD(frequency); | 258 | DEBUGFS_ADD(frequency); |
299 | DEBUGFS_ADD(antenna_sel_tx); | 259 | DEBUGFS_ADD(antenna_sel_tx); |
300 | DEBUGFS_ADD(antenna_sel_rx); | 260 | DEBUGFS_ADD(antenna_sel_rx); |
@@ -304,9 +264,7 @@ void debugfs_hw_add(struct ieee80211_local *local) | |||
304 | DEBUGFS_ADD(short_retry_limit); | 264 | DEBUGFS_ADD(short_retry_limit); |
305 | DEBUGFS_ADD(long_retry_limit); | 265 | DEBUGFS_ADD(long_retry_limit); |
306 | DEBUGFS_ADD(total_ps_buffered); | 266 | DEBUGFS_ADD(total_ps_buffered); |
307 | DEBUGFS_ADD(mode); | ||
308 | DEBUGFS_ADD(wep_iv); | 267 | DEBUGFS_ADD(wep_iv); |
309 | DEBUGFS_ADD(modes); | ||
310 | 268 | ||
311 | statsd = debugfs_create_dir("statistics", phyd); | 269 | statsd = debugfs_create_dir("statistics", phyd); |
312 | local->debugfs.statistics = statsd; | 270 | local->debugfs.statistics = statsd; |
@@ -356,7 +314,6 @@ void debugfs_hw_add(struct ieee80211_local *local) | |||
356 | 314 | ||
357 | void debugfs_hw_del(struct ieee80211_local *local) | 315 | void debugfs_hw_del(struct ieee80211_local *local) |
358 | { | 316 | { |
359 | DEBUGFS_DEL(channel); | ||
360 | DEBUGFS_DEL(frequency); | 317 | DEBUGFS_DEL(frequency); |
361 | DEBUGFS_DEL(antenna_sel_tx); | 318 | DEBUGFS_DEL(antenna_sel_tx); |
362 | DEBUGFS_DEL(antenna_sel_rx); | 319 | DEBUGFS_DEL(antenna_sel_rx); |
@@ -366,9 +323,7 @@ void debugfs_hw_del(struct ieee80211_local *local) | |||
366 | DEBUGFS_DEL(short_retry_limit); | 323 | DEBUGFS_DEL(short_retry_limit); |
367 | DEBUGFS_DEL(long_retry_limit); | 324 | DEBUGFS_DEL(long_retry_limit); |
368 | DEBUGFS_DEL(total_ps_buffered); | 325 | DEBUGFS_DEL(total_ps_buffered); |
369 | DEBUGFS_DEL(mode); | ||
370 | DEBUGFS_DEL(wep_iv); | 326 | DEBUGFS_DEL(wep_iv); |
371 | DEBUGFS_DEL(modes); | ||
372 | 327 | ||
373 | DEBUGFS_STATS_DEL(transmitted_fragment_count); | 328 | DEBUGFS_STATS_DEL(transmitted_fragment_count); |
374 | DEBUGFS_STATS_DEL(multicast_transmitted_frame_count); | 329 | DEBUGFS_STATS_DEL(multicast_transmitted_frame_count); |
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c index 829872a3ae81..107b0fe778d6 100644 --- a/net/mac80211/debugfs_netdev.c +++ b/net/mac80211/debugfs_netdev.c | |||
@@ -39,6 +39,29 @@ static ssize_t ieee80211_if_read( | |||
39 | return ret; | 39 | return ret; |
40 | } | 40 | } |
41 | 41 | ||
42 | #ifdef CONFIG_MAC80211_MESH | ||
43 | static ssize_t ieee80211_if_write( | ||
44 | struct ieee80211_sub_if_data *sdata, | ||
45 | char const __user *userbuf, | ||
46 | size_t count, loff_t *ppos, | ||
47 | int (*format)(struct ieee80211_sub_if_data *, char *)) | ||
48 | { | ||
49 | char buf[10]; | ||
50 | int buf_size; | ||
51 | |||
52 | memset(buf, 0x00, sizeof(buf)); | ||
53 | buf_size = min(count, (sizeof(buf)-1)); | ||
54 | read_lock(&dev_base_lock); | ||
55 | if (copy_from_user(buf, userbuf, buf_size)) | ||
56 | goto endwrite; | ||
57 | if (sdata->dev->reg_state == NETREG_REGISTERED) | ||
58 | (*format)(sdata, buf); | ||
59 | endwrite: | ||
60 | read_unlock(&dev_base_lock); | ||
61 | return count; | ||
62 | } | ||
63 | #endif | ||
64 | |||
42 | #define IEEE80211_IF_FMT(name, field, format_string) \ | 65 | #define IEEE80211_IF_FMT(name, field, format_string) \ |
43 | static ssize_t ieee80211_if_fmt_##name( \ | 66 | static ssize_t ieee80211_if_fmt_##name( \ |
44 | const struct ieee80211_sub_if_data *sdata, char *buf, \ | 67 | const struct ieee80211_sub_if_data *sdata, char *buf, \ |
@@ -46,6 +69,19 @@ static ssize_t ieee80211_if_fmt_##name( \ | |||
46 | { \ | 69 | { \ |
47 | return scnprintf(buf, buflen, format_string, sdata->field); \ | 70 | return scnprintf(buf, buflen, format_string, sdata->field); \ |
48 | } | 71 | } |
72 | #define IEEE80211_IF_WFMT(name, field, type) \ | ||
73 | static int ieee80211_if_wfmt_##name( \ | ||
74 | struct ieee80211_sub_if_data *sdata, char *buf) \ | ||
75 | { \ | ||
76 | unsigned long tmp; \ | ||
77 | char *endp; \ | ||
78 | \ | ||
79 | tmp = simple_strtoul(buf, &endp, 0); \ | ||
80 | if ((endp == buf) || ((type)tmp != tmp)) \ | ||
81 | return -EINVAL; \ | ||
82 | sdata->field = tmp; \ | ||
83 | return 0; \ | ||
84 | } | ||
49 | #define IEEE80211_IF_FMT_DEC(name, field) \ | 85 | #define IEEE80211_IF_FMT_DEC(name, field) \ |
50 | IEEE80211_IF_FMT(name, field, "%d\n") | 86 | IEEE80211_IF_FMT(name, field, "%d\n") |
51 | #define IEEE80211_IF_FMT_HEX(name, field) \ | 87 | #define IEEE80211_IF_FMT_HEX(name, field) \ |
@@ -88,10 +124,37 @@ static const struct file_operations name##_ops = { \ | |||
88 | IEEE80211_IF_FMT_##format(name, field) \ | 124 | IEEE80211_IF_FMT_##format(name, field) \ |
89 | __IEEE80211_IF_FILE(name) | 125 | __IEEE80211_IF_FILE(name) |
90 | 126 | ||
127 | #define __IEEE80211_IF_WFILE(name) \ | ||
128 | static ssize_t ieee80211_if_read_##name(struct file *file, \ | ||
129 | char __user *userbuf, \ | ||
130 | size_t count, loff_t *ppos) \ | ||
131 | { \ | ||
132 | return ieee80211_if_read(file->private_data, \ | ||
133 | userbuf, count, ppos, \ | ||
134 | ieee80211_if_fmt_##name); \ | ||
135 | } \ | ||
136 | static ssize_t ieee80211_if_write_##name(struct file *file, \ | ||
137 | const char __user *userbuf, \ | ||
138 | size_t count, loff_t *ppos) \ | ||
139 | { \ | ||
140 | return ieee80211_if_write(file->private_data, \ | ||
141 | userbuf, count, ppos, \ | ||
142 | ieee80211_if_wfmt_##name); \ | ||
143 | } \ | ||
144 | static const struct file_operations name##_ops = { \ | ||
145 | .read = ieee80211_if_read_##name, \ | ||
146 | .write = ieee80211_if_write_##name, \ | ||
147 | .open = mac80211_open_file_generic, \ | ||
148 | } | ||
149 | |||
150 | #define IEEE80211_IF_WFILE(name, field, format, type) \ | ||
151 | IEEE80211_IF_FMT_##format(name, field) \ | ||
152 | IEEE80211_IF_WFMT(name, field, type) \ | ||
153 | __IEEE80211_IF_WFILE(name) | ||
154 | |||
91 | /* common attributes */ | 155 | /* common attributes */ |
92 | IEEE80211_IF_FILE(channel_use, channel_use, DEC); | 156 | IEEE80211_IF_FILE(channel_use, channel_use, DEC); |
93 | IEEE80211_IF_FILE(drop_unencrypted, drop_unencrypted, DEC); | 157 | IEEE80211_IF_FILE(drop_unencrypted, drop_unencrypted, DEC); |
94 | IEEE80211_IF_FILE(ieee802_1x_pac, ieee802_1x_pac, DEC); | ||
95 | 158 | ||
96 | /* STA/IBSS attributes */ | 159 | /* STA/IBSS attributes */ |
97 | IEEE80211_IF_FILE(state, u.sta.state, DEC); | 160 | IEEE80211_IF_FILE(state, u.sta.state, DEC); |
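The writeable mesh parameter files introduced here parse input through the IEEE80211_IF_WFMT() helper above, which rejects a value when casting it to the destination type changes it, i.e. when it does not fit. A standalone analogue of that check using the C library strtoul() (parse_u16() and the local u16 typedef are invented for the example):

    #include <stdio.h>
    #include <stdlib.h>

    typedef unsigned short u16;

    /* Returns 0 and stores the value if the parsed number fits in u16,
     * -1 otherwise -- the same "(type)tmp != tmp" trick as the macro. */
    static int parse_u16(const char *buf, u16 *out)
    {
            char *endp;
            unsigned long tmp = strtoul(buf, &endp, 0);

            if (endp == buf || (u16)tmp != tmp)
                    return -1;
            *out = (u16)tmp;
            return 0;
    }

    int main(void)
    {
            u16 v = 0;

            printf("%d\n", parse_u16("70000", &v));      /* -1: exceeds 65535 */
            printf("%d %d\n", parse_u16("4096", &v), v); /* 0 4096 */
            return 0;
    }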
@@ -107,6 +170,7 @@ IEEE80211_IF_FILE(assoc_tries, u.sta.assoc_tries, DEC); | |||
107 | IEEE80211_IF_FILE(auth_algs, u.sta.auth_algs, HEX); | 170 | IEEE80211_IF_FILE(auth_algs, u.sta.auth_algs, HEX); |
108 | IEEE80211_IF_FILE(auth_alg, u.sta.auth_alg, DEC); | 171 | IEEE80211_IF_FILE(auth_alg, u.sta.auth_alg, DEC); |
109 | IEEE80211_IF_FILE(auth_transaction, u.sta.auth_transaction, DEC); | 172 | IEEE80211_IF_FILE(auth_transaction, u.sta.auth_transaction, DEC); |
173 | IEEE80211_IF_FILE(num_beacons_sta, u.sta.num_beacons, DEC); | ||
110 | 174 | ||
111 | static ssize_t ieee80211_if_fmt_flags( | 175 | static ssize_t ieee80211_if_fmt_flags( |
112 | const struct ieee80211_sub_if_data *sdata, char *buf, int buflen) | 176 | const struct ieee80211_sub_if_data *sdata, char *buf, int buflen) |
@@ -140,6 +204,42 @@ __IEEE80211_IF_FILE(num_buffered_multicast); | |||
140 | /* WDS attributes */ | 204 | /* WDS attributes */ |
141 | IEEE80211_IF_FILE(peer, u.wds.remote_addr, MAC); | 205 | IEEE80211_IF_FILE(peer, u.wds.remote_addr, MAC); |
142 | 206 | ||
207 | #ifdef CONFIG_MAC80211_MESH | ||
208 | /* Mesh stats attributes */ | ||
209 | IEEE80211_IF_FILE(fwded_frames, u.sta.mshstats.fwded_frames, DEC); | ||
210 | IEEE80211_IF_FILE(dropped_frames_ttl, u.sta.mshstats.dropped_frames_ttl, DEC); | ||
211 | IEEE80211_IF_FILE(dropped_frames_no_route, | ||
212 | u.sta.mshstats.dropped_frames_no_route, DEC); | ||
213 | IEEE80211_IF_FILE(estab_plinks, u.sta.mshstats.estab_plinks, ATOMIC); | ||
214 | |||
215 | /* Mesh parameters */ | ||
216 | IEEE80211_IF_WFILE(dot11MeshMaxRetries, | ||
217 | u.sta.mshcfg.dot11MeshMaxRetries, DEC, u8); | ||
218 | IEEE80211_IF_WFILE(dot11MeshRetryTimeout, | ||
219 | u.sta.mshcfg.dot11MeshRetryTimeout, DEC, u16); | ||
220 | IEEE80211_IF_WFILE(dot11MeshConfirmTimeout, | ||
221 | u.sta.mshcfg.dot11MeshConfirmTimeout, DEC, u16); | ||
222 | IEEE80211_IF_WFILE(dot11MeshHoldingTimeout, | ||
223 | u.sta.mshcfg.dot11MeshHoldingTimeout, DEC, u16); | ||
224 | IEEE80211_IF_WFILE(dot11MeshTTL, u.sta.mshcfg.dot11MeshTTL, DEC, u8); | ||
225 | IEEE80211_IF_WFILE(auto_open_plinks, u.sta.mshcfg.auto_open_plinks, DEC, bool); | ||
226 | IEEE80211_IF_WFILE(dot11MeshMaxPeerLinks, | ||
227 | u.sta.mshcfg.dot11MeshMaxPeerLinks, DEC, u16); | ||
228 | IEEE80211_IF_WFILE(dot11MeshHWMPactivePathTimeout, | ||
229 | u.sta.mshcfg.dot11MeshHWMPactivePathTimeout, DEC, u32); | ||
230 | IEEE80211_IF_WFILE(dot11MeshHWMPpreqMinInterval, | ||
231 | u.sta.mshcfg.dot11MeshHWMPpreqMinInterval, DEC, u16); | ||
232 | IEEE80211_IF_WFILE(dot11MeshHWMPnetDiameterTraversalTime, | ||
233 | u.sta.mshcfg.dot11MeshHWMPnetDiameterTraversalTime, DEC, u16); | ||
234 | IEEE80211_IF_WFILE(dot11MeshHWMPmaxPREQretries, | ||
235 | u.sta.mshcfg.dot11MeshHWMPmaxPREQretries, DEC, u8); | ||
236 | IEEE80211_IF_WFILE(path_refresh_time, | ||
237 | u.sta.mshcfg.path_refresh_time, DEC, u32); | ||
238 | IEEE80211_IF_WFILE(min_discovery_timeout, | ||
239 | u.sta.mshcfg.min_discovery_timeout, DEC, u16); | ||
240 | #endif | ||
241 | |||
242 | |||
143 | #define DEBUGFS_ADD(name, type)\ | 243 | #define DEBUGFS_ADD(name, type)\ |
144 | sdata->debugfs.type.name = debugfs_create_file(#name, 0444,\ | 244 | sdata->debugfs.type.name = debugfs_create_file(#name, 0444,\ |
145 | sdata->debugfsdir, sdata, &name##_ops); | 245 | sdata->debugfsdir, sdata, &name##_ops); |
@@ -148,7 +248,6 @@ static void add_sta_files(struct ieee80211_sub_if_data *sdata) | |||
148 | { | 248 | { |
149 | DEBUGFS_ADD(channel_use, sta); | 249 | DEBUGFS_ADD(channel_use, sta); |
150 | DEBUGFS_ADD(drop_unencrypted, sta); | 250 | DEBUGFS_ADD(drop_unencrypted, sta); |
151 | DEBUGFS_ADD(ieee802_1x_pac, sta); | ||
152 | DEBUGFS_ADD(state, sta); | 251 | DEBUGFS_ADD(state, sta); |
153 | DEBUGFS_ADD(bssid, sta); | 252 | DEBUGFS_ADD(bssid, sta); |
154 | DEBUGFS_ADD(prev_bssid, sta); | 253 | DEBUGFS_ADD(prev_bssid, sta); |
@@ -163,13 +262,13 @@ static void add_sta_files(struct ieee80211_sub_if_data *sdata) | |||
163 | DEBUGFS_ADD(auth_alg, sta); | 262 | DEBUGFS_ADD(auth_alg, sta); |
164 | DEBUGFS_ADD(auth_transaction, sta); | 263 | DEBUGFS_ADD(auth_transaction, sta); |
165 | DEBUGFS_ADD(flags, sta); | 264 | DEBUGFS_ADD(flags, sta); |
265 | DEBUGFS_ADD(num_beacons_sta, sta); | ||
166 | } | 266 | } |
167 | 267 | ||
168 | static void add_ap_files(struct ieee80211_sub_if_data *sdata) | 268 | static void add_ap_files(struct ieee80211_sub_if_data *sdata) |
169 | { | 269 | { |
170 | DEBUGFS_ADD(channel_use, ap); | 270 | DEBUGFS_ADD(channel_use, ap); |
171 | DEBUGFS_ADD(drop_unencrypted, ap); | 271 | DEBUGFS_ADD(drop_unencrypted, ap); |
172 | DEBUGFS_ADD(ieee802_1x_pac, ap); | ||
173 | DEBUGFS_ADD(num_sta_ps, ap); | 272 | DEBUGFS_ADD(num_sta_ps, ap); |
174 | DEBUGFS_ADD(dtim_count, ap); | 273 | DEBUGFS_ADD(dtim_count, ap); |
175 | DEBUGFS_ADD(num_beacons, ap); | 274 | DEBUGFS_ADD(num_beacons, ap); |
@@ -182,7 +281,6 @@ static void add_wds_files(struct ieee80211_sub_if_data *sdata) | |||
182 | { | 281 | { |
183 | DEBUGFS_ADD(channel_use, wds); | 282 | DEBUGFS_ADD(channel_use, wds); |
184 | DEBUGFS_ADD(drop_unencrypted, wds); | 283 | DEBUGFS_ADD(drop_unencrypted, wds); |
185 | DEBUGFS_ADD(ieee802_1x_pac, wds); | ||
186 | DEBUGFS_ADD(peer, wds); | 284 | DEBUGFS_ADD(peer, wds); |
187 | } | 285 | } |
188 | 286 | ||
@@ -190,19 +288,63 @@ static void add_vlan_files(struct ieee80211_sub_if_data *sdata) | |||
190 | { | 288 | { |
191 | DEBUGFS_ADD(channel_use, vlan); | 289 | DEBUGFS_ADD(channel_use, vlan); |
192 | DEBUGFS_ADD(drop_unencrypted, vlan); | 290 | DEBUGFS_ADD(drop_unencrypted, vlan); |
193 | DEBUGFS_ADD(ieee802_1x_pac, vlan); | ||
194 | } | 291 | } |
195 | 292 | ||
196 | static void add_monitor_files(struct ieee80211_sub_if_data *sdata) | 293 | static void add_monitor_files(struct ieee80211_sub_if_data *sdata) |
197 | { | 294 | { |
198 | } | 295 | } |
199 | 296 | ||
297 | #ifdef CONFIG_MAC80211_MESH | ||
298 | #define MESHSTATS_ADD(name)\ | ||
299 | sdata->mesh_stats.name = debugfs_create_file(#name, 0444,\ | ||
300 | sdata->mesh_stats_dir, sdata, &name##_ops); | ||
301 | |||
302 | static void add_mesh_stats(struct ieee80211_sub_if_data *sdata) | ||
303 | { | ||
304 | sdata->mesh_stats_dir = debugfs_create_dir("mesh_stats", | ||
305 | sdata->debugfsdir); | ||
306 | MESHSTATS_ADD(fwded_frames); | ||
307 | MESHSTATS_ADD(dropped_frames_ttl); | ||
308 | MESHSTATS_ADD(dropped_frames_no_route); | ||
309 | MESHSTATS_ADD(estab_plinks); | ||
310 | } | ||
311 | |||
312 | #define MESHPARAMS_ADD(name)\ | ||
313 | sdata->mesh_config.name = debugfs_create_file(#name, 0644,\ | ||
314 | sdata->mesh_config_dir, sdata, &name##_ops); | ||
315 | |||
316 | static void add_mesh_config(struct ieee80211_sub_if_data *sdata) | ||
317 | { | ||
318 | sdata->mesh_config_dir = debugfs_create_dir("mesh_config", | ||
319 | sdata->debugfsdir); | ||
320 | MESHPARAMS_ADD(dot11MeshMaxRetries); | ||
321 | MESHPARAMS_ADD(dot11MeshRetryTimeout); | ||
322 | MESHPARAMS_ADD(dot11MeshConfirmTimeout); | ||
323 | MESHPARAMS_ADD(dot11MeshHoldingTimeout); | ||
324 | MESHPARAMS_ADD(dot11MeshTTL); | ||
325 | MESHPARAMS_ADD(auto_open_plinks); | ||
326 | MESHPARAMS_ADD(dot11MeshMaxPeerLinks); | ||
327 | MESHPARAMS_ADD(dot11MeshHWMPactivePathTimeout); | ||
328 | MESHPARAMS_ADD(dot11MeshHWMPpreqMinInterval); | ||
329 | MESHPARAMS_ADD(dot11MeshHWMPnetDiameterTraversalTime); | ||
330 | MESHPARAMS_ADD(dot11MeshHWMPmaxPREQretries); | ||
331 | MESHPARAMS_ADD(path_refresh_time); | ||
332 | MESHPARAMS_ADD(min_discovery_timeout); | ||
333 | } | ||
334 | #endif | ||
335 | |||
200 | static void add_files(struct ieee80211_sub_if_data *sdata) | 336 | static void add_files(struct ieee80211_sub_if_data *sdata) |
201 | { | 337 | { |
202 | if (!sdata->debugfsdir) | 338 | if (!sdata->debugfsdir) |
203 | return; | 339 | return; |
204 | 340 | ||
205 | switch (sdata->vif.type) { | 341 | switch (sdata->vif.type) { |
342 | case IEEE80211_IF_TYPE_MESH_POINT: | ||
343 | #ifdef CONFIG_MAC80211_MESH | ||
344 | add_mesh_stats(sdata); | ||
345 | add_mesh_config(sdata); | ||
346 | #endif | ||
347 | /* fall through */ | ||
206 | case IEEE80211_IF_TYPE_STA: | 348 | case IEEE80211_IF_TYPE_STA: |
207 | case IEEE80211_IF_TYPE_IBSS: | 349 | case IEEE80211_IF_TYPE_IBSS: |
208 | add_sta_files(sdata); | 350 | add_sta_files(sdata); |
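The MESHSTATS_ADD()/MESHPARAMS_ADD() helpers above, like DEBUGFS_ADD(), lean on two preprocessor operators: # turns the attribute name into the debugfs file-name string, and ## pastes it into the generated _ops identifier. A standalone illustration of those operators (DECLARE_ATTR() and the sample values are invented for the example):

    #include <stdio.h>

    /* #name        -> the string literal "name"
     * name##_value -> the pasted identifier name_value */
    #define DECLARE_ATTR(name, val) \
            static const int name##_value = (val); \
            static const char *name##_label = #name

    DECLARE_ATTR(dot11MeshTTL, 5);
    DECLARE_ATTR(auto_open_plinks, 1);

    int main(void)
    {
            printf("%s = %d\n", dot11MeshTTL_label, dot11MeshTTL_value);
            printf("%s = %d\n", auto_open_plinks_label, auto_open_plinks_value);
            return 0;
    }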
@@ -234,7 +376,6 @@ static void del_sta_files(struct ieee80211_sub_if_data *sdata) | |||
234 | { | 376 | { |
235 | DEBUGFS_DEL(channel_use, sta); | 377 | DEBUGFS_DEL(channel_use, sta); |
236 | DEBUGFS_DEL(drop_unencrypted, sta); | 378 | DEBUGFS_DEL(drop_unencrypted, sta); |
237 | DEBUGFS_DEL(ieee802_1x_pac, sta); | ||
238 | DEBUGFS_DEL(state, sta); | 379 | DEBUGFS_DEL(state, sta); |
239 | DEBUGFS_DEL(bssid, sta); | 380 | DEBUGFS_DEL(bssid, sta); |
240 | DEBUGFS_DEL(prev_bssid, sta); | 381 | DEBUGFS_DEL(prev_bssid, sta); |
@@ -249,13 +390,13 @@ static void del_sta_files(struct ieee80211_sub_if_data *sdata) | |||
249 | DEBUGFS_DEL(auth_alg, sta); | 390 | DEBUGFS_DEL(auth_alg, sta); |
250 | DEBUGFS_DEL(auth_transaction, sta); | 391 | DEBUGFS_DEL(auth_transaction, sta); |
251 | DEBUGFS_DEL(flags, sta); | 392 | DEBUGFS_DEL(flags, sta); |
393 | DEBUGFS_DEL(num_beacons_sta, sta); | ||
252 | } | 394 | } |
253 | 395 | ||
254 | static void del_ap_files(struct ieee80211_sub_if_data *sdata) | 396 | static void del_ap_files(struct ieee80211_sub_if_data *sdata) |
255 | { | 397 | { |
256 | DEBUGFS_DEL(channel_use, ap); | 398 | DEBUGFS_DEL(channel_use, ap); |
257 | DEBUGFS_DEL(drop_unencrypted, ap); | 399 | DEBUGFS_DEL(drop_unencrypted, ap); |
258 | DEBUGFS_DEL(ieee802_1x_pac, ap); | ||
259 | DEBUGFS_DEL(num_sta_ps, ap); | 400 | DEBUGFS_DEL(num_sta_ps, ap); |
260 | DEBUGFS_DEL(dtim_count, ap); | 401 | DEBUGFS_DEL(dtim_count, ap); |
261 | DEBUGFS_DEL(num_beacons, ap); | 402 | DEBUGFS_DEL(num_beacons, ap); |
@@ -268,7 +409,6 @@ static void del_wds_files(struct ieee80211_sub_if_data *sdata) | |||
268 | { | 409 | { |
269 | DEBUGFS_DEL(channel_use, wds); | 410 | DEBUGFS_DEL(channel_use, wds); |
270 | DEBUGFS_DEL(drop_unencrypted, wds); | 411 | DEBUGFS_DEL(drop_unencrypted, wds); |
271 | DEBUGFS_DEL(ieee802_1x_pac, wds); | ||
272 | DEBUGFS_DEL(peer, wds); | 412 | DEBUGFS_DEL(peer, wds); |
273 | } | 413 | } |
274 | 414 | ||
@@ -276,19 +416,67 @@ static void del_vlan_files(struct ieee80211_sub_if_data *sdata) | |||
276 | { | 416 | { |
277 | DEBUGFS_DEL(channel_use, vlan); | 417 | DEBUGFS_DEL(channel_use, vlan); |
278 | DEBUGFS_DEL(drop_unencrypted, vlan); | 418 | DEBUGFS_DEL(drop_unencrypted, vlan); |
279 | DEBUGFS_DEL(ieee802_1x_pac, vlan); | ||
280 | } | 419 | } |
281 | 420 | ||
282 | static void del_monitor_files(struct ieee80211_sub_if_data *sdata) | 421 | static void del_monitor_files(struct ieee80211_sub_if_data *sdata) |
283 | { | 422 | { |
284 | } | 423 | } |
285 | 424 | ||
425 | #ifdef CONFIG_MAC80211_MESH | ||
426 | #define MESHSTATS_DEL(name) \ | ||
427 | do { \ | ||
428 | debugfs_remove(sdata->mesh_stats.name); \ | ||
429 | sdata->mesh_stats.name = NULL; \ | ||
430 | } while (0) | ||
431 | |||
432 | static void del_mesh_stats(struct ieee80211_sub_if_data *sdata) | ||
433 | { | ||
434 | MESHSTATS_DEL(fwded_frames); | ||
435 | MESHSTATS_DEL(dropped_frames_ttl); | ||
436 | MESHSTATS_DEL(dropped_frames_no_route); | ||
437 | MESHSTATS_DEL(estab_plinks); | ||
438 | debugfs_remove(sdata->mesh_stats_dir); | ||
439 | sdata->mesh_stats_dir = NULL; | ||
440 | } | ||
441 | |||
442 | #define MESHPARAMS_DEL(name) \ | ||
443 | do { \ | ||
444 | debugfs_remove(sdata->mesh_config.name); \ | ||
445 | sdata->mesh_config.name = NULL; \ | ||
446 | } while (0) | ||
447 | |||
448 | static void del_mesh_config(struct ieee80211_sub_if_data *sdata) | ||
449 | { | ||
450 | MESHPARAMS_DEL(dot11MeshMaxRetries); | ||
451 | MESHPARAMS_DEL(dot11MeshRetryTimeout); | ||
452 | MESHPARAMS_DEL(dot11MeshConfirmTimeout); | ||
453 | MESHPARAMS_DEL(dot11MeshHoldingTimeout); | ||
454 | MESHPARAMS_DEL(dot11MeshTTL); | ||
455 | MESHPARAMS_DEL(auto_open_plinks); | ||
456 | MESHPARAMS_DEL(dot11MeshMaxPeerLinks); | ||
457 | MESHPARAMS_DEL(dot11MeshHWMPactivePathTimeout); | ||
458 | MESHPARAMS_DEL(dot11MeshHWMPpreqMinInterval); | ||
459 | MESHPARAMS_DEL(dot11MeshHWMPnetDiameterTraversalTime); | ||
460 | MESHPARAMS_DEL(dot11MeshHWMPmaxPREQretries); | ||
461 | MESHPARAMS_DEL(path_refresh_time); | ||
462 | MESHPARAMS_DEL(min_discovery_timeout); | ||
463 | debugfs_remove(sdata->mesh_config_dir); | ||
464 | sdata->mesh_config_dir = NULL; | ||
465 | } | ||
466 | #endif | ||
467 | |||
286 | static void del_files(struct ieee80211_sub_if_data *sdata, int type) | 468 | static void del_files(struct ieee80211_sub_if_data *sdata, int type) |
287 | { | 469 | { |
288 | if (!sdata->debugfsdir) | 470 | if (!sdata->debugfsdir) |
289 | return; | 471 | return; |
290 | 472 | ||
291 | switch (type) { | 473 | switch (type) { |
474 | case IEEE80211_IF_TYPE_MESH_POINT: | ||
475 | #ifdef CONFIG_MAC80211_MESH | ||
476 | del_mesh_stats(sdata); | ||
477 | del_mesh_config(sdata); | ||
478 | #endif | ||
479 | /* fall through */ | ||
292 | case IEEE80211_IF_TYPE_STA: | 480 | case IEEE80211_IF_TYPE_STA: |
293 | case IEEE80211_IF_TYPE_IBSS: | 481 | case IEEE80211_IF_TYPE_IBSS: |
294 | del_sta_files(sdata); | 482 | del_sta_files(sdata); |
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c index 8f5944c53d4e..fc2c1a192ed2 100644 --- a/net/mac80211/debugfs_sta.c +++ b/net/mac80211/debugfs_sta.c | |||
@@ -33,25 +33,16 @@ static ssize_t sta_ ##name## _read(struct file *file, \ | |||
33 | #define STA_READ_LU(name, field) STA_READ(name, 20, field, "%lu\n") | 33 | #define STA_READ_LU(name, field) STA_READ(name, 20, field, "%lu\n") |
34 | #define STA_READ_S(name, field) STA_READ(name, 20, field, "%s\n") | 34 | #define STA_READ_S(name, field) STA_READ(name, 20, field, "%s\n") |
35 | 35 | ||
36 | #define STA_READ_RATE(name, field) \ | 36 | #define STA_OPS(name) \ |
37 | static ssize_t sta_##name##_read(struct file *file, \ | 37 | static const struct file_operations sta_ ##name## _ops = { \ |
38 | char __user *userbuf, \ | 38 | .read = sta_##name##_read, \ |
39 | size_t count, loff_t *ppos) \ | 39 | .open = mac80211_open_file_generic, \ |
40 | { \ | ||
41 | struct sta_info *sta = file->private_data; \ | ||
42 | struct ieee80211_local *local = wdev_priv(sta->dev->ieee80211_ptr);\ | ||
43 | struct ieee80211_hw_mode *mode = local->oper_hw_mode; \ | ||
44 | char buf[20]; \ | ||
45 | int res = scnprintf(buf, sizeof(buf), "%d\n", \ | ||
46 | (sta->field >= 0 && \ | ||
47 | sta->field < mode->num_rates) ? \ | ||
48 | mode->rates[sta->field].rate : -1); \ | ||
49 | return simple_read_from_buffer(userbuf, count, ppos, buf, res); \ | ||
50 | } | 40 | } |
51 | 41 | ||
52 | #define STA_OPS(name) \ | 42 | #define STA_OPS_WR(name) \ |
53 | static const struct file_operations sta_ ##name## _ops = { \ | 43 | static const struct file_operations sta_ ##name## _ops = { \ |
54 | .read = sta_##name##_read, \ | 44 | .read = sta_##name##_read, \ |
45 | .write = sta_##name##_write, \ | ||
55 | .open = mac80211_open_file_generic, \ | 46 | .open = mac80211_open_file_generic, \ |
56 | } | 47 | } |
57 | 48 | ||
@@ -60,7 +51,7 @@ static const struct file_operations sta_ ##name## _ops = { \ | |||
60 | STA_OPS(name) | 51 | STA_OPS(name) |
61 | 52 | ||
62 | STA_FILE(aid, aid, D); | 53 | STA_FILE(aid, aid, D); |
63 | STA_FILE(dev, dev->name, S); | 54 | STA_FILE(dev, sdata->dev->name, S); |
64 | STA_FILE(rx_packets, rx_packets, LU); | 55 | STA_FILE(rx_packets, rx_packets, LU); |
65 | STA_FILE(tx_packets, tx_packets, LU); | 56 | STA_FILE(tx_packets, tx_packets, LU); |
66 | STA_FILE(rx_bytes, rx_bytes, LU); | 57 | STA_FILE(rx_bytes, rx_bytes, LU); |
@@ -70,27 +61,23 @@ STA_FILE(rx_fragments, rx_fragments, LU); | |||
70 | STA_FILE(rx_dropped, rx_dropped, LU); | 61 | STA_FILE(rx_dropped, rx_dropped, LU); |
71 | STA_FILE(tx_fragments, tx_fragments, LU); | 62 | STA_FILE(tx_fragments, tx_fragments, LU); |
72 | STA_FILE(tx_filtered, tx_filtered_count, LU); | 63 | STA_FILE(tx_filtered, tx_filtered_count, LU); |
73 | STA_FILE(txrate, txrate, RATE); | ||
74 | STA_FILE(last_txrate, last_txrate, RATE); | ||
75 | STA_FILE(tx_retry_failed, tx_retry_failed, LU); | 64 | STA_FILE(tx_retry_failed, tx_retry_failed, LU); |
76 | STA_FILE(tx_retry_count, tx_retry_count, LU); | 65 | STA_FILE(tx_retry_count, tx_retry_count, LU); |
77 | STA_FILE(last_rssi, last_rssi, D); | 66 | STA_FILE(last_rssi, last_rssi, D); |
78 | STA_FILE(last_signal, last_signal, D); | 67 | STA_FILE(last_signal, last_signal, D); |
79 | STA_FILE(last_noise, last_noise, D); | 68 | STA_FILE(last_noise, last_noise, D); |
80 | STA_FILE(channel_use, channel_use, D); | 69 | STA_FILE(channel_use, channel_use, D); |
81 | STA_FILE(wep_weak_iv_count, wep_weak_iv_count, D); | 70 | STA_FILE(wep_weak_iv_count, wep_weak_iv_count, LU); |
82 | 71 | ||
83 | static ssize_t sta_flags_read(struct file *file, char __user *userbuf, | 72 | static ssize_t sta_flags_read(struct file *file, char __user *userbuf, |
84 | size_t count, loff_t *ppos) | 73 | size_t count, loff_t *ppos) |
85 | { | 74 | { |
86 | char buf[100]; | 75 | char buf[100]; |
87 | struct sta_info *sta = file->private_data; | 76 | struct sta_info *sta = file->private_data; |
88 | int res = scnprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s%s%s", | 77 | int res = scnprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s", |
89 | sta->flags & WLAN_STA_AUTH ? "AUTH\n" : "", | 78 | sta->flags & WLAN_STA_AUTH ? "AUTH\n" : "", |
90 | sta->flags & WLAN_STA_ASSOC ? "ASSOC\n" : "", | 79 | sta->flags & WLAN_STA_ASSOC ? "ASSOC\n" : "", |
91 | sta->flags & WLAN_STA_PS ? "PS\n" : "", | 80 | sta->flags & WLAN_STA_PS ? "PS\n" : "", |
92 | sta->flags & WLAN_STA_TIM ? "TIM\n" : "", | ||
93 | sta->flags & WLAN_STA_PERM ? "PERM\n" : "", | ||
94 | sta->flags & WLAN_STA_AUTHORIZED ? "AUTHORIZED\n" : "", | 81 | sta->flags & WLAN_STA_AUTHORIZED ? "AUTHORIZED\n" : "", |
95 | sta->flags & WLAN_STA_SHORT_PREAMBLE ? "SHORT PREAMBLE\n" : "", | 82 | sta->flags & WLAN_STA_SHORT_PREAMBLE ? "SHORT PREAMBLE\n" : "", |
96 | sta->flags & WLAN_STA_WME ? "WME\n" : "", | 83 | sta->flags & WLAN_STA_WME ? "WME\n" : "", |
@@ -111,31 +98,6 @@ static ssize_t sta_num_ps_buf_frames_read(struct file *file, | |||
111 | } | 98 | } |
112 | STA_OPS(num_ps_buf_frames); | 99 | STA_OPS(num_ps_buf_frames); |
113 | 100 | ||
114 | static ssize_t sta_last_ack_rssi_read(struct file *file, char __user *userbuf, | ||
115 | size_t count, loff_t *ppos) | ||
116 | { | ||
117 | char buf[100]; | ||
118 | struct sta_info *sta = file->private_data; | ||
119 | int res = scnprintf(buf, sizeof(buf), "%d %d %d\n", | ||
120 | sta->last_ack_rssi[0], | ||
121 | sta->last_ack_rssi[1], | ||
122 | sta->last_ack_rssi[2]); | ||
123 | return simple_read_from_buffer(userbuf, count, ppos, buf, res); | ||
124 | } | ||
125 | STA_OPS(last_ack_rssi); | ||
126 | |||
127 | static ssize_t sta_last_ack_ms_read(struct file *file, char __user *userbuf, | ||
128 | size_t count, loff_t *ppos) | ||
129 | { | ||
130 | char buf[20]; | ||
131 | struct sta_info *sta = file->private_data; | ||
132 | int res = scnprintf(buf, sizeof(buf), "%d\n", | ||
133 | sta->last_ack ? | ||
134 | jiffies_to_msecs(jiffies - sta->last_ack) : -1); | ||
135 | return simple_read_from_buffer(userbuf, count, ppos, buf, res); | ||
136 | } | ||
137 | STA_OPS(last_ack_ms); | ||
138 | |||
139 | static ssize_t sta_inactive_ms_read(struct file *file, char __user *userbuf, | 101 | static ssize_t sta_inactive_ms_read(struct file *file, char __user *userbuf, |
140 | size_t count, loff_t *ppos) | 102 | size_t count, loff_t *ppos) |
141 | { | 103 | { |
@@ -191,6 +153,113 @@ static ssize_t sta_wme_tx_queue_read(struct file *file, char __user *userbuf, | |||
191 | STA_OPS(wme_tx_queue); | 153 | STA_OPS(wme_tx_queue); |
192 | #endif | 154 | #endif |
193 | 155 | ||
156 | static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf, | ||
157 | size_t count, loff_t *ppos) | ||
158 | { | ||
159 | char buf[768], *p = buf; | ||
160 | int i; | ||
161 | struct sta_info *sta = file->private_data; | ||
162 | p += scnprintf(p, sizeof(buf)+buf-p, "Agg state for STA is:\n"); | ||
163 | p += scnprintf(p, sizeof(buf)+buf-p, " STA next dialog_token is %d\n" | ||
164 | " TIDs info is:\n TID :", | ||
165 | (sta->ampdu_mlme.dialog_token_allocator + 1)); | ||
166 | for (i = 0; i < STA_TID_NUM; i++) | ||
167 | p += scnprintf(p, sizeof(buf)+buf-p, "%5d", i); | ||
168 | |||
169 | p += scnprintf(p, sizeof(buf)+buf-p, "\n RX :"); | ||
170 | for (i = 0; i < STA_TID_NUM; i++) | ||
171 | p += scnprintf(p, sizeof(buf)+buf-p, "%5d", | ||
172 | sta->ampdu_mlme.tid_rx[i].state); | ||
173 | |||
174 | p += scnprintf(p, sizeof(buf)+buf-p, "\n DTKN:"); | ||
175 | for (i = 0; i < STA_TID_NUM; i++) | ||
176 | p += scnprintf(p, sizeof(buf)+buf-p, "%5d", | ||
177 | sta->ampdu_mlme.tid_rx[i].dialog_token); | ||
178 | |||
179 | p += scnprintf(p, sizeof(buf)+buf-p, "\n TX :"); | ||
180 | for (i = 0; i < STA_TID_NUM; i++) | ||
181 | p += scnprintf(p, sizeof(buf)+buf-p, "%5d", | ||
182 | sta->ampdu_mlme.tid_tx[i].state); | ||
183 | |||
184 | p += scnprintf(p, sizeof(buf)+buf-p, "\n DTKN:"); | ||
185 | for (i = 0; i < STA_TID_NUM; i++) | ||
186 | p += scnprintf(p, sizeof(buf)+buf-p, "%5d", | ||
187 | sta->ampdu_mlme.tid_tx[i].dialog_token); | ||
188 | |||
189 | p += scnprintf(p, sizeof(buf)+buf-p, "\n SSN :"); | ||
190 | for (i = 0; i < STA_TID_NUM; i++) | ||
191 | p += scnprintf(p, sizeof(buf)+buf-p, "%5d", | ||
192 | sta->ampdu_mlme.tid_tx[i].ssn); | ||
193 | |||
194 | p += scnprintf(p, sizeof(buf)+buf-p, "\n"); | ||
195 | |||
196 | return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); | ||
197 | } | ||
198 | |||
199 | static ssize_t sta_agg_status_write(struct file *file, | ||
200 | const char __user *user_buf, size_t count, loff_t *ppos) | ||
201 | { | ||
202 | struct sta_info *sta = file->private_data; | ||
203 | struct net_device *dev = sta->sdata->dev; | ||
204 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
205 | struct ieee80211_hw *hw = &local->hw; | ||
206 | u8 *da = sta->addr; | ||
207 | static int tid_static_tx[16] = {0, 0, 0, 0, 0, 0, 0, 0, | ||
208 | 0, 0, 0, 0, 0, 0, 0, 0}; | ||
209 | static int tid_static_rx[16] = {1, 1, 1, 1, 1, 1, 1, 1, | ||
210 | 1, 1, 1, 1, 1, 1, 1, 1}; | ||
211 | char *endp; | ||
212 | char buf[32]; | ||
213 | int buf_size, rs; | ||
214 | unsigned int tid_num; | ||
215 | char state[4]; | ||
216 | |||
217 | memset(buf, 0x00, sizeof(buf)); | ||
218 | buf_size = min(count, (sizeof(buf)-1)); | ||
219 | if (copy_from_user(buf, user_buf, buf_size)) | ||
220 | return -EFAULT; | ||
221 | |||
222 | tid_num = simple_strtoul(buf, &endp, 0); | ||
223 | if (endp == buf) | ||
224 | return -EINVAL; | ||
225 | |||
226 | if ((tid_num >= 100) && (tid_num <= 115)) { | ||
227 | /* toggle Rx aggregation command */ | ||
228 | tid_num = tid_num - 100; | ||
229 | if (tid_static_rx[tid_num] == 1) { | ||
230 | strcpy(state, "off "); | ||
231 | ieee80211_sta_stop_rx_ba_session(dev, da, tid_num, 0, | ||
232 | WLAN_REASON_QSTA_REQUIRE_SETUP); | ||
233 | sta->ampdu_mlme.tid_rx[tid_num].buf_size = 0xFF; | ||
234 | tid_static_rx[tid_num] = 0; | ||
235 | } else { | ||
236 | strcpy(state, "on "); | ||
237 | sta->ampdu_mlme.tid_rx[tid_num].buf_size = 0x00; | ||
238 | tid_static_rx[tid_num] = 1; | ||
239 | } | ||
240 | printk(KERN_DEBUG "debugfs - try switching tid %u %s\n", | ||
241 | tid_num, state); | ||
242 | } else if ((tid_num >= 0) && (tid_num <= 15)) { | ||
243 | /* toggle Tx aggregation command */ | ||
244 | if (tid_static_tx[tid_num] == 0) { | ||
245 | strcpy(state, "on "); | ||
246 | rs = ieee80211_start_tx_ba_session(hw, da, tid_num); | ||
247 | if (rs == 0) | ||
248 | tid_static_tx[tid_num] = 1; | ||
249 | } else { | ||
250 | strcpy(state, "off"); | ||
251 | rs = ieee80211_stop_tx_ba_session(hw, da, tid_num, 1); | ||
252 | if (rs == 0) | ||
253 | tid_static_tx[tid_num] = 0; | ||
254 | } | ||
255 | printk(KERN_DEBUG "debugfs - switching tid %u %s, return=%d\n", | ||
256 | tid_num, state, rs); | ||
257 | } | ||
258 | |||
259 | return count; | ||
260 | } | ||
261 | STA_OPS_WR(agg_status); | ||
262 | |||
194 | #define DEBUGFS_ADD(name) \ | 263 | #define DEBUGFS_ADD(name) \ |
195 | sta->debugfs.name = debugfs_create_file(#name, 0444, \ | 264 | sta->debugfs.name = debugfs_create_file(#name, 0444, \ |
196 | sta->debugfs.dir, sta, &sta_ ##name## _ops); | 265 | sta->debugfs.dir, sta, &sta_ ##name## _ops); |
@@ -203,12 +272,13 @@ STA_OPS(wme_tx_queue); | |||
203 | void ieee80211_sta_debugfs_add(struct sta_info *sta) | 272 | void ieee80211_sta_debugfs_add(struct sta_info *sta) |
204 | { | 273 | { |
205 | struct dentry *stations_dir = sta->local->debugfs.stations; | 274 | struct dentry *stations_dir = sta->local->debugfs.stations; |
206 | DECLARE_MAC_BUF(mac); | 275 | DECLARE_MAC_BUF(mbuf); |
276 | u8 *mac; | ||
207 | 277 | ||
208 | if (!stations_dir) | 278 | if (!stations_dir) |
209 | return; | 279 | return; |
210 | 280 | ||
211 | print_mac(mac, sta->addr); | 281 | mac = print_mac(mbuf, sta->addr); |
212 | 282 | ||
213 | sta->debugfs.dir = debugfs_create_dir(mac, stations_dir); | 283 | sta->debugfs.dir = debugfs_create_dir(mac, stations_dir); |
214 | if (!sta->debugfs.dir) | 284 | if (!sta->debugfs.dir) |
@@ -216,28 +286,26 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta) | |||
216 | 286 | ||
217 | DEBUGFS_ADD(flags); | 287 | DEBUGFS_ADD(flags); |
218 | DEBUGFS_ADD(num_ps_buf_frames); | 288 | DEBUGFS_ADD(num_ps_buf_frames); |
219 | DEBUGFS_ADD(last_ack_rssi); | ||
220 | DEBUGFS_ADD(last_ack_ms); | ||
221 | DEBUGFS_ADD(inactive_ms); | 289 | DEBUGFS_ADD(inactive_ms); |
222 | DEBUGFS_ADD(last_seq_ctrl); | 290 | DEBUGFS_ADD(last_seq_ctrl); |
223 | #ifdef CONFIG_MAC80211_DEBUG_COUNTERS | 291 | #ifdef CONFIG_MAC80211_DEBUG_COUNTERS |
224 | DEBUGFS_ADD(wme_rx_queue); | 292 | DEBUGFS_ADD(wme_rx_queue); |
225 | DEBUGFS_ADD(wme_tx_queue); | 293 | DEBUGFS_ADD(wme_tx_queue); |
226 | #endif | 294 | #endif |
295 | DEBUGFS_ADD(agg_status); | ||
227 | } | 296 | } |
228 | 297 | ||
229 | void ieee80211_sta_debugfs_remove(struct sta_info *sta) | 298 | void ieee80211_sta_debugfs_remove(struct sta_info *sta) |
230 | { | 299 | { |
231 | DEBUGFS_DEL(flags); | 300 | DEBUGFS_DEL(flags); |
232 | DEBUGFS_DEL(num_ps_buf_frames); | 301 | DEBUGFS_DEL(num_ps_buf_frames); |
233 | DEBUGFS_DEL(last_ack_rssi); | ||
234 | DEBUGFS_DEL(last_ack_ms); | ||
235 | DEBUGFS_DEL(inactive_ms); | 302 | DEBUGFS_DEL(inactive_ms); |
236 | DEBUGFS_DEL(last_seq_ctrl); | 303 | DEBUGFS_DEL(last_seq_ctrl); |
237 | #ifdef CONFIG_MAC80211_DEBUG_COUNTERS | 304 | #ifdef CONFIG_MAC80211_DEBUG_COUNTERS |
238 | DEBUGFS_DEL(wme_rx_queue); | 305 | DEBUGFS_DEL(wme_rx_queue); |
239 | DEBUGFS_DEL(wme_tx_queue); | 306 | DEBUGFS_DEL(wme_tx_queue); |
240 | #endif | 307 | #endif |
308 | DEBUGFS_DEL(agg_status); | ||
241 | 309 | ||
242 | debugfs_remove(sta->debugfs.dir); | 310 | debugfs_remove(sta->debugfs.dir); |
243 | sta->debugfs.dir = NULL; | 311 | sta->debugfs.dir = NULL; |
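The STA_OPS_WR() macro added above follows the existing STA_OPS() pattern but also wires in a write handler, which is what makes the new per-station agg_status file writable. As a rough illustration (an expansion sketch, not text from the patch), STA_OPS_WR(agg_status) boils down to:

	static const struct file_operations sta_agg_status_ops = {
		.read	= sta_agg_status_read,
		.write	= sta_agg_status_write,
		.open	= mac80211_open_file_generic,
	};

Per sta_agg_status_write() earlier in this file, writing a TID value of 0-15 toggles TX aggregation for that TID through ieee80211_start_tx_ba_session()/ieee80211_stop_tx_ba_session(), while 100-115 toggles RX aggregation for TID (value - 100).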
diff --git a/net/mac80211/debugfs_sta.h b/net/mac80211/debugfs_sta.h index 574a1cd54b96..8b608903259f 100644 --- a/net/mac80211/debugfs_sta.h +++ b/net/mac80211/debugfs_sta.h | |||
@@ -1,6 +1,8 @@ | |||
1 | #ifndef __MAC80211_DEBUGFS_STA_H | 1 | #ifndef __MAC80211_DEBUGFS_STA_H |
2 | #define __MAC80211_DEBUGFS_STA_H | 2 | #define __MAC80211_DEBUGFS_STA_H |
3 | 3 | ||
4 | #include "sta_info.h" | ||
5 | |||
4 | #ifdef CONFIG_MAC80211_DEBUGFS | 6 | #ifdef CONFIG_MAC80211_DEBUGFS |
5 | void ieee80211_sta_debugfs_add(struct sta_info *sta); | 7 | void ieee80211_sta_debugfs_add(struct sta_info *sta); |
6 | void ieee80211_sta_debugfs_remove(struct sta_info *sta); | 8 | void ieee80211_sta_debugfs_remove(struct sta_info *sta); |
diff --git a/net/mac80211/ieee80211.c b/net/mac80211/ieee80211.c index 28bcdf9fc3df..55b63712e48c 100644 --- a/net/mac80211/ieee80211.c +++ b/net/mac80211/ieee80211.c | |||
@@ -26,6 +26,7 @@ | |||
26 | 26 | ||
27 | #include "ieee80211_i.h" | 27 | #include "ieee80211_i.h" |
28 | #include "ieee80211_rate.h" | 28 | #include "ieee80211_rate.h" |
29 | #include "mesh.h" | ||
29 | #include "wep.h" | 30 | #include "wep.h" |
30 | #include "wme.h" | 31 | #include "wme.h" |
31 | #include "aes_ccm.h" | 32 | #include "aes_ccm.h" |
@@ -67,9 +68,19 @@ static void ieee80211_configure_filter(struct ieee80211_local *local) | |||
67 | new_flags |= FIF_ALLMULTI; | 68 | new_flags |= FIF_ALLMULTI; |
68 | 69 | ||
69 | if (local->monitors) | 70 | if (local->monitors) |
70 | new_flags |= FIF_CONTROL | | 71 | new_flags |= FIF_BCN_PRBRESP_PROMISC; |
71 | FIF_OTHER_BSS | | 72 | |
72 | FIF_BCN_PRBRESP_PROMISC; | 73 | if (local->fif_fcsfail) |
74 | new_flags |= FIF_FCSFAIL; | ||
75 | |||
76 | if (local->fif_plcpfail) | ||
77 | new_flags |= FIF_PLCPFAIL; | ||
78 | |||
79 | if (local->fif_control) | ||
80 | new_flags |= FIF_CONTROL; | ||
81 | |||
82 | if (local->fif_other_bss) | ||
83 | new_flags |= FIF_OTHER_BSS; | ||
73 | 84 | ||
74 | changed_flags = local->filter_flags ^ new_flags; | 85 | changed_flags = local->filter_flags ^ new_flags; |
75 | 86 | ||
@@ -128,9 +139,15 @@ static void ieee80211_master_set_multicast_list(struct net_device *dev) | |||
128 | 139 | ||
129 | static int ieee80211_change_mtu(struct net_device *dev, int new_mtu) | 140 | static int ieee80211_change_mtu(struct net_device *dev, int new_mtu) |
130 | { | 141 | { |
142 | int meshhdrlen; | ||
143 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
144 | |||
145 | meshhdrlen = (sdata->vif.type == IEEE80211_IF_TYPE_MESH_POINT) ? 5 : 0; | ||
146 | |||
131 | /* FIX: what would be proper limits for MTU? | 147 | /* FIX: what would be proper limits for MTU? |
132 | * This interface uses 802.3 frames. */ | 148 | * This interface uses 802.3 frames. */ |
133 | if (new_mtu < 256 || new_mtu > IEEE80211_MAX_DATA_LEN - 24 - 6) { | 149 | if (new_mtu < 256 || |
150 | new_mtu > IEEE80211_MAX_DATA_LEN - 24 - 6 - meshhdrlen) { | ||
134 | printk(KERN_WARNING "%s: invalid MTU %d\n", | 151 | printk(KERN_WARNING "%s: invalid MTU %d\n", |
135 | dev->name, new_mtu); | 152 | dev->name, new_mtu); |
136 | return -EINVAL; | 153 | return -EINVAL; |
@@ -166,6 +183,7 @@ static int ieee80211_open(struct net_device *dev) | |||
166 | struct ieee80211_if_init_conf conf; | 183 | struct ieee80211_if_init_conf conf; |
167 | int res; | 184 | int res; |
168 | bool need_hw_reconfig = 0; | 185 | bool need_hw_reconfig = 0; |
186 | struct sta_info *sta; | ||
169 | 187 | ||
170 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 188 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
171 | 189 | ||
@@ -173,8 +191,52 @@ static int ieee80211_open(struct net_device *dev) | |||
173 | list_for_each_entry(nsdata, &local->interfaces, list) { | 191 | list_for_each_entry(nsdata, &local->interfaces, list) { |
174 | struct net_device *ndev = nsdata->dev; | 192 | struct net_device *ndev = nsdata->dev; |
175 | 193 | ||
176 | if (ndev != dev && ndev != local->mdev && netif_running(ndev) && | 194 | if (ndev != dev && ndev != local->mdev && netif_running(ndev)) { |
177 | compare_ether_addr(dev->dev_addr, ndev->dev_addr) == 0) { | 195 | /* |
196 | * Allow only a single IBSS interface to be up at any | ||
197 | * time. This is restricted because beacon distribution | ||
198 | * cannot work properly if both are in the same IBSS. | ||
199 | * | ||
200 | * To remove this restriction we'd have to disallow them | ||
201 | * from setting the same SSID on different IBSS interfaces | ||
202 | * belonging to the same hardware. Then, however, we're | ||
203 | * faced with having to adopt two different TSF timers... | ||
204 | */ | ||
205 | if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS && | ||
206 | nsdata->vif.type == IEEE80211_IF_TYPE_IBSS) | ||
207 | return -EBUSY; | ||
208 | |||
209 | /* | ||
210 | * Disallow multiple IBSS/STA mode interfaces. | ||
211 | * | ||
212 | * This is a technical restriction; it is possible, although | ||
213 | * most likely not IEEE 802.11 compliant, to have multiple | ||
214 | * STAs on a single piece of hardware (the TSF timer will not | ||
215 | * be adjusted properly). | ||
216 | * | ||
217 | * However, because mac80211 uses the master device's BSS | ||
218 | * information for each STA/IBSS interface, doing this will | ||
219 | * currently corrupt that BSS information completely, unless | ||
220 | * both STAs are associated to the same BSS (a not very | ||
221 | * useful case). | ||
222 | * | ||
223 | * To remove this restriction, the BSS information needs to | ||
224 | * be embedded in the STA/IBSS mode sdata instead of using | ||
225 | * the master device's BSS structure. | ||
226 | */ | ||
227 | if ((sdata->vif.type == IEEE80211_IF_TYPE_STA || | ||
228 | sdata->vif.type == IEEE80211_IF_TYPE_IBSS) && | ||
229 | (nsdata->vif.type == IEEE80211_IF_TYPE_STA || | ||
230 | nsdata->vif.type == IEEE80211_IF_TYPE_IBSS)) | ||
231 | return -EBUSY; | ||
232 | |||
233 | /* | ||
234 | * The remaining checks are only performed for interfaces | ||
235 | * with the same MAC address. | ||
236 | */ | ||
237 | if (compare_ether_addr(dev->dev_addr, ndev->dev_addr)) | ||
238 | continue; | ||
239 | |||
178 | /* | 240 | /* |
179 | * check whether it may have the same address | 241 | * check whether it may have the same address |
180 | */ | 242 | */ |
@@ -186,8 +248,7 @@ static int ieee80211_open(struct net_device *dev) | |||
186 | * can only add VLANs to enabled APs | 248 | * can only add VLANs to enabled APs |
187 | */ | 249 | */ |
188 | if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN && | 250 | if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN && |
189 | nsdata->vif.type == IEEE80211_IF_TYPE_AP && | 251 | nsdata->vif.type == IEEE80211_IF_TYPE_AP) |
190 | netif_running(nsdata->dev)) | ||
191 | sdata->u.vlan.ap = nsdata; | 252 | sdata->u.vlan.ap = nsdata; |
192 | } | 253 | } |
193 | } | 254 | } |
@@ -196,6 +257,20 @@ static int ieee80211_open(struct net_device *dev) | |||
196 | case IEEE80211_IF_TYPE_WDS: | 257 | case IEEE80211_IF_TYPE_WDS: |
197 | if (is_zero_ether_addr(sdata->u.wds.remote_addr)) | 258 | if (is_zero_ether_addr(sdata->u.wds.remote_addr)) |
198 | return -ENOLINK; | 259 | return -ENOLINK; |
260 | |||
261 | /* Create STA entry for the WDS peer */ | ||
262 | sta = sta_info_alloc(sdata, sdata->u.wds.remote_addr, | ||
263 | GFP_KERNEL); | ||
264 | if (!sta) | ||
265 | return -ENOMEM; | ||
266 | |||
267 | sta->flags |= WLAN_STA_AUTHORIZED; | ||
268 | |||
269 | res = sta_info_insert(sta); | ||
270 | if (res) { | ||
271 | sta_info_destroy(sta); | ||
272 | return res; | ||
273 | } | ||
199 | break; | 274 | break; |
200 | case IEEE80211_IF_TYPE_VLAN: | 275 | case IEEE80211_IF_TYPE_VLAN: |
201 | if (!sdata->u.vlan.ap) | 276 | if (!sdata->u.vlan.ap) |
@@ -205,6 +280,7 @@ static int ieee80211_open(struct net_device *dev) | |||
205 | case IEEE80211_IF_TYPE_STA: | 280 | case IEEE80211_IF_TYPE_STA: |
206 | case IEEE80211_IF_TYPE_MNTR: | 281 | case IEEE80211_IF_TYPE_MNTR: |
207 | case IEEE80211_IF_TYPE_IBSS: | 282 | case IEEE80211_IF_TYPE_IBSS: |
283 | case IEEE80211_IF_TYPE_MESH_POINT: | ||
208 | /* no special treatment */ | 284 | /* no special treatment */ |
209 | break; | 285 | break; |
210 | case IEEE80211_IF_TYPE_INVALID: | 286 | case IEEE80211_IF_TYPE_INVALID: |
@@ -229,15 +305,28 @@ static int ieee80211_open(struct net_device *dev) | |||
229 | /* no need to tell driver */ | 305 | /* no need to tell driver */ |
230 | break; | 306 | break; |
231 | case IEEE80211_IF_TYPE_MNTR: | 307 | case IEEE80211_IF_TYPE_MNTR: |
308 | if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) { | ||
309 | local->cooked_mntrs++; | ||
310 | break; | ||
311 | } | ||
312 | |||
232 | /* must be before the call to ieee80211_configure_filter */ | 313 | /* must be before the call to ieee80211_configure_filter */ |
233 | local->monitors++; | 314 | local->monitors++; |
234 | if (local->monitors == 1) { | 315 | if (local->monitors == 1) |
235 | netif_tx_lock_bh(local->mdev); | ||
236 | ieee80211_configure_filter(local); | ||
237 | netif_tx_unlock_bh(local->mdev); | ||
238 | |||
239 | local->hw.conf.flags |= IEEE80211_CONF_RADIOTAP; | 316 | local->hw.conf.flags |= IEEE80211_CONF_RADIOTAP; |
240 | } | 317 | |
318 | if (sdata->u.mntr_flags & MONITOR_FLAG_FCSFAIL) | ||
319 | local->fif_fcsfail++; | ||
320 | if (sdata->u.mntr_flags & MONITOR_FLAG_PLCPFAIL) | ||
321 | local->fif_plcpfail++; | ||
322 | if (sdata->u.mntr_flags & MONITOR_FLAG_CONTROL) | ||
323 | local->fif_control++; | ||
324 | if (sdata->u.mntr_flags & MONITOR_FLAG_OTHER_BSS) | ||
325 | local->fif_other_bss++; | ||
326 | |||
327 | netif_tx_lock_bh(local->mdev); | ||
328 | ieee80211_configure_filter(local); | ||
329 | netif_tx_unlock_bh(local->mdev); | ||
241 | break; | 330 | break; |
242 | case IEEE80211_IF_TYPE_STA: | 331 | case IEEE80211_IF_TYPE_STA: |
243 | case IEEE80211_IF_TYPE_IBSS: | 332 | case IEEE80211_IF_TYPE_IBSS: |
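Taken together, the monitor-interface changes in this file replace the old all-or-nothing monitor filter with per-flag reference counters on struct ieee80211_local (cooked_mntrs, fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss). Each monitor interface bumps the counters matching its sdata->u.mntr_flags when it is opened and drops them again in ieee80211_stop(), and ieee80211_configure_filter() above turns every non-zero counter into the corresponding FIF_* flag. A condensed sketch of the open-time accounting, using only names visible in these hunks (the real code also sets IEEE80211_CONF_RADIOTAP for the first monitor and takes netif_tx_lock_bh() around the filter update):

	if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) {
		/* cooked monitors never touch the hardware filter */
		local->cooked_mntrs++;
	} else {
		local->monitors++;
		if (sdata->u.mntr_flags & MONITOR_FLAG_FCSFAIL)
			local->fif_fcsfail++;
		if (sdata->u.mntr_flags & MONITOR_FLAG_PLCPFAIL)
			local->fif_plcpfail++;
		if (sdata->u.mntr_flags & MONITOR_FLAG_CONTROL)
			local->fif_control++;
		if (sdata->u.mntr_flags & MONITOR_FLAG_OTHER_BSS)
			local->fif_other_bss++;
		/* fold the counters back into FIF_* filter flags */
		ieee80211_configure_filter(local);
	}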
@@ -293,24 +382,51 @@ static int ieee80211_open(struct net_device *dev) | |||
293 | 382 | ||
294 | static int ieee80211_stop(struct net_device *dev) | 383 | static int ieee80211_stop(struct net_device *dev) |
295 | { | 384 | { |
296 | struct ieee80211_sub_if_data *sdata; | 385 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
297 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 386 | struct ieee80211_local *local = sdata->local; |
298 | struct ieee80211_if_init_conf conf; | 387 | struct ieee80211_if_init_conf conf; |
299 | struct sta_info *sta; | 388 | struct sta_info *sta; |
300 | int i; | 389 | int i; |
301 | 390 | ||
302 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 391 | /* |
392 | * Stop TX on this interface first. | ||
393 | */ | ||
394 | netif_stop_queue(dev); | ||
395 | |||
396 | /* | ||
397 | * Now delete all active aggregation sessions. | ||
398 | */ | ||
399 | rcu_read_lock(); | ||
303 | 400 | ||
304 | list_for_each_entry(sta, &local->sta_list, list) { | 401 | list_for_each_entry_rcu(sta, &local->sta_list, list) { |
305 | if (sta->dev == dev) | 402 | if (sta->sdata == sdata) |
306 | for (i = 0; i < STA_TID_NUM; i++) | 403 | for (i = 0; i < STA_TID_NUM; i++) |
307 | ieee80211_sta_stop_rx_ba_session(sta->dev, | 404 | ieee80211_sta_stop_rx_ba_session(sdata->dev, |
308 | sta->addr, i, | 405 | sta->addr, i, |
309 | WLAN_BACK_RECIPIENT, | 406 | WLAN_BACK_RECIPIENT, |
310 | WLAN_REASON_QSTA_LEAVE_QBSS); | 407 | WLAN_REASON_QSTA_LEAVE_QBSS); |
311 | } | 408 | } |
312 | 409 | ||
313 | netif_stop_queue(dev); | 410 | rcu_read_unlock(); |
411 | |||
412 | /* | ||
413 | * Remove all stations associated with this interface. | ||
414 | * | ||
415 | * This must be done before calling ops->remove_interface() | ||
416 | * because otherwise we can later invoke ops->sta_notify() | ||
417 | * whenever the STAs are removed, and that invalidates driver | ||
418 | * assumptions about always getting a vif pointer that is valid | ||
419 | * (because if we remove a STA after ops->remove_interface() | ||
420 | * the driver will have removed the vif info already!) | ||
421 | * | ||
422 | * We could relax this and only unlink the stations from the | ||
423 | * hash table and list but keep them on a per-sdata list that | ||
424 | * will be inserted back again when the interface is brought | ||
425 | * up again, but I don't currently see a use case for that, | ||
426 | * except with WDS which gets a STA entry created when it is | ||
427 | * brought up. | ||
428 | */ | ||
429 | sta_info_flush(local, sdata); | ||
314 | 430 | ||
315 | /* | 431 | /* |
316 | * Don't count this interface for promisc/allmulti while it | 432 | * Don't count this interface for promisc/allmulti while it |
@@ -352,15 +468,29 @@ static int ieee80211_stop(struct net_device *dev) | |||
352 | /* no need to tell driver */ | 468 | /* no need to tell driver */ |
353 | break; | 469 | break; |
354 | case IEEE80211_IF_TYPE_MNTR: | 470 | case IEEE80211_IF_TYPE_MNTR: |
355 | local->monitors--; | 471 | if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) { |
356 | if (local->monitors == 0) { | 472 | local->cooked_mntrs--; |
357 | netif_tx_lock_bh(local->mdev); | 473 | break; |
358 | ieee80211_configure_filter(local); | 474 | } |
359 | netif_tx_unlock_bh(local->mdev); | ||
360 | 475 | ||
476 | local->monitors--; | ||
477 | if (local->monitors == 0) | ||
361 | local->hw.conf.flags &= ~IEEE80211_CONF_RADIOTAP; | 478 | local->hw.conf.flags &= ~IEEE80211_CONF_RADIOTAP; |
362 | } | 479 | |
480 | if (sdata->u.mntr_flags & MONITOR_FLAG_FCSFAIL) | ||
481 | local->fif_fcsfail--; | ||
482 | if (sdata->u.mntr_flags & MONITOR_FLAG_PLCPFAIL) | ||
483 | local->fif_plcpfail--; | ||
484 | if (sdata->u.mntr_flags & MONITOR_FLAG_CONTROL) | ||
485 | local->fif_control--; | ||
486 | if (sdata->u.mntr_flags & MONITOR_FLAG_OTHER_BSS) | ||
487 | local->fif_other_bss--; | ||
488 | |||
489 | netif_tx_lock_bh(local->mdev); | ||
490 | ieee80211_configure_filter(local); | ||
491 | netif_tx_unlock_bh(local->mdev); | ||
363 | break; | 492 | break; |
493 | case IEEE80211_IF_TYPE_MESH_POINT: | ||
364 | case IEEE80211_IF_TYPE_STA: | 494 | case IEEE80211_IF_TYPE_STA: |
365 | case IEEE80211_IF_TYPE_IBSS: | 495 | case IEEE80211_IF_TYPE_IBSS: |
366 | sdata->u.sta.state = IEEE80211_DISABLED; | 496 | sdata->u.sta.state = IEEE80211_DISABLED; |
@@ -414,6 +544,339 @@ static int ieee80211_stop(struct net_device *dev) | |||
414 | return 0; | 544 | return 0; |
415 | } | 545 | } |
416 | 546 | ||
547 | int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid) | ||
548 | { | ||
549 | struct ieee80211_local *local = hw_to_local(hw); | ||
550 | struct sta_info *sta; | ||
551 | struct ieee80211_sub_if_data *sdata; | ||
552 | u16 start_seq_num = 0; | ||
553 | u8 *state; | ||
554 | int ret; | ||
555 | DECLARE_MAC_BUF(mac); | ||
556 | |||
557 | if (tid >= STA_TID_NUM) | ||
558 | return -EINVAL; | ||
559 | |||
560 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
561 | printk(KERN_DEBUG "Open BA session requested for %s tid %u\n", | ||
562 | print_mac(mac, ra), tid); | ||
563 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
564 | |||
565 | rcu_read_lock(); | ||
566 | |||
567 | sta = sta_info_get(local, ra); | ||
568 | if (!sta) { | ||
569 | printk(KERN_DEBUG "Could not find the station\n"); | ||
570 | rcu_read_unlock(); | ||
571 | return -ENOENT; | ||
572 | } | ||
573 | |||
574 | spin_lock_bh(&sta->ampdu_mlme.ampdu_tx); | ||
575 | |||
576 | /* we have tried too many times, receiver does not want A-MPDU */ | ||
577 | if (sta->ampdu_mlme.tid_tx[tid].addba_req_num > HT_AGG_MAX_RETRIES) { | ||
578 | ret = -EBUSY; | ||
579 | goto start_ba_exit; | ||
580 | } | ||
581 | |||
582 | state = &sta->ampdu_mlme.tid_tx[tid].state; | ||
583 | /* check if the TID is not in aggregation flow already */ | ||
584 | if (*state != HT_AGG_STATE_IDLE) { | ||
585 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
586 | printk(KERN_DEBUG "BA request denied - session is not " | ||
587 | "idle on tid %u\n", tid); | ||
588 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
589 | ret = -EAGAIN; | ||
590 | goto start_ba_exit; | ||
591 | } | ||
592 | |||
593 | /* ensure that the TX flow won't interrupt us | ||
594 | * until the end of the call to the requeue function */ | ||
595 | spin_lock_bh(&local->mdev->queue_lock); | ||
596 | |||
597 | /* create a new queue for this aggregation */ | ||
598 | ret = ieee80211_ht_agg_queue_add(local, sta, tid); | ||
599 | |||
600 | /* if no queue is available for aggregation, | ||
601 | * don't switch to aggregation */ | ||
602 | if (ret) { | ||
603 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
604 | printk(KERN_DEBUG "BA request denied - no queue available for" | ||
605 | " tid %d\n", tid); | ||
606 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
607 | spin_unlock_bh(&local->mdev->queue_lock); | ||
608 | goto start_ba_exit; | ||
609 | } | ||
610 | sdata = sta->sdata; | ||
611 | |||
612 | /* Ok, the Addba frame hasn't been sent yet, but if the driver calls | ||
613 | * the callback right away, it must see that the flow has begun */ | ||
614 | *state |= HT_ADDBA_REQUESTED_MSK; | ||
615 | |||
616 | if (local->ops->ampdu_action) | ||
617 | ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_TX_START, | ||
618 | ra, tid, &start_seq_num); | ||
619 | |||
620 | if (ret) { | ||
621 | /* No need to requeue the packets in the agg queue, since we | ||
622 | * held the tx lock: no packet could be enqueued to the newly | ||
623 | * allocated queue */ | ||
624 | ieee80211_ht_agg_queue_remove(local, sta, tid, 0); | ||
625 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
626 | printk(KERN_DEBUG "BA request denied - HW or queue unavailable" | ||
627 | " for tid %d\n", tid); | ||
628 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
629 | spin_unlock_bh(&local->mdev->queue_lock); | ||
630 | *state = HT_AGG_STATE_IDLE; | ||
631 | goto start_ba_exit; | ||
632 | } | ||
633 | |||
634 | /* Will put all the packets in the new SW queue */ | ||
635 | ieee80211_requeue(local, ieee802_1d_to_ac[tid]); | ||
636 | spin_unlock_bh(&local->mdev->queue_lock); | ||
637 | |||
638 | /* We have most probably almost emptied the legacy queue */ | ||
639 | /* ieee80211_wake_queue(local_to_hw(local), ieee802_1d_to_ac[tid]); */ | ||
640 | |||
641 | /* send an addBA request */ | ||
642 | sta->ampdu_mlme.dialog_token_allocator++; | ||
643 | sta->ampdu_mlme.tid_tx[tid].dialog_token = | ||
644 | sta->ampdu_mlme.dialog_token_allocator; | ||
645 | sta->ampdu_mlme.tid_tx[tid].ssn = start_seq_num; | ||
646 | |||
647 | ieee80211_send_addba_request(sta->sdata->dev, ra, tid, | ||
648 | sta->ampdu_mlme.tid_tx[tid].dialog_token, | ||
649 | sta->ampdu_mlme.tid_tx[tid].ssn, | ||
650 | 0x40, 5000); | ||
651 | |||
652 | /* activate the timer for the recipient's addBA response */ | ||
653 | sta->ampdu_mlme.tid_tx[tid].addba_resp_timer.expires = | ||
654 | jiffies + ADDBA_RESP_INTERVAL; | ||
655 | add_timer(&sta->ampdu_mlme.tid_tx[tid].addba_resp_timer); | ||
656 | printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid); | ||
657 | |||
658 | start_ba_exit: | ||
659 | spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); | ||
660 | rcu_read_unlock(); | ||
661 | return ret; | ||
662 | } | ||
663 | EXPORT_SYMBOL(ieee80211_start_tx_ba_session); | ||
664 | |||
665 | int ieee80211_stop_tx_ba_session(struct ieee80211_hw *hw, | ||
666 | u8 *ra, u16 tid, | ||
667 | enum ieee80211_back_parties initiator) | ||
668 | { | ||
669 | struct ieee80211_local *local = hw_to_local(hw); | ||
670 | struct sta_info *sta; | ||
671 | u8 *state; | ||
672 | int ret = 0; | ||
673 | DECLARE_MAC_BUF(mac); | ||
674 | |||
675 | if (tid >= STA_TID_NUM) | ||
676 | return -EINVAL; | ||
677 | |||
678 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
679 | printk(KERN_DEBUG "Stop a BA session requested for %s tid %u\n", | ||
680 | print_mac(mac, ra), tid); | ||
681 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
682 | |||
683 | rcu_read_lock(); | ||
684 | sta = sta_info_get(local, ra); | ||
685 | if (!sta) { | ||
686 | rcu_read_unlock(); | ||
687 | return -ENOENT; | ||
688 | } | ||
689 | |||
690 | /* check if the TID is in aggregation */ | ||
691 | state = &sta->ampdu_mlme.tid_tx[tid].state; | ||
692 | spin_lock_bh(&sta->ampdu_mlme.ampdu_tx); | ||
693 | |||
694 | if (*state != HT_AGG_STATE_OPERATIONAL) { | ||
695 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
696 | printk(KERN_DEBUG "Try to stop Tx aggregation on" | ||
697 | " non active TID\n"); | ||
698 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
699 | ret = -ENOENT; | ||
700 | goto stop_BA_exit; | ||
701 | } | ||
702 | |||
703 | ieee80211_stop_queue(hw, sta->tid_to_tx_q[tid]); | ||
704 | |||
705 | *state = HT_AGG_STATE_REQ_STOP_BA_MSK | | ||
706 | (initiator << HT_AGG_STATE_INITIATOR_SHIFT); | ||
707 | |||
708 | if (local->ops->ampdu_action) | ||
709 | ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_TX_STOP, | ||
710 | ra, tid, NULL); | ||
711 | |||
712 | /* in case the HW denied going back to legacy */ | ||
713 | if (ret) { | ||
714 | WARN_ON(ret != -EBUSY); | ||
715 | *state = HT_AGG_STATE_OPERATIONAL; | ||
716 | ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]); | ||
717 | goto stop_BA_exit; | ||
718 | } | ||
719 | |||
720 | stop_BA_exit: | ||
721 | spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); | ||
722 | rcu_read_unlock(); | ||
723 | return ret; | ||
724 | } | ||
725 | EXPORT_SYMBOL(ieee80211_stop_tx_ba_session); | ||
726 | |||
727 | void ieee80211_start_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u16 tid) | ||
728 | { | ||
729 | struct ieee80211_local *local = hw_to_local(hw); | ||
730 | struct sta_info *sta; | ||
731 | u8 *state; | ||
732 | DECLARE_MAC_BUF(mac); | ||
733 | |||
734 | if (tid >= STA_TID_NUM) { | ||
735 | printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n", | ||
736 | tid, STA_TID_NUM); | ||
737 | return; | ||
738 | } | ||
739 | |||
740 | rcu_read_lock(); | ||
741 | sta = sta_info_get(local, ra); | ||
742 | if (!sta) { | ||
743 | rcu_read_unlock(); | ||
744 | printk(KERN_DEBUG "Could not find station: %s\n", | ||
745 | print_mac(mac, ra)); | ||
746 | return; | ||
747 | } | ||
748 | |||
749 | state = &sta->ampdu_mlme.tid_tx[tid].state; | ||
750 | spin_lock_bh(&sta->ampdu_mlme.ampdu_tx); | ||
751 | |||
752 | if (!(*state & HT_ADDBA_REQUESTED_MSK)) { | ||
753 | printk(KERN_DEBUG "addBA was not requested yet, state is %d\n", | ||
754 | *state); | ||
755 | spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); | ||
756 | rcu_read_unlock(); | ||
757 | return; | ||
758 | } | ||
759 | |||
760 | WARN_ON_ONCE(*state & HT_ADDBA_DRV_READY_MSK); | ||
761 | |||
762 | *state |= HT_ADDBA_DRV_READY_MSK; | ||
763 | |||
764 | if (*state == HT_AGG_STATE_OPERATIONAL) { | ||
765 | printk(KERN_DEBUG "Aggregation is on for tid %d \n", tid); | ||
766 | ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]); | ||
767 | } | ||
768 | spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); | ||
769 | rcu_read_unlock(); | ||
770 | } | ||
771 | EXPORT_SYMBOL(ieee80211_start_tx_ba_cb); | ||
772 | |||
773 | void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid) | ||
774 | { | ||
775 | struct ieee80211_local *local = hw_to_local(hw); | ||
776 | struct sta_info *sta; | ||
777 | u8 *state; | ||
778 | int agg_queue; | ||
779 | DECLARE_MAC_BUF(mac); | ||
780 | |||
781 | if (tid >= STA_TID_NUM) { | ||
782 | printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n", | ||
783 | tid, STA_TID_NUM); | ||
784 | return; | ||
785 | } | ||
786 | |||
787 | printk(KERN_DEBUG "Stop a BA session requested on DA %s tid %d\n", | ||
788 | print_mac(mac, ra), tid); | ||
789 | |||
790 | rcu_read_lock(); | ||
791 | sta = sta_info_get(local, ra); | ||
792 | if (!sta) { | ||
793 | printk(KERN_DEBUG "Could not find station: %s\n", | ||
794 | print_mac(mac, ra)); | ||
795 | rcu_read_unlock(); | ||
796 | return; | ||
797 | } | ||
798 | state = &sta->ampdu_mlme.tid_tx[tid].state; | ||
799 | |||
800 | spin_lock_bh(&sta->ampdu_mlme.ampdu_tx); | ||
801 | if ((*state & HT_AGG_STATE_REQ_STOP_BA_MSK) == 0) { | ||
802 | printk(KERN_DEBUG "unexpected callback to A-MPDU stop\n"); | ||
803 | spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); | ||
804 | rcu_read_unlock(); | ||
805 | return; | ||
806 | } | ||
807 | |||
808 | if (*state & HT_AGG_STATE_INITIATOR_MSK) | ||
809 | ieee80211_send_delba(sta->sdata->dev, ra, tid, | ||
810 | WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE); | ||
811 | |||
812 | agg_queue = sta->tid_to_tx_q[tid]; | ||
813 | |||
814 | /* avoid ordering issues: we are the only one that can modify | ||
815 | * the content of the qdiscs */ | ||
816 | spin_lock_bh(&local->mdev->queue_lock); | ||
817 | /* remove the queue for this aggregation */ | ||
818 | ieee80211_ht_agg_queue_remove(local, sta, tid, 1); | ||
819 | spin_unlock_bh(&local->mdev->queue_lock); | ||
820 | |||
821 | /* we just requeued all the frames that were in the removed | ||
822 | * queue, and since we might miss a softirq we do netif_schedule. | ||
823 | * ieee80211_wake_queue is not used here as this queue is not | ||
824 | * necessarily stopped */ | ||
825 | netif_schedule(local->mdev); | ||
826 | *state = HT_AGG_STATE_IDLE; | ||
827 | sta->ampdu_mlme.tid_tx[tid].addba_req_num = 0; | ||
828 | spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); | ||
829 | |||
830 | rcu_read_unlock(); | ||
831 | } | ||
832 | EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb); | ||
833 | |||
834 | void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_hw *hw, | ||
835 | const u8 *ra, u16 tid) | ||
836 | { | ||
837 | struct ieee80211_local *local = hw_to_local(hw); | ||
838 | struct ieee80211_ra_tid *ra_tid; | ||
839 | struct sk_buff *skb = dev_alloc_skb(0); | ||
840 | |||
841 | if (unlikely(!skb)) { | ||
842 | if (net_ratelimit()) | ||
843 | printk(KERN_WARNING "%s: Not enough memory, " | ||
844 | "dropping start BA session", skb->dev->name); | ||
845 | return; | ||
846 | } | ||
847 | ra_tid = (struct ieee80211_ra_tid *) &skb->cb; | ||
848 | memcpy(&ra_tid->ra, ra, ETH_ALEN); | ||
849 | ra_tid->tid = tid; | ||
850 | |||
851 | skb->pkt_type = IEEE80211_ADDBA_MSG; | ||
852 | skb_queue_tail(&local->skb_queue, skb); | ||
853 | tasklet_schedule(&local->tasklet); | ||
854 | } | ||
855 | EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe); | ||
856 | |||
857 | void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_hw *hw, | ||
858 | const u8 *ra, u16 tid) | ||
859 | { | ||
860 | struct ieee80211_local *local = hw_to_local(hw); | ||
861 | struct ieee80211_ra_tid *ra_tid; | ||
862 | struct sk_buff *skb = dev_alloc_skb(0); | ||
863 | |||
864 | if (unlikely(!skb)) { | ||
865 | if (net_ratelimit()) | ||
866 | printk(KERN_WARNING "%s: Not enough memory, " | ||
867 | "dropping stop BA session", skb->dev->name); | ||
868 | return; | ||
869 | } | ||
870 | ra_tid = (struct ieee80211_ra_tid *) &skb->cb; | ||
871 | memcpy(&ra_tid->ra, ra, ETH_ALEN); | ||
872 | ra_tid->tid = tid; | ||
873 | |||
874 | skb->pkt_type = IEEE80211_DELBA_MSG; | ||
875 | skb_queue_tail(&local->skb_queue, skb); | ||
876 | tasklet_schedule(&local->tasklet); | ||
877 | } | ||
878 | EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe); | ||
879 | |||
417 | static void ieee80211_set_multicast_list(struct net_device *dev) | 880 | static void ieee80211_set_multicast_list(struct net_device *dev) |
418 | { | 881 | { |
419 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 882 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); |
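The _irqsafe callbacks added above let a driver report A-MPDU setup or teardown completion from interrupt context: the receiver address and TID are stashed in skb->cb, the skb is tagged IEEE80211_ADDBA_MSG or IEEE80211_DELBA_MSG, and the tasklet handler extended later in this file dispatches it to ieee80211_start_tx_ba_cb()/ieee80211_stop_tx_ba_cb(). A hypothetical driver-side sketch (not part of this patch; the exact ampdu_action prototype lives in include/net/mac80211.h, the argument names here simply follow the calls in this hunk):

	static int example_ampdu_action(struct ieee80211_hw *hw,
					enum ieee80211_ampdu_mlme_action action,
					const u8 *addr, u16 tid, u16 *ssn)
	{
		switch (action) {
		case IEEE80211_AMPDU_TX_START:
			/* program the hardware aggregation queue here and
			 * optionally report the starting sequence number
			 * through *ssn, then tell mac80211 we are ready */
			ieee80211_start_tx_ba_cb_irqsafe(hw, addr, tid);
			return 0;
		case IEEE80211_AMPDU_TX_STOP:
			/* tear the session down in hardware */
			ieee80211_stop_tx_ba_cb_irqsafe(hw, addr, tid);
			return 0;
		default:
			return -EOPNOTSUPP;
		}
	}

Using the _irqsafe variants here matters: ieee80211_start_tx_ba_session() still holds sta->ampdu_mlme.ampdu_tx while it calls ops->ampdu_action(), so invoking the plain ieee80211_start_tx_ba_cb() synchronously would try to re-take that lock; deferring through the skb queue lets the tasklet finish the state transition afterwards.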
@@ -465,41 +928,6 @@ void ieee80211_if_setup(struct net_device *dev) | |||
465 | dev->destructor = ieee80211_if_free; | 928 | dev->destructor = ieee80211_if_free; |
466 | } | 929 | } |
467 | 930 | ||
468 | /* WDS specialties */ | ||
469 | |||
470 | int ieee80211_if_update_wds(struct net_device *dev, u8 *remote_addr) | ||
471 | { | ||
472 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
473 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
474 | struct sta_info *sta; | ||
475 | DECLARE_MAC_BUF(mac); | ||
476 | |||
477 | if (compare_ether_addr(remote_addr, sdata->u.wds.remote_addr) == 0) | ||
478 | return 0; | ||
479 | |||
480 | /* Create STA entry for the new peer */ | ||
481 | sta = sta_info_add(local, dev, remote_addr, GFP_KERNEL); | ||
482 | if (!sta) | ||
483 | return -ENOMEM; | ||
484 | sta_info_put(sta); | ||
485 | |||
486 | /* Remove STA entry for the old peer */ | ||
487 | sta = sta_info_get(local, sdata->u.wds.remote_addr); | ||
488 | if (sta) { | ||
489 | sta_info_free(sta); | ||
490 | sta_info_put(sta); | ||
491 | } else { | ||
492 | printk(KERN_DEBUG "%s: could not find STA entry for WDS link " | ||
493 | "peer %s\n", | ||
494 | dev->name, print_mac(mac, sdata->u.wds.remote_addr)); | ||
495 | } | ||
496 | |||
497 | /* Update WDS link data */ | ||
498 | memcpy(&sdata->u.wds.remote_addr, remote_addr, ETH_ALEN); | ||
499 | |||
500 | return 0; | ||
501 | } | ||
502 | |||
503 | /* everything else */ | 931 | /* everything else */ |
504 | 932 | ||
505 | static int __ieee80211_if_config(struct net_device *dev, | 933 | static int __ieee80211_if_config(struct net_device *dev, |
@@ -520,6 +948,9 @@ static int __ieee80211_if_config(struct net_device *dev, | |||
520 | conf.bssid = sdata->u.sta.bssid; | 948 | conf.bssid = sdata->u.sta.bssid; |
521 | conf.ssid = sdata->u.sta.ssid; | 949 | conf.ssid = sdata->u.sta.ssid; |
522 | conf.ssid_len = sdata->u.sta.ssid_len; | 950 | conf.ssid_len = sdata->u.sta.ssid_len; |
951 | } else if (ieee80211_vif_is_mesh(&sdata->vif)) { | ||
952 | conf.beacon = beacon; | ||
953 | ieee80211_start_mesh(dev); | ||
523 | } else if (sdata->vif.type == IEEE80211_IF_TYPE_AP) { | 954 | } else if (sdata->vif.type == IEEE80211_IF_TYPE_AP) { |
524 | conf.ssid = sdata->u.ap.ssid; | 955 | conf.ssid = sdata->u.ap.ssid; |
525 | conf.ssid_len = sdata->u.ap.ssid_len; | 956 | conf.ssid_len = sdata->u.ap.ssid_len; |
@@ -532,6 +963,11 @@ static int __ieee80211_if_config(struct net_device *dev, | |||
532 | 963 | ||
533 | int ieee80211_if_config(struct net_device *dev) | 964 | int ieee80211_if_config(struct net_device *dev) |
534 | { | 965 | { |
966 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
967 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
968 | if (sdata->vif.type == IEEE80211_IF_TYPE_MESH_POINT && | ||
969 | (local->hw.flags & IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE)) | ||
970 | return ieee80211_if_config_beacon(dev); | ||
535 | return __ieee80211_if_config(dev, NULL, NULL); | 971 | return __ieee80211_if_config(dev, NULL, NULL); |
536 | } | 972 | } |
537 | 973 | ||
@@ -553,37 +989,28 @@ int ieee80211_if_config_beacon(struct net_device *dev) | |||
553 | 989 | ||
554 | int ieee80211_hw_config(struct ieee80211_local *local) | 990 | int ieee80211_hw_config(struct ieee80211_local *local) |
555 | { | 991 | { |
556 | struct ieee80211_hw_mode *mode; | ||
557 | struct ieee80211_channel *chan; | 992 | struct ieee80211_channel *chan; |
558 | int ret = 0; | 993 | int ret = 0; |
559 | 994 | ||
560 | if (local->sta_sw_scanning) { | 995 | if (local->sta_sw_scanning) |
561 | chan = local->scan_channel; | 996 | chan = local->scan_channel; |
562 | mode = local->scan_hw_mode; | 997 | else |
563 | } else { | ||
564 | chan = local->oper_channel; | 998 | chan = local->oper_channel; |
565 | mode = local->oper_hw_mode; | ||
566 | } | ||
567 | 999 | ||
568 | local->hw.conf.channel = chan->chan; | 1000 | local->hw.conf.channel = chan; |
569 | local->hw.conf.channel_val = chan->val; | 1001 | |
570 | if (!local->hw.conf.power_level) { | 1002 | if (!local->hw.conf.power_level) |
571 | local->hw.conf.power_level = chan->power_level; | 1003 | local->hw.conf.power_level = chan->max_power; |
572 | } else { | 1004 | else |
573 | local->hw.conf.power_level = min(chan->power_level, | 1005 | local->hw.conf.power_level = min(chan->max_power, |
574 | local->hw.conf.power_level); | 1006 | local->hw.conf.power_level); |
575 | } | 1007 | |
576 | local->hw.conf.freq = chan->freq; | 1008 | local->hw.conf.max_antenna_gain = chan->max_antenna_gain; |
577 | local->hw.conf.phymode = mode->mode; | ||
578 | local->hw.conf.antenna_max = chan->antenna_max; | ||
579 | local->hw.conf.chan = chan; | ||
580 | local->hw.conf.mode = mode; | ||
581 | 1009 | ||
582 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | 1010 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG |
583 | printk(KERN_DEBUG "HW CONFIG: channel=%d freq=%d " | 1011 | printk(KERN_DEBUG "%s: HW CONFIG: freq=%d\n", |
584 | "phymode=%d\n", local->hw.conf.channel, local->hw.conf.freq, | 1012 | wiphy_name(local->hw.wiphy), chan->center_freq); |
585 | local->hw.conf.phymode); | 1013 | #endif |
586 | #endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ | ||
587 | 1014 | ||
588 | if (local->open_count) | 1015 | if (local->open_count) |
589 | ret = local->ops->config(local_to_hw(local), &local->hw.conf); | 1016 | ret = local->ops->config(local_to_hw(local), &local->hw.conf); |
@@ -601,11 +1028,13 @@ int ieee80211_hw_config_ht(struct ieee80211_local *local, int enable_ht, | |||
601 | struct ieee80211_ht_bss_info *req_bss_cap) | 1028 | struct ieee80211_ht_bss_info *req_bss_cap) |
602 | { | 1029 | { |
603 | struct ieee80211_conf *conf = &local->hw.conf; | 1030 | struct ieee80211_conf *conf = &local->hw.conf; |
604 | struct ieee80211_hw_mode *mode = conf->mode; | 1031 | struct ieee80211_supported_band *sband; |
605 | int i; | 1032 | int i; |
606 | 1033 | ||
1034 | sband = local->hw.wiphy->bands[conf->channel->band]; | ||
1035 | |||
607 | /* HT is not supported */ | 1036 | /* HT is not supported */ |
608 | if (!mode->ht_info.ht_supported) { | 1037 | if (!sband->ht_info.ht_supported) { |
609 | conf->flags &= ~IEEE80211_CONF_SUPPORT_HT_MODE; | 1038 | conf->flags &= ~IEEE80211_CONF_SUPPORT_HT_MODE; |
610 | return -EOPNOTSUPP; | 1039 | return -EOPNOTSUPP; |
611 | } | 1040 | } |
@@ -615,17 +1044,17 @@ int ieee80211_hw_config_ht(struct ieee80211_local *local, int enable_ht, | |||
615 | conf->flags &= ~IEEE80211_CONF_SUPPORT_HT_MODE; | 1044 | conf->flags &= ~IEEE80211_CONF_SUPPORT_HT_MODE; |
616 | } else { | 1045 | } else { |
617 | conf->flags |= IEEE80211_CONF_SUPPORT_HT_MODE; | 1046 | conf->flags |= IEEE80211_CONF_SUPPORT_HT_MODE; |
618 | conf->ht_conf.cap = req_ht_cap->cap & mode->ht_info.cap; | 1047 | conf->ht_conf.cap = req_ht_cap->cap & sband->ht_info.cap; |
619 | conf->ht_conf.cap &= ~(IEEE80211_HT_CAP_MIMO_PS); | 1048 | conf->ht_conf.cap &= ~(IEEE80211_HT_CAP_MIMO_PS); |
620 | conf->ht_conf.cap |= | 1049 | conf->ht_conf.cap |= |
621 | mode->ht_info.cap & IEEE80211_HT_CAP_MIMO_PS; | 1050 | sband->ht_info.cap & IEEE80211_HT_CAP_MIMO_PS; |
622 | conf->ht_bss_conf.primary_channel = | 1051 | conf->ht_bss_conf.primary_channel = |
623 | req_bss_cap->primary_channel; | 1052 | req_bss_cap->primary_channel; |
624 | conf->ht_bss_conf.bss_cap = req_bss_cap->bss_cap; | 1053 | conf->ht_bss_conf.bss_cap = req_bss_cap->bss_cap; |
625 | conf->ht_bss_conf.bss_op_mode = req_bss_cap->bss_op_mode; | 1054 | conf->ht_bss_conf.bss_op_mode = req_bss_cap->bss_op_mode; |
626 | for (i = 0; i < SUPP_MCS_SET_LEN; i++) | 1055 | for (i = 0; i < SUPP_MCS_SET_LEN; i++) |
627 | conf->ht_conf.supp_mcs_set[i] = | 1056 | conf->ht_conf.supp_mcs_set[i] = |
628 | mode->ht_info.supp_mcs_set[i] & | 1057 | sband->ht_info.supp_mcs_set[i] & |
629 | req_ht_cap->supp_mcs_set[i]; | 1058 | req_ht_cap->supp_mcs_set[i]; |
630 | 1059 | ||
631 | /* In STA mode, this gives us indication | 1060 | /* In STA mode, this gives us indication |
@@ -713,6 +1142,7 @@ static void ieee80211_tasklet_handler(unsigned long data) | |||
713 | struct sk_buff *skb; | 1142 | struct sk_buff *skb; |
714 | struct ieee80211_rx_status rx_status; | 1143 | struct ieee80211_rx_status rx_status; |
715 | struct ieee80211_tx_status *tx_status; | 1144 | struct ieee80211_tx_status *tx_status; |
1145 | struct ieee80211_ra_tid *ra_tid; | ||
716 | 1146 | ||
717 | while ((skb = skb_dequeue(&local->skb_queue)) || | 1147 | while ((skb = skb_dequeue(&local->skb_queue)) || |
718 | (skb = skb_dequeue(&local->skb_queue_unreliable))) { | 1148 | (skb = skb_dequeue(&local->skb_queue_unreliable))) { |
@@ -733,6 +1163,18 @@ static void ieee80211_tasklet_handler(unsigned long data) | |||
733 | skb, tx_status); | 1163 | skb, tx_status); |
734 | kfree(tx_status); | 1164 | kfree(tx_status); |
735 | break; | 1165 | break; |
1166 | case IEEE80211_DELBA_MSG: | ||
1167 | ra_tid = (struct ieee80211_ra_tid *) &skb->cb; | ||
1168 | ieee80211_stop_tx_ba_cb(local_to_hw(local), | ||
1169 | ra_tid->ra, ra_tid->tid); | ||
1170 | dev_kfree_skb(skb); | ||
1171 | break; | ||
1172 | case IEEE80211_ADDBA_MSG: | ||
1173 | ra_tid = (struct ieee80211_ra_tid *) &skb->cb; | ||
1174 | ieee80211_start_tx_ba_cb(local_to_hw(local), | ||
1175 | ra_tid->ra, ra_tid->tid); | ||
1176 | dev_kfree_skb(skb); | ||
1177 | break; | ||
736 | default: /* should never get here! */ | 1178 | default: /* should never get here! */ |
737 | printk(KERN_ERR "%s: Unknown message type (%d)\n", | 1179 | printk(KERN_ERR "%s: Unknown message type (%d)\n", |
738 | wiphy_name(local->hw.wiphy), skb->pkt_type); | 1180 | wiphy_name(local->hw.wiphy), skb->pkt_type); |
@@ -810,6 +1252,77 @@ no_key: | |||
810 | } | 1252 | } |
811 | } | 1253 | } |
812 | 1254 | ||
1255 | static void ieee80211_handle_filtered_frame(struct ieee80211_local *local, | ||
1256 | struct sta_info *sta, | ||
1257 | struct sk_buff *skb, | ||
1258 | struct ieee80211_tx_status *status) | ||
1259 | { | ||
1260 | sta->tx_filtered_count++; | ||
1261 | |||
1262 | /* | ||
1263 | * Clear the TX filter mask for this STA when sending the next | ||
1264 | * packet. If the STA went to power save mode, this will | ||
1265 | * happen when it wakes up the next time. | ||
1266 | */ | ||
1267 | sta->flags |= WLAN_STA_CLEAR_PS_FILT; | ||
1268 | |||
1269 | /* | ||
1270 | * This code races in the following way: | ||
1271 | * | ||
1272 | * (1) STA sends frame indicating it will go to sleep and does so | ||
1273 | * (2) hardware/firmware adds STA to filter list, passes frame up | ||
1274 | * (3) hardware/firmware processes TX fifo and suppresses a frame | ||
1275 | * (4) we get TX status before having processed the frame and | ||
1276 | * knowing that the STA has gone to sleep. | ||
1277 | * | ||
1278 | * This is actually quite unlikely even when both those events are | ||
1279 | * processed from interrupts coming in quickly after one another or | ||
1280 | * even at the same time because we queue both TX status events and | ||
1281 | * RX frames to be processed by a tasklet and process them in the | ||
1282 | * same order that they were received or TX status last. Hence, there | ||
1283 | * is no race as long as the frame RX is processed before the next TX | ||
1284 | * status, which drivers can ensure, see below. | ||
1285 | * | ||
1286 | * Note that this can only happen if the hardware or firmware can | ||
1287 | * actually add STAs to the filter list; if this is done by the | ||
1288 | * driver in response to set_tim() (which only reduces the race | ||
1289 | * this whole filtering tries to solve, not completely solves it), | ||
1290 | * this situation cannot happen. | ||
1291 | * | ||
1292 | * To completely solve this race drivers need to make sure that they | ||
1293 | * (a) don't mix the irq-safe/not irq-safe TX status/RX processing | ||
1294 | * functions and | ||
1295 | * (b) always process RX events before TX status events if ordering | ||
1296 | * can be unknown, for example with different interrupt status | ||
1297 | * bits. | ||
1298 | */ | ||
1299 | if (sta->flags & WLAN_STA_PS && | ||
1300 | skb_queue_len(&sta->tx_filtered) < STA_MAX_TX_BUFFER) { | ||
1301 | ieee80211_remove_tx_extra(local, sta->key, skb, | ||
1302 | &status->control); | ||
1303 | skb_queue_tail(&sta->tx_filtered, skb); | ||
1304 | return; | ||
1305 | } | ||
1306 | |||
1307 | if (!(sta->flags & WLAN_STA_PS) && | ||
1308 | !(status->control.flags & IEEE80211_TXCTL_REQUEUE)) { | ||
1309 | /* Software retry the packet once */ | ||
1310 | status->control.flags |= IEEE80211_TXCTL_REQUEUE; | ||
1311 | ieee80211_remove_tx_extra(local, sta->key, skb, | ||
1312 | &status->control); | ||
1313 | dev_queue_xmit(skb); | ||
1314 | return; | ||
1315 | } | ||
1316 | |||
1317 | if (net_ratelimit()) | ||
1318 | printk(KERN_DEBUG "%s: dropped TX filtered frame, " | ||
1319 | "queue_len=%d PS=%d @%lu\n", | ||
1320 | wiphy_name(local->hw.wiphy), | ||
1321 | skb_queue_len(&sta->tx_filtered), | ||
1322 | !!(sta->flags & WLAN_STA_PS), jiffies); | ||
1323 | dev_kfree_skb(skb); | ||
1324 | } | ||
1325 | |||
813 | void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb, | 1326 | void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb, |
814 | struct ieee80211_tx_status *status) | 1327 | struct ieee80211_tx_status *status) |
815 | { | 1328 | { |
@@ -819,7 +1332,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb, | |||
819 | u16 frag, type; | 1332 | u16 frag, type; |
820 | struct ieee80211_tx_status_rtap_hdr *rthdr; | 1333 | struct ieee80211_tx_status_rtap_hdr *rthdr; |
821 | struct ieee80211_sub_if_data *sdata; | 1334 | struct ieee80211_sub_if_data *sdata; |
822 | int monitors; | 1335 | struct net_device *prev_dev = NULL; |
823 | 1336 | ||
824 | if (!status) { | 1337 | if (!status) { |
825 | printk(KERN_ERR | 1338 | printk(KERN_ERR |
@@ -829,18 +1342,24 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb, | |||
829 | return; | 1342 | return; |
830 | } | 1343 | } |
831 | 1344 | ||
1345 | rcu_read_lock(); | ||
1346 | |||
832 | if (status->excessive_retries) { | 1347 | if (status->excessive_retries) { |
833 | struct sta_info *sta; | 1348 | struct sta_info *sta; |
834 | sta = sta_info_get(local, hdr->addr1); | 1349 | sta = sta_info_get(local, hdr->addr1); |
835 | if (sta) { | 1350 | if (sta) { |
836 | if (sta->flags & WLAN_STA_PS) { | 1351 | if (sta->flags & WLAN_STA_PS) { |
837 | /* The STA is in power save mode, so assume | 1352 | /* |
1353 | * The STA is in power save mode, so assume | ||
838 | * that this TX packet failed because of that. | 1354 | * that this TX packet failed because of that. |
839 | */ | 1355 | */ |
840 | status->excessive_retries = 0; | 1356 | status->excessive_retries = 0; |
841 | status->flags |= IEEE80211_TX_STATUS_TX_FILTERED; | 1357 | status->flags |= IEEE80211_TX_STATUS_TX_FILTERED; |
1358 | ieee80211_handle_filtered_frame(local, sta, | ||
1359 | skb, status); | ||
1360 | rcu_read_unlock(); | ||
1361 | return; | ||
842 | } | 1362 | } |
843 | sta_info_put(sta); | ||
844 | } | 1363 | } |
845 | } | 1364 | } |
846 | 1365 | ||
@@ -848,53 +1367,16 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb, | |||
848 | struct sta_info *sta; | 1367 | struct sta_info *sta; |
849 | sta = sta_info_get(local, hdr->addr1); | 1368 | sta = sta_info_get(local, hdr->addr1); |
850 | if (sta) { | 1369 | if (sta) { |
851 | sta->tx_filtered_count++; | 1370 | ieee80211_handle_filtered_frame(local, sta, skb, |
852 | 1371 | status); | |
853 | /* Clear the TX filter mask for this STA when sending | 1372 | rcu_read_unlock(); |
854 | * the next packet. If the STA went to power save mode, | ||
855 | * this will happen when it is waking up for the next | ||
856 | * time. */ | ||
857 | sta->clear_dst_mask = 1; | ||
858 | |||
859 | /* TODO: Is the WLAN_STA_PS flag always set here or is | ||
860 | * the race between RX and TX status causing some | ||
861 | * packets to be filtered out before 80211.o gets an | ||
862 | * update for PS status? This seems to be the case, so | ||
863 | * no changes are likely to be needed. */ | ||
864 | if (sta->flags & WLAN_STA_PS && | ||
865 | skb_queue_len(&sta->tx_filtered) < | ||
866 | STA_MAX_TX_BUFFER) { | ||
867 | ieee80211_remove_tx_extra(local, sta->key, | ||
868 | skb, | ||
869 | &status->control); | ||
870 | skb_queue_tail(&sta->tx_filtered, skb); | ||
871 | } else if (!(sta->flags & WLAN_STA_PS) && | ||
872 | !(status->control.flags & IEEE80211_TXCTL_REQUEUE)) { | ||
873 | /* Software retry the packet once */ | ||
874 | status->control.flags |= IEEE80211_TXCTL_REQUEUE; | ||
875 | ieee80211_remove_tx_extra(local, sta->key, | ||
876 | skb, | ||
877 | &status->control); | ||
878 | dev_queue_xmit(skb); | ||
879 | } else { | ||
880 | if (net_ratelimit()) { | ||
881 | printk(KERN_DEBUG "%s: dropped TX " | ||
882 | "filtered frame queue_len=%d " | ||
883 | "PS=%d @%lu\n", | ||
884 | wiphy_name(local->hw.wiphy), | ||
885 | skb_queue_len( | ||
886 | &sta->tx_filtered), | ||
887 | !!(sta->flags & WLAN_STA_PS), | ||
888 | jiffies); | ||
889 | } | ||
890 | dev_kfree_skb(skb); | ||
891 | } | ||
892 | sta_info_put(sta); | ||
893 | return; | 1373 | return; |
894 | } | 1374 | } |
895 | } else | 1375 | } else |
896 | rate_control_tx_status(local->mdev, skb, status); | 1376 | rate_control_tx_status(local->mdev, skb, status); |
897 | 1377 | ||
1378 | rcu_read_unlock(); | ||
1379 | |||
898 | ieee80211_led_tx(local, 0); | 1380 | ieee80211_led_tx(local, 0); |
899 | 1381 | ||
900 | /* SNMP counters | 1382 | /* SNMP counters |
@@ -932,7 +1414,11 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb, | |||
932 | /* this was a transmitted frame, but now we want to reuse it */ | 1414 | /* this was a transmitted frame, but now we want to reuse it */ |
933 | skb_orphan(skb); | 1415 | skb_orphan(skb); |
934 | 1416 | ||
935 | if (!local->monitors) { | 1417 | /* |
1418 | * This is a bit racy but we can avoid a lot of work | ||
1419 | * with this test... | ||
1420 | */ | ||
1421 | if (!local->monitors && !local->cooked_mntrs) { | ||
936 | dev_kfree_skb(skb); | 1422 | dev_kfree_skb(skb); |
937 | return; | 1423 | return; |
938 | } | 1424 | } |
@@ -966,51 +1452,44 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb, | |||
966 | 1452 | ||
967 | rthdr->data_retries = status->retry_count; | 1453 | rthdr->data_retries = status->retry_count; |
968 | 1454 | ||
1455 | /* XXX: is this sufficient for BPF? */ | ||
1456 | skb_set_mac_header(skb, 0); | ||
1457 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
1458 | skb->pkt_type = PACKET_OTHERHOST; | ||
1459 | skb->protocol = htons(ETH_P_802_2); | ||
1460 | memset(skb->cb, 0, sizeof(skb->cb)); | ||
1461 | |||
969 | rcu_read_lock(); | 1462 | rcu_read_lock(); |
970 | monitors = local->monitors; | ||
971 | list_for_each_entry_rcu(sdata, &local->interfaces, list) { | 1463 | list_for_each_entry_rcu(sdata, &local->interfaces, list) { |
972 | /* | ||
973 | * Using the monitors counter is possibly racy, but | ||
974 | * if the value is wrong we simply either clone the skb | ||
975 | * once too much or forget sending it to one monitor iface | ||
976 | * The latter case isn't nice but fixing the race is much | ||
977 | * more complicated. | ||
978 | */ | ||
979 | if (!monitors || !skb) | ||
980 | goto out; | ||
981 | |||
982 | if (sdata->vif.type == IEEE80211_IF_TYPE_MNTR) { | 1464 | if (sdata->vif.type == IEEE80211_IF_TYPE_MNTR) { |
983 | if (!netif_running(sdata->dev)) | 1465 | if (!netif_running(sdata->dev)) |
984 | continue; | 1466 | continue; |
985 | monitors--; | 1467 | |
986 | if (monitors) | 1468 | if (prev_dev) { |
987 | skb2 = skb_clone(skb, GFP_ATOMIC); | 1469 | skb2 = skb_clone(skb, GFP_ATOMIC); |
988 | else | 1470 | if (skb2) { |
989 | skb2 = NULL; | 1471 | skb2->dev = prev_dev; |
990 | skb->dev = sdata->dev; | 1472 | netif_rx(skb2); |
991 | /* XXX: is this sufficient for BPF? */ | 1473 | } |
992 | skb_set_mac_header(skb, 0); | 1474 | } |
993 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 1475 | |
994 | skb->pkt_type = PACKET_OTHERHOST; | 1476 | prev_dev = sdata->dev; |
995 | skb->protocol = htons(ETH_P_802_2); | ||
996 | memset(skb->cb, 0, sizeof(skb->cb)); | ||
997 | netif_rx(skb); | ||
998 | skb = skb2; | ||
999 | } | 1477 | } |
1000 | } | 1478 | } |
1001 | out: | 1479 | if (prev_dev) { |
1480 | skb->dev = prev_dev; | ||
1481 | netif_rx(skb); | ||
1482 | skb = NULL; | ||
1483 | } | ||
1002 | rcu_read_unlock(); | 1484 | rcu_read_unlock(); |
1003 | if (skb) | 1485 | dev_kfree_skb(skb); |
1004 | dev_kfree_skb(skb); | ||
1005 | } | 1486 | } |
1006 | EXPORT_SYMBOL(ieee80211_tx_status); | 1487 | EXPORT_SYMBOL(ieee80211_tx_status); |
1007 | 1488 | ||
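The reworked monitor delivery above uses a common kernel idiom: walk the interface list, clone the skb for every earlier match, and hand the original (unclonied) skb to the last match so no clone is wasted. A stripped-down sketch of that idiom outside mac80211, assuming the usual skbuff/netdevice headers; struct my_entry with its list and dev members is a placeholder:

static void deliver_to_all(struct sk_buff *skb, struct list_head *devs)
{
        struct net_device *prev = NULL;
        struct my_entry *e;

        list_for_each_entry(e, devs, list) {
                if (prev) {
                        struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
                        if (skb2) {
                                skb2->dev = prev;
                                netif_rx(skb2);
                        }
                }
                prev = e->dev;
        }
        if (prev) {
                skb->dev = prev;
                netif_rx(skb);
                skb = NULL;             /* original consumed by the last device */
        }
        dev_kfree_skb(skb);             /* no matches at all: free the original */
}

This matches the prev_dev logic in ieee80211_tx_status(): dev_kfree_skb() on a NULL skb is a no-op, so the final free only triggers when nothing consumed the frame.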
1008 | struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, | 1489 | struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, |
1009 | const struct ieee80211_ops *ops) | 1490 | const struct ieee80211_ops *ops) |
1010 | { | 1491 | { |
1011 | struct net_device *mdev; | ||
1012 | struct ieee80211_local *local; | 1492 | struct ieee80211_local *local; |
1013 | struct ieee80211_sub_if_data *sdata; | ||
1014 | int priv_size; | 1493 | int priv_size; |
1015 | struct wiphy *wiphy; | 1494 | struct wiphy *wiphy; |
1016 | 1495 | ||
@@ -1056,25 +1535,8 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, | |||
1056 | BUG_ON(!ops->configure_filter); | 1535 | BUG_ON(!ops->configure_filter); |
1057 | local->ops = ops; | 1536 | local->ops = ops; |
1058 | 1537 | ||
1059 | /* for now, mdev needs sub_if_data :/ */ | ||
1060 | mdev = alloc_netdev(sizeof(struct ieee80211_sub_if_data), | ||
1061 | "wmaster%d", ether_setup); | ||
1062 | if (!mdev) { | ||
1063 | wiphy_free(wiphy); | ||
1064 | return NULL; | ||
1065 | } | ||
1066 | |||
1067 | sdata = IEEE80211_DEV_TO_SUB_IF(mdev); | ||
1068 | mdev->ieee80211_ptr = &sdata->wdev; | ||
1069 | sdata->wdev.wiphy = wiphy; | ||
1070 | |||
1071 | local->hw.queues = 1; /* default */ | 1538 | local->hw.queues = 1; /* default */ |
1072 | 1539 | ||
1073 | local->mdev = mdev; | ||
1074 | local->rx_pre_handlers = ieee80211_rx_pre_handlers; | ||
1075 | local->rx_handlers = ieee80211_rx_handlers; | ||
1076 | local->tx_handlers = ieee80211_tx_handlers; | ||
1077 | |||
1078 | local->bridge_packets = 1; | 1540 | local->bridge_packets = 1; |
1079 | 1541 | ||
1080 | local->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD; | 1542 | local->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD; |
@@ -1083,33 +1545,12 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, | |||
1083 | local->long_retry_limit = 4; | 1545 | local->long_retry_limit = 4; |
1084 | local->hw.conf.radio_enabled = 1; | 1546 | local->hw.conf.radio_enabled = 1; |
1085 | 1547 | ||
1086 | local->enabled_modes = ~0; | ||
1087 | |||
1088 | INIT_LIST_HEAD(&local->modes_list); | ||
1089 | |||
1090 | INIT_LIST_HEAD(&local->interfaces); | 1548 | INIT_LIST_HEAD(&local->interfaces); |
1091 | 1549 | ||
1092 | INIT_DELAYED_WORK(&local->scan_work, ieee80211_sta_scan_work); | 1550 | INIT_DELAYED_WORK(&local->scan_work, ieee80211_sta_scan_work); |
1093 | ieee80211_rx_bss_list_init(mdev); | ||
1094 | 1551 | ||
1095 | sta_info_init(local); | 1552 | sta_info_init(local); |
1096 | 1553 | ||
1097 | mdev->hard_start_xmit = ieee80211_master_start_xmit; | ||
1098 | mdev->open = ieee80211_master_open; | ||
1099 | mdev->stop = ieee80211_master_stop; | ||
1100 | mdev->type = ARPHRD_IEEE80211; | ||
1101 | mdev->header_ops = &ieee80211_header_ops; | ||
1102 | mdev->set_multicast_list = ieee80211_master_set_multicast_list; | ||
1103 | |||
1104 | sdata->vif.type = IEEE80211_IF_TYPE_AP; | ||
1105 | sdata->dev = mdev; | ||
1106 | sdata->local = local; | ||
1107 | sdata->u.ap.force_unicast_rateidx = -1; | ||
1108 | sdata->u.ap.max_ratectrl_rateidx = -1; | ||
1109 | ieee80211_if_sdata_init(sdata); | ||
1110 | /* no RCU needed since we're still during init phase */ | ||
1111 | list_add_tail(&sdata->list, &local->interfaces); | ||
1112 | |||
1113 | tasklet_init(&local->tx_pending_tasklet, ieee80211_tx_pending, | 1554 | tasklet_init(&local->tx_pending_tasklet, ieee80211_tx_pending, |
1114 | (unsigned long)local); | 1555 | (unsigned long)local); |
1115 | tasklet_disable(&local->tx_pending_tasklet); | 1556 | tasklet_disable(&local->tx_pending_tasklet); |
@@ -1131,11 +1572,63 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) | |||
1131 | struct ieee80211_local *local = hw_to_local(hw); | 1572 | struct ieee80211_local *local = hw_to_local(hw); |
1132 | const char *name; | 1573 | const char *name; |
1133 | int result; | 1574 | int result; |
1575 | enum ieee80211_band band; | ||
1576 | struct net_device *mdev; | ||
1577 | struct ieee80211_sub_if_data *sdata; | ||
1578 | |||
1579 | /* | ||
1580 | * generic code guarantees at least one band, | ||
1581 | * set this very early because much code assumes | ||
1582 | * that hw.conf.channel is assigned | ||
1583 | */ | ||
1584 | for (band = 0; band < IEEE80211_NUM_BANDS; band++) { | ||
1585 | struct ieee80211_supported_band *sband; | ||
1586 | |||
1587 | sband = local->hw.wiphy->bands[band]; | ||
1588 | if (sband) { | ||
1589 | /* init channel we're on */ | ||
1590 | local->hw.conf.channel = | ||
1591 | local->oper_channel = | ||
1592 | local->scan_channel = &sband->channels[0]; | ||
1593 | break; | ||
1594 | } | ||
1595 | } | ||
1134 | 1596 | ||
1135 | result = wiphy_register(local->hw.wiphy); | 1597 | result = wiphy_register(local->hw.wiphy); |
1136 | if (result < 0) | 1598 | if (result < 0) |
1137 | return result; | 1599 | return result; |
1138 | 1600 | ||
1601 | /* for now, mdev needs sub_if_data :/ */ | ||
1602 | mdev = alloc_netdev(sizeof(struct ieee80211_sub_if_data), | ||
1603 | "wmaster%d", ether_setup); | ||
1604 | if (!mdev) | ||
1605 | goto fail_mdev_alloc; | ||
1606 | |||
1607 | sdata = IEEE80211_DEV_TO_SUB_IF(mdev); | ||
1608 | mdev->ieee80211_ptr = &sdata->wdev; | ||
1609 | sdata->wdev.wiphy = local->hw.wiphy; | ||
1610 | |||
1611 | local->mdev = mdev; | ||
1612 | |||
1613 | ieee80211_rx_bss_list_init(mdev); | ||
1614 | |||
1615 | mdev->hard_start_xmit = ieee80211_master_start_xmit; | ||
1616 | mdev->open = ieee80211_master_open; | ||
1617 | mdev->stop = ieee80211_master_stop; | ||
1618 | mdev->type = ARPHRD_IEEE80211; | ||
1619 | mdev->header_ops = &ieee80211_header_ops; | ||
1620 | mdev->set_multicast_list = ieee80211_master_set_multicast_list; | ||
1621 | |||
1622 | sdata->vif.type = IEEE80211_IF_TYPE_AP; | ||
1623 | sdata->dev = mdev; | ||
1624 | sdata->local = local; | ||
1625 | sdata->u.ap.force_unicast_rateidx = -1; | ||
1626 | sdata->u.ap.max_ratectrl_rateidx = -1; | ||
1627 | ieee80211_if_sdata_init(sdata); | ||
1628 | |||
1629 | /* no RCU needed since we're still during init phase */ | ||
1630 | list_add_tail(&sdata->list, &local->interfaces); | ||
1631 | |||
1139 | name = wiphy_dev(local->hw.wiphy)->driver->name; | 1632 | name = wiphy_dev(local->hw.wiphy)->driver->name; |
1140 | local->hw.workqueue = create_singlethread_workqueue(name); | 1633 | local->hw.workqueue = create_singlethread_workqueue(name); |
1141 | if (!local->hw.workqueue) { | 1634 | if (!local->hw.workqueue) { |
@@ -1203,7 +1696,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) | |||
1203 | 1696 | ||
1204 | /* add one default STA interface */ | 1697 | /* add one default STA interface */ |
1205 | result = ieee80211_if_add(local->mdev, "wlan%d", NULL, | 1698 | result = ieee80211_if_add(local->mdev, "wlan%d", NULL, |
1206 | IEEE80211_IF_TYPE_STA); | 1699 | IEEE80211_IF_TYPE_STA, NULL); |
1207 | if (result) | 1700 | if (result) |
1208 | printk(KERN_WARNING "%s: Failed to add default virtual iface\n", | 1701 | printk(KERN_WARNING "%s: Failed to add default virtual iface\n", |
1209 | wiphy_name(local->hw.wiphy)); | 1702 | wiphy_name(local->hw.wiphy)); |
@@ -1227,49 +1720,18 @@ fail_sta_info: | |||
1227 | debugfs_hw_del(local); | 1720 | debugfs_hw_del(local); |
1228 | destroy_workqueue(local->hw.workqueue); | 1721 | destroy_workqueue(local->hw.workqueue); |
1229 | fail_workqueue: | 1722 | fail_workqueue: |
1723 | ieee80211_if_free(local->mdev); | ||
1724 | local->mdev = NULL; | ||
1725 | fail_mdev_alloc: | ||
1230 | wiphy_unregister(local->hw.wiphy); | 1726 | wiphy_unregister(local->hw.wiphy); |
1231 | return result; | 1727 | return result; |
1232 | } | 1728 | } |
1233 | EXPORT_SYMBOL(ieee80211_register_hw); | 1729 | EXPORT_SYMBOL(ieee80211_register_hw); |
1234 | 1730 | ||
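Because the loop at the top of ieee80211_register_hw() now picks the first channel of the first populated band as hw.conf.channel, a driver has to fill in wiphy->bands[] before calling it. A hedged sketch of what that might look like on the driver side, under the assumption that the band/rate arrays (my_2ghz_channels, my_2ghz_rates) and my_driver_register() are placeholders, not code from this patch:

static struct ieee80211_supported_band my_band_2ghz = {
        .channels = my_2ghz_channels,           /* placeholder channel array */
        .n_channels = ARRAY_SIZE(my_2ghz_channels),
        .bitrates = my_2ghz_rates,              /* placeholder rate array */
        .n_bitrates = ARRAY_SIZE(my_2ghz_rates),
};

static int my_driver_register(struct ieee80211_hw *hw)
{
        /* bands must be populated before ieee80211_register_hw(),
         * which uses the first channel of the first band found */
        hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &my_band_2ghz;

        return ieee80211_register_hw(hw);
}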
1235 | int ieee80211_register_hwmode(struct ieee80211_hw *hw, | ||
1236 | struct ieee80211_hw_mode *mode) | ||
1237 | { | ||
1238 | struct ieee80211_local *local = hw_to_local(hw); | ||
1239 | struct ieee80211_rate *rate; | ||
1240 | int i; | ||
1241 | |||
1242 | INIT_LIST_HEAD(&mode->list); | ||
1243 | list_add_tail(&mode->list, &local->modes_list); | ||
1244 | |||
1245 | local->hw_modes |= (1 << mode->mode); | ||
1246 | for (i = 0; i < mode->num_rates; i++) { | ||
1247 | rate = &(mode->rates[i]); | ||
1248 | rate->rate_inv = CHAN_UTIL_RATE_LCM / rate->rate; | ||
1249 | } | ||
1250 | ieee80211_prepare_rates(local, mode); | ||
1251 | |||
1252 | if (!local->oper_hw_mode) { | ||
1253 | /* Default to this mode */ | ||
1254 | local->hw.conf.phymode = mode->mode; | ||
1255 | local->oper_hw_mode = local->scan_hw_mode = mode; | ||
1256 | local->oper_channel = local->scan_channel = &mode->channels[0]; | ||
1257 | local->hw.conf.mode = local->oper_hw_mode; | ||
1258 | local->hw.conf.chan = local->oper_channel; | ||
1259 | } | ||
1260 | |||
1261 | if (!(hw->flags & IEEE80211_HW_DEFAULT_REG_DOMAIN_CONFIGURED)) | ||
1262 | ieee80211_set_default_regdomain(mode); | ||
1263 | |||
1264 | return 0; | ||
1265 | } | ||
1266 | EXPORT_SYMBOL(ieee80211_register_hwmode); | ||
1267 | |||
1268 | void ieee80211_unregister_hw(struct ieee80211_hw *hw) | 1731 | void ieee80211_unregister_hw(struct ieee80211_hw *hw) |
1269 | { | 1732 | { |
1270 | struct ieee80211_local *local = hw_to_local(hw); | 1733 | struct ieee80211_local *local = hw_to_local(hw); |
1271 | struct ieee80211_sub_if_data *sdata, *tmp; | 1734 | struct ieee80211_sub_if_data *sdata, *tmp; |
1272 | int i; | ||
1273 | 1735 | ||
1274 | tasklet_kill(&local->tx_pending_tasklet); | 1736 | tasklet_kill(&local->tx_pending_tasklet); |
1275 | tasklet_kill(&local->tasklet); | 1737 | tasklet_kill(&local->tasklet); |
@@ -1310,11 +1772,6 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw) | |||
1310 | rate_control_deinitialize(local); | 1772 | rate_control_deinitialize(local); |
1311 | debugfs_hw_del(local); | 1773 | debugfs_hw_del(local); |
1312 | 1774 | ||
1313 | for (i = 0; i < NUM_IEEE80211_MODES; i++) { | ||
1314 | kfree(local->supp_rates[i]); | ||
1315 | kfree(local->basic_rates[i]); | ||
1316 | } | ||
1317 | |||
1318 | if (skb_queue_len(&local->skb_queue) | 1775 | if (skb_queue_len(&local->skb_queue) |
1319 | || skb_queue_len(&local->skb_queue_unreliable)) | 1776 | || skb_queue_len(&local->skb_queue_unreliable)) |
1320 | printk(KERN_WARNING "%s: skb_queue not empty\n", | 1777 | printk(KERN_WARNING "%s: skb_queue not empty\n", |
@@ -1326,6 +1783,8 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw) | |||
1326 | wiphy_unregister(local->hw.wiphy); | 1783 | wiphy_unregister(local->hw.wiphy); |
1327 | ieee80211_wep_free(local); | 1784 | ieee80211_wep_free(local); |
1328 | ieee80211_led_exit(local); | 1785 | ieee80211_led_exit(local); |
1786 | ieee80211_if_free(local->mdev); | ||
1787 | local->mdev = NULL; | ||
1329 | } | 1788 | } |
1330 | EXPORT_SYMBOL(ieee80211_unregister_hw); | 1789 | EXPORT_SYMBOL(ieee80211_unregister_hw); |
1331 | 1790 | ||
@@ -1333,7 +1792,6 @@ void ieee80211_free_hw(struct ieee80211_hw *hw) | |||
1333 | { | 1792 | { |
1334 | struct ieee80211_local *local = hw_to_local(hw); | 1793 | struct ieee80211_local *local = hw_to_local(hw); |
1335 | 1794 | ||
1336 | ieee80211_if_free(local->mdev); | ||
1337 | wiphy_free(local->hw.wiphy); | 1795 | wiphy_free(local->hw.wiphy); |
1338 | } | 1796 | } |
1339 | EXPORT_SYMBOL(ieee80211_free_hw); | 1797 | EXPORT_SYMBOL(ieee80211_free_hw); |
@@ -1345,13 +1803,9 @@ static int __init ieee80211_init(void) | |||
1345 | 1803 | ||
1346 | BUILD_BUG_ON(sizeof(struct ieee80211_tx_packet_data) > sizeof(skb->cb)); | 1804 | BUILD_BUG_ON(sizeof(struct ieee80211_tx_packet_data) > sizeof(skb->cb)); |
1347 | 1805 | ||
1348 | ret = rc80211_simple_init(); | ||
1349 | if (ret) | ||
1350 | goto out; | ||
1351 | |||
1352 | ret = rc80211_pid_init(); | 1806 | ret = rc80211_pid_init(); |
1353 | if (ret) | 1807 | if (ret) |
1354 | goto out_cleanup_simple; | 1808 | goto out; |
1355 | 1809 | ||
1356 | ret = ieee80211_wme_register(); | 1810 | ret = ieee80211_wme_register(); |
1357 | if (ret) { | 1811 | if (ret) { |
@@ -1361,23 +1815,22 @@ static int __init ieee80211_init(void) | |||
1361 | } | 1815 | } |
1362 | 1816 | ||
1363 | ieee80211_debugfs_netdev_init(); | 1817 | ieee80211_debugfs_netdev_init(); |
1364 | ieee80211_regdomain_init(); | ||
1365 | 1818 | ||
1366 | return 0; | 1819 | return 0; |
1367 | 1820 | ||
1368 | out_cleanup_pid: | 1821 | out_cleanup_pid: |
1369 | rc80211_pid_exit(); | 1822 | rc80211_pid_exit(); |
1370 | out_cleanup_simple: | ||
1371 | rc80211_simple_exit(); | ||
1372 | out: | 1823 | out: |
1373 | return ret; | 1824 | return ret; |
1374 | } | 1825 | } |
1375 | 1826 | ||
1376 | static void __exit ieee80211_exit(void) | 1827 | static void __exit ieee80211_exit(void) |
1377 | { | 1828 | { |
1378 | rc80211_simple_exit(); | ||
1379 | rc80211_pid_exit(); | 1829 | rc80211_pid_exit(); |
1380 | 1830 | ||
1831 | if (mesh_allocated) | ||
1832 | ieee80211s_stop(); | ||
1833 | |||
1381 | ieee80211_wme_unregister(); | 1834 | ieee80211_wme_unregister(); |
1382 | ieee80211_debugfs_netdev_exit(); | 1835 | ieee80211_debugfs_netdev_exit(); |
1383 | } | 1836 | } |
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 72ecbf7bf962..7f10ff5d4a0b 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h | |||
@@ -79,8 +79,7 @@ struct ieee80211_sta_bss { | |||
79 | u8 ssid[IEEE80211_MAX_SSID_LEN]; | 79 | u8 ssid[IEEE80211_MAX_SSID_LEN]; |
80 | size_t ssid_len; | 80 | size_t ssid_len; |
81 | u16 capability; /* host byte order */ | 81 | u16 capability; /* host byte order */ |
82 | int hw_mode; | 82 | enum ieee80211_band band; |
83 | int channel; | ||
84 | int freq; | 83 | int freq; |
85 | int rssi, signal, noise; | 84 | int rssi, signal, noise; |
86 | u8 *wpa_ie; | 85 | u8 *wpa_ie; |
@@ -91,6 +90,11 @@ struct ieee80211_sta_bss { | |||
91 | size_t wmm_ie_len; | 90 | size_t wmm_ie_len; |
92 | u8 *ht_ie; | 91 | u8 *ht_ie; |
93 | size_t ht_ie_len; | 92 | size_t ht_ie_len; |
93 | #ifdef CONFIG_MAC80211_MESH | ||
94 | u8 *mesh_id; | ||
95 | size_t mesh_id_len; | ||
96 | u8 *mesh_cfg; | ||
97 | #endif | ||
94 | #define IEEE80211_MAX_SUPP_RATES 32 | 98 | #define IEEE80211_MAX_SUPP_RATES 32 |
95 | u8 supp_rates[IEEE80211_MAX_SUPP_RATES]; | 99 | u8 supp_rates[IEEE80211_MAX_SUPP_RATES]; |
96 | size_t supp_rates_len; | 100 | size_t supp_rates_len; |
@@ -108,23 +112,81 @@ struct ieee80211_sta_bss { | |||
108 | u8 erp_value; | 112 | u8 erp_value; |
109 | }; | 113 | }; |
110 | 114 | ||
115 | static inline u8 *bss_mesh_cfg(struct ieee80211_sta_bss *bss) | ||
116 | { | ||
117 | #ifdef CONFIG_MAC80211_MESH | ||
118 | return bss->mesh_cfg; | ||
119 | #endif | ||
120 | return NULL; | ||
121 | } | ||
122 | |||
123 | static inline u8 *bss_mesh_id(struct ieee80211_sta_bss *bss) | ||
124 | { | ||
125 | #ifdef CONFIG_MAC80211_MESH | ||
126 | return bss->mesh_id; | ||
127 | #endif | ||
128 | return NULL; | ||
129 | } | ||
130 | |||
131 | static inline u8 bss_mesh_id_len(struct ieee80211_sta_bss *bss) | ||
132 | { | ||
133 | #ifdef CONFIG_MAC80211_MESH | ||
134 | return bss->mesh_id_len; | ||
135 | #endif | ||
136 | return 0; | ||
137 | } | ||
138 | |||
139 | |||
140 | typedef unsigned __bitwise__ ieee80211_tx_result; | ||
141 | #define TX_CONTINUE ((__force ieee80211_tx_result) 0u) | ||
142 | #define TX_DROP ((__force ieee80211_tx_result) 1u) | ||
143 | #define TX_QUEUED ((__force ieee80211_tx_result) 2u) | ||
144 | |||
145 | #define IEEE80211_TX_FRAGMENTED BIT(0) | ||
146 | #define IEEE80211_TX_UNICAST BIT(1) | ||
147 | #define IEEE80211_TX_PS_BUFFERED BIT(2) | ||
148 | #define IEEE80211_TX_PROBE_LAST_FRAG BIT(3) | ||
149 | #define IEEE80211_TX_INJECTED BIT(4) | ||
150 | |||
151 | struct ieee80211_tx_data { | ||
152 | struct sk_buff *skb; | ||
153 | struct net_device *dev; | ||
154 | struct ieee80211_local *local; | ||
155 | struct ieee80211_sub_if_data *sdata; | ||
156 | struct sta_info *sta; | ||
157 | u16 fc, ethertype; | ||
158 | struct ieee80211_key *key; | ||
159 | unsigned int flags; | ||
160 | |||
161 | struct ieee80211_tx_control *control; | ||
162 | struct ieee80211_channel *channel; | ||
163 | struct ieee80211_rate *rate; | ||
164 | /* use this rate (if set) for last fragment; rate can | ||
165 | * be set to a lower rate for the first fragments, e.g., | ||
166 | * when using CTS protection with IEEE 802.11g. */ | ||
167 | struct ieee80211_rate *last_frag_rate; | ||
168 | |||
169 | /* Extra fragments (in addition to the first fragment | ||
170 | * in skb) */ | ||
171 | int num_extra_frag; | ||
172 | struct sk_buff **extra_frag; | ||
173 | }; | ||
174 | |||
111 | 175 | ||
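The __bitwise__ typedef above lets sparse catch code paths that mix the old TXRX_* enum values with the new typed results. A purely illustrative handler using the new convention, not one of mac80211's actual TX handlers:

static ieee80211_tx_result my_tx_check_sta(struct ieee80211_tx_data *tx)
{
        /* drop unicast data frames for which we have no station entry */
        if ((tx->flags & IEEE80211_TX_UNICAST) && !tx->sta)
                return TX_DROP;

        return TX_CONTINUE;
}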
112 | typedef enum { | 176 | typedef unsigned __bitwise__ ieee80211_rx_result; |
113 | TXRX_CONTINUE, TXRX_DROP, TXRX_QUEUED | 177 | #define RX_CONTINUE ((__force ieee80211_rx_result) 0u) |
114 | } ieee80211_txrx_result; | 178 | #define RX_DROP_UNUSABLE ((__force ieee80211_rx_result) 1u) |
179 | #define RX_DROP_MONITOR ((__force ieee80211_rx_result) 2u) | ||
180 | #define RX_QUEUED ((__force ieee80211_rx_result) 3u) | ||
115 | 181 | ||
116 | /* flags used in struct ieee80211_txrx_data.flags */ | 182 | #define IEEE80211_RX_IN_SCAN BIT(0) |
117 | /* whether the MSDU was fragmented */ | ||
118 | #define IEEE80211_TXRXD_FRAGMENTED BIT(0) | ||
119 | #define IEEE80211_TXRXD_TXUNICAST BIT(1) | ||
120 | #define IEEE80211_TXRXD_TXPS_BUFFERED BIT(2) | ||
121 | #define IEEE80211_TXRXD_TXPROBE_LAST_FRAG BIT(3) | ||
122 | #define IEEE80211_TXRXD_RXIN_SCAN BIT(4) | ||
123 | /* frame is destined to interface currently processed (incl. multicast frames) */ | 183 | /* frame is destined to interface currently processed (incl. multicast frames) */ |
124 | #define IEEE80211_TXRXD_RXRA_MATCH BIT(5) | 184 | #define IEEE80211_RX_RA_MATCH BIT(1) |
125 | #define IEEE80211_TXRXD_TX_INJECTED BIT(6) | 185 | #define IEEE80211_RX_AMSDU BIT(2) |
126 | #define IEEE80211_TXRXD_RX_AMSDU BIT(7) | 186 | #define IEEE80211_RX_CMNTR_REPORTED BIT(3) |
127 | struct ieee80211_txrx_data { | 187 | #define IEEE80211_RX_FRAGMENTED BIT(4) |
188 | |||
189 | struct ieee80211_rx_data { | ||
128 | struct sk_buff *skb; | 190 | struct sk_buff *skb; |
129 | struct net_device *dev; | 191 | struct net_device *dev; |
130 | struct ieee80211_local *local; | 192 | struct ieee80211_local *local; |
@@ -133,31 +195,14 @@ struct ieee80211_txrx_data { | |||
133 | u16 fc, ethertype; | 195 | u16 fc, ethertype; |
134 | struct ieee80211_key *key; | 196 | struct ieee80211_key *key; |
135 | unsigned int flags; | 197 | unsigned int flags; |
136 | union { | 198 | |
137 | struct { | 199 | struct ieee80211_rx_status *status; |
138 | struct ieee80211_tx_control *control; | 200 | struct ieee80211_rate *rate; |
139 | struct ieee80211_hw_mode *mode; | 201 | int sent_ps_buffered; |
140 | struct ieee80211_rate *rate; | 202 | int queue; |
141 | /* use this rate (if set) for last fragment; rate can | 203 | int load; |
142 | * be set to lower rate for the first fragments, e.g., | 204 | u32 tkip_iv32; |
143 | * when using CTS protection with IEEE 802.11g. */ | 205 | u16 tkip_iv16; |
144 | struct ieee80211_rate *last_frag_rate; | ||
145 | int last_frag_hwrate; | ||
146 | |||
147 | /* Extra fragments (in addition to the first fragment | ||
148 | * in skb) */ | ||
149 | int num_extra_frag; | ||
150 | struct sk_buff **extra_frag; | ||
151 | } tx; | ||
152 | struct { | ||
153 | struct ieee80211_rx_status *status; | ||
154 | int sent_ps_buffered; | ||
155 | int queue; | ||
156 | int load; | ||
157 | u32 tkip_iv32; | ||
158 | u16 tkip_iv16; | ||
159 | } rx; | ||
160 | } u; | ||
161 | }; | 206 | }; |
162 | 207 | ||
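The RX side gets the same treatment, with two distinct drop codes: RX_DROP_MONITOR still delivers the frame to cooked monitor interfaces, while RX_DROP_UNUSABLE discards it entirely. A hedged sketch of a handler using that convention; the specific checks are made up, only the return values and the IEEE80211_RX_RA_MATCH flag usage mirror this header:

static ieee80211_rx_result my_rx_check(struct ieee80211_rx_data *rx)
{
        /* frames not addressed to this interface are only of interest
         * to monitor interfaces */
        if (!(rx->flags & IEEE80211_RX_RA_MATCH))
                return RX_DROP_MONITOR;

        /* a frame shorter than a minimal 802.11 header is useless
         * even for monitoring */
        if (rx->skb->len < 24)
                return RX_DROP_UNUSABLE;

        return RX_CONTINUE;
}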
163 | /* flags used in struct ieee80211_tx_packet_data.flags */ | 208 | /* flags used in struct ieee80211_tx_packet_data.flags */ |
@@ -165,6 +210,7 @@ struct ieee80211_txrx_data { | |||
165 | #define IEEE80211_TXPD_DO_NOT_ENCRYPT BIT(1) | 210 | #define IEEE80211_TXPD_DO_NOT_ENCRYPT BIT(1) |
166 | #define IEEE80211_TXPD_REQUEUE BIT(2) | 211 | #define IEEE80211_TXPD_REQUEUE BIT(2) |
167 | #define IEEE80211_TXPD_EAPOL_FRAME BIT(3) | 212 | #define IEEE80211_TXPD_EAPOL_FRAME BIT(3) |
213 | #define IEEE80211_TXPD_AMPDU BIT(4) | ||
168 | /* Stored in sk_buff->cb */ | 214 | /* Stored in sk_buff->cb */ |
169 | struct ieee80211_tx_packet_data { | 215 | struct ieee80211_tx_packet_data { |
170 | int ifindex; | 216 | int ifindex; |
@@ -178,18 +224,10 @@ struct ieee80211_tx_stored_packet { | |||
178 | struct sk_buff *skb; | 224 | struct sk_buff *skb; |
179 | int num_extra_frag; | 225 | int num_extra_frag; |
180 | struct sk_buff **extra_frag; | 226 | struct sk_buff **extra_frag; |
181 | int last_frag_rateidx; | ||
182 | int last_frag_hwrate; | ||
183 | struct ieee80211_rate *last_frag_rate; | 227 | struct ieee80211_rate *last_frag_rate; |
184 | unsigned int last_frag_rate_ctrl_probe; | 228 | unsigned int last_frag_rate_ctrl_probe; |
185 | }; | 229 | }; |
186 | 230 | ||
187 | typedef ieee80211_txrx_result (*ieee80211_tx_handler) | ||
188 | (struct ieee80211_txrx_data *tx); | ||
189 | |||
190 | typedef ieee80211_txrx_result (*ieee80211_rx_handler) | ||
191 | (struct ieee80211_txrx_data *rx); | ||
192 | |||
193 | struct beacon_data { | 231 | struct beacon_data { |
194 | u8 *head, *tail; | 232 | u8 *head, *tail; |
195 | int head_len, tail_len; | 233 | int head_len, tail_len; |
@@ -206,7 +244,7 @@ struct ieee80211_if_ap { | |||
206 | 244 | ||
207 | /* yes, this looks ugly, but guarantees that we can later use | 245 | /* yes, this looks ugly, but guarantees that we can later use |
208 | * bitmap_empty :) | 246 | * bitmap_empty :) |
209 | * NB: don't ever use set_bit, use bss_tim_set/bss_tim_clear! */ | 247 | * NB: don't touch this bitmap, use sta_info_{set,clear}_tim_bit */ |
210 | u8 tim[sizeof(unsigned long) * BITS_TO_LONGS(IEEE80211_MAX_AID + 1)]; | 248 | u8 tim[sizeof(unsigned long) * BITS_TO_LONGS(IEEE80211_MAX_AID + 1)]; |
211 | atomic_t num_sta_ps; /* number of stations in PS mode */ | 249 | atomic_t num_sta_ps; /* number of stations in PS mode */ |
212 | struct sk_buff_head ps_bc_buf; | 250 | struct sk_buff_head ps_bc_buf; |
@@ -226,6 +264,41 @@ struct ieee80211_if_vlan { | |||
226 | struct list_head list; | 264 | struct list_head list; |
227 | }; | 265 | }; |
228 | 266 | ||
267 | struct mesh_stats { | ||
268 | __u32 fwded_frames; /* Mesh forwarded frames */ | ||
269 | __u32 dropped_frames_ttl; /* Not transmitted since mesh_ttl == 0 */ | ||
270 | __u32 dropped_frames_no_route; /* Not transmitted, no route found */ | ||
271 | atomic_t estab_plinks; | ||
272 | }; | ||
273 | |||
274 | #define PREQ_Q_F_START 0x1 | ||
275 | #define PREQ_Q_F_REFRESH 0x2 | ||
276 | struct mesh_preq_queue { | ||
277 | struct list_head list; | ||
278 | u8 dst[ETH_ALEN]; | ||
279 | u8 flags; | ||
280 | }; | ||
281 | |||
282 | struct mesh_config { | ||
283 | /* Timeouts in ms */ | ||
284 | /* Mesh plink management parameters */ | ||
285 | u16 dot11MeshRetryTimeout; | ||
286 | u16 dot11MeshConfirmTimeout; | ||
287 | u16 dot11MeshHoldingTimeout; | ||
288 | u16 dot11MeshMaxPeerLinks; | ||
289 | u8 dot11MeshMaxRetries; | ||
290 | u8 dot11MeshTTL; | ||
291 | bool auto_open_plinks; | ||
292 | /* HWMP parameters */ | ||
293 | u32 dot11MeshHWMPactivePathTimeout; | ||
294 | u16 dot11MeshHWMPpreqMinInterval; | ||
295 | u16 dot11MeshHWMPnetDiameterTraversalTime; | ||
296 | u8 dot11MeshHWMPmaxPREQretries; | ||
297 | u32 path_refresh_time; | ||
298 | u16 min_discovery_timeout; | ||
299 | }; | ||
300 | |||
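A sketch of how these per-interface mesh parameters might be given defaults when a mesh interface is initialized. The numeric values below are illustrative placeholders only, not the defaults this patch (mesh.c) actually uses, and my_mesh_set_defaults() is a hypothetical helper:

static void my_mesh_set_defaults(struct mesh_config *cfg)
{
        cfg->dot11MeshRetryTimeout = 100;       /* ms, placeholder */
        cfg->dot11MeshConfirmTimeout = 100;     /* ms, placeholder */
        cfg->dot11MeshHoldingTimeout = 100;     /* ms, placeholder */
        cfg->dot11MeshMaxPeerLinks = 32;
        cfg->dot11MeshMaxRetries = 3;
        cfg->dot11MeshTTL = 5;
        cfg->auto_open_plinks = true;
        cfg->dot11MeshHWMPactivePathTimeout = 5000;
        cfg->dot11MeshHWMPpreqMinInterval = 10;
        cfg->dot11MeshHWMPnetDiameterTraversalTime = 50;
        cfg->dot11MeshHWMPmaxPREQretries = 4;
        cfg->path_refresh_time = 1000;
        cfg->min_discovery_timeout = 100;
}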
301 | |||
229 | /* flags used in struct ieee80211_if_sta.flags */ | 302 | /* flags used in struct ieee80211_if_sta.flags */ |
230 | #define IEEE80211_STA_SSID_SET BIT(0) | 303 | #define IEEE80211_STA_SSID_SET BIT(0) |
231 | #define IEEE80211_STA_BSSID_SET BIT(1) | 304 | #define IEEE80211_STA_BSSID_SET BIT(1) |
@@ -244,7 +317,8 @@ struct ieee80211_if_sta { | |||
244 | enum { | 317 | enum { |
245 | IEEE80211_DISABLED, IEEE80211_AUTHENTICATE, | 318 | IEEE80211_DISABLED, IEEE80211_AUTHENTICATE, |
246 | IEEE80211_ASSOCIATE, IEEE80211_ASSOCIATED, | 319 | IEEE80211_ASSOCIATE, IEEE80211_ASSOCIATED, |
247 | IEEE80211_IBSS_SEARCH, IEEE80211_IBSS_JOINED | 320 | IEEE80211_IBSS_SEARCH, IEEE80211_IBSS_JOINED, |
321 | IEEE80211_MESH_UP | ||
248 | } state; | 322 | } state; |
249 | struct timer_list timer; | 323 | struct timer_list timer; |
250 | struct work_struct work; | 324 | struct work_struct work; |
@@ -253,6 +327,34 @@ struct ieee80211_if_sta { | |||
253 | size_t ssid_len; | 327 | size_t ssid_len; |
254 | u8 scan_ssid[IEEE80211_MAX_SSID_LEN]; | 328 | u8 scan_ssid[IEEE80211_MAX_SSID_LEN]; |
255 | size_t scan_ssid_len; | 329 | size_t scan_ssid_len; |
330 | #ifdef CONFIG_MAC80211_MESH | ||
331 | struct timer_list mesh_path_timer; | ||
332 | u8 mesh_id[IEEE80211_MAX_MESH_ID_LEN]; | ||
333 | bool accepting_plinks; | ||
334 | size_t mesh_id_len; | ||
335 | /* Active Path Selection Protocol Identifier */ | ||
336 | u8 mesh_pp_id[4]; | ||
337 | /* Active Path Selection Metric Identifier */ | ||
338 | u8 mesh_pm_id[4]; | ||
339 | /* Congestion Control Mode Identifier */ | ||
340 | u8 mesh_cc_id[4]; | ||
341 | /* Local mesh Destination Sequence Number */ | ||
342 | u32 dsn; | ||
343 | /* Last used PREQ ID */ | ||
344 | u32 preq_id; | ||
345 | atomic_t mpaths; | ||
346 | /* Timestamp of last DSN update */ | ||
347 | unsigned long last_dsn_update; | ||
348 | /* Timestamp of last DSN sent */ | ||
349 | unsigned long last_preq; | ||
350 | struct mesh_rmc *rmc; | ||
351 | spinlock_t mesh_preq_queue_lock; | ||
352 | struct mesh_preq_queue preq_queue; | ||
353 | int preq_queue_len; | ||
354 | struct mesh_stats mshstats; | ||
355 | struct mesh_config mshcfg; | ||
356 | u8 mesh_seqnum[3]; | ||
357 | #endif | ||
256 | u16 aid; | 358 | u16 aid; |
257 | u16 ap_capab, capab; | 359 | u16 ap_capab, capab; |
258 | u8 *extra_ie; /* to be added to the end of AssocReq */ | 360 | u8 *extra_ie; /* to be added to the end of AssocReq */ |
@@ -282,16 +384,34 @@ struct ieee80211_if_sta { | |||
282 | 384 | ||
283 | unsigned long ibss_join_req; | 385 | unsigned long ibss_join_req; |
284 | struct sk_buff *probe_resp; /* ProbeResp template for IBSS */ | 386 | struct sk_buff *probe_resp; /* ProbeResp template for IBSS */ |
285 | u32 supp_rates_bits; | 387 | u32 supp_rates_bits[IEEE80211_NUM_BANDS]; |
286 | 388 | ||
287 | int wmm_last_param_set; | 389 | int wmm_last_param_set; |
390 | int num_beacons; /* number of TXed beacon frames by this STA */ | ||
288 | }; | 391 | }; |
289 | 392 | ||
393 | static inline void ieee80211_if_sta_set_mesh_id(struct ieee80211_if_sta *ifsta, | ||
394 | u8 mesh_id_len, u8 *mesh_id) | ||
395 | { | ||
396 | #ifdef CONFIG_MAC80211_MESH | ||
397 | ifsta->mesh_id_len = mesh_id_len; | ||
398 | memcpy(ifsta->mesh_id, mesh_id, mesh_id_len); | ||
399 | #endif | ||
400 | } | ||
401 | |||
402 | #ifdef CONFIG_MAC80211_MESH | ||
403 | #define IEEE80211_IFSTA_MESH_CTR_INC(sta, name) \ | ||
404 | do { (sta)->mshstats.name++; } while (0) | ||
405 | #else | ||
406 | #define IEEE80211_IFSTA_MESH_CTR_INC(sta, name) \ | ||
407 | do { } while (0) | ||
408 | #endif | ||
290 | 409 | ||
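IEEE80211_IFSTA_MESH_CTR_INC compiles to a no-op when CONFIG_MAC80211_MESH is disabled, so callers can bump mesh counters unconditionally without their own #ifdefs. A hypothetical call site on a forwarding path (my_mesh_fwd and the mesh_ttl argument are placeholders; the counter name matches struct mesh_stats above):

static void my_mesh_fwd(struct ieee80211_sub_if_data *sdata,
                        struct sk_buff *skb, u8 mesh_ttl)
{
        if (mesh_ttl == 0) {
                /* argument is not evaluated when mesh support is off */
                IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.sta, dropped_frames_ttl);
                dev_kfree_skb(skb);
                return;
        }
        /* ... otherwise forward the frame ... */
}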
291 | /* flags used in struct ieee80211_sub_if_data.flags */ | 410 | /* flags used in struct ieee80211_sub_if_data.flags */ |
292 | #define IEEE80211_SDATA_ALLMULTI BIT(0) | 411 | #define IEEE80211_SDATA_ALLMULTI BIT(0) |
293 | #define IEEE80211_SDATA_PROMISC BIT(1) | 412 | #define IEEE80211_SDATA_PROMISC BIT(1) |
294 | #define IEEE80211_SDATA_USERSPACE_MLME BIT(2) | 413 | #define IEEE80211_SDATA_USERSPACE_MLME BIT(2) |
414 | #define IEEE80211_SDATA_OPERATING_GMODE BIT(3) | ||
295 | struct ieee80211_sub_if_data { | 415 | struct ieee80211_sub_if_data { |
296 | struct list_head list; | 416 | struct list_head list; |
297 | 417 | ||
@@ -306,11 +426,11 @@ struct ieee80211_sub_if_data { | |||
306 | unsigned int flags; | 426 | unsigned int flags; |
307 | 427 | ||
308 | int drop_unencrypted; | 428 | int drop_unencrypted; |
429 | |||
309 | /* | 430 | /* |
310 | * IEEE 802.1X Port access control in effect, | 431 | * basic rates of this AP or the AP we're associated to |
311 | * drop packets to/from unauthorized port | ||
312 | */ | 432 | */ |
313 | int ieee802_1x_pac; | 433 | u64 basic_rates; |
314 | 434 | ||
315 | u16 sequence; | 435 | u16 sequence; |
316 | 436 | ||
@@ -338,6 +458,7 @@ struct ieee80211_sub_if_data { | |||
338 | struct ieee80211_if_wds wds; | 458 | struct ieee80211_if_wds wds; |
339 | struct ieee80211_if_vlan vlan; | 459 | struct ieee80211_if_vlan vlan; |
340 | struct ieee80211_if_sta sta; | 460 | struct ieee80211_if_sta sta; |
461 | u32 mntr_flags; | ||
341 | } u; | 462 | } u; |
342 | int channel_use; | 463 | int channel_use; |
343 | int channel_use_raw; | 464 | int channel_use_raw; |
@@ -348,7 +469,6 @@ struct ieee80211_sub_if_data { | |||
348 | struct { | 469 | struct { |
349 | struct dentry *channel_use; | 470 | struct dentry *channel_use; |
350 | struct dentry *drop_unencrypted; | 471 | struct dentry *drop_unencrypted; |
351 | struct dentry *ieee802_1x_pac; | ||
352 | struct dentry *state; | 472 | struct dentry *state; |
353 | struct dentry *bssid; | 473 | struct dentry *bssid; |
354 | struct dentry *prev_bssid; | 474 | struct dentry *prev_bssid; |
@@ -363,11 +483,11 @@ struct ieee80211_sub_if_data { | |||
363 | struct dentry *auth_alg; | 483 | struct dentry *auth_alg; |
364 | struct dentry *auth_transaction; | 484 | struct dentry *auth_transaction; |
365 | struct dentry *flags; | 485 | struct dentry *flags; |
486 | struct dentry *num_beacons_sta; | ||
366 | } sta; | 487 | } sta; |
367 | struct { | 488 | struct { |
368 | struct dentry *channel_use; | 489 | struct dentry *channel_use; |
369 | struct dentry *drop_unencrypted; | 490 | struct dentry *drop_unencrypted; |
370 | struct dentry *ieee802_1x_pac; | ||
371 | struct dentry *num_sta_ps; | 491 | struct dentry *num_sta_ps; |
372 | struct dentry *dtim_count; | 492 | struct dentry *dtim_count; |
373 | struct dentry *num_beacons; | 493 | struct dentry *num_beacons; |
@@ -378,19 +498,46 @@ struct ieee80211_sub_if_data { | |||
378 | struct { | 498 | struct { |
379 | struct dentry *channel_use; | 499 | struct dentry *channel_use; |
380 | struct dentry *drop_unencrypted; | 500 | struct dentry *drop_unencrypted; |
381 | struct dentry *ieee802_1x_pac; | ||
382 | struct dentry *peer; | 501 | struct dentry *peer; |
383 | } wds; | 502 | } wds; |
384 | struct { | 503 | struct { |
385 | struct dentry *channel_use; | 504 | struct dentry *channel_use; |
386 | struct dentry *drop_unencrypted; | 505 | struct dentry *drop_unencrypted; |
387 | struct dentry *ieee802_1x_pac; | ||
388 | } vlan; | 506 | } vlan; |
389 | struct { | 507 | struct { |
390 | struct dentry *mode; | 508 | struct dentry *mode; |
391 | } monitor; | 509 | } monitor; |
392 | struct dentry *default_key; | 510 | struct dentry *default_key; |
393 | } debugfs; | 511 | } debugfs; |
512 | |||
513 | #ifdef CONFIG_MAC80211_MESH | ||
514 | struct dentry *mesh_stats_dir; | ||
515 | struct { | ||
516 | struct dentry *fwded_frames; | ||
517 | struct dentry *dropped_frames_ttl; | ||
518 | struct dentry *dropped_frames_no_route; | ||
519 | struct dentry *estab_plinks; | ||
520 | struct timer_list mesh_path_timer; | ||
521 | } mesh_stats; | ||
522 | |||
523 | struct dentry *mesh_config_dir; | ||
524 | struct { | ||
525 | struct dentry *dot11MeshRetryTimeout; | ||
526 | struct dentry *dot11MeshConfirmTimeout; | ||
527 | struct dentry *dot11MeshHoldingTimeout; | ||
528 | struct dentry *dot11MeshMaxRetries; | ||
529 | struct dentry *dot11MeshTTL; | ||
530 | struct dentry *auto_open_plinks; | ||
531 | struct dentry *dot11MeshMaxPeerLinks; | ||
532 | struct dentry *dot11MeshHWMPactivePathTimeout; | ||
533 | struct dentry *dot11MeshHWMPpreqMinInterval; | ||
534 | struct dentry *dot11MeshHWMPnetDiameterTraversalTime; | ||
535 | struct dentry *dot11MeshHWMPmaxPREQretries; | ||
536 | struct dentry *path_refresh_time; | ||
537 | struct dentry *min_discovery_timeout; | ||
538 | } mesh_config; | ||
539 | #endif | ||
540 | |||
394 | #endif | 541 | #endif |
395 | /* must be last, dynamically sized area in this! */ | 542 | /* must be last, dynamically sized area in this! */ |
396 | struct ieee80211_vif vif; | 543 | struct ieee80211_vif vif; |
@@ -407,6 +554,8 @@ struct ieee80211_sub_if_data *vif_to_sdata(struct ieee80211_vif *p) | |||
407 | enum { | 554 | enum { |
408 | IEEE80211_RX_MSG = 1, | 555 | IEEE80211_RX_MSG = 1, |
409 | IEEE80211_TX_STATUS_MSG = 2, | 556 | IEEE80211_TX_STATUS_MSG = 2, |
557 | IEEE80211_DELBA_MSG = 3, | ||
558 | IEEE80211_ADDBA_MSG = 4, | ||
410 | }; | 559 | }; |
411 | 560 | ||
412 | struct ieee80211_local { | 561 | struct ieee80211_local { |
@@ -417,15 +566,15 @@ struct ieee80211_local { | |||
417 | 566 | ||
418 | const struct ieee80211_ops *ops; | 567 | const struct ieee80211_ops *ops; |
419 | 568 | ||
420 | /* List of registered struct ieee80211_hw_mode */ | ||
421 | struct list_head modes_list; | ||
422 | |||
423 | struct net_device *mdev; /* wmaster# - "master" 802.11 device */ | 569 | struct net_device *mdev; /* wmaster# - "master" 802.11 device */ |
424 | int open_count; | 570 | int open_count; |
425 | int monitors; | 571 | int monitors, cooked_mntrs; |
572 | /* number of interfaces with corresponding FIF_ flags */ | ||
573 | int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss; | ||
426 | unsigned int filter_flags; /* FIF_* */ | 574 | unsigned int filter_flags; /* FIF_* */ |
427 | struct iw_statistics wstats; | 575 | struct iw_statistics wstats; |
428 | u8 wstats_flags; | 576 | u8 wstats_flags; |
577 | bool tim_in_locked_section; /* see ieee80211_beacon_get() */ | ||
429 | int tx_headroom; /* required headroom for hardware/radiotap */ | 578 | int tx_headroom; /* required headroom for hardware/radiotap */ |
430 | 579 | ||
431 | enum { | 580 | enum { |
@@ -443,15 +592,21 @@ struct ieee80211_local { | |||
443 | struct sk_buff_head skb_queue; | 592 | struct sk_buff_head skb_queue; |
444 | struct sk_buff_head skb_queue_unreliable; | 593 | struct sk_buff_head skb_queue_unreliable; |
445 | 594 | ||
446 | /* Station data structures */ | 595 | /* Station data */ |
447 | rwlock_t sta_lock; /* protects STA data structures */ | 596 | /* |
448 | int num_sta; /* number of stations in sta_list */ | 597 | * The lock only protects the list, hash, timer and counter |
598 | * against manipulation, reads are done in RCU. Additionally, | ||
599 | * the lock protects each BSS's TIM bitmap and a few items | ||
600 | * in a STA info structure. | ||
601 | */ | ||
602 | spinlock_t sta_lock; | ||
603 | unsigned long num_sta; | ||
449 | struct list_head sta_list; | 604 | struct list_head sta_list; |
450 | struct sta_info *sta_hash[STA_HASH_SIZE]; | 605 | struct sta_info *sta_hash[STA_HASH_SIZE]; |
451 | struct timer_list sta_cleanup; | 606 | struct timer_list sta_cleanup; |
452 | 607 | ||
453 | unsigned long state[NUM_TX_DATA_QUEUES]; | 608 | unsigned long state[NUM_TX_DATA_QUEUES_AMPDU]; |
454 | struct ieee80211_tx_stored_packet pending_packet[NUM_TX_DATA_QUEUES]; | 609 | struct ieee80211_tx_stored_packet pending_packet[NUM_TX_DATA_QUEUES_AMPDU]; |
455 | struct tasklet_struct tx_pending_tasklet; | 610 | struct tasklet_struct tx_pending_tasklet; |
456 | 611 | ||
457 | /* number of interfaces with corresponding IFF_ flags */ | 612 | /* number of interfaces with corresponding IFF_ flags */ |
@@ -459,11 +614,6 @@ struct ieee80211_local { | |||
459 | 614 | ||
460 | struct rate_control_ref *rate_ctrl; | 615 | struct rate_control_ref *rate_ctrl; |
461 | 616 | ||
462 | /* Supported and basic rate filters for different modes. These are | ||
463 | * pointers to -1 terminated lists and rates in 100 kbps units. */ | ||
464 | int *supp_rates[NUM_IEEE80211_MODES]; | ||
465 | int *basic_rates[NUM_IEEE80211_MODES]; | ||
466 | |||
467 | int rts_threshold; | 617 | int rts_threshold; |
468 | int fragmentation_threshold; | 618 | int fragmentation_threshold; |
469 | int short_retry_limit; /* dot11ShortRetryLimit */ | 619 | int short_retry_limit; /* dot11ShortRetryLimit */ |
@@ -477,21 +627,18 @@ struct ieee80211_local { | |||
477 | * deliver multicast frames both back to wireless | 627 | * deliver multicast frames both back to wireless |
478 | * media and to the local net stack */ | 628 | * media and to the local net stack */ |
479 | 629 | ||
480 | ieee80211_rx_handler *rx_pre_handlers; | ||
481 | ieee80211_rx_handler *rx_handlers; | ||
482 | ieee80211_tx_handler *tx_handlers; | ||
483 | |||
484 | struct list_head interfaces; | 630 | struct list_head interfaces; |
485 | 631 | ||
486 | bool sta_sw_scanning; | 632 | bool sta_sw_scanning; |
487 | bool sta_hw_scanning; | 633 | bool sta_hw_scanning; |
488 | int scan_channel_idx; | 634 | int scan_channel_idx; |
635 | enum ieee80211_band scan_band; | ||
636 | |||
489 | enum { SCAN_SET_CHANNEL, SCAN_SEND_PROBE } scan_state; | 637 | enum { SCAN_SET_CHANNEL, SCAN_SEND_PROBE } scan_state; |
490 | unsigned long last_scan_completed; | 638 | unsigned long last_scan_completed; |
491 | struct delayed_work scan_work; | 639 | struct delayed_work scan_work; |
492 | struct net_device *scan_dev; | 640 | struct net_device *scan_dev; |
493 | struct ieee80211_channel *oper_channel, *scan_channel; | 641 | struct ieee80211_channel *oper_channel, *scan_channel; |
494 | struct ieee80211_hw_mode *oper_hw_mode, *scan_hw_mode; | ||
495 | u8 scan_ssid[IEEE80211_MAX_SSID_LEN]; | 642 | u8 scan_ssid[IEEE80211_MAX_SSID_LEN]; |
496 | size_t scan_ssid_len; | 643 | size_t scan_ssid_len; |
497 | struct list_head sta_bss_list; | 644 | struct list_head sta_bss_list; |
@@ -560,14 +707,8 @@ struct ieee80211_local { | |||
560 | int wifi_wme_noack_test; | 707 | int wifi_wme_noack_test; |
561 | unsigned int wmm_acm; /* bit field of ACM bits (BIT(802.1D tag)) */ | 708 | unsigned int wmm_acm; /* bit field of ACM bits (BIT(802.1D tag)) */ |
562 | 709 | ||
563 | unsigned int enabled_modes; /* bitfield of allowed modes; | ||
564 | * (1 << MODE_*) */ | ||
565 | unsigned int hw_modes; /* bitfield of supported hardware modes; | ||
566 | * (1 << MODE_*) */ | ||
567 | |||
568 | #ifdef CONFIG_MAC80211_DEBUGFS | 710 | #ifdef CONFIG_MAC80211_DEBUGFS |
569 | struct local_debugfsdentries { | 711 | struct local_debugfsdentries { |
570 | struct dentry *channel; | ||
571 | struct dentry *frequency; | 712 | struct dentry *frequency; |
572 | struct dentry *antenna_sel_tx; | 713 | struct dentry *antenna_sel_tx; |
573 | struct dentry *antenna_sel_rx; | 714 | struct dentry *antenna_sel_rx; |
@@ -577,9 +718,7 @@ struct ieee80211_local { | |||
577 | struct dentry *short_retry_limit; | 718 | struct dentry *short_retry_limit; |
578 | struct dentry *long_retry_limit; | 719 | struct dentry *long_retry_limit; |
579 | struct dentry *total_ps_buffered; | 720 | struct dentry *total_ps_buffered; |
580 | struct dentry *mode; | ||
581 | struct dentry *wep_iv; | 721 | struct dentry *wep_iv; |
582 | struct dentry *modes; | ||
583 | struct dentry *statistics; | 722 | struct dentry *statistics; |
584 | struct local_debugfsdentries_statsdentries { | 723 | struct local_debugfsdentries_statsdentries { |
585 | struct dentry *transmitted_fragment_count; | 724 | struct dentry *transmitted_fragment_count; |
@@ -627,6 +766,63 @@ struct ieee80211_local { | |||
627 | #endif | 766 | #endif |
628 | }; | 767 | }; |
629 | 768 | ||
769 | /* this struct represents 802.11n's RA/TID combination */ | ||
770 | struct ieee80211_ra_tid { | ||
771 | u8 ra[ETH_ALEN]; | ||
772 | u16 tid; | ||
773 | }; | ||
774 | |||
775 | /* Parsed Information Elements */ | ||
776 | struct ieee802_11_elems { | ||
777 | /* pointers to IEs */ | ||
778 | u8 *ssid; | ||
779 | u8 *supp_rates; | ||
780 | u8 *fh_params; | ||
781 | u8 *ds_params; | ||
782 | u8 *cf_params; | ||
783 | u8 *tim; | ||
784 | u8 *ibss_params; | ||
785 | u8 *challenge; | ||
786 | u8 *wpa; | ||
787 | u8 *rsn; | ||
788 | u8 *erp_info; | ||
789 | u8 *ext_supp_rates; | ||
790 | u8 *wmm_info; | ||
791 | u8 *wmm_param; | ||
792 | u8 *ht_cap_elem; | ||
793 | u8 *ht_info_elem; | ||
794 | u8 *mesh_config; | ||
795 | u8 *mesh_id; | ||
796 | u8 *peer_link; | ||
797 | u8 *preq; | ||
798 | u8 *prep; | ||
799 | u8 *perr; | ||
800 | |||
801 | /* length of them, respectively */ | ||
802 | u8 ssid_len; | ||
803 | u8 supp_rates_len; | ||
804 | u8 fh_params_len; | ||
805 | u8 ds_params_len; | ||
806 | u8 cf_params_len; | ||
807 | u8 tim_len; | ||
808 | u8 ibss_params_len; | ||
809 | u8 challenge_len; | ||
810 | u8 wpa_len; | ||
811 | u8 rsn_len; | ||
812 | u8 erp_info_len; | ||
813 | u8 ext_supp_rates_len; | ||
814 | u8 wmm_info_len; | ||
815 | u8 wmm_param_len; | ||
816 | u8 ht_cap_elem_len; | ||
817 | u8 ht_info_elem_len; | ||
818 | u8 mesh_config_len; | ||
819 | u8 mesh_id_len; | ||
820 | u8 peer_link_len; | ||
821 | u8 preq_len; | ||
822 | u8 prep_len; | ||
823 | u8 perr_len; | ||
824 | }; | ||
825 | |||
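This structure is filled by ieee802_11_parse_elems(), declared further below in this header. A minimal sketch of how a management-frame handler might use it, assuming pos/len already point past the fixed part of the frame and have been length-checked; my_parse_beacon() and my_handle_mesh_candidate() are hypothetical:

static void my_parse_beacon(struct net_device *dev, u8 *pos, size_t len,
                            struct ieee80211_sta_bss *bss)
{
        struct ieee802_11_elems elems;

        ieee802_11_parse_elems(pos, len, &elems);

        if (elems.ssid && elems.ssid_len <= IEEE80211_MAX_SSID_LEN) {
                memcpy(bss->ssid, elems.ssid, elems.ssid_len);
                bss->ssid_len = elems.ssid_len;
        }

        /* a beacon carrying both a mesh ID and a mesh config IE
         * comes from a mesh STA */
        if (elems.mesh_id && elems.mesh_config)
                my_handle_mesh_candidate(dev, &elems);
}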
630 | static inline struct ieee80211_local *hw_to_local( | 826 | static inline struct ieee80211_local *hw_to_local( |
631 | struct ieee80211_hw *hw) | 827 | struct ieee80211_hw *hw) |
632 | { | 828 | { |
@@ -650,57 +846,6 @@ struct sta_attribute { | |||
650 | ssize_t (*store)(struct sta_info *, const char *buf, size_t count); | 846 | ssize_t (*store)(struct sta_info *, const char *buf, size_t count); |
651 | }; | 847 | }; |
652 | 848 | ||
653 | static inline void __bss_tim_set(struct ieee80211_if_ap *bss, u16 aid) | ||
654 | { | ||
655 | /* | ||
656 | * This format has been mandated by the IEEE specifications, | ||
657 | * so this line may not be changed to use the __set_bit() format. | ||
658 | */ | ||
659 | bss->tim[aid / 8] |= (1 << (aid % 8)); | ||
660 | } | ||
661 | |||
662 | static inline void bss_tim_set(struct ieee80211_local *local, | ||
663 | struct ieee80211_if_ap *bss, u16 aid) | ||
664 | { | ||
665 | read_lock_bh(&local->sta_lock); | ||
666 | __bss_tim_set(bss, aid); | ||
667 | read_unlock_bh(&local->sta_lock); | ||
668 | } | ||
669 | |||
670 | static inline void __bss_tim_clear(struct ieee80211_if_ap *bss, u16 aid) | ||
671 | { | ||
672 | /* | ||
673 | * This format has been mandated by the IEEE specifications, | ||
674 | * so this line may not be changed to use the __clear_bit() format. | ||
675 | */ | ||
676 | bss->tim[aid / 8] &= ~(1 << (aid % 8)); | ||
677 | } | ||
678 | |||
679 | static inline void bss_tim_clear(struct ieee80211_local *local, | ||
680 | struct ieee80211_if_ap *bss, u16 aid) | ||
681 | { | ||
682 | read_lock_bh(&local->sta_lock); | ||
683 | __bss_tim_clear(bss, aid); | ||
684 | read_unlock_bh(&local->sta_lock); | ||
685 | } | ||
686 | |||
687 | /** | ||
688 | * ieee80211_is_erp_rate - Check if a rate is an ERP rate | ||
689 | * @phymode: The PHY-mode for this rate (MODE_IEEE80211...) | ||
690 | * @rate: Transmission rate to check, in 100 kbps | ||
691 | * | ||
692 | * Check if a given rate is an Extended Rate PHY (ERP) rate. | ||
693 | */ | ||
694 | static inline int ieee80211_is_erp_rate(int phymode, int rate) | ||
695 | { | ||
696 | if (phymode == MODE_IEEE80211G) { | ||
697 | if (rate != 10 && rate != 20 && | ||
698 | rate != 55 && rate != 110) | ||
699 | return 1; | ||
700 | } | ||
701 | return 0; | ||
702 | } | ||
703 | |||
704 | static inline int ieee80211_bssid_match(const u8 *raddr, const u8 *addr) | 849 | static inline int ieee80211_bssid_match(const u8 *raddr, const u8 *addr) |
705 | { | 850 | { |
706 | return compare_ether_addr(raddr, addr) == 0 || | 851 | return compare_ether_addr(raddr, addr) == 0 || |
@@ -712,13 +857,8 @@ static inline int ieee80211_bssid_match(const u8 *raddr, const u8 *addr) | |||
712 | int ieee80211_hw_config(struct ieee80211_local *local); | 857 | int ieee80211_hw_config(struct ieee80211_local *local); |
713 | int ieee80211_if_config(struct net_device *dev); | 858 | int ieee80211_if_config(struct net_device *dev); |
714 | int ieee80211_if_config_beacon(struct net_device *dev); | 859 | int ieee80211_if_config_beacon(struct net_device *dev); |
715 | void ieee80211_prepare_rates(struct ieee80211_local *local, | 860 | void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx); |
716 | struct ieee80211_hw_mode *mode); | ||
717 | void ieee80211_tx_set_iswep(struct ieee80211_txrx_data *tx); | ||
718 | int ieee80211_if_update_wds(struct net_device *dev, u8 *remote_addr); | ||
719 | void ieee80211_if_setup(struct net_device *dev); | 861 | void ieee80211_if_setup(struct net_device *dev); |
720 | struct ieee80211_rate *ieee80211_get_rate(struct ieee80211_local *local, | ||
721 | int phymode, int hwrate); | ||
722 | int ieee80211_hw_config_ht(struct ieee80211_local *local, int enable_ht, | 862 | int ieee80211_hw_config_ht(struct ieee80211_local *local, int enable_ht, |
723 | struct ieee80211_ht_info *req_ht_cap, | 863 | struct ieee80211_ht_info *req_ht_cap, |
724 | struct ieee80211_ht_bss_info *req_bss_cap); | 864 | struct ieee80211_ht_bss_info *req_bss_cap); |
@@ -749,8 +889,9 @@ extern const struct iw_handler_def ieee80211_iw_handler_def; | |||
749 | /* ieee80211_ioctl.c */ | 889 | /* ieee80211_ioctl.c */ |
750 | int ieee80211_set_compression(struct ieee80211_local *local, | 890 | int ieee80211_set_compression(struct ieee80211_local *local, |
751 | struct net_device *dev, struct sta_info *sta); | 891 | struct net_device *dev, struct sta_info *sta); |
752 | int ieee80211_set_channel(struct ieee80211_local *local, int channel, int freq); | 892 | int ieee80211_set_freq(struct ieee80211_local *local, int freq); |
753 | /* ieee80211_sta.c */ | 893 | /* ieee80211_sta.c */ |
894 | #define IEEE80211_FC(type, stype) cpu_to_le16(type | stype) | ||
754 | void ieee80211_sta_timer(unsigned long data); | 895 | void ieee80211_sta_timer(unsigned long data); |
755 | void ieee80211_sta_work(struct work_struct *work); | 896 | void ieee80211_sta_work(struct work_struct *work); |
756 | void ieee80211_sta_scan_work(struct work_struct *work); | 897 | void ieee80211_sta_scan_work(struct work_struct *work); |
@@ -763,9 +904,9 @@ int ieee80211_sta_req_scan(struct net_device *dev, u8 *ssid, size_t ssid_len); | |||
763 | void ieee80211_sta_req_auth(struct net_device *dev, | 904 | void ieee80211_sta_req_auth(struct net_device *dev, |
764 | struct ieee80211_if_sta *ifsta); | 905 | struct ieee80211_if_sta *ifsta); |
765 | int ieee80211_sta_scan_results(struct net_device *dev, char *buf, size_t len); | 906 | int ieee80211_sta_scan_results(struct net_device *dev, char *buf, size_t len); |
766 | ieee80211_txrx_result ieee80211_sta_rx_scan(struct net_device *dev, | 907 | ieee80211_rx_result ieee80211_sta_rx_scan( |
767 | struct sk_buff *skb, | 908 | struct net_device *dev, struct sk_buff *skb, |
768 | struct ieee80211_rx_status *rx_status); | 909 | struct ieee80211_rx_status *rx_status); |
769 | void ieee80211_rx_bss_list_init(struct net_device *dev); | 910 | void ieee80211_rx_bss_list_init(struct net_device *dev); |
770 | void ieee80211_rx_bss_list_deinit(struct net_device *dev); | 911 | void ieee80211_rx_bss_list_deinit(struct net_device *dev); |
771 | int ieee80211_sta_set_extra_ie(struct net_device *dev, char *ie, size_t len); | 912 | int ieee80211_sta_set_extra_ie(struct net_device *dev, char *ie, size_t len); |
@@ -782,12 +923,34 @@ int ieee80211_ht_cap_ie_to_ht_info(struct ieee80211_ht_cap *ht_cap_ie, | |||
782 | int ieee80211_ht_addt_info_ie_to_ht_bss_info( | 923 | int ieee80211_ht_addt_info_ie_to_ht_bss_info( |
783 | struct ieee80211_ht_addt_info *ht_add_info_ie, | 924 | struct ieee80211_ht_addt_info *ht_add_info_ie, |
784 | struct ieee80211_ht_bss_info *bss_info); | 925 | struct ieee80211_ht_bss_info *bss_info); |
926 | void ieee80211_send_addba_request(struct net_device *dev, const u8 *da, | ||
927 | u16 tid, u8 dialog_token, u16 start_seq_num, | ||
928 | u16 agg_size, u16 timeout); | ||
929 | void ieee80211_send_delba(struct net_device *dev, const u8 *da, u16 tid, | ||
930 | u16 initiator, u16 reason_code); | ||
785 | void ieee80211_sta_stop_rx_ba_session(struct net_device *dev, u8 *da, | 931 | void ieee80211_sta_stop_rx_ba_session(struct net_device *dev, u8 *da, |
786 | u16 tid, u16 initiator, u16 reason); | 932 | u16 tid, u16 initiator, u16 reason); |
787 | void sta_rx_agg_session_timer_expired(unsigned long data); | 933 | void sta_rx_agg_session_timer_expired(unsigned long data); |
934 | void sta_addba_resp_timer_expired(unsigned long data); | ||
935 | u64 ieee80211_sta_get_rates(struct ieee80211_local *local, | ||
936 | struct ieee802_11_elems *elems, | ||
937 | enum ieee80211_band band); | ||
938 | void ieee80211_sta_tx(struct net_device *dev, struct sk_buff *skb, | ||
939 | int encrypt); | ||
940 | void ieee802_11_parse_elems(u8 *start, size_t len, | ||
941 | struct ieee802_11_elems *elems); | ||
942 | |||
943 | #ifdef CONFIG_MAC80211_MESH | ||
944 | void ieee80211_start_mesh(struct net_device *dev); | ||
945 | #else | ||
946 | static inline void ieee80211_start_mesh(struct net_device *dev) | ||
947 | {} | ||
948 | #endif | ||
949 | |||
788 | /* ieee80211_iface.c */ | 950 | /* ieee80211_iface.c */ |
789 | int ieee80211_if_add(struct net_device *dev, const char *name, | 951 | int ieee80211_if_add(struct net_device *dev, const char *name, |
790 | struct net_device **new_dev, int type); | 952 | struct net_device **new_dev, int type, |
953 | struct vif_params *params); | ||
791 | void ieee80211_if_set_type(struct net_device *dev, int type); | 954 | void ieee80211_if_set_type(struct net_device *dev, int type); |
792 | void ieee80211_if_reinit(struct net_device *dev); | 955 | void ieee80211_if_reinit(struct net_device *dev); |
793 | void __ieee80211_if_del(struct ieee80211_local *local, | 956 | void __ieee80211_if_del(struct ieee80211_local *local, |
@@ -796,16 +959,7 @@ int ieee80211_if_remove(struct net_device *dev, const char *name, int id); | |||
796 | void ieee80211_if_free(struct net_device *dev); | 959 | void ieee80211_if_free(struct net_device *dev); |
797 | void ieee80211_if_sdata_init(struct ieee80211_sub_if_data *sdata); | 960 | void ieee80211_if_sdata_init(struct ieee80211_sub_if_data *sdata); |
798 | 961 | ||
799 | /* regdomain.c */ | ||
800 | void ieee80211_regdomain_init(void); | ||
801 | void ieee80211_set_default_regdomain(struct ieee80211_hw_mode *mode); | ||
802 | |||
803 | /* rx handling */ | ||
804 | extern ieee80211_rx_handler ieee80211_rx_pre_handlers[]; | ||
805 | extern ieee80211_rx_handler ieee80211_rx_handlers[]; | ||
806 | |||
807 | /* tx handling */ | 962 | /* tx handling */ |
808 | extern ieee80211_tx_handler ieee80211_tx_handlers[]; | ||
809 | void ieee80211_clear_tx_pending(struct ieee80211_local *local); | 963 | void ieee80211_clear_tx_pending(struct ieee80211_local *local); |
810 | void ieee80211_tx_pending(unsigned long data); | 964 | void ieee80211_tx_pending(unsigned long data); |
811 | int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev); | 965 | int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev); |
diff --git a/net/mac80211/ieee80211_iface.c b/net/mac80211/ieee80211_iface.c index 92f1eb2da311..80954a512185 100644 --- a/net/mac80211/ieee80211_iface.c +++ b/net/mac80211/ieee80211_iface.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include "ieee80211_i.h" | 15 | #include "ieee80211_i.h" |
16 | #include "sta_info.h" | 16 | #include "sta_info.h" |
17 | #include "debugfs_netdev.h" | 17 | #include "debugfs_netdev.h" |
18 | #include "mesh.h" | ||
18 | 19 | ||
19 | void ieee80211_if_sdata_init(struct ieee80211_sub_if_data *sdata) | 20 | void ieee80211_if_sdata_init(struct ieee80211_sub_if_data *sdata) |
20 | { | 21 | { |
@@ -39,7 +40,8 @@ static void ieee80211_if_sdata_deinit(struct ieee80211_sub_if_data *sdata) | |||
39 | 40 | ||
40 | /* Must be called with rtnl lock held. */ | 41 | /* Must be called with rtnl lock held. */ |
41 | int ieee80211_if_add(struct net_device *dev, const char *name, | 42 | int ieee80211_if_add(struct net_device *dev, const char *name, |
42 | struct net_device **new_dev, int type) | 43 | struct net_device **new_dev, int type, |
44 | struct vif_params *params) | ||
43 | { | 45 | { |
44 | struct net_device *ndev; | 46 | struct net_device *ndev; |
45 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 47 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); |
@@ -78,6 +80,12 @@ int ieee80211_if_add(struct net_device *dev, const char *name, | |||
78 | ieee80211_debugfs_add_netdev(sdata); | 80 | ieee80211_debugfs_add_netdev(sdata); |
79 | ieee80211_if_set_type(ndev, type); | 81 | ieee80211_if_set_type(ndev, type); |
80 | 82 | ||
83 | if (ieee80211_vif_is_mesh(&sdata->vif) && | ||
84 | params && params->mesh_id_len) | ||
85 | ieee80211_if_sta_set_mesh_id(&sdata->u.sta, | ||
86 | params->mesh_id_len, | ||
87 | params->mesh_id); | ||
88 | |||
81 | /* we're under RTNL so all this is fine */ | 89 | /* we're under RTNL so all this is fine */ |
82 | if (unlikely(local->reg_state == IEEE80211_DEV_UNREGISTERED)) { | 90 | if (unlikely(local->reg_state == IEEE80211_DEV_UNREGISTERED)) { |
83 | __ieee80211_if_del(local, sdata); | 91 | __ieee80211_if_del(local, sdata); |
@@ -118,6 +126,8 @@ void ieee80211_if_set_type(struct net_device *dev, int type) | |||
118 | sdata->bss = NULL; | 126 | sdata->bss = NULL; |
119 | sdata->vif.type = type; | 127 | sdata->vif.type = type; |
120 | 128 | ||
129 | sdata->basic_rates = 0; | ||
130 | |||
121 | switch (type) { | 131 | switch (type) { |
122 | case IEEE80211_IF_TYPE_WDS: | 132 | case IEEE80211_IF_TYPE_WDS: |
123 | /* nothing special */ | 133 | /* nothing special */ |
@@ -132,6 +142,7 @@ void ieee80211_if_set_type(struct net_device *dev, int type) | |||
132 | sdata->bss = &sdata->u.ap; | 142 | sdata->bss = &sdata->u.ap; |
133 | INIT_LIST_HEAD(&sdata->u.ap.vlans); | 143 | INIT_LIST_HEAD(&sdata->u.ap.vlans); |
134 | break; | 144 | break; |
145 | case IEEE80211_IF_TYPE_MESH_POINT: | ||
135 | case IEEE80211_IF_TYPE_STA: | 146 | case IEEE80211_IF_TYPE_STA: |
136 | case IEEE80211_IF_TYPE_IBSS: { | 147 | case IEEE80211_IF_TYPE_IBSS: { |
137 | struct ieee80211_sub_if_data *msdata; | 148 | struct ieee80211_sub_if_data *msdata; |
@@ -153,15 +164,20 @@ void ieee80211_if_set_type(struct net_device *dev, int type) | |||
153 | 164 | ||
154 | msdata = IEEE80211_DEV_TO_SUB_IF(sdata->local->mdev); | 165 | msdata = IEEE80211_DEV_TO_SUB_IF(sdata->local->mdev); |
155 | sdata->bss = &msdata->u.ap; | 166 | sdata->bss = &msdata->u.ap; |
167 | |||
168 | if (ieee80211_vif_is_mesh(&sdata->vif)) | ||
169 | ieee80211_mesh_init_sdata(sdata); | ||
156 | break; | 170 | break; |
157 | } | 171 | } |
158 | case IEEE80211_IF_TYPE_MNTR: | 172 | case IEEE80211_IF_TYPE_MNTR: |
159 | dev->type = ARPHRD_IEEE80211_RADIOTAP; | 173 | dev->type = ARPHRD_IEEE80211_RADIOTAP; |
160 | dev->hard_start_xmit = ieee80211_monitor_start_xmit; | 174 | dev->hard_start_xmit = ieee80211_monitor_start_xmit; |
175 | sdata->u.mntr_flags = MONITOR_FLAG_CONTROL | | ||
176 | MONITOR_FLAG_OTHER_BSS; | ||
161 | break; | 177 | break; |
162 | default: | 178 | default: |
163 | printk(KERN_WARNING "%s: %s: Unknown interface type 0x%x", | 179 | printk(KERN_WARNING "%s: %s: Unknown interface type 0x%x", |
164 | dev->name, __FUNCTION__, type); | 180 | dev->name, __func__, type); |
165 | } | 181 | } |
166 | ieee80211_debugfs_change_if_type(sdata, oldtype); | 182 | ieee80211_debugfs_change_if_type(sdata, oldtype); |
167 | } | 183 | } |
@@ -171,8 +187,8 @@ void ieee80211_if_reinit(struct net_device *dev) | |||
171 | { | 187 | { |
172 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 188 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); |
173 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 189 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
174 | struct sta_info *sta; | ||
175 | struct sk_buff *skb; | 190 | struct sk_buff *skb; |
191 | int flushed; | ||
176 | 192 | ||
177 | ASSERT_RTNL(); | 193 | ASSERT_RTNL(); |
178 | 194 | ||
@@ -180,6 +196,10 @@ void ieee80211_if_reinit(struct net_device *dev) | |||
180 | 196 | ||
181 | ieee80211_if_sdata_deinit(sdata); | 197 | ieee80211_if_sdata_deinit(sdata); |
182 | 198 | ||
199 | /* Need to handle mesh specially to allow eliding the function call */ | ||
200 | if (ieee80211_vif_is_mesh(&sdata->vif)) | ||
201 | mesh_rmc_free(dev); | ||
202 | |||
183 | switch (sdata->vif.type) { | 203 | switch (sdata->vif.type) { |
184 | case IEEE80211_IF_TYPE_INVALID: | 204 | case IEEE80211_IF_TYPE_INVALID: |
185 | /* cannot happen */ | 205 | /* cannot happen */ |
@@ -189,6 +209,7 @@ void ieee80211_if_reinit(struct net_device *dev) | |||
189 | /* Remove all virtual interfaces that use this BSS | 209 | /* Remove all virtual interfaces that use this BSS |
190 | * as their sdata->bss */ | 210 | * as their sdata->bss */ |
191 | struct ieee80211_sub_if_data *tsdata, *n; | 211 | struct ieee80211_sub_if_data *tsdata, *n; |
212 | struct beacon_data *beacon; | ||
192 | 213 | ||
193 | list_for_each_entry_safe(tsdata, n, &local->interfaces, list) { | 214 | list_for_each_entry_safe(tsdata, n, &local->interfaces, list) { |
194 | if (tsdata != sdata && tsdata->bss == &sdata->u.ap) { | 215 | if (tsdata != sdata && tsdata->bss == &sdata->u.ap) { |
@@ -206,7 +227,10 @@ void ieee80211_if_reinit(struct net_device *dev) | |||
206 | } | 227 | } |
207 | } | 228 | } |
208 | 229 | ||
209 | kfree(sdata->u.ap.beacon); | 230 | beacon = sdata->u.ap.beacon; |
231 | rcu_assign_pointer(sdata->u.ap.beacon, NULL); | ||
232 | synchronize_rcu(); | ||
233 | kfree(beacon); | ||
210 | 234 | ||
211 | while ((skb = skb_dequeue(&sdata->u.ap.ps_bc_buf))) { | 235 | while ((skb = skb_dequeue(&sdata->u.ap.ps_bc_buf))) { |
212 | local->total_ps_buffered--; | 236 | local->total_ps_buffered--; |
@@ -216,17 +240,9 @@ void ieee80211_if_reinit(struct net_device *dev) | |||
216 | break; | 240 | break; |
217 | } | 241 | } |
218 | case IEEE80211_IF_TYPE_WDS: | 242 | case IEEE80211_IF_TYPE_WDS: |
219 | sta = sta_info_get(local, sdata->u.wds.remote_addr); | 243 | /* nothing to do */ |
220 | if (sta) { | ||
221 | sta_info_free(sta); | ||
222 | sta_info_put(sta); | ||
223 | } else { | ||
224 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | ||
225 | printk(KERN_DEBUG "%s: Someone had deleted my STA " | ||
226 | "entry for the WDS link\n", dev->name); | ||
227 | #endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ | ||
228 | } | ||
229 | break; | 244 | break; |
245 | case IEEE80211_IF_TYPE_MESH_POINT: | ||
230 | case IEEE80211_IF_TYPE_STA: | 246 | case IEEE80211_IF_TYPE_STA: |
231 | case IEEE80211_IF_TYPE_IBSS: | 247 | case IEEE80211_IF_TYPE_IBSS: |
232 | kfree(sdata->u.sta.extra_ie); | 248 | kfree(sdata->u.sta.extra_ie); |
@@ -249,8 +265,8 @@ void ieee80211_if_reinit(struct net_device *dev) | |||
249 | break; | 265 | break; |
250 | } | 266 | } |
251 | 267 | ||
252 | /* remove all STAs that are bound to this virtual interface */ | 268 | flushed = sta_info_flush(local, sdata); |
253 | sta_info_flush(local, dev); | 269 | WARN_ON(flushed); |
254 | 270 | ||
255 | memset(&sdata->u, 0, sizeof(sdata->u)); | 271 | memset(&sdata->u, 0, sizeof(sdata->u)); |
256 | ieee80211_if_sdata_init(sdata); | 272 | ieee80211_if_sdata_init(sdata); |
diff --git a/net/mac80211/ieee80211_ioctl.c b/net/mac80211/ieee80211_ioctl.c index 5024d3733834..1d91575a0fe9 100644 --- a/net/mac80211/ieee80211_ioctl.c +++ b/net/mac80211/ieee80211_ioctl.c | |||
@@ -33,7 +33,6 @@ static int ieee80211_set_encryption(struct net_device *dev, u8 *sta_addr, | |||
33 | size_t key_len) | 33 | size_t key_len) |
34 | { | 34 | { |
35 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 35 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); |
36 | int ret = 0; | ||
37 | struct sta_info *sta; | 36 | struct sta_info *sta; |
38 | struct ieee80211_key *key; | 37 | struct ieee80211_key *key; |
39 | struct ieee80211_sub_if_data *sdata; | 38 | struct ieee80211_sub_if_data *sdata; |
@@ -46,59 +45,55 @@ static int ieee80211_set_encryption(struct net_device *dev, u8 *sta_addr, | |||
46 | return -EINVAL; | 45 | return -EINVAL; |
47 | } | 46 | } |
48 | 47 | ||
49 | if (is_broadcast_ether_addr(sta_addr)) { | 48 | if (remove) { |
50 | sta = NULL; | 49 | if (is_broadcast_ether_addr(sta_addr)) { |
51 | key = sdata->keys[idx]; | 50 | key = sdata->keys[idx]; |
52 | } else { | 51 | } else { |
53 | set_tx_key = 0; | 52 | sta = sta_info_get(local, sta_addr); |
54 | /* | 53 | if (!sta) |
55 | * According to the standard, the key index of a pairwise | 54 | return -ENOENT; |
56 | * key must be zero. However, some AP are broken when it | 55 | key = sta->key; |
57 | * comes to WEP key indices, so we work around this. | ||
58 | */ | ||
59 | if (idx != 0 && alg != ALG_WEP) { | ||
60 | printk(KERN_DEBUG "%s: set_encrypt - non-zero idx for " | ||
61 | "individual key\n", dev->name); | ||
62 | return -EINVAL; | ||
63 | } | 56 | } |
64 | 57 | ||
65 | sta = sta_info_get(local, sta_addr); | 58 | if (!key) |
66 | if (!sta) { | ||
67 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | ||
68 | DECLARE_MAC_BUF(mac); | ||
69 | printk(KERN_DEBUG "%s: set_encrypt - unknown addr " | ||
70 | "%s\n", | ||
71 | dev->name, print_mac(mac, sta_addr)); | ||
72 | #endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ | ||
73 | |||
74 | return -ENOENT; | 59 | return -ENOENT; |
75 | } | ||
76 | 60 | ||
77 | key = sta->key; | ||
78 | } | ||
79 | |||
80 | if (remove) { | ||
81 | ieee80211_key_free(key); | 61 | ieee80211_key_free(key); |
82 | key = NULL; | 62 | return 0; |
83 | } else { | 63 | } else { |
84 | /* | 64 | key = ieee80211_key_alloc(alg, idx, key_len, _key); |
85 | * Automatically frees any old key if present. | 65 | if (!key) |
86 | */ | 66 | return -ENOMEM; |
87 | key = ieee80211_key_alloc(sdata, sta, alg, idx, key_len, _key); | 67 | |
88 | if (!key) { | 68 | sta = NULL; |
89 | ret = -ENOMEM; | 69 | |
90 | goto err_out; | 70 | if (!is_broadcast_ether_addr(sta_addr)) { |
71 | set_tx_key = 0; | ||
72 | /* | ||
73 | * According to the standard, the key index of a | ||
74 | * pairwise key must be zero. However, some APs are | ||
75 | * broken when it comes to WEP key indices, so we | ||
76 | * work around this. | ||
77 | */ | ||
78 | if (idx != 0 && alg != ALG_WEP) { | ||
79 | ieee80211_key_free(key); | ||
80 | return -EINVAL; | ||
81 | } | ||
82 | |||
83 | sta = sta_info_get(local, sta_addr); | ||
84 | if (!sta) { | ||
85 | ieee80211_key_free(key); | ||
86 | return -ENOENT; | ||
87 | } | ||
91 | } | 88 | } |
92 | } | ||
93 | 89 | ||
94 | if (set_tx_key || (!sta && !sdata->default_key && key)) | 90 | ieee80211_key_link(key, sdata, sta); |
95 | ieee80211_set_default_key(sdata, idx); | ||
96 | 91 | ||
97 | ret = 0; | 92 | if (set_tx_key || (!sta && !sdata->default_key && key)) |
98 | err_out: | 93 | ieee80211_set_default_key(sdata, idx); |
99 | if (sta) | 94 | } |
100 | sta_info_put(sta); | 95 | |
101 | return ret; | 96 | return 0; |
102 | } | 97 | } |
103 | 98 | ||
104 | static int ieee80211_ioctl_siwgenie(struct net_device *dev, | 99 | static int ieee80211_ioctl_siwgenie(struct net_device *dev, |
@@ -129,22 +124,7 @@ static int ieee80211_ioctl_giwname(struct net_device *dev, | |||
129 | struct iw_request_info *info, | 124 | struct iw_request_info *info, |
130 | char *name, char *extra) | 125 | char *name, char *extra) |
131 | { | 126 | { |
132 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 127 | strcpy(name, "IEEE 802.11"); |
133 | |||
134 | switch (local->hw.conf.phymode) { | ||
135 | case MODE_IEEE80211A: | ||
136 | strcpy(name, "IEEE 802.11a"); | ||
137 | break; | ||
138 | case MODE_IEEE80211B: | ||
139 | strcpy(name, "IEEE 802.11b"); | ||
140 | break; | ||
141 | case MODE_IEEE80211G: | ||
142 | strcpy(name, "IEEE 802.11g"); | ||
143 | break; | ||
144 | default: | ||
145 | strcpy(name, "IEEE 802.11"); | ||
146 | break; | ||
147 | } | ||
148 | 128 | ||
149 | return 0; | 129 | return 0; |
150 | } | 130 | } |
@@ -156,7 +136,7 @@ static int ieee80211_ioctl_giwrange(struct net_device *dev, | |||
156 | { | 136 | { |
157 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 137 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); |
158 | struct iw_range *range = (struct iw_range *) extra; | 138 | struct iw_range *range = (struct iw_range *) extra; |
159 | struct ieee80211_hw_mode *mode = NULL; | 139 | enum ieee80211_band band; |
160 | int c = 0; | 140 | int c = 0; |
161 | 141 | ||
162 | data->length = sizeof(struct iw_range); | 142 | data->length = sizeof(struct iw_range); |
@@ -191,24 +171,27 @@ static int ieee80211_ioctl_giwrange(struct net_device *dev, | |||
191 | range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 | | 171 | range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 | |
192 | IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP; | 172 | IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP; |
193 | 173 | ||
194 | list_for_each_entry(mode, &local->modes_list, list) { | ||
195 | int i = 0; | ||
196 | 174 | ||
197 | if (!(local->enabled_modes & (1 << mode->mode)) || | 175 | for (band = 0; band < IEEE80211_NUM_BANDS; band ++) { |
198 | (local->hw_modes & local->enabled_modes & | 176 | int i; |
199 | (1 << MODE_IEEE80211G) && mode->mode == MODE_IEEE80211B)) | 177 | struct ieee80211_supported_band *sband; |
178 | |||
179 | sband = local->hw.wiphy->bands[band]; | ||
180 | |||
181 | if (!sband) | ||
200 | continue; | 182 | continue; |
201 | 183 | ||
202 | while (i < mode->num_channels && c < IW_MAX_FREQUENCIES) { | 184 | for (i = 0; i < sband->n_channels && c < IW_MAX_FREQUENCIES; i++) { |
203 | struct ieee80211_channel *chan = &mode->channels[i]; | 185 | struct ieee80211_channel *chan = &sband->channels[i]; |
204 | 186 | ||
205 | if (chan->flag & IEEE80211_CHAN_W_SCAN) { | 187 | if (!(chan->flags & IEEE80211_CHAN_DISABLED)) { |
206 | range->freq[c].i = chan->chan; | 188 | range->freq[c].i = |
207 | range->freq[c].m = chan->freq * 100000; | 189 | ieee80211_frequency_to_channel( |
208 | range->freq[c].e = 1; | 190 | chan->center_freq); |
191 | range->freq[c].m = chan->center_freq; | ||
192 | range->freq[c].e = 6; | ||
209 | c++; | 193 | c++; |
210 | } | 194 | } |
211 | i++; | ||
212 | } | 195 | } |
213 | } | 196 | } |
214 | range->num_channels = c; | 197 | range->num_channels = c; |
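As background for the rewritten channel loop above: the new code reports each enabled channel's center frequency in MHz with an exponent of 6 and derives the channel index from that frequency. A minimal user-space sketch of the 2.4 GHz case (freq_to_channel_24ghz is an illustrative stand-in for the in-kernel helper, which also covers other bands):

#include <stdio.h>

/* Illustrative only: 2.4 GHz mapping; the in-kernel helper also handles 5 GHz. */
static int freq_to_channel_24ghz(int mhz)
{
	if (mhz == 2484)
		return 14;
	if (mhz >= 2412 && mhz <= 2472)
		return (mhz - 2407) / 5;
	return -1;
}

int main(void)
{
	int mhz = 2437;				/* example: channel 6           */
	int i = freq_to_channel_24ghz(mhz);	/* -> range->freq[c].i          */
	int m = mhz;				/* -> range->freq[c].m (in MHz) */
	int e = 6;				/* -> range->freq[c].e          */

	printf("channel %d, freq m=%d e=%d\n", i, m, e);
	return 0;
}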
@@ -294,22 +277,29 @@ static int ieee80211_ioctl_giwmode(struct net_device *dev, | |||
294 | return 0; | 277 | return 0; |
295 | } | 278 | } |
296 | 279 | ||
297 | int ieee80211_set_channel(struct ieee80211_local *local, int channel, int freq) | 280 | int ieee80211_set_freq(struct ieee80211_local *local, int freqMHz) |
298 | { | 281 | { |
299 | struct ieee80211_hw_mode *mode; | 282 | int set = 0; |
300 | int c, set = 0; | ||
301 | int ret = -EINVAL; | 283 | int ret = -EINVAL; |
284 | enum ieee80211_band band; | ||
285 | struct ieee80211_supported_band *sband; | ||
286 | int i; | ||
287 | |||
288 | for (band = 0; band < IEEE80211_NUM_BANDS; band ++) { | ||
289 | sband = local->hw.wiphy->bands[band]; | ||
302 | 290 | ||
303 | list_for_each_entry(mode, &local->modes_list, list) { | 291 | if (!sband) |
304 | if (!(local->enabled_modes & (1 << mode->mode))) | ||
305 | continue; | 292 | continue; |
306 | for (c = 0; c < mode->num_channels; c++) { | 293 | |
307 | struct ieee80211_channel *chan = &mode->channels[c]; | 294 | for (i = 0; i < sband->n_channels; i++) { |
308 | if (chan->flag & IEEE80211_CHAN_W_SCAN && | 295 | struct ieee80211_channel *chan = &sband->channels[i]; |
309 | ((chan->chan == channel) || (chan->freq == freq))) { | 296 | |
310 | local->oper_channel = chan; | 297 | if (chan->flags & IEEE80211_CHAN_DISABLED) |
311 | local->oper_hw_mode = mode; | 298 | continue; |
299 | |||
300 | if (chan->center_freq == freqMHz) { | ||
312 | set = 1; | 301 | set = 1; |
302 | local->oper_channel = chan; | ||
313 | break; | 303 | break; |
314 | } | 304 | } |
315 | } | 305 | } |
@@ -347,13 +337,14 @@ static int ieee80211_ioctl_siwfreq(struct net_device *dev, | |||
347 | IEEE80211_STA_AUTO_CHANNEL_SEL; | 337 | IEEE80211_STA_AUTO_CHANNEL_SEL; |
348 | return 0; | 338 | return 0; |
349 | } else | 339 | } else |
350 | return ieee80211_set_channel(local, freq->m, -1); | 340 | return ieee80211_set_freq(local, |
341 | ieee80211_channel_to_frequency(freq->m)); | ||
351 | } else { | 342 | } else { |
352 | int i, div = 1000000; | 343 | int i, div = 1000000; |
353 | for (i = 0; i < freq->e; i++) | 344 | for (i = 0; i < freq->e; i++) |
354 | div /= 10; | 345 | div /= 10; |
355 | if (div > 0) | 346 | if (div > 0) |
356 | return ieee80211_set_channel(local, -1, freq->m / div); | 347 | return ieee80211_set_freq(local, freq->m / div); |
357 | else | 348 | else |
358 | return -EINVAL; | 349 | return -EINVAL; |
359 | } | 350 | } |
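For clarity, the siwfreq handler above decodes a wireless-extensions iw_freq pair, whose value is m * 10^e, into MHz before calling the new ieee80211_set_freq(). A self-contained sketch of that decoding (user-space, illustrative only, mirroring the divisor loop shown above):

#include <stdio.h>

/* Decode an iw_freq (m, e) pair to MHz; returns -1 if e is too large. */
static int iw_freq_to_mhz(int m, int e)
{
	int i, div = 1000000;

	for (i = 0; i < e; i++)
		div /= 10;
	return div > 0 ? m / div : -1;
}

int main(void)
{
	/* 2.437 GHz can be encoded as m=2437, e=6 or m=243700000, e=1 */
	printf("%d MHz\n", iw_freq_to_mhz(2437, 6));
	printf("%d MHz\n", iw_freq_to_mhz(243700000, 1));
	return 0;
}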
@@ -366,10 +357,7 @@ static int ieee80211_ioctl_giwfreq(struct net_device *dev, | |||
366 | { | 357 | { |
367 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 358 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); |
368 | 359 | ||
369 | /* TODO: in station mode (Managed/Ad-hoc) might need to poll low-level | 360 | freq->m = local->hw.conf.channel->center_freq; |
370 | * driver for the current channel with firmware-based management */ | ||
371 | |||
372 | freq->m = local->hw.conf.freq; | ||
373 | freq->e = 6; | 361 | freq->e = 6; |
374 | 362 | ||
375 | return 0; | 363 | return 0; |
@@ -480,10 +468,20 @@ static int ieee80211_ioctl_siwap(struct net_device *dev, | |||
480 | ieee80211_sta_req_auth(dev, &sdata->u.sta); | 468 | ieee80211_sta_req_auth(dev, &sdata->u.sta); |
481 | return 0; | 469 | return 0; |
482 | } else if (sdata->vif.type == IEEE80211_IF_TYPE_WDS) { | 470 | } else if (sdata->vif.type == IEEE80211_IF_TYPE_WDS) { |
483 | if (memcmp(sdata->u.wds.remote_addr, (u8 *) &ap_addr->sa_data, | 471 | /* |
484 | ETH_ALEN) == 0) | 472 | * If it is necessary to update the WDS peer address |
485 | return 0; | 473 | * while the interface is running, then we need to do |
486 | return ieee80211_if_update_wds(dev, (u8 *) &ap_addr->sa_data); | 474 | * more work here, namely if it is running we need to |
475 | * add a new and remove the old STA entry, this is | ||
476 | * normally handled by _open() and _stop(). | ||
477 | */ | ||
478 | if (netif_running(dev)) | ||
479 | return -EBUSY; | ||
480 | |||
481 | memcpy(&sdata->u.wds.remote_addr, (u8 *) &ap_addr->sa_data, | ||
482 | ETH_ALEN); | ||
483 | |||
484 | return 0; | ||
487 | } | 485 | } |
488 | 486 | ||
489 | return -EOPNOTSUPP; | 487 | return -EOPNOTSUPP; |
@@ -526,6 +524,7 @@ static int ieee80211_ioctl_siwscan(struct net_device *dev, | |||
526 | 524 | ||
527 | if (sdata->vif.type != IEEE80211_IF_TYPE_STA && | 525 | if (sdata->vif.type != IEEE80211_IF_TYPE_STA && |
528 | sdata->vif.type != IEEE80211_IF_TYPE_IBSS && | 526 | sdata->vif.type != IEEE80211_IF_TYPE_IBSS && |
527 | sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT && | ||
529 | sdata->vif.type != IEEE80211_IF_TYPE_AP) | 528 | sdata->vif.type != IEEE80211_IF_TYPE_AP) |
530 | return -EOPNOTSUPP; | 529 | return -EOPNOTSUPP; |
531 | 530 | ||
@@ -566,15 +565,17 @@ static int ieee80211_ioctl_siwrate(struct net_device *dev, | |||
566 | struct iw_param *rate, char *extra) | 565 | struct iw_param *rate, char *extra) |
567 | { | 566 | { |
568 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 567 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); |
569 | struct ieee80211_hw_mode *mode; | 568 | int i, err = -EINVAL; |
570 | int i; | ||
571 | u32 target_rate = rate->value / 100000; | 569 | u32 target_rate = rate->value / 100000; |
572 | struct ieee80211_sub_if_data *sdata; | 570 | struct ieee80211_sub_if_data *sdata; |
571 | struct ieee80211_supported_band *sband; | ||
573 | 572 | ||
574 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 573 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
575 | if (!sdata->bss) | 574 | if (!sdata->bss) |
576 | return -ENODEV; | 575 | return -ENODEV; |
577 | mode = local->oper_hw_mode; | 576 | |
577 | sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; | ||
578 | |||
578 | /* target_rate = -1, rate->fixed = 0 means auto only, so use all rates | 579 | /* target_rate = -1, rate->fixed = 0 means auto only, so use all rates |
579 | * target_rate = X, rate->fixed = 1 means only rate X | 580 | * target_rate = X, rate->fixed = 1 means only rate X |
580 | * target_rate = X, rate->fixed = 0 means all rates <= X */ | 581 | * target_rate = X, rate->fixed = 0 means all rates <= X */ |
@@ -582,18 +583,20 @@ static int ieee80211_ioctl_siwrate(struct net_device *dev, | |||
582 | sdata->bss->force_unicast_rateidx = -1; | 583 | sdata->bss->force_unicast_rateidx = -1; |
583 | if (rate->value < 0) | 584 | if (rate->value < 0) |
584 | return 0; | 585 | return 0; |
585 | for (i=0; i < mode->num_rates; i++) { | 586 | |
586 | struct ieee80211_rate *rates = &mode->rates[i]; | 587 | for (i=0; i< sband->n_bitrates; i++) { |
587 | int this_rate = rates->rate; | 588 | struct ieee80211_rate *brate = &sband->bitrates[i]; |
589 | int this_rate = brate->bitrate; | ||
588 | 590 | ||
589 | if (target_rate == this_rate) { | 591 | if (target_rate == this_rate) { |
590 | sdata->bss->max_ratectrl_rateidx = i; | 592 | sdata->bss->max_ratectrl_rateidx = i; |
591 | if (rate->fixed) | 593 | if (rate->fixed) |
592 | sdata->bss->force_unicast_rateidx = i; | 594 | sdata->bss->force_unicast_rateidx = i; |
593 | return 0; | 595 | err = 0; |
596 | break; | ||
594 | } | 597 | } |
595 | } | 598 | } |
596 | return -EINVAL; | 599 | return err; |
597 | } | 600 | } |
598 | 601 | ||
599 | static int ieee80211_ioctl_giwrate(struct net_device *dev, | 602 | static int ieee80211_ioctl_giwrate(struct net_device *dev, |
@@ -603,19 +606,25 @@ static int ieee80211_ioctl_giwrate(struct net_device *dev, | |||
603 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 606 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); |
604 | struct sta_info *sta; | 607 | struct sta_info *sta; |
605 | struct ieee80211_sub_if_data *sdata; | 608 | struct ieee80211_sub_if_data *sdata; |
609 | struct ieee80211_supported_band *sband; | ||
606 | 610 | ||
607 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 611 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
612 | |||
608 | if (sdata->vif.type == IEEE80211_IF_TYPE_STA) | 613 | if (sdata->vif.type == IEEE80211_IF_TYPE_STA) |
609 | sta = sta_info_get(local, sdata->u.sta.bssid); | 614 | sta = sta_info_get(local, sdata->u.sta.bssid); |
610 | else | 615 | else |
611 | return -EOPNOTSUPP; | 616 | return -EOPNOTSUPP; |
612 | if (!sta) | 617 | if (!sta) |
613 | return -ENODEV; | 618 | return -ENODEV; |
614 | if (sta->txrate < local->oper_hw_mode->num_rates) | 619 | |
615 | rate->value = local->oper_hw_mode->rates[sta->txrate].rate * 100000; | 620 | sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; |
621 | |||
622 | if (sta->txrate_idx < sband->n_bitrates) | ||
623 | rate->value = sband->bitrates[sta->txrate_idx].bitrate; | ||
616 | else | 624 | else |
617 | rate->value = 0; | 625 | rate->value = 0; |
618 | sta_info_put(sta); | 626 | rate->value *= 100000; |
627 | |||
619 | return 0; | 628 | return 0; |
620 | } | 629 | } |
621 | 630 | ||
@@ -625,7 +634,7 @@ static int ieee80211_ioctl_siwtxpower(struct net_device *dev, | |||
625 | { | 634 | { |
626 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 635 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); |
627 | bool need_reconfig = 0; | 636 | bool need_reconfig = 0; |
628 | u8 new_power_level; | 637 | int new_power_level; |
629 | 638 | ||
630 | if ((data->txpower.flags & IW_TXPOW_TYPE) != IW_TXPOW_DBM) | 639 | if ((data->txpower.flags & IW_TXPOW_TYPE) != IW_TXPOW_DBM) |
631 | return -EINVAL; | 640 | return -EINVAL; |
@@ -635,13 +644,15 @@ static int ieee80211_ioctl_siwtxpower(struct net_device *dev, | |||
635 | if (data->txpower.fixed) { | 644 | if (data->txpower.fixed) { |
636 | new_power_level = data->txpower.value; | 645 | new_power_level = data->txpower.value; |
637 | } else { | 646 | } else { |
638 | /* Automatic power level. Get the px power from the current | 647 | /* |
639 | * channel. */ | 648 | * Automatic power level. Use maximum power for the current |
640 | struct ieee80211_channel* chan = local->oper_channel; | 649 | * channel. Should be part of rate control. |
650 | */ | ||
651 | struct ieee80211_channel* chan = local->hw.conf.channel; | ||
641 | if (!chan) | 652 | if (!chan) |
642 | return -EINVAL; | 653 | return -EINVAL; |
643 | 654 | ||
644 | new_power_level = chan->power_level; | 655 | new_power_level = chan->max_power; |
645 | } | 656 | } |
646 | 657 | ||
647 | if (local->hw.conf.power_level != new_power_level) { | 658 | if (local->hw.conf.power_level != new_power_level) { |
@@ -988,7 +999,6 @@ static struct iw_statistics *ieee80211_get_wireless_stats(struct net_device *dev | |||
988 | wstats->qual.qual = sta->last_signal; | 999 | wstats->qual.qual = sta->last_signal; |
989 | wstats->qual.noise = sta->last_noise; | 1000 | wstats->qual.noise = sta->last_noise; |
990 | wstats->qual.updated = local->wstats_flags; | 1001 | wstats->qual.updated = local->wstats_flags; |
991 | sta_info_put(sta); | ||
992 | } | 1002 | } |
993 | return wstats; | 1003 | return wstats; |
994 | } | 1004 | } |
diff --git a/net/mac80211/ieee80211_key.h b/net/mac80211/ieee80211_key.h index fc770e98d47b..d670e6dbfa39 100644 --- a/net/mac80211/ieee80211_key.h +++ b/net/mac80211/ieee80211_key.h | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/types.h> | 13 | #include <linux/types.h> |
14 | #include <linux/list.h> | 14 | #include <linux/list.h> |
15 | #include <linux/crypto.h> | 15 | #include <linux/crypto.h> |
16 | #include <linux/rcupdate.h> | ||
16 | #include <net/mac80211.h> | 17 | #include <net/mac80211.h> |
17 | 18 | ||
18 | /* ALG_TKIP | 19 | /* ALG_TKIP |
@@ -45,7 +46,19 @@ struct ieee80211_local; | |||
45 | struct ieee80211_sub_if_data; | 46 | struct ieee80211_sub_if_data; |
46 | struct sta_info; | 47 | struct sta_info; |
47 | 48 | ||
48 | #define KEY_FLAG_UPLOADED_TO_HARDWARE (1<<0) | 49 | /** |
50 | * enum ieee80211_internal_key_flags - internal key flags | ||
51 | * | ||
52 | * @KEY_FLAG_UPLOADED_TO_HARDWARE: Indicates that this key is present | ||
53 | * in the hardware for TX crypto hardware acceleration. | ||
54 | * @KEY_FLAG_REMOVE_FROM_HARDWARE: Indicates to the key code that this | ||
55 | * key is present in the hardware (but it cannot be used for | ||
56 | * hardware acceleration any more!) | ||
57 | */ | ||
58 | enum ieee80211_internal_key_flags { | ||
59 | KEY_FLAG_UPLOADED_TO_HARDWARE = BIT(0), | ||
60 | KEY_FLAG_REMOVE_FROM_HARDWARE = BIT(1), | ||
61 | }; | ||
49 | 62 | ||
50 | struct ieee80211_key { | 63 | struct ieee80211_key { |
51 | struct ieee80211_local *local; | 64 | struct ieee80211_local *local; |
@@ -112,12 +125,17 @@ struct ieee80211_key { | |||
112 | struct ieee80211_key_conf conf; | 125 | struct ieee80211_key_conf conf; |
113 | }; | 126 | }; |
114 | 127 | ||
115 | struct ieee80211_key *ieee80211_key_alloc(struct ieee80211_sub_if_data *sdata, | 128 | struct ieee80211_key *ieee80211_key_alloc(enum ieee80211_key_alg alg, |
116 | struct sta_info *sta, | ||
117 | enum ieee80211_key_alg alg, | ||
118 | int idx, | 129 | int idx, |
119 | size_t key_len, | 130 | size_t key_len, |
120 | const u8 *key_data); | 131 | const u8 *key_data); |
132 | /* | ||
133 | * Insert a key into data structures (sdata, sta if necessary) | ||
134 | * to make it used; the old key, if any, is freed. | ||
135 | */ | ||
136 | void ieee80211_key_link(struct ieee80211_key *key, | ||
137 | struct ieee80211_sub_if_data *sdata, | ||
138 | struct sta_info *sta); | ||
121 | void ieee80211_key_free(struct ieee80211_key *key); | 139 | void ieee80211_key_free(struct ieee80211_key *key); |
122 | void ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, int idx); | 140 | void ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, int idx); |
123 | void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata); | 141 | void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata); |
diff --git a/net/mac80211/ieee80211_rate.c b/net/mac80211/ieee80211_rate.c index b957e67c5fba..4de06f128d90 100644 --- a/net/mac80211/ieee80211_rate.c +++ b/net/mac80211/ieee80211_rate.c | |||
@@ -163,34 +163,37 @@ static void rate_control_release(struct kref *kref) | |||
163 | } | 163 | } |
164 | 164 | ||
165 | void rate_control_get_rate(struct net_device *dev, | 165 | void rate_control_get_rate(struct net_device *dev, |
166 | struct ieee80211_hw_mode *mode, struct sk_buff *skb, | 166 | struct ieee80211_supported_band *sband, |
167 | struct sk_buff *skb, | ||
167 | struct rate_selection *sel) | 168 | struct rate_selection *sel) |
168 | { | 169 | { |
169 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 170 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); |
170 | struct rate_control_ref *ref = local->rate_ctrl; | 171 | struct rate_control_ref *ref = local->rate_ctrl; |
171 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | 172 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; |
172 | struct sta_info *sta = sta_info_get(local, hdr->addr1); | 173 | struct sta_info *sta; |
173 | int i; | 174 | int i; |
174 | 175 | ||
176 | rcu_read_lock(); | ||
177 | sta = sta_info_get(local, hdr->addr1); | ||
178 | |||
175 | memset(sel, 0, sizeof(struct rate_selection)); | 179 | memset(sel, 0, sizeof(struct rate_selection)); |
176 | 180 | ||
177 | ref->ops->get_rate(ref->priv, dev, mode, skb, sel); | 181 | ref->ops->get_rate(ref->priv, dev, sband, skb, sel); |
178 | 182 | ||
179 | /* Select a non-ERP backup rate. */ | 183 | /* Select a non-ERP backup rate. */ |
180 | if (!sel->nonerp) { | 184 | if (!sel->nonerp) { |
181 | for (i = 0; i < mode->num_rates - 1; i++) { | 185 | for (i = 0; i < sband->n_bitrates; i++) { |
182 | struct ieee80211_rate *rate = &mode->rates[i]; | 186 | struct ieee80211_rate *rate = &sband->bitrates[i]; |
183 | if (sel->rate->rate < rate->rate) | 187 | if (sel->rate->bitrate < rate->bitrate) |
184 | break; | 188 | break; |
185 | 189 | ||
186 | if (rate_supported(sta, mode, i) && | 190 | if (rate_supported(sta, sband->band, i) && |
187 | !(rate->flags & IEEE80211_RATE_ERP)) | 191 | !(rate->flags & IEEE80211_RATE_ERP_G)) |
188 | sel->nonerp = rate; | 192 | sel->nonerp = rate; |
189 | } | 193 | } |
190 | } | 194 | } |
191 | 195 | ||
192 | if (sta) | 196 | rcu_read_unlock(); |
193 | sta_info_put(sta); | ||
194 | } | 197 | } |
195 | 198 | ||
196 | struct rate_control_ref *rate_control_get(struct rate_control_ref *ref) | 199 | struct rate_control_ref *rate_control_get(struct rate_control_ref *ref) |
diff --git a/net/mac80211/ieee80211_rate.h b/net/mac80211/ieee80211_rate.h index 73f19e8aa51c..5b45f33cb766 100644 --- a/net/mac80211/ieee80211_rate.h +++ b/net/mac80211/ieee80211_rate.h | |||
@@ -14,10 +14,12 @@ | |||
14 | #include <linux/netdevice.h> | 14 | #include <linux/netdevice.h> |
15 | #include <linux/skbuff.h> | 15 | #include <linux/skbuff.h> |
16 | #include <linux/types.h> | 16 | #include <linux/types.h> |
17 | #include <linux/kref.h> | ||
17 | #include <net/mac80211.h> | 18 | #include <net/mac80211.h> |
18 | #include "ieee80211_i.h" | 19 | #include "ieee80211_i.h" |
19 | #include "sta_info.h" | 20 | #include "sta_info.h" |
20 | 21 | ||
22 | /* TODO: kdoc */ | ||
21 | struct rate_selection { | 23 | struct rate_selection { |
22 | /* Selected transmission rate */ | 24 | /* Selected transmission rate */ |
23 | struct ieee80211_rate *rate; | 25 | struct ieee80211_rate *rate; |
@@ -34,7 +36,8 @@ struct rate_control_ops { | |||
34 | struct sk_buff *skb, | 36 | struct sk_buff *skb, |
35 | struct ieee80211_tx_status *status); | 37 | struct ieee80211_tx_status *status); |
36 | void (*get_rate)(void *priv, struct net_device *dev, | 38 | void (*get_rate)(void *priv, struct net_device *dev, |
37 | struct ieee80211_hw_mode *mode, struct sk_buff *skb, | 39 | struct ieee80211_supported_band *band, |
40 | struct sk_buff *skb, | ||
38 | struct rate_selection *sel); | 41 | struct rate_selection *sel); |
39 | void (*rate_init)(void *priv, void *priv_sta, | 42 | void (*rate_init)(void *priv, void *priv_sta, |
40 | struct ieee80211_local *local, struct sta_info *sta); | 43 | struct ieee80211_local *local, struct sta_info *sta); |
@@ -66,7 +69,8 @@ void ieee80211_rate_control_unregister(struct rate_control_ops *ops); | |||
66 | struct rate_control_ref *rate_control_alloc(const char *name, | 69 | struct rate_control_ref *rate_control_alloc(const char *name, |
67 | struct ieee80211_local *local); | 70 | struct ieee80211_local *local); |
68 | void rate_control_get_rate(struct net_device *dev, | 71 | void rate_control_get_rate(struct net_device *dev, |
69 | struct ieee80211_hw_mode *mode, struct sk_buff *skb, | 72 | struct ieee80211_supported_band *sband, |
73 | struct sk_buff *skb, | ||
70 | struct rate_selection *sel); | 74 | struct rate_selection *sel); |
71 | struct rate_control_ref *rate_control_get(struct rate_control_ref *ref); | 75 | struct rate_control_ref *rate_control_get(struct rate_control_ref *ref); |
72 | void rate_control_put(struct rate_control_ref *ref); | 76 | void rate_control_put(struct rate_control_ref *ref); |
@@ -127,23 +131,23 @@ static inline void rate_control_remove_sta_debugfs(struct sta_info *sta) | |||
127 | #endif | 131 | #endif |
128 | } | 132 | } |
129 | 133 | ||
130 | static inline int | 134 | static inline int rate_supported(struct sta_info *sta, |
131 | rate_supported(struct sta_info *sta, struct ieee80211_hw_mode *mode, int index) | 135 | enum ieee80211_band band, |
136 | int index) | ||
132 | { | 137 | { |
133 | return (sta == NULL || sta->supp_rates & BIT(index)) && | 138 | return (sta == NULL || sta->supp_rates[band] & BIT(index)); |
134 | (mode->rates[index].flags & IEEE80211_RATE_SUPPORTED); | ||
135 | } | 139 | } |
136 | 140 | ||
137 | static inline int | 141 | static inline int |
138 | rate_lowest_index(struct ieee80211_local *local, struct ieee80211_hw_mode *mode, | 142 | rate_lowest_index(struct ieee80211_local *local, |
143 | struct ieee80211_supported_band *sband, | ||
139 | struct sta_info *sta) | 144 | struct sta_info *sta) |
140 | { | 145 | { |
141 | int i; | 146 | int i; |
142 | 147 | ||
143 | for (i = 0; i < mode->num_rates; i++) { | 148 | for (i = 0; i < sband->n_bitrates; i++) |
144 | if (rate_supported(sta, mode, i)) | 149 | if (rate_supported(sta, sband->band, i)) |
145 | return i; | 150 | return i; |
146 | } | ||
147 | 151 | ||
148 | /* warn when we cannot find a rate. */ | 152 | /* warn when we cannot find a rate. */ |
149 | WARN_ON(1); | 153 | WARN_ON(1); |
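The reworked rate_supported() above now keys off a per-band bitmap: each supported bitrate index corresponds to one bit in the station's supp_rates[band], and rate_lowest_index() simply returns the first set index. A stand-alone illustration (BIT() redefined locally; the sample values are made up):

#include <stdio.h>
#include <stdint.h>

#define BIT(n)	(1ULL << (n))

int main(void)
{
	/* Suppose a peer advertises rate indices 0, 2 and 5 on one band. */
	uint64_t supp_rates = BIT(0) | BIT(2) | BIT(5);
	int i, n_bitrates = 8;

	for (i = 0; i < n_bitrates; i++) {
		if (supp_rates & BIT(i)) {
			printf("lowest supported rate index: %d\n", i);
			break;
		}
	}
	return 0;
}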
@@ -152,10 +156,11 @@ rate_lowest_index(struct ieee80211_local *local, struct ieee80211_hw_mode *mode, | |||
152 | } | 156 | } |
153 | 157 | ||
154 | static inline struct ieee80211_rate * | 158 | static inline struct ieee80211_rate * |
155 | rate_lowest(struct ieee80211_local *local, struct ieee80211_hw_mode *mode, | 159 | rate_lowest(struct ieee80211_local *local, |
160 | struct ieee80211_supported_band *sband, | ||
156 | struct sta_info *sta) | 161 | struct sta_info *sta) |
157 | { | 162 | { |
158 | return &mode->rates[rate_lowest_index(local, mode, sta)]; | 163 | return &sband->bitrates[rate_lowest_index(local, sband, sta)]; |
159 | } | 164 | } |
160 | 165 | ||
161 | 166 | ||
@@ -166,21 +171,6 @@ void rate_control_deinitialize(struct ieee80211_local *local); | |||
166 | 171 | ||
167 | 172 | ||
168 | /* Rate control algorithms */ | 173 | /* Rate control algorithms */ |
169 | #if defined(RC80211_SIMPLE_COMPILE) || \ | ||
170 | (defined(CONFIG_MAC80211_RC_SIMPLE) && \ | ||
171 | !defined(CONFIG_MAC80211_RC_SIMPLE_MODULE)) | ||
172 | extern int rc80211_simple_init(void); | ||
173 | extern void rc80211_simple_exit(void); | ||
174 | #else | ||
175 | static inline int rc80211_simple_init(void) | ||
176 | { | ||
177 | return 0; | ||
178 | } | ||
179 | static inline void rc80211_simple_exit(void) | ||
180 | { | ||
181 | } | ||
182 | #endif | ||
183 | |||
184 | #if defined(RC80211_PID_COMPILE) || \ | 174 | #if defined(RC80211_PID_COMPILE) || \ |
185 | (defined(CONFIG_MAC80211_RC_PID) && \ | 175 | (defined(CONFIG_MAC80211_RC_PID) && \ |
186 | !defined(CONFIG_MAC80211_RC_PID_MODULE)) | 176 | !defined(CONFIG_MAC80211_RC_PID_MODULE)) |
diff --git a/net/mac80211/ieee80211_sta.c b/net/mac80211/ieee80211_sta.c index 9aeed5320228..8b991ebcbb4e 100644 --- a/net/mac80211/ieee80211_sta.c +++ b/net/mac80211/ieee80211_sta.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/wireless.h> | 24 | #include <linux/wireless.h> |
25 | #include <linux/random.h> | 25 | #include <linux/random.h> |
26 | #include <linux/etherdevice.h> | 26 | #include <linux/etherdevice.h> |
27 | #include <linux/rtnetlink.h> | ||
27 | #include <net/iw_handler.h> | 28 | #include <net/iw_handler.h> |
28 | #include <asm/types.h> | 29 | #include <asm/types.h> |
29 | 30 | ||
@@ -31,12 +32,14 @@ | |||
31 | #include "ieee80211_i.h" | 32 | #include "ieee80211_i.h" |
32 | #include "ieee80211_rate.h" | 33 | #include "ieee80211_rate.h" |
33 | #include "ieee80211_led.h" | 34 | #include "ieee80211_led.h" |
35 | #include "mesh.h" | ||
34 | 36 | ||
35 | #define IEEE80211_AUTH_TIMEOUT (HZ / 5) | 37 | #define IEEE80211_AUTH_TIMEOUT (HZ / 5) |
36 | #define IEEE80211_AUTH_MAX_TRIES 3 | 38 | #define IEEE80211_AUTH_MAX_TRIES 3 |
37 | #define IEEE80211_ASSOC_TIMEOUT (HZ / 5) | 39 | #define IEEE80211_ASSOC_TIMEOUT (HZ / 5) |
38 | #define IEEE80211_ASSOC_MAX_TRIES 3 | 40 | #define IEEE80211_ASSOC_MAX_TRIES 3 |
39 | #define IEEE80211_MONITORING_INTERVAL (2 * HZ) | 41 | #define IEEE80211_MONITORING_INTERVAL (2 * HZ) |
42 | #define IEEE80211_MESH_HOUSEKEEPING_INTERVAL (60 * HZ) | ||
40 | #define IEEE80211_PROBE_INTERVAL (60 * HZ) | 43 | #define IEEE80211_PROBE_INTERVAL (60 * HZ) |
41 | #define IEEE80211_RETRY_AUTH_INTERVAL (1 * HZ) | 44 | #define IEEE80211_RETRY_AUTH_INTERVAL (1 * HZ) |
42 | #define IEEE80211_SCAN_INTERVAL (2 * HZ) | 45 | #define IEEE80211_SCAN_INTERVAL (2 * HZ) |
@@ -49,6 +52,7 @@ | |||
49 | #define IEEE80211_SCAN_RESULT_EXPIRE (10 * HZ) | 52 | #define IEEE80211_SCAN_RESULT_EXPIRE (10 * HZ) |
50 | #define IEEE80211_IBSS_MERGE_INTERVAL (30 * HZ) | 53 | #define IEEE80211_IBSS_MERGE_INTERVAL (30 * HZ) |
51 | #define IEEE80211_IBSS_INACTIVITY_LIMIT (60 * HZ) | 54 | #define IEEE80211_IBSS_INACTIVITY_LIMIT (60 * HZ) |
55 | #define IEEE80211_MESH_PEER_INACTIVITY_LIMIT (1800 * HZ) | ||
52 | 56 | ||
53 | #define IEEE80211_IBSS_MAX_STA_ENTRIES 128 | 57 | #define IEEE80211_IBSS_MAX_STA_ENTRIES 128 |
54 | 58 | ||
@@ -74,7 +78,7 @@ | |||
74 | static void ieee80211_send_probe_req(struct net_device *dev, u8 *dst, | 78 | static void ieee80211_send_probe_req(struct net_device *dev, u8 *dst, |
75 | u8 *ssid, size_t ssid_len); | 79 | u8 *ssid, size_t ssid_len); |
76 | static struct ieee80211_sta_bss * | 80 | static struct ieee80211_sta_bss * |
77 | ieee80211_rx_bss_get(struct net_device *dev, u8 *bssid, int channel, | 81 | ieee80211_rx_bss_get(struct net_device *dev, u8 *bssid, int freq, |
78 | u8 *ssid, u8 ssid_len); | 82 | u8 *ssid, u8 ssid_len); |
79 | static void ieee80211_rx_bss_put(struct net_device *dev, | 83 | static void ieee80211_rx_bss_put(struct net_device *dev, |
80 | struct ieee80211_sta_bss *bss); | 84 | struct ieee80211_sta_bss *bss); |
@@ -87,46 +91,8 @@ static int ieee80211_sta_config_auth(struct net_device *dev, | |||
87 | struct ieee80211_if_sta *ifsta); | 91 | struct ieee80211_if_sta *ifsta); |
88 | 92 | ||
89 | 93 | ||
90 | /* Parsed Information Elements */ | 94 | void ieee802_11_parse_elems(u8 *start, size_t len, |
91 | struct ieee802_11_elems { | 95 | struct ieee802_11_elems *elems) |
92 | /* pointers to IEs */ | ||
93 | u8 *ssid; | ||
94 | u8 *supp_rates; | ||
95 | u8 *fh_params; | ||
96 | u8 *ds_params; | ||
97 | u8 *cf_params; | ||
98 | u8 *tim; | ||
99 | u8 *ibss_params; | ||
100 | u8 *challenge; | ||
101 | u8 *wpa; | ||
102 | u8 *rsn; | ||
103 | u8 *erp_info; | ||
104 | u8 *ext_supp_rates; | ||
105 | u8 *wmm_info; | ||
106 | u8 *wmm_param; | ||
107 | u8 *ht_cap_elem; | ||
108 | u8 *ht_info_elem; | ||
109 | /* length of them, respectively */ | ||
110 | u8 ssid_len; | ||
111 | u8 supp_rates_len; | ||
112 | u8 fh_params_len; | ||
113 | u8 ds_params_len; | ||
114 | u8 cf_params_len; | ||
115 | u8 tim_len; | ||
116 | u8 ibss_params_len; | ||
117 | u8 challenge_len; | ||
118 | u8 wpa_len; | ||
119 | u8 rsn_len; | ||
120 | u8 erp_info_len; | ||
121 | u8 ext_supp_rates_len; | ||
122 | u8 wmm_info_len; | ||
123 | u8 wmm_param_len; | ||
124 | u8 ht_cap_elem_len; | ||
125 | u8 ht_info_elem_len; | ||
126 | }; | ||
127 | |||
128 | static void ieee802_11_parse_elems(u8 *start, size_t len, | ||
129 | struct ieee802_11_elems *elems) | ||
130 | { | 96 | { |
131 | size_t left = len; | 97 | size_t left = len; |
132 | u8 *pos = start; | 98 | u8 *pos = start; |
@@ -215,6 +181,30 @@ static void ieee802_11_parse_elems(u8 *start, size_t len, | |||
215 | elems->ht_info_elem = pos; | 181 | elems->ht_info_elem = pos; |
216 | elems->ht_info_elem_len = elen; | 182 | elems->ht_info_elem_len = elen; |
217 | break; | 183 | break; |
184 | case WLAN_EID_MESH_ID: | ||
185 | elems->mesh_id = pos; | ||
186 | elems->mesh_id_len = elen; | ||
187 | break; | ||
188 | case WLAN_EID_MESH_CONFIG: | ||
189 | elems->mesh_config = pos; | ||
190 | elems->mesh_config_len = elen; | ||
191 | break; | ||
192 | case WLAN_EID_PEER_LINK: | ||
193 | elems->peer_link = pos; | ||
194 | elems->peer_link_len = elen; | ||
195 | break; | ||
196 | case WLAN_EID_PREQ: | ||
197 | elems->preq = pos; | ||
198 | elems->preq_len = elen; | ||
199 | break; | ||
200 | case WLAN_EID_PREP: | ||
201 | elems->prep = pos; | ||
202 | elems->prep_len = elen; | ||
203 | break; | ||
204 | case WLAN_EID_PERR: | ||
205 | elems->perr = pos; | ||
206 | elems->perr_len = elen; | ||
207 | break; | ||
218 | default: | 208 | default: |
219 | break; | 209 | break; |
220 | } | 210 | } |
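The parser extended above walks standard 802.11 information elements, which are plain TLVs: a one-byte element ID, a one-byte length, then the payload. A minimal stand-alone sketch of that walk (the per-ID switch, including the new mesh elements, is omitted; the sample buffer is invented):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Walk 802.11 information elements: | id (1) | len (1) | data (len) | ... */
static void parse_elems(const uint8_t *pos, size_t left)
{
	while (left >= 2) {
		uint8_t id = pos[0];
		uint8_t elen = pos[1];

		pos += 2;
		left -= 2;
		if (elen > left)
			break;		/* truncated element, stop */

		printf("element id %u, length %u\n", id, elen);
		pos += elen;
		left -= elen;
	}
}

int main(void)
{
	/* SSID "ab" (id 0) followed by a 1-byte DS Parameter Set (id 3) */
	const uint8_t ies[] = { 0x00, 0x02, 'a', 'b', 0x03, 0x01, 0x06 };

	parse_elems(ies, sizeof(ies));
	return 0;
}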
@@ -227,12 +217,7 @@ static void ieee802_11_parse_elems(u8 *start, size_t len, | |||
227 | 217 | ||
228 | static int ecw2cw(int ecw) | 218 | static int ecw2cw(int ecw) |
229 | { | 219 | { |
230 | int cw = 1; | 220 | return (1 << ecw) - 1; |
231 | while (ecw > 0) { | ||
232 | cw <<= 1; | ||
233 | ecw--; | ||
234 | } | ||
235 | return cw - 1; | ||
236 | } | 221 | } |
237 | 222 | ||
238 | static void ieee80211_sta_wmm_params(struct net_device *dev, | 223 | static void ieee80211_sta_wmm_params(struct net_device *dev, |
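The simplified ecw2cw() above replaces the old doubling loop with the closed form CW = 2^ECW - 1; both produce the same contention-window values, as a quick check shows:

#include <stdio.h>

static int ecw2cw(int ecw)
{
	return (1 << ecw) - 1;	/* e.g. ECW 4 -> CW 15, ECW 10 -> CW 1023 */
}

int main(void)
{
	int ecw;

	for (ecw = 0; ecw <= 10; ecw++)
		printf("ECW %2d -> CW %4d\n", ecw, ecw2cw(ecw));
	return 0;
}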
@@ -297,12 +282,13 @@ static void ieee80211_sta_wmm_params(struct net_device *dev, | |||
297 | params.aifs = pos[0] & 0x0f; | 282 | params.aifs = pos[0] & 0x0f; |
298 | params.cw_max = ecw2cw((pos[1] & 0xf0) >> 4); | 283 | params.cw_max = ecw2cw((pos[1] & 0xf0) >> 4); |
299 | params.cw_min = ecw2cw(pos[1] & 0x0f); | 284 | params.cw_min = ecw2cw(pos[1] & 0x0f); |
300 | /* TXOP is in units of 32 usec; burst_time in 0.1 ms */ | 285 | params.txop = pos[2] | (pos[3] << 8); |
301 | params.burst_time = (pos[2] | (pos[3] << 8)) * 32 / 100; | 286 | #ifdef CONFIG_MAC80211_DEBUG |
302 | printk(KERN_DEBUG "%s: WMM queue=%d aci=%d acm=%d aifs=%d " | 287 | printk(KERN_DEBUG "%s: WMM queue=%d aci=%d acm=%d aifs=%d " |
303 | "cWmin=%d cWmax=%d burst=%d\n", | 288 | "cWmin=%d cWmax=%d txop=%d\n", |
304 | dev->name, queue, aci, acm, params.aifs, params.cw_min, | 289 | dev->name, queue, aci, acm, params.aifs, params.cw_min, |
305 | params.cw_max, params.burst_time); | 290 | params.cw_max, params.txop); |
291 | #endif | ||
306 | /* TODO: handle ACM (block TX, fallback to next lowest allowed | 292 | /* TODO: handle ACM (block TX, fallback to next lowest allowed |
307 | * AC for now) */ | 293 | * AC for now) */ |
308 | if (local->ops->conf_tx(local_to_hw(local), queue, ¶ms)) { | 294 | if (local->ops->conf_tx(local_to_hw(local), queue, ¶ms)) { |
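To make the field accesses above easier to follow: each WMM AC parameter record packs AIFSN into the low nibble of its first byte, ECWmin/ECWmax into the two nibbles of the second byte, and a little-endian TXOP limit (in 32 microsecond units, per the removed comment) into the last two bytes. A self-contained sketch with an invented sample record:

#include <stdio.h>
#include <stdint.h>

static int ecw2cw(int ecw)
{
	return (1 << ecw) - 1;
}

int main(void)
{
	/* One AC parameter record: ACI/AIFSN, ECWmin/ECWmax, TXOP limit (LE) */
	const uint8_t pos[4] = { 0x42, 0x43, 0x5e, 0x00 };

	int aifs   = pos[0] & 0x0f;		    /* AIFSN               */
	int cw_max = ecw2cw((pos[1] & 0xf0) >> 4);  /* ECWmax, high nibble */
	int cw_min = ecw2cw(pos[1] & 0x0f);	    /* ECWmin, low nibble  */
	int txop   = pos[2] | (pos[3] << 8);	    /* 32 us units         */

	printf("aifs=%d cw_min=%d cw_max=%d txop=%d\n",
	       aifs, cw_min, cw_max, txop);
	return 0;
}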
@@ -466,7 +452,7 @@ static void ieee80211_set_associated(struct net_device *dev, | |||
466 | return; | 452 | return; |
467 | 453 | ||
468 | bss = ieee80211_rx_bss_get(dev, ifsta->bssid, | 454 | bss = ieee80211_rx_bss_get(dev, ifsta->bssid, |
469 | local->hw.conf.channel, | 455 | local->hw.conf.channel->center_freq, |
470 | ifsta->ssid, ifsta->ssid_len); | 456 | ifsta->ssid, ifsta->ssid_len); |
471 | if (bss) { | 457 | if (bss) { |
472 | if (bss->has_erp_value) | 458 | if (bss->has_erp_value) |
@@ -492,6 +478,7 @@ static void ieee80211_set_associated(struct net_device *dev, | |||
492 | ifsta->last_probe = jiffies; | 478 | ifsta->last_probe = jiffies; |
493 | ieee80211_led_assoc(local, assoc); | 479 | ieee80211_led_assoc(local, assoc); |
494 | 480 | ||
481 | sdata->bss_conf.assoc = assoc; | ||
495 | ieee80211_bss_info_change_notify(sdata, changed); | 482 | ieee80211_bss_info_change_notify(sdata, changed); |
496 | } | 483 | } |
497 | 484 | ||
@@ -504,8 +491,8 @@ static void ieee80211_set_disassoc(struct net_device *dev, | |||
504 | ieee80211_set_associated(dev, ifsta, 0); | 491 | ieee80211_set_associated(dev, ifsta, 0); |
505 | } | 492 | } |
506 | 493 | ||
507 | static void ieee80211_sta_tx(struct net_device *dev, struct sk_buff *skb, | 494 | void ieee80211_sta_tx(struct net_device *dev, struct sk_buff *skb, |
508 | int encrypt) | 495 | int encrypt) |
509 | { | 496 | { |
510 | struct ieee80211_sub_if_data *sdata; | 497 | struct ieee80211_sub_if_data *sdata; |
511 | struct ieee80211_tx_packet_data *pkt_data; | 498 | struct ieee80211_tx_packet_data *pkt_data; |
@@ -592,7 +579,6 @@ static void ieee80211_send_assoc(struct net_device *dev, | |||
592 | struct ieee80211_if_sta *ifsta) | 579 | struct ieee80211_if_sta *ifsta) |
593 | { | 580 | { |
594 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 581 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); |
595 | struct ieee80211_hw_mode *mode; | ||
596 | struct sk_buff *skb; | 582 | struct sk_buff *skb; |
597 | struct ieee80211_mgmt *mgmt; | 583 | struct ieee80211_mgmt *mgmt; |
598 | u8 *pos, *ies; | 584 | u8 *pos, *ies; |
@@ -600,6 +586,7 @@ static void ieee80211_send_assoc(struct net_device *dev, | |||
600 | u16 capab; | 586 | u16 capab; |
601 | struct ieee80211_sta_bss *bss; | 587 | struct ieee80211_sta_bss *bss; |
602 | int wmm = 0; | 588 | int wmm = 0; |
589 | struct ieee80211_supported_band *sband; | ||
603 | 590 | ||
604 | skb = dev_alloc_skb(local->hw.extra_tx_headroom + | 591 | skb = dev_alloc_skb(local->hw.extra_tx_headroom + |
605 | sizeof(*mgmt) + 200 + ifsta->extra_ie_len + | 592 | sizeof(*mgmt) + 200 + ifsta->extra_ie_len + |
@@ -611,13 +598,19 @@ static void ieee80211_send_assoc(struct net_device *dev, | |||
611 | } | 598 | } |
612 | skb_reserve(skb, local->hw.extra_tx_headroom); | 599 | skb_reserve(skb, local->hw.extra_tx_headroom); |
613 | 600 | ||
614 | mode = local->oper_hw_mode; | 601 | sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; |
602 | |||
615 | capab = ifsta->capab; | 603 | capab = ifsta->capab; |
616 | if (mode->mode == MODE_IEEE80211G) { | 604 | |
617 | capab |= WLAN_CAPABILITY_SHORT_SLOT_TIME | | 605 | if (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ) { |
618 | WLAN_CAPABILITY_SHORT_PREAMBLE; | 606 | if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE)) |
607 | capab |= WLAN_CAPABILITY_SHORT_SLOT_TIME; | ||
608 | if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE)) | ||
609 | capab |= WLAN_CAPABILITY_SHORT_PREAMBLE; | ||
619 | } | 610 | } |
620 | bss = ieee80211_rx_bss_get(dev, ifsta->bssid, local->hw.conf.channel, | 611 | |
612 | bss = ieee80211_rx_bss_get(dev, ifsta->bssid, | ||
613 | local->hw.conf.channel->center_freq, | ||
621 | ifsta->ssid, ifsta->ssid_len); | 614 | ifsta->ssid, ifsta->ssid_len); |
622 | if (bss) { | 615 | if (bss) { |
623 | if (bss->capability & WLAN_CAPABILITY_PRIVACY) | 616 | if (bss->capability & WLAN_CAPABILITY_PRIVACY) |
@@ -656,23 +649,23 @@ static void ieee80211_send_assoc(struct net_device *dev, | |||
656 | *pos++ = ifsta->ssid_len; | 649 | *pos++ = ifsta->ssid_len; |
657 | memcpy(pos, ifsta->ssid, ifsta->ssid_len); | 650 | memcpy(pos, ifsta->ssid, ifsta->ssid_len); |
658 | 651 | ||
659 | len = mode->num_rates; | 652 | len = sband->n_bitrates; |
660 | if (len > 8) | 653 | if (len > 8) |
661 | len = 8; | 654 | len = 8; |
662 | pos = skb_put(skb, len + 2); | 655 | pos = skb_put(skb, len + 2); |
663 | *pos++ = WLAN_EID_SUPP_RATES; | 656 | *pos++ = WLAN_EID_SUPP_RATES; |
664 | *pos++ = len; | 657 | *pos++ = len; |
665 | for (i = 0; i < len; i++) { | 658 | for (i = 0; i < len; i++) { |
666 | int rate = mode->rates[i].rate; | 659 | int rate = sband->bitrates[i].bitrate; |
667 | *pos++ = (u8) (rate / 5); | 660 | *pos++ = (u8) (rate / 5); |
668 | } | 661 | } |
669 | 662 | ||
670 | if (mode->num_rates > len) { | 663 | if (sband->n_bitrates > len) { |
671 | pos = skb_put(skb, mode->num_rates - len + 2); | 664 | pos = skb_put(skb, sband->n_bitrates - len + 2); |
672 | *pos++ = WLAN_EID_EXT_SUPP_RATES; | 665 | *pos++ = WLAN_EID_EXT_SUPP_RATES; |
673 | *pos++ = mode->num_rates - len; | 666 | *pos++ = sband->n_bitrates - len; |
674 | for (i = len; i < mode->num_rates; i++) { | 667 | for (i = len; i < sband->n_bitrates; i++) { |
675 | int rate = mode->rates[i].rate; | 668 | int rate = sband->bitrates[i].bitrate; |
676 | *pos++ = (u8) (rate / 5); | 669 | *pos++ = (u8) (rate / 5); |
677 | } | 670 | } |
678 | } | 671 | } |
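The rate/5 conversion in the supported-rates loops above comes from the units involved: mac80211 keeps bitrates in 100 kbit/s steps while the Supported Rates element uses 500 kbit/s steps, so dividing by five yields the on-air byte (sketch below; the basic-rate flag bit is left out):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Bitrates in 100 kbit/s units (1, 2, 5.5, 11, 6, 9, 12, 18 Mbit/s) */
	const int bitrates[] = { 10, 20, 55, 110, 60, 90, 120, 180 };
	int i;

	for (i = 0; i < 8; i++) {
		/* Supported Rates IE encodes 500 kbit/s units: divide by 5 */
		uint8_t ie_byte = (uint8_t)(bitrates[i] / 5);

		printf("%2d.%d Mbit/s -> 0x%02x\n",
		       bitrates[i] / 10, bitrates[i] % 10, ie_byte);
	}
	return 0;
}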
@@ -695,17 +688,18 @@ static void ieee80211_send_assoc(struct net_device *dev, | |||
695 | *pos++ = 0; | 688 | *pos++ = 0; |
696 | } | 689 | } |
697 | /* wmm support is a must to HT */ | 690 | /* wmm support is a must to HT */ |
698 | if (wmm && mode->ht_info.ht_supported) { | 691 | if (wmm && sband->ht_info.ht_supported) { |
699 | __le16 tmp = cpu_to_le16(mode->ht_info.cap); | 692 | __le16 tmp = cpu_to_le16(sband->ht_info.cap); |
700 | pos = skb_put(skb, sizeof(struct ieee80211_ht_cap)+2); | 693 | pos = skb_put(skb, sizeof(struct ieee80211_ht_cap)+2); |
701 | *pos++ = WLAN_EID_HT_CAPABILITY; | 694 | *pos++ = WLAN_EID_HT_CAPABILITY; |
702 | *pos++ = sizeof(struct ieee80211_ht_cap); | 695 | *pos++ = sizeof(struct ieee80211_ht_cap); |
703 | memset(pos, 0, sizeof(struct ieee80211_ht_cap)); | 696 | memset(pos, 0, sizeof(struct ieee80211_ht_cap)); |
704 | memcpy(pos, &tmp, sizeof(u16)); | 697 | memcpy(pos, &tmp, sizeof(u16)); |
705 | pos += sizeof(u16); | 698 | pos += sizeof(u16); |
706 | *pos++ = (mode->ht_info.ampdu_factor | | 699 | /* TODO: needs a define here for << 2 */ |
707 | (mode->ht_info.ampdu_density << 2)); | 700 | *pos++ = sband->ht_info.ampdu_factor | |
708 | memcpy(pos, mode->ht_info.supp_mcs_set, 16); | 701 | (sband->ht_info.ampdu_density << 2); |
702 | memcpy(pos, sband->ht_info.supp_mcs_set, 16); | ||
709 | } | 703 | } |
710 | 704 | ||
711 | kfree(ifsta->assocreq_ies); | 705 | kfree(ifsta->assocreq_ies); |
@@ -788,7 +782,8 @@ static int ieee80211_privacy_mismatch(struct net_device *dev, | |||
788 | if (!ifsta || (ifsta->flags & IEEE80211_STA_MIXED_CELL)) | 782 | if (!ifsta || (ifsta->flags & IEEE80211_STA_MIXED_CELL)) |
789 | return 0; | 783 | return 0; |
790 | 784 | ||
791 | bss = ieee80211_rx_bss_get(dev, ifsta->bssid, local->hw.conf.channel, | 785 | bss = ieee80211_rx_bss_get(dev, ifsta->bssid, |
786 | local->hw.conf.channel->center_freq, | ||
792 | ifsta->ssid, ifsta->ssid_len); | 787 | ifsta->ssid, ifsta->ssid_len); |
793 | if (!bss) | 788 | if (!bss) |
794 | return 0; | 789 | return 0; |
@@ -851,6 +846,8 @@ static void ieee80211_associated(struct net_device *dev, | |||
851 | 846 | ||
852 | ifsta->state = IEEE80211_ASSOCIATED; | 847 | ifsta->state = IEEE80211_ASSOCIATED; |
853 | 848 | ||
849 | rcu_read_lock(); | ||
850 | |||
854 | sta = sta_info_get(local, ifsta->bssid); | 851 | sta = sta_info_get(local, ifsta->bssid); |
855 | if (!sta) { | 852 | if (!sta) { |
856 | printk(KERN_DEBUG "%s: No STA entry for own AP %s\n", | 853 | printk(KERN_DEBUG "%s: No STA entry for own AP %s\n", |
@@ -866,7 +863,7 @@ static void ieee80211_associated(struct net_device *dev, | |||
866 | "range\n", | 863 | "range\n", |
867 | dev->name, print_mac(mac, ifsta->bssid)); | 864 | dev->name, print_mac(mac, ifsta->bssid)); |
868 | disassoc = 1; | 865 | disassoc = 1; |
869 | sta_info_free(sta); | 866 | sta_info_unlink(&sta); |
870 | } else | 867 | } else |
871 | ieee80211_send_probe_req(dev, ifsta->bssid, | 868 | ieee80211_send_probe_req(dev, ifsta->bssid, |
872 | local->scan_ssid, | 869 | local->scan_ssid, |
@@ -882,8 +879,17 @@ static void ieee80211_associated(struct net_device *dev, | |||
882 | ifsta->ssid_len); | 879 | ifsta->ssid_len); |
883 | } | 880 | } |
884 | } | 881 | } |
885 | sta_info_put(sta); | ||
886 | } | 882 | } |
883 | |||
884 | rcu_read_unlock(); | ||
885 | |||
886 | if (disassoc && sta) { | ||
887 | synchronize_rcu(); | ||
888 | rtnl_lock(); | ||
889 | sta_info_destroy(sta); | ||
890 | rtnl_unlock(); | ||
891 | } | ||
892 | |||
887 | if (disassoc) { | 893 | if (disassoc) { |
888 | ifsta->state = IEEE80211_DISABLED; | 894 | ifsta->state = IEEE80211_DISABLED; |
889 | ieee80211_set_associated(dev, ifsta, 0); | 895 | ieee80211_set_associated(dev, ifsta, 0); |
@@ -898,7 +904,7 @@ static void ieee80211_send_probe_req(struct net_device *dev, u8 *dst, | |||
898 | u8 *ssid, size_t ssid_len) | 904 | u8 *ssid, size_t ssid_len) |
899 | { | 905 | { |
900 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 906 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); |
901 | struct ieee80211_hw_mode *mode; | 907 | struct ieee80211_supported_band *sband; |
902 | struct sk_buff *skb; | 908 | struct sk_buff *skb; |
903 | struct ieee80211_mgmt *mgmt; | 909 | struct ieee80211_mgmt *mgmt; |
904 | u8 *pos, *supp_rates, *esupp_rates = NULL; | 910 | u8 *pos, *supp_rates, *esupp_rates = NULL; |
@@ -932,11 +938,10 @@ static void ieee80211_send_probe_req(struct net_device *dev, u8 *dst, | |||
932 | supp_rates = skb_put(skb, 2); | 938 | supp_rates = skb_put(skb, 2); |
933 | supp_rates[0] = WLAN_EID_SUPP_RATES; | 939 | supp_rates[0] = WLAN_EID_SUPP_RATES; |
934 | supp_rates[1] = 0; | 940 | supp_rates[1] = 0; |
935 | mode = local->oper_hw_mode; | 941 | sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; |
936 | for (i = 0; i < mode->num_rates; i++) { | 942 | |
937 | struct ieee80211_rate *rate = &mode->rates[i]; | 943 | for (i = 0; i < sband->n_bitrates; i++) { |
938 | if (!(rate->flags & IEEE80211_RATE_SUPPORTED)) | 944 | struct ieee80211_rate *rate = &sband->bitrates[i]; |
939 | continue; | ||
940 | if (esupp_rates) { | 945 | if (esupp_rates) { |
941 | pos = skb_put(skb, 1); | 946 | pos = skb_put(skb, 1); |
942 | esupp_rates[1]++; | 947 | esupp_rates[1]++; |
@@ -949,7 +954,7 @@ static void ieee80211_send_probe_req(struct net_device *dev, u8 *dst, | |||
949 | pos = skb_put(skb, 1); | 954 | pos = skb_put(skb, 1); |
950 | supp_rates[1]++; | 955 | supp_rates[1]++; |
951 | } | 956 | } |
952 | *pos = rate->rate / 5; | 957 | *pos = rate->bitrate / 5; |
953 | } | 958 | } |
954 | 959 | ||
955 | ieee80211_sta_tx(dev, skb, 0); | 960 | ieee80211_sta_tx(dev, skb, 0); |
@@ -1044,6 +1049,58 @@ static void ieee80211_send_addba_resp(struct net_device *dev, u8 *da, u16 tid, | |||
1044 | return; | 1049 | return; |
1045 | } | 1050 | } |
1046 | 1051 | ||
1052 | void ieee80211_send_addba_request(struct net_device *dev, const u8 *da, | ||
1053 | u16 tid, u8 dialog_token, u16 start_seq_num, | ||
1054 | u16 agg_size, u16 timeout) | ||
1055 | { | ||
1056 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
1057 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
1058 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; | ||
1059 | struct sk_buff *skb; | ||
1060 | struct ieee80211_mgmt *mgmt; | ||
1061 | u16 capab; | ||
1062 | |||
1063 | skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom + 1 + | ||
1064 | sizeof(mgmt->u.action.u.addba_req)); | ||
1065 | |||
1066 | |||
1067 | if (!skb) { | ||
1068 | printk(KERN_ERR "%s: failed to allocate buffer " | ||
1069 | "for addba request frame\n", dev->name); | ||
1070 | return; | ||
1071 | } | ||
1072 | skb_reserve(skb, local->hw.extra_tx_headroom); | ||
1073 | mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); | ||
1074 | memset(mgmt, 0, 24); | ||
1075 | memcpy(mgmt->da, da, ETH_ALEN); | ||
1076 | memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); | ||
1077 | if (sdata->vif.type == IEEE80211_IF_TYPE_AP) | ||
1078 | memcpy(mgmt->bssid, dev->dev_addr, ETH_ALEN); | ||
1079 | else | ||
1080 | memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN); | ||
1081 | |||
1082 | mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, | ||
1083 | IEEE80211_STYPE_ACTION); | ||
1084 | |||
1085 | skb_put(skb, 1 + sizeof(mgmt->u.action.u.addba_req)); | ||
1086 | |||
1087 | mgmt->u.action.category = WLAN_CATEGORY_BACK; | ||
1088 | mgmt->u.action.u.addba_req.action_code = WLAN_ACTION_ADDBA_REQ; | ||
1089 | |||
1090 | mgmt->u.action.u.addba_req.dialog_token = dialog_token; | ||
1091 | capab = (u16)(1 << 1); /* bit 1 aggregation policy */ | ||
1092 | capab |= (u16)(tid << 2); /* bit 5:2 TID number */ | ||
1093 | capab |= (u16)(agg_size << 6); /* bit 15:6 max size of aggregation */ | ||
1094 | |||
1095 | mgmt->u.action.u.addba_req.capab = cpu_to_le16(capab); | ||
1096 | |||
1097 | mgmt->u.action.u.addba_req.timeout = cpu_to_le16(timeout); | ||
1098 | mgmt->u.action.u.addba_req.start_seq_num = | ||
1099 | cpu_to_le16(start_seq_num << 4); | ||
1100 | |||
1101 | ieee80211_sta_tx(dev, skb, 0); | ||
1102 | } | ||
1103 | |||
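ieee80211_send_addba_request() above packs the Block Ack parameter set into a single 16-bit word: the aggregation policy in bit 1, the TID in bits 5:2 and the maximum aggregation size in bits 15:6. A self-contained sketch of that packing and the matching unpacking; the mask names are stand-ins for illustration, not the kernel's definitions:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in masks mirroring the bit layout used above. */
#define BA_PARAM_POLICY_IMM	0x0002	/* bit 1: immediate Block Ack */
#define BA_PARAM_TID_MASK	0x003c	/* bits 5:2: TID */
#define BA_PARAM_BUF_MASK	0xffc0	/* bits 15:6: buffer size */

static uint16_t pack_ba_params(unsigned policy, unsigned tid, unsigned buf_size)
{
	uint16_t capab = 0;

	capab |= (uint16_t)(policy << 1);
	capab |= (uint16_t)(tid << 2);
	capab |= (uint16_t)(buf_size << 6);
	return capab;
}

int main(void)
{
	uint16_t capab = pack_ba_params(1, 5, 64);
	unsigned tid = (capab & BA_PARAM_TID_MASK) >> 2;	/* as the response handler does */
	unsigned buf = (capab & BA_PARAM_BUF_MASK) >> 6;

	printf("capab=0x%04x tid=%u buf_size=%u\n", capab, tid, buf);
	assert(tid == 5 && buf == 64);
	return 0;
}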
1047 | static void ieee80211_sta_process_addba_request(struct net_device *dev, | 1104 | static void ieee80211_sta_process_addba_request(struct net_device *dev, |
1048 | struct ieee80211_mgmt *mgmt, | 1105 | struct ieee80211_mgmt *mgmt, |
1049 | size_t len) | 1106 | size_t len) |
@@ -1058,9 +1115,13 @@ static void ieee80211_sta_process_addba_request(struct net_device *dev, | |||
1058 | int ret = -EOPNOTSUPP; | 1115 | int ret = -EOPNOTSUPP; |
1059 | DECLARE_MAC_BUF(mac); | 1116 | DECLARE_MAC_BUF(mac); |
1060 | 1117 | ||
1118 | rcu_read_lock(); | ||
1119 | |||
1061 | sta = sta_info_get(local, mgmt->sa); | 1120 | sta = sta_info_get(local, mgmt->sa); |
1062 | if (!sta) | 1121 | if (!sta) { |
1122 | rcu_read_unlock(); | ||
1063 | return; | 1123 | return; |
1124 | } | ||
1064 | 1125 | ||
1065 | /* extract session parameters from addba request frame */ | 1126 | /* extract session parameters from addba request frame */ |
1066 | dialog_token = mgmt->u.action.u.addba_req.dialog_token; | 1127 | dialog_token = mgmt->u.action.u.addba_req.dialog_token; |
@@ -1093,9 +1154,11 @@ static void ieee80211_sta_process_addba_request(struct net_device *dev, | |||
1093 | } | 1154 | } |
1094 | /* determine default buffer size */ | 1155 | /* determine default buffer size */ |
1095 | if (buf_size == 0) { | 1156 | if (buf_size == 0) { |
1096 | struct ieee80211_hw_mode *mode = conf->mode; | 1157 | struct ieee80211_supported_band *sband; |
1158 | |||
1159 | sband = local->hw.wiphy->bands[conf->channel->band]; | ||
1097 | buf_size = IEEE80211_MIN_AMPDU_BUF; | 1160 | buf_size = IEEE80211_MIN_AMPDU_BUF; |
1098 | buf_size = buf_size << mode->ht_info.ampdu_factor; | 1161 | buf_size = buf_size << sband->ht_info.ampdu_factor; |
1099 | } | 1162 | } |
1100 | 1163 | ||
1101 | tid_agg_rx = &sta->ampdu_mlme.tid_rx[tid]; | 1164 | tid_agg_rx = &sta->ampdu_mlme.tid_rx[tid]; |
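When the ADDBA request leaves buf_size at zero, the handler now derives a default from the band's HT info: a minimum A-MPDU buffer shifted left by the advertised ampdu_factor exponent. A quick standalone check of that arithmetic; the minimum of 8 frames is an assumed value for illustration, not taken from this patch:

#include <stdio.h>

#define MIN_AMPDU_BUF	8	/* assumed minimum reorder buffer, in frames */

int main(void)
{
	for (int ampdu_factor = 0; ampdu_factor <= 3; ampdu_factor++)
		printf("ampdu_factor=%d -> default buf_size=%d\n",
		       ampdu_factor, MIN_AMPDU_BUF << ampdu_factor);
	return 0;
}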
@@ -1127,7 +1190,7 @@ static void ieee80211_sta_process_addba_request(struct net_device *dev, | |||
1127 | 1190 | ||
1128 | if (local->ops->ampdu_action) | 1191 | if (local->ops->ampdu_action) |
1129 | ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_RX_START, | 1192 | ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_RX_START, |
1130 | sta->addr, tid, start_seq_num); | 1193 | sta->addr, tid, &start_seq_num); |
1131 | #ifdef CONFIG_MAC80211_HT_DEBUG | 1194 | #ifdef CONFIG_MAC80211_HT_DEBUG |
1132 | printk(KERN_DEBUG "Rx A-MPDU on tid %d result %d", tid, ret); | 1195 | printk(KERN_DEBUG "Rx A-MPDU on tid %d result %d", tid, ret); |
1133 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | 1196 | #endif /* CONFIG_MAC80211_HT_DEBUG */ |
@@ -1150,13 +1213,89 @@ end: | |||
1150 | spin_unlock_bh(&sta->ampdu_mlme.ampdu_rx); | 1213 | spin_unlock_bh(&sta->ampdu_mlme.ampdu_rx); |
1151 | 1214 | ||
1152 | end_no_lock: | 1215 | end_no_lock: |
1153 | ieee80211_send_addba_resp(sta->dev, sta->addr, tid, dialog_token, | 1216 | ieee80211_send_addba_resp(sta->sdata->dev, sta->addr, tid, |
1154 | status, 1, buf_size, timeout); | 1217 | dialog_token, status, 1, buf_size, timeout); |
1155 | sta_info_put(sta); | 1218 | rcu_read_unlock(); |
1219 | } | ||
1220 | |||
1221 | static void ieee80211_sta_process_addba_resp(struct net_device *dev, | ||
1222 | struct ieee80211_mgmt *mgmt, | ||
1223 | size_t len) | ||
1224 | { | ||
1225 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
1226 | struct ieee80211_hw *hw = &local->hw; | ||
1227 | struct sta_info *sta; | ||
1228 | u16 capab; | ||
1229 | u16 tid; | ||
1230 | u8 *state; | ||
1231 | |||
1232 | rcu_read_lock(); | ||
1233 | |||
1234 | sta = sta_info_get(local, mgmt->sa); | ||
1235 | if (!sta) { | ||
1236 | rcu_read_unlock(); | ||
1237 | return; | ||
1238 | } | ||
1239 | |||
1240 | capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab); | ||
1241 | tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2; | ||
1242 | |||
1243 | state = &sta->ampdu_mlme.tid_tx[tid].state; | ||
1244 | |||
1245 | spin_lock_bh(&sta->ampdu_mlme.ampdu_tx); | ||
1246 | |||
1247 | if (mgmt->u.action.u.addba_resp.dialog_token != | ||
1248 | sta->ampdu_mlme.tid_tx[tid].dialog_token) { | ||
1249 | spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); | ||
1250 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
1251 | printk(KERN_DEBUG "wrong addBA response token, tid %d\n", tid); | ||
1252 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
1253 | rcu_read_unlock(); | ||
1254 | return; | ||
1255 | } | ||
1256 | |||
1257 | del_timer_sync(&sta->ampdu_mlme.tid_tx[tid].addba_resp_timer); | ||
1258 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
1259 | printk(KERN_DEBUG "switched off addBA timer for tid %d \n", tid); | ||
1260 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
1261 | if (le16_to_cpu(mgmt->u.action.u.addba_resp.status) | ||
1262 | == WLAN_STATUS_SUCCESS) { | ||
1263 | if (!(*state & HT_ADDBA_REQUESTED_MSK)) { | ||
1264 | spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); | ||
1265 | printk(KERN_DEBUG "state not HT_ADDBA_REQUESTED_MSK:" | ||
1266 | "%d\n", *state); | ||
1267 | rcu_read_unlock(); | ||
1268 | return; | ||
1269 | } | ||
1270 | |||
1271 | if (*state & HT_ADDBA_RECEIVED_MSK) | ||
1272 | printk(KERN_DEBUG "double addBA response\n"); | ||
1273 | |||
1274 | *state |= HT_ADDBA_RECEIVED_MSK; | ||
1275 | sta->ampdu_mlme.tid_tx[tid].addba_req_num = 0; | ||
1276 | |||
1277 | if (*state == HT_AGG_STATE_OPERATIONAL) { | ||
1278 | printk(KERN_DEBUG "Aggregation on for tid %d \n", tid); | ||
1279 | ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]); | ||
1280 | } | ||
1281 | |||
1282 | spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); | ||
1283 | printk(KERN_DEBUG "recipient accepted agg: tid %d \n", tid); | ||
1284 | } else { | ||
1285 | printk(KERN_DEBUG "recipient rejected agg: tid %d \n", tid); | ||
1286 | |||
1287 | sta->ampdu_mlme.tid_tx[tid].addba_req_num++; | ||
1288 | /* this will allow the state check in stop_BA_session */ | ||
1289 | *state = HT_AGG_STATE_OPERATIONAL; | ||
1290 | spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); | ||
1291 | ieee80211_stop_tx_ba_session(hw, sta->addr, tid, | ||
1292 | WLAN_BACK_INITIATOR); | ||
1293 | } | ||
1294 | rcu_read_unlock(); | ||
1156 | } | 1295 | } |
1157 | 1296 | ||
1158 | static void ieee80211_send_delba(struct net_device *dev, const u8 *da, u16 tid, | 1297 | void ieee80211_send_delba(struct net_device *dev, const u8 *da, u16 tid, |
1159 | u16 initiator, u16 reason_code) | 1298 | u16 initiator, u16 reason_code) |
1160 | { | 1299 | { |
1161 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 1300 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); |
1162 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 1301 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
@@ -1207,16 +1346,20 @@ void ieee80211_sta_stop_rx_ba_session(struct net_device *dev, u8 *ra, u16 tid, | |||
1207 | struct sta_info *sta; | 1346 | struct sta_info *sta; |
1208 | int ret, i; | 1347 | int ret, i; |
1209 | 1348 | ||
1349 | rcu_read_lock(); | ||
1350 | |||
1210 | sta = sta_info_get(local, ra); | 1351 | sta = sta_info_get(local, ra); |
1211 | if (!sta) | 1352 | if (!sta) { |
1353 | rcu_read_unlock(); | ||
1212 | return; | 1354 | return; |
1355 | } | ||
1213 | 1356 | ||
1214 | /* check if TID is in operational state */ | 1357 | /* check if TID is in operational state */ |
1215 | spin_lock_bh(&sta->ampdu_mlme.ampdu_rx); | 1358 | spin_lock_bh(&sta->ampdu_mlme.ampdu_rx); |
1216 | if (sta->ampdu_mlme.tid_rx[tid].state | 1359 | if (sta->ampdu_mlme.tid_rx[tid].state |
1217 | != HT_AGG_STATE_OPERATIONAL) { | 1360 | != HT_AGG_STATE_OPERATIONAL) { |
1218 | spin_unlock_bh(&sta->ampdu_mlme.ampdu_rx); | 1361 | spin_unlock_bh(&sta->ampdu_mlme.ampdu_rx); |
1219 | sta_info_put(sta); | 1362 | rcu_read_unlock(); |
1220 | return; | 1363 | return; |
1221 | } | 1364 | } |
1222 | sta->ampdu_mlme.tid_rx[tid].state = | 1365 | sta->ampdu_mlme.tid_rx[tid].state = |
@@ -1229,7 +1372,7 @@ void ieee80211_sta_stop_rx_ba_session(struct net_device *dev, u8 *ra, u16 tid, | |||
1229 | BUG_ON(!local->ops->ampdu_action); | 1372 | BUG_ON(!local->ops->ampdu_action); |
1230 | 1373 | ||
1231 | ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_RX_STOP, | 1374 | ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_RX_STOP, |
1232 | ra, tid, EINVAL); | 1375 | ra, tid, NULL); |
1233 | if (ret) | 1376 | if (ret) |
1234 | printk(KERN_DEBUG "HW problem - can not stop rx " | 1377 | printk(KERN_DEBUG "HW problem - can not stop rx " |
1235 | "aggregation for tid %d\n", tid); | 1378 | "aggregation for tid %d\n", tid); |
@@ -1255,9 +1398,10 @@ void ieee80211_sta_stop_rx_ba_session(struct net_device *dev, u8 *ra, u16 tid, | |||
1255 | kfree(sta->ampdu_mlme.tid_rx[tid].reorder_buf); | 1398 | kfree(sta->ampdu_mlme.tid_rx[tid].reorder_buf); |
1256 | 1399 | ||
1257 | sta->ampdu_mlme.tid_rx[tid].state = HT_AGG_STATE_IDLE; | 1400 | sta->ampdu_mlme.tid_rx[tid].state = HT_AGG_STATE_IDLE; |
1258 | sta_info_put(sta); | 1401 | rcu_read_unlock(); |
1259 | } | 1402 | } |
1260 | 1403 | ||
1404 | |||
1261 | static void ieee80211_sta_process_delba(struct net_device *dev, | 1405 | static void ieee80211_sta_process_delba(struct net_device *dev, |
1262 | struct ieee80211_mgmt *mgmt, size_t len) | 1406 | struct ieee80211_mgmt *mgmt, size_t len) |
1263 | { | 1407 | { |
@@ -1267,9 +1411,13 @@ static void ieee80211_sta_process_delba(struct net_device *dev, | |||
1267 | u16 initiator; | 1411 | u16 initiator; |
1268 | DECLARE_MAC_BUF(mac); | 1412 | DECLARE_MAC_BUF(mac); |
1269 | 1413 | ||
1414 | rcu_read_lock(); | ||
1415 | |||
1270 | sta = sta_info_get(local, mgmt->sa); | 1416 | sta = sta_info_get(local, mgmt->sa); |
1271 | if (!sta) | 1417 | if (!sta) { |
1418 | rcu_read_unlock(); | ||
1272 | return; | 1419 | return; |
1420 | } | ||
1273 | 1421 | ||
1274 | params = le16_to_cpu(mgmt->u.action.u.delba.params); | 1422 | params = le16_to_cpu(mgmt->u.action.u.delba.params); |
1275 | tid = (params & IEEE80211_DELBA_PARAM_TID_MASK) >> 12; | 1423 | tid = (params & IEEE80211_DELBA_PARAM_TID_MASK) >> 12; |
@@ -1277,15 +1425,75 @@ static void ieee80211_sta_process_delba(struct net_device *dev, | |||
1277 | 1425 | ||
1278 | #ifdef CONFIG_MAC80211_HT_DEBUG | 1426 | #ifdef CONFIG_MAC80211_HT_DEBUG |
1279 | if (net_ratelimit()) | 1427 | if (net_ratelimit()) |
1280 | printk(KERN_DEBUG "delba from %s on tid %d reason code %d\n", | 1428 | printk(KERN_DEBUG "delba from %s (%s) tid %d reason code %d\n", |
1281 | print_mac(mac, mgmt->sa), tid, | 1429 | print_mac(mac, mgmt->sa), |
1430 | initiator ? "recipient" : "initiator", tid, | ||
1282 | mgmt->u.action.u.delba.reason_code); | 1431 | mgmt->u.action.u.delba.reason_code); |
1283 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | 1432 | #endif /* CONFIG_MAC80211_HT_DEBUG */ |
1284 | 1433 | ||
1285 | if (initiator == WLAN_BACK_INITIATOR) | 1434 | if (initiator == WLAN_BACK_INITIATOR) |
1286 | ieee80211_sta_stop_rx_ba_session(dev, sta->addr, tid, | 1435 | ieee80211_sta_stop_rx_ba_session(dev, sta->addr, tid, |
1287 | WLAN_BACK_INITIATOR, 0); | 1436 | WLAN_BACK_INITIATOR, 0); |
1288 | sta_info_put(sta); | 1437 | else { /* WLAN_BACK_RECIPIENT */ |
1438 | spin_lock_bh(&sta->ampdu_mlme.ampdu_tx); | ||
1439 | sta->ampdu_mlme.tid_tx[tid].state = | ||
1440 | HT_AGG_STATE_OPERATIONAL; | ||
1441 | spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); | ||
1442 | ieee80211_stop_tx_ba_session(&local->hw, sta->addr, tid, | ||
1443 | WLAN_BACK_RECIPIENT); | ||
1444 | } | ||
1445 | rcu_read_unlock(); | ||
1446 | } | ||
1447 | |||
1448 | /* | ||
1449 | * After sending an add Block Ack request, we activate a timer until | ||
1450 | * the add Block Ack response arrives from the recipient. | ||
1451 | * If this timer expires, sta_addba_resp_timer_expired() is executed. | ||
1452 | */ | ||
1453 | void sta_addba_resp_timer_expired(unsigned long data) | ||
1454 | { | ||
1455 | /* not an elegant detour, but there is no choice as the timer passes | ||
1456 | * only one argument, and both sta_info and TID are needed, so init | ||
1457 | * flow in sta_info_create gives the TID as data, while the timer_to_tid | ||
1458 | * array gives the sta through container_of */ | ||
1459 | u16 tid = *(int *)data; | ||
1460 | struct sta_info *temp_sta = container_of((void *)data, | ||
1461 | struct sta_info, timer_to_tid[tid]); | ||
1462 | |||
1463 | struct ieee80211_local *local = temp_sta->local; | ||
1464 | struct ieee80211_hw *hw = &local->hw; | ||
1465 | struct sta_info *sta; | ||
1466 | u8 *state; | ||
1467 | |||
1468 | rcu_read_lock(); | ||
1469 | |||
1470 | sta = sta_info_get(local, temp_sta->addr); | ||
1471 | if (!sta) { | ||
1472 | rcu_read_unlock(); | ||
1473 | return; | ||
1474 | } | ||
1475 | |||
1476 | state = &sta->ampdu_mlme.tid_tx[tid].state; | ||
1477 | /* check if the TID waits for addBA response */ | ||
1478 | spin_lock_bh(&sta->ampdu_mlme.ampdu_tx); | ||
1479 | if (!(*state & HT_ADDBA_REQUESTED_MSK)) { | ||
1480 | spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); | ||
1481 | *state = HT_AGG_STATE_IDLE; | ||
1482 | printk(KERN_DEBUG "timer expired on tid %d but we are not " | ||
1483 | "expecting addBA response there", tid); | ||
1484 | goto timer_expired_exit; | ||
1485 | } | ||
1486 | |||
1487 | printk(KERN_DEBUG "addBA response timer expired on tid %d\n", tid); | ||
1488 | |||
1489 | /* go through the state check in stop_BA_session */ | ||
1490 | *state = HT_AGG_STATE_OPERATIONAL; | ||
1491 | spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); | ||
1492 | ieee80211_stop_tx_ba_session(hw, temp_sta->addr, tid, | ||
1493 | WLAN_BACK_INITIATOR); | ||
1494 | |||
1495 | timer_expired_exit: | ||
1496 | rcu_read_unlock(); | ||
1289 | } | 1497 | } |
1290 | 1498 | ||
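The "detour" in sta_addba_resp_timer_expired() works because each slot of the per-station timer_to_tid array simply stores its own index: the timer callback receives the address of one slot, reads the TID out of it, and recovers the owning station with container_of(). A self-contained userspace sketch of the same trick, with a made-up station structure and a local container_of:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define NUM_TIDS 16

/* Made-up stand-in for struct sta_info. */
struct sta {
	char name[16];
	uint8_t timer_to_tid[NUM_TIDS];	/* slot i holds the value i */
};

/* Timer-style callback: one opaque argument, recovers both sta and TID. */
static void addba_timer_expired(uintptr_t data)
{
	uint8_t tid = *(uint8_t *)data;
	struct sta *sta = container_of((uint8_t *)data - tid,
				       struct sta, timer_to_tid[0]);

	printf("timer expired for sta %s, tid %u\n", sta->name, tid);
}

int main(void)
{
	struct sta sta = { .name = "ap0" };

	for (int i = 0; i < NUM_TIDS; i++)
		sta.timer_to_tid[i] = (uint8_t)i;

	/* A real timer would be armed with &sta.timer_to_tid[tid] as its data. */
	addba_timer_expired((uintptr_t)&sta.timer_to_tid[5]);
	return 0;
}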
1291 | /* | 1499 | /* |
@@ -1297,7 +1505,7 @@ void sta_rx_agg_session_timer_expired(unsigned long data) | |||
1297 | { | 1505 | { |
1298 | /* not an elegant detour, but there is no choice as the timer passes | 1506 | /* not an elegant detour, but there is no choice as the timer passes |
1299 | * only one argument, and various sta_info are needed here, so init | 1507 | * only one argument, and various sta_info are needed here, so init |
1300 | * flow in sta_info_add gives the TID as data, while the timer_to_tid | 1508 | * flow in sta_info_create gives the TID as data, while the timer_to_tid |
1301 | * array gives the sta through container_of */ | 1509 | * array gives the sta through container_of */ |
1302 | u8 *ptid = (u8 *)data; | 1510 | u8 *ptid = (u8 *)data; |
1303 | u8 *timer_to_id = ptid - *ptid; | 1511 | u8 *timer_to_id = ptid - *ptid; |
@@ -1305,8 +1513,8 @@ void sta_rx_agg_session_timer_expired(unsigned long data) | |||
1305 | timer_to_tid[0]); | 1513 | timer_to_tid[0]); |
1306 | 1514 | ||
1307 | printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid); | 1515 | printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid); |
1308 | ieee80211_sta_stop_rx_ba_session(sta->dev, sta->addr, (u16)*ptid, | 1516 | ieee80211_sta_stop_rx_ba_session(sta->sdata->dev, sta->addr, |
1309 | WLAN_BACK_TIMER, | 1517 | (u16)*ptid, WLAN_BACK_TIMER, |
1310 | WLAN_REASON_QSTA_TIMEOUT); | 1518 | WLAN_REASON_QSTA_TIMEOUT); |
1311 | } | 1519 | } |
1312 | 1520 | ||
@@ -1536,15 +1744,16 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, | |||
1536 | { | 1744 | { |
1537 | struct ieee80211_local *local = sdata->local; | 1745 | struct ieee80211_local *local = sdata->local; |
1538 | struct net_device *dev = sdata->dev; | 1746 | struct net_device *dev = sdata->dev; |
1539 | struct ieee80211_hw_mode *mode; | 1747 | struct ieee80211_supported_band *sband; |
1540 | struct sta_info *sta; | 1748 | struct sta_info *sta; |
1541 | u32 rates; | 1749 | u64 rates, basic_rates; |
1542 | u16 capab_info, status_code, aid; | 1750 | u16 capab_info, status_code, aid; |
1543 | struct ieee802_11_elems elems; | 1751 | struct ieee802_11_elems elems; |
1544 | struct ieee80211_bss_conf *bss_conf = &sdata->bss_conf; | 1752 | struct ieee80211_bss_conf *bss_conf = &sdata->bss_conf; |
1545 | u8 *pos; | 1753 | u8 *pos; |
1546 | int i, j; | 1754 | int i, j; |
1547 | DECLARE_MAC_BUF(mac); | 1755 | DECLARE_MAC_BUF(mac); |
1756 | bool have_higher_than_11mbit = false; | ||
1548 | 1757 | ||
1549 | /* AssocResp and ReassocResp have identical structure, so process both | 1758 | /* AssocResp and ReassocResp have identical structure, so process both |
1550 | * of them in this function. */ | 1759 | * of them in this function. */ |
@@ -1614,22 +1823,23 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, | |||
1614 | if (ifsta->assocresp_ies) | 1823 | if (ifsta->assocresp_ies) |
1615 | memcpy(ifsta->assocresp_ies, pos, ifsta->assocresp_ies_len); | 1824 | memcpy(ifsta->assocresp_ies, pos, ifsta->assocresp_ies_len); |
1616 | 1825 | ||
1617 | /* set AID, ieee80211_set_associated() will tell the driver */ | 1826 | rcu_read_lock(); |
1618 | bss_conf->aid = aid; | ||
1619 | ieee80211_set_associated(dev, ifsta, 1); | ||
1620 | 1827 | ||
1621 | /* Add STA entry for the AP */ | 1828 | /* Add STA entry for the AP */ |
1622 | sta = sta_info_get(local, ifsta->bssid); | 1829 | sta = sta_info_get(local, ifsta->bssid); |
1623 | if (!sta) { | 1830 | if (!sta) { |
1624 | struct ieee80211_sta_bss *bss; | 1831 | struct ieee80211_sta_bss *bss; |
1625 | sta = sta_info_add(local, dev, ifsta->bssid, GFP_KERNEL); | 1832 | int err; |
1833 | |||
1834 | sta = sta_info_alloc(sdata, ifsta->bssid, GFP_ATOMIC); | ||
1626 | if (!sta) { | 1835 | if (!sta) { |
1627 | printk(KERN_DEBUG "%s: failed to add STA entry for the" | 1836 | printk(KERN_DEBUG "%s: failed to alloc STA entry for" |
1628 | " AP\n", dev->name); | 1837 | " the AP\n", dev->name); |
1838 | rcu_read_unlock(); | ||
1629 | return; | 1839 | return; |
1630 | } | 1840 | } |
1631 | bss = ieee80211_rx_bss_get(dev, ifsta->bssid, | 1841 | bss = ieee80211_rx_bss_get(dev, ifsta->bssid, |
1632 | local->hw.conf.channel, | 1842 | local->hw.conf.channel->center_freq, |
1633 | ifsta->ssid, ifsta->ssid_len); | 1843 | ifsta->ssid, ifsta->ssid_len); |
1634 | if (bss) { | 1844 | if (bss) { |
1635 | sta->last_rssi = bss->rssi; | 1845 | sta->last_rssi = bss->rssi; |
@@ -1637,26 +1847,71 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, | |||
1637 | sta->last_noise = bss->noise; | 1847 | sta->last_noise = bss->noise; |
1638 | ieee80211_rx_bss_put(dev, bss); | 1848 | ieee80211_rx_bss_put(dev, bss); |
1639 | } | 1849 | } |
1850 | |||
1851 | err = sta_info_insert(sta); | ||
1852 | if (err) { | ||
1853 | printk(KERN_DEBUG "%s: failed to insert STA entry for" | ||
1854 | " the AP (error %d)\n", dev->name, err); | ||
1855 | sta_info_destroy(sta); | ||
1856 | rcu_read_unlock(); | ||
1857 | return; | ||
1858 | } | ||
1640 | } | 1859 | } |
1641 | 1860 | ||
1642 | sta->dev = dev; | 1861 | /* |
1643 | sta->flags |= WLAN_STA_AUTH | WLAN_STA_ASSOC | WLAN_STA_ASSOC_AP; | 1862 | * FIXME: Do we really need to update the sta_info's information here? |
1863 | * We already know about the AP (we found it in our list) so it | ||
1864 | * should already be filled with the right info, no? | ||
1865 | * As it stands, all this is racy because typically we assume | ||
1866 | * the information that is filled in here (except flags) doesn't | ||
1867 | * change while a STA structure is alive. As such, it should move | ||
1868 | * to between the sta_info_alloc() and sta_info_insert() above. | ||
1869 | */ | ||
1870 | |||
1871 | sta->flags |= WLAN_STA_AUTH | WLAN_STA_ASSOC | WLAN_STA_ASSOC_AP | | ||
1872 | WLAN_STA_AUTHORIZED; | ||
1644 | 1873 | ||
1645 | rates = 0; | 1874 | rates = 0; |
1646 | mode = local->oper_hw_mode; | 1875 | basic_rates = 0; |
1876 | sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; | ||
1877 | |||
1647 | for (i = 0; i < elems.supp_rates_len; i++) { | 1878 | for (i = 0; i < elems.supp_rates_len; i++) { |
1648 | int rate = (elems.supp_rates[i] & 0x7f) * 5; | 1879 | int rate = (elems.supp_rates[i] & 0x7f) * 5; |
1649 | for (j = 0; j < mode->num_rates; j++) | 1880 | |
1650 | if (mode->rates[j].rate == rate) | 1881 | if (rate > 110) |
1882 | have_higher_than_11mbit = true; | ||
1883 | |||
1884 | for (j = 0; j < sband->n_bitrates; j++) { | ||
1885 | if (sband->bitrates[j].bitrate == rate) | ||
1651 | rates |= BIT(j); | 1886 | rates |= BIT(j); |
1887 | if (elems.supp_rates[i] & 0x80) | ||
1888 | basic_rates |= BIT(j); | ||
1889 | } | ||
1652 | } | 1890 | } |
1891 | |||
1653 | for (i = 0; i < elems.ext_supp_rates_len; i++) { | 1892 | for (i = 0; i < elems.ext_supp_rates_len; i++) { |
1654 | int rate = (elems.ext_supp_rates[i] & 0x7f) * 5; | 1893 | int rate = (elems.ext_supp_rates[i] & 0x7f) * 5; |
1655 | for (j = 0; j < mode->num_rates; j++) | 1894 | |
1656 | if (mode->rates[j].rate == rate) | 1895 | if (rate > 110) |
1896 | have_higher_than_11mbit = true; | ||
1897 | |||
1898 | for (j = 0; j < sband->n_bitrates; j++) { | ||
1899 | if (sband->bitrates[j].bitrate == rate) | ||
1657 | rates |= BIT(j); | 1900 | rates |= BIT(j); |
1901 | if (elems.ext_supp_rates[i] & 0x80) | ||
1902 | basic_rates |= BIT(j); | ||
1903 | } | ||
1658 | } | 1904 | } |
1659 | sta->supp_rates = rates; | 1905 | |
1906 | sta->supp_rates[local->hw.conf.channel->band] = rates; | ||
1907 | sdata->basic_rates = basic_rates; | ||
1908 | |||
1909 | /* cf. IEEE 802.11 9.2.12 */ | ||
1910 | if (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ && | ||
1911 | have_higher_than_11mbit) | ||
1912 | sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE; | ||
1913 | else | ||
1914 | sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE; | ||
1660 | 1915 | ||
1661 | if (elems.ht_cap_elem && elems.ht_info_elem && elems.wmm_param && | 1916 | if (elems.ht_cap_elem && elems.ht_info_elem && elems.wmm_param && |
1662 | local->ops->conf_ht) { | 1917 | local->ops->conf_ht) { |
@@ -1675,12 +1930,15 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, | |||
1675 | 1930 | ||
1676 | if (elems.wmm_param && (ifsta->flags & IEEE80211_STA_WMM_ENABLED)) { | 1931 | if (elems.wmm_param && (ifsta->flags & IEEE80211_STA_WMM_ENABLED)) { |
1677 | sta->flags |= WLAN_STA_WME; | 1932 | sta->flags |= WLAN_STA_WME; |
1933 | rcu_read_unlock(); | ||
1678 | ieee80211_sta_wmm_params(dev, ifsta, elems.wmm_param, | 1934 | ieee80211_sta_wmm_params(dev, ifsta, elems.wmm_param, |
1679 | elems.wmm_param_len); | 1935 | elems.wmm_param_len); |
1680 | } | 1936 | } else |
1681 | 1937 | rcu_read_unlock(); | |
1682 | 1938 | ||
1683 | sta_info_put(sta); | 1939 | /* set AID, ieee80211_set_associated() will tell the driver */ |
1940 | bss_conf->aid = aid; | ||
1941 | ieee80211_set_associated(dev, ifsta, 1); | ||
1684 | 1942 | ||
1685 | ieee80211_associated(dev, ifsta); | 1943 | ieee80211_associated(dev, ifsta); |
1686 | } | 1944 | } |
@@ -1691,8 +1949,16 @@ static void __ieee80211_rx_bss_hash_add(struct net_device *dev, | |||
1691 | struct ieee80211_sta_bss *bss) | 1949 | struct ieee80211_sta_bss *bss) |
1692 | { | 1950 | { |
1693 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 1951 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); |
1694 | bss->hnext = local->sta_bss_hash[STA_HASH(bss->bssid)]; | 1952 | u8 hash_idx; |
1695 | local->sta_bss_hash[STA_HASH(bss->bssid)] = bss; | 1953 | |
1954 | if (bss_mesh_cfg(bss)) | ||
1955 | hash_idx = mesh_id_hash(bss_mesh_id(bss), | ||
1956 | bss_mesh_id_len(bss)); | ||
1957 | else | ||
1958 | hash_idx = STA_HASH(bss->bssid); | ||
1959 | |||
1960 | bss->hnext = local->sta_bss_hash[hash_idx]; | ||
1961 | local->sta_bss_hash[hash_idx] = bss; | ||
1696 | } | 1962 | } |
1697 | 1963 | ||
1698 | 1964 | ||
@@ -1719,7 +1985,7 @@ static void __ieee80211_rx_bss_hash_del(struct net_device *dev, | |||
1719 | 1985 | ||
1720 | 1986 | ||
1721 | static struct ieee80211_sta_bss * | 1987 | static struct ieee80211_sta_bss * |
1722 | ieee80211_rx_bss_add(struct net_device *dev, u8 *bssid, int channel, | 1988 | ieee80211_rx_bss_add(struct net_device *dev, u8 *bssid, int freq, |
1723 | u8 *ssid, u8 ssid_len) | 1989 | u8 *ssid, u8 ssid_len) |
1724 | { | 1990 | { |
1725 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 1991 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); |
@@ -1731,7 +1997,7 @@ ieee80211_rx_bss_add(struct net_device *dev, u8 *bssid, int channel, | |||
1731 | atomic_inc(&bss->users); | 1997 | atomic_inc(&bss->users); |
1732 | atomic_inc(&bss->users); | 1998 | atomic_inc(&bss->users); |
1733 | memcpy(bss->bssid, bssid, ETH_ALEN); | 1999 | memcpy(bss->bssid, bssid, ETH_ALEN); |
1734 | bss->channel = channel; | 2000 | bss->freq = freq; |
1735 | if (ssid && ssid_len <= IEEE80211_MAX_SSID_LEN) { | 2001 | if (ssid && ssid_len <= IEEE80211_MAX_SSID_LEN) { |
1736 | memcpy(bss->ssid, ssid, ssid_len); | 2002 | memcpy(bss->ssid, ssid, ssid_len); |
1737 | bss->ssid_len = ssid_len; | 2003 | bss->ssid_len = ssid_len; |
@@ -1745,9 +2011,8 @@ ieee80211_rx_bss_add(struct net_device *dev, u8 *bssid, int channel, | |||
1745 | return bss; | 2011 | return bss; |
1746 | } | 2012 | } |
1747 | 2013 | ||
1748 | |||
1749 | static struct ieee80211_sta_bss * | 2014 | static struct ieee80211_sta_bss * |
1750 | ieee80211_rx_bss_get(struct net_device *dev, u8 *bssid, int channel, | 2015 | ieee80211_rx_bss_get(struct net_device *dev, u8 *bssid, int freq, |
1751 | u8 *ssid, u8 ssid_len) | 2016 | u8 *ssid, u8 ssid_len) |
1752 | { | 2017 | { |
1753 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 2018 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); |
@@ -1756,8 +2021,9 @@ ieee80211_rx_bss_get(struct net_device *dev, u8 *bssid, int channel, | |||
1756 | spin_lock_bh(&local->sta_bss_lock); | 2021 | spin_lock_bh(&local->sta_bss_lock); |
1757 | bss = local->sta_bss_hash[STA_HASH(bssid)]; | 2022 | bss = local->sta_bss_hash[STA_HASH(bssid)]; |
1758 | while (bss) { | 2023 | while (bss) { |
1759 | if (!memcmp(bss->bssid, bssid, ETH_ALEN) && | 2024 | if (!bss_mesh_cfg(bss) && |
1760 | bss->channel == channel && | 2025 | !memcmp(bss->bssid, bssid, ETH_ALEN) && |
2026 | bss->freq == freq && | ||
1761 | bss->ssid_len == ssid_len && | 2027 | bss->ssid_len == ssid_len && |
1762 | (ssid_len == 0 || !memcmp(bss->ssid, ssid, ssid_len))) { | 2028 | (ssid_len == 0 || !memcmp(bss->ssid, ssid, ssid_len))) { |
1763 | atomic_inc(&bss->users); | 2029 | atomic_inc(&bss->users); |
@@ -1769,6 +2035,72 @@ ieee80211_rx_bss_get(struct net_device *dev, u8 *bssid, int channel, | |||
1769 | return bss; | 2035 | return bss; |
1770 | } | 2036 | } |
1771 | 2037 | ||
2038 | #ifdef CONFIG_MAC80211_MESH | ||
2039 | static struct ieee80211_sta_bss * | ||
2040 | ieee80211_rx_mesh_bss_get(struct net_device *dev, u8 *mesh_id, int mesh_id_len, | ||
2041 | u8 *mesh_cfg, int freq) | ||
2042 | { | ||
2043 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
2044 | struct ieee80211_sta_bss *bss; | ||
2045 | |||
2046 | spin_lock_bh(&local->sta_bss_lock); | ||
2047 | bss = local->sta_bss_hash[mesh_id_hash(mesh_id, mesh_id_len)]; | ||
2048 | while (bss) { | ||
2049 | if (bss_mesh_cfg(bss) && | ||
2050 | !memcmp(bss_mesh_cfg(bss), mesh_cfg, MESH_CFG_CMP_LEN) && | ||
2051 | bss->freq == freq && | ||
2052 | mesh_id_len == bss->mesh_id_len && | ||
2053 | (mesh_id_len == 0 || !memcmp(bss->mesh_id, mesh_id, | ||
2054 | mesh_id_len))) { | ||
2055 | atomic_inc(&bss->users); | ||
2056 | break; | ||
2057 | } | ||
2058 | bss = bss->hnext; | ||
2059 | } | ||
2060 | spin_unlock_bh(&local->sta_bss_lock); | ||
2061 | return bss; | ||
2062 | } | ||
2063 | |||
2064 | static struct ieee80211_sta_bss * | ||
2065 | ieee80211_rx_mesh_bss_add(struct net_device *dev, u8 *mesh_id, int mesh_id_len, | ||
2066 | u8 *mesh_cfg, int freq) | ||
2067 | { | ||
2068 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
2069 | struct ieee80211_sta_bss *bss; | ||
2070 | |||
2071 | bss = kzalloc(sizeof(*bss), GFP_ATOMIC); | ||
2072 | if (!bss) | ||
2073 | return NULL; | ||
2074 | |||
2075 | bss->mesh_cfg = kmalloc(MESH_CFG_CMP_LEN, GFP_ATOMIC); | ||
2076 | if (!bss->mesh_cfg) { | ||
2077 | kfree(bss); | ||
2078 | return NULL; | ||
2079 | } | ||
2080 | |||
2081 | if (mesh_id_len && mesh_id_len <= IEEE80211_MAX_MESH_ID_LEN) { | ||
2082 | bss->mesh_id = kmalloc(mesh_id_len, GFP_ATOMIC); | ||
2083 | if (!bss->mesh_id) { | ||
2084 | kfree(bss->mesh_cfg); | ||
2085 | kfree(bss); | ||
2086 | return NULL; | ||
2087 | } | ||
2088 | memcpy(bss->mesh_id, mesh_id, mesh_id_len); | ||
2089 | } | ||
2090 | |||
2091 | atomic_inc(&bss->users); | ||
2092 | atomic_inc(&bss->users); | ||
2093 | memcpy(bss->mesh_cfg, mesh_cfg, MESH_CFG_CMP_LEN); | ||
2094 | bss->mesh_id_len = mesh_id_len; | ||
2095 | bss->freq = freq; | ||
2096 | spin_lock_bh(&local->sta_bss_lock); | ||
2097 | /* TODO: order by RSSI? */ | ||
2098 | list_add_tail(&bss->list, &local->sta_bss_list); | ||
2099 | __ieee80211_rx_bss_hash_add(dev, bss); | ||
2100 | spin_unlock_bh(&local->sta_bss_lock); | ||
2101 | return bss; | ||
2102 | } | ||
2103 | #endif | ||
1772 | 2104 | ||
1773 | static void ieee80211_rx_bss_free(struct ieee80211_sta_bss *bss) | 2105 | static void ieee80211_rx_bss_free(struct ieee80211_sta_bss *bss) |
1774 | { | 2106 | { |
@@ -1776,6 +2108,8 @@ static void ieee80211_rx_bss_free(struct ieee80211_sta_bss *bss) | |||
1776 | kfree(bss->rsn_ie); | 2108 | kfree(bss->rsn_ie); |
1777 | kfree(bss->wmm_ie); | 2109 | kfree(bss->wmm_ie); |
1778 | kfree(bss->ht_ie); | 2110 | kfree(bss->ht_ie); |
2111 | kfree(bss_mesh_id(bss)); | ||
2112 | kfree(bss_mesh_cfg(bss)); | ||
1779 | kfree(bss); | 2113 | kfree(bss); |
1780 | } | 2114 | } |
1781 | 2115 | ||
@@ -1813,6 +2147,201 @@ void ieee80211_rx_bss_list_deinit(struct net_device *dev) | |||
1813 | } | 2147 | } |
1814 | 2148 | ||
1815 | 2149 | ||
2150 | static int ieee80211_sta_join_ibss(struct net_device *dev, | ||
2151 | struct ieee80211_if_sta *ifsta, | ||
2152 | struct ieee80211_sta_bss *bss) | ||
2153 | { | ||
2154 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
2155 | int res, rates, i, j; | ||
2156 | struct sk_buff *skb; | ||
2157 | struct ieee80211_mgmt *mgmt; | ||
2158 | struct ieee80211_tx_control control; | ||
2159 | struct rate_selection ratesel; | ||
2160 | u8 *pos; | ||
2161 | struct ieee80211_sub_if_data *sdata; | ||
2162 | struct ieee80211_supported_band *sband; | ||
2163 | |||
2164 | sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; | ||
2165 | |||
2166 | /* Remove possible STA entries from other IBSS networks. */ | ||
2167 | sta_info_flush(local, NULL); | ||
2168 | |||
2169 | if (local->ops->reset_tsf) { | ||
2170 | /* Reset own TSF to allow time synchronization to work. */ | ||
2171 | local->ops->reset_tsf(local_to_hw(local)); | ||
2172 | } | ||
2173 | memcpy(ifsta->bssid, bss->bssid, ETH_ALEN); | ||
2174 | res = ieee80211_if_config(dev); | ||
2175 | if (res) | ||
2176 | return res; | ||
2177 | |||
2178 | local->hw.conf.beacon_int = bss->beacon_int >= 10 ? bss->beacon_int : 10; | ||
2179 | |||
2180 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
2181 | sdata->drop_unencrypted = bss->capability & | ||
2182 | WLAN_CAPABILITY_PRIVACY ? 1 : 0; | ||
2183 | |||
2184 | res = ieee80211_set_freq(local, bss->freq); | ||
2185 | |||
2186 | if (local->oper_channel->flags & IEEE80211_CHAN_NO_IBSS) { | ||
2187 | printk(KERN_DEBUG "%s: IBSS not allowed on frequency " | ||
2188 | "%d MHz\n", dev->name, local->oper_channel->center_freq); | ||
2189 | return -1; | ||
2190 | } | ||
2191 | |||
2192 | /* Set beacon template */ | ||
2193 | skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400); | ||
2194 | do { | ||
2195 | if (!skb) | ||
2196 | break; | ||
2197 | |||
2198 | skb_reserve(skb, local->hw.extra_tx_headroom); | ||
2199 | |||
2200 | mgmt = (struct ieee80211_mgmt *) | ||
2201 | skb_put(skb, 24 + sizeof(mgmt->u.beacon)); | ||
2202 | memset(mgmt, 0, 24 + sizeof(mgmt->u.beacon)); | ||
2203 | mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, | ||
2204 | IEEE80211_STYPE_BEACON); | ||
2205 | memset(mgmt->da, 0xff, ETH_ALEN); | ||
2206 | memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); | ||
2207 | memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN); | ||
2208 | mgmt->u.beacon.beacon_int = | ||
2209 | cpu_to_le16(local->hw.conf.beacon_int); | ||
2210 | mgmt->u.beacon.capab_info = cpu_to_le16(bss->capability); | ||
2211 | |||
2212 | pos = skb_put(skb, 2 + ifsta->ssid_len); | ||
2213 | *pos++ = WLAN_EID_SSID; | ||
2214 | *pos++ = ifsta->ssid_len; | ||
2215 | memcpy(pos, ifsta->ssid, ifsta->ssid_len); | ||
2216 | |||
2217 | rates = bss->supp_rates_len; | ||
2218 | if (rates > 8) | ||
2219 | rates = 8; | ||
2220 | pos = skb_put(skb, 2 + rates); | ||
2221 | *pos++ = WLAN_EID_SUPP_RATES; | ||
2222 | *pos++ = rates; | ||
2223 | memcpy(pos, bss->supp_rates, rates); | ||
2224 | |||
2225 | if (bss->band == IEEE80211_BAND_2GHZ) { | ||
2226 | pos = skb_put(skb, 2 + 1); | ||
2227 | *pos++ = WLAN_EID_DS_PARAMS; | ||
2228 | *pos++ = 1; | ||
2229 | *pos++ = ieee80211_frequency_to_channel(bss->freq); | ||
2230 | } | ||
2231 | |||
2232 | pos = skb_put(skb, 2 + 2); | ||
2233 | *pos++ = WLAN_EID_IBSS_PARAMS; | ||
2234 | *pos++ = 2; | ||
2235 | /* FIX: set ATIM window based on scan results */ | ||
2236 | *pos++ = 0; | ||
2237 | *pos++ = 0; | ||
2238 | |||
2239 | if (bss->supp_rates_len > 8) { | ||
2240 | rates = bss->supp_rates_len - 8; | ||
2241 | pos = skb_put(skb, 2 + rates); | ||
2242 | *pos++ = WLAN_EID_EXT_SUPP_RATES; | ||
2243 | *pos++ = rates; | ||
2244 | memcpy(pos, &bss->supp_rates[8], rates); | ||
2245 | } | ||
2246 | |||
2247 | memset(&control, 0, sizeof(control)); | ||
2248 | rate_control_get_rate(dev, sband, skb, &ratesel); | ||
2249 | if (!ratesel.rate) { | ||
2250 | printk(KERN_DEBUG "%s: Failed to determine TX rate " | ||
2251 | "for IBSS beacon\n", dev->name); | ||
2252 | break; | ||
2253 | } | ||
2254 | control.vif = &sdata->vif; | ||
2255 | control.tx_rate = ratesel.rate; | ||
2256 | if (sdata->bss_conf.use_short_preamble && | ||
2257 | ratesel.rate->flags & IEEE80211_RATE_SHORT_PREAMBLE) | ||
2258 | control.flags |= IEEE80211_TXCTL_SHORT_PREAMBLE; | ||
2259 | control.antenna_sel_tx = local->hw.conf.antenna_sel_tx; | ||
2260 | control.flags |= IEEE80211_TXCTL_NO_ACK; | ||
2261 | control.retry_limit = 1; | ||
2262 | |||
2263 | ifsta->probe_resp = skb_copy(skb, GFP_ATOMIC); | ||
2264 | if (ifsta->probe_resp) { | ||
2265 | mgmt = (struct ieee80211_mgmt *) | ||
2266 | ifsta->probe_resp->data; | ||
2267 | mgmt->frame_control = | ||
2268 | IEEE80211_FC(IEEE80211_FTYPE_MGMT, | ||
2269 | IEEE80211_STYPE_PROBE_RESP); | ||
2270 | } else { | ||
2271 | printk(KERN_DEBUG "%s: Could not allocate ProbeResp " | ||
2272 | "template for IBSS\n", dev->name); | ||
2273 | } | ||
2274 | |||
2275 | if (local->ops->beacon_update && | ||
2276 | local->ops->beacon_update(local_to_hw(local), | ||
2277 | skb, &control) == 0) { | ||
2278 | printk(KERN_DEBUG "%s: Configured IBSS beacon " | ||
2279 | "template\n", dev->name); | ||
2280 | skb = NULL; | ||
2281 | } | ||
2282 | |||
2283 | rates = 0; | ||
2284 | sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; | ||
2285 | for (i = 0; i < bss->supp_rates_len; i++) { | ||
2286 | int bitrate = (bss->supp_rates[i] & 0x7f) * 5; | ||
2287 | for (j = 0; j < sband->n_bitrates; j++) | ||
2288 | if (sband->bitrates[j].bitrate == bitrate) | ||
2289 | rates |= BIT(j); | ||
2290 | } | ||
2291 | ifsta->supp_rates_bits[local->hw.conf.channel->band] = rates; | ||
2292 | } while (0); | ||
2293 | |||
2294 | if (skb) { | ||
2295 | printk(KERN_DEBUG "%s: Failed to configure IBSS beacon " | ||
2296 | "template\n", dev->name); | ||
2297 | dev_kfree_skb(skb); | ||
2298 | } | ||
2299 | |||
2300 | ifsta->state = IEEE80211_IBSS_JOINED; | ||
2301 | mod_timer(&ifsta->timer, jiffies + IEEE80211_IBSS_MERGE_INTERVAL); | ||
2302 | |||
2303 | ieee80211_rx_bss_put(dev, bss); | ||
2304 | |||
2305 | return res; | ||
2306 | } | ||
2307 | |||
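ieee80211_sta_join_ibss() above builds its beacon template by appending information elements one after another: each element is a one-byte ID, a one-byte length and the payload, written through a moving position pointer (skb_put() in the kernel). A small standalone sketch of that TLV pattern over a plain buffer; the element ID values used are the standard SSID and Supported Rates IDs:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define EID_SSID	0
#define EID_SUPP_RATES	1

/* Append one ID/length/payload element and return the advanced pointer. */
static uint8_t *put_ie(uint8_t *pos, uint8_t eid, const void *data, uint8_t len)
{
	*pos++ = eid;
	*pos++ = len;
	memcpy(pos, data, len);
	return pos + len;
}

int main(void)
{
	uint8_t frame[64];
	uint8_t *pos = frame;
	const char ssid[] = "adhoc-net";
	/* 1 and 2 Mbit/s flagged as basic (0x80), then 5.5 and 11 Mbit/s. */
	const uint8_t rates[] = { 0x82, 0x84, 0x0b, 0x16 };

	pos = put_ie(pos, EID_SSID, ssid, (uint8_t)strlen(ssid));
	pos = put_ie(pos, EID_SUPP_RATES, rates, (uint8_t)sizeof(rates));

	printf("built %zu bytes of information elements\n",
	       (size_t)(pos - frame));
	return 0;
}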
2308 | u64 ieee80211_sta_get_rates(struct ieee80211_local *local, | ||
2309 | struct ieee802_11_elems *elems, | ||
2310 | enum ieee80211_band band) | ||
2311 | { | ||
2312 | struct ieee80211_supported_band *sband; | ||
2313 | struct ieee80211_rate *bitrates; | ||
2314 | size_t num_rates; | ||
2315 | u64 supp_rates; | ||
2316 | int i, j; | ||
2317 | sband = local->hw.wiphy->bands[band]; | ||
2318 | |||
2319 | if (!sband) { | ||
2320 | WARN_ON(1); | ||
2321 | sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; | ||
2322 | } | ||
2323 | |||
2324 | bitrates = sband->bitrates; | ||
2325 | num_rates = sband->n_bitrates; | ||
2326 | supp_rates = 0; | ||
2327 | for (i = 0; i < elems->supp_rates_len + | ||
2328 | elems->ext_supp_rates_len; i++) { | ||
2329 | u8 rate = 0; | ||
2330 | int own_rate; | ||
2331 | if (i < elems->supp_rates_len) | ||
2332 | rate = elems->supp_rates[i]; | ||
2333 | else if (elems->ext_supp_rates) | ||
2334 | rate = elems->ext_supp_rates | ||
2335 | [i - elems->supp_rates_len]; | ||
2336 | own_rate = 5 * (rate & 0x7f); | ||
2337 | for (j = 0; j < num_rates; j++) | ||
2338 | if (bitrates[j].bitrate == own_rate) | ||
2339 | supp_rates |= BIT(j); | ||
2340 | } | ||
2341 | return supp_rates; | ||
2342 | } | ||
2343 | |||
2344 | |||
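ieee80211_sta_get_rates() condenses the peer's Supported Rates and Extended Supported Rates elements into a u64 bitmap indexed by the local band's bitrate table: each IE byte is masked to 7 bits, scaled to 100 kbps units and matched against the table. A standalone sketch of that matching with a hypothetical local table:

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1ULL << (n))

/* Hypothetical local bitrate table, units of 100 kbps. */
static const int local_bitrates[] = { 10, 20, 55, 110, 60, 120, 240, 480, 540 };
#define N_BITRATES (sizeof(local_bitrates) / sizeof(local_bitrates[0]))

static uint64_t rates_to_bitmap(const uint8_t *ie, size_t ie_len)
{
	uint64_t supp = 0;

	for (size_t i = 0; i < ie_len; i++) {
		int rate_100kbps = 5 * (ie[i] & 0x7f);	/* strip the basic-rate bit */

		for (size_t j = 0; j < N_BITRATES; j++)
			if (local_bitrates[j] == rate_100kbps)
				supp |= BIT(j);
	}
	return supp;
}

int main(void)
{
	/* Peer advertises 1, 2, 5.5, 11 and 54 Mbit/s. */
	const uint8_t ie[] = { 0x82, 0x84, 0x0b, 0x16, 0x6c };

	printf("supp_rates bitmap: 0x%llx\n",
	       (unsigned long long)rates_to_bitmap(ie, sizeof(ie)));
	return 0;
}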
1816 | static void ieee80211_rx_bss_info(struct net_device *dev, | 2345 | static void ieee80211_rx_bss_info(struct net_device *dev, |
1817 | struct ieee80211_mgmt *mgmt, | 2346 | struct ieee80211_mgmt *mgmt, |
1818 | size_t len, | 2347 | size_t len, |
@@ -1822,11 +2351,11 @@ static void ieee80211_rx_bss_info(struct net_device *dev, | |||
1822 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 2351 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); |
1823 | struct ieee802_11_elems elems; | 2352 | struct ieee802_11_elems elems; |
1824 | size_t baselen; | 2353 | size_t baselen; |
1825 | int channel, clen; | 2354 | int freq, clen; |
1826 | struct ieee80211_sta_bss *bss; | 2355 | struct ieee80211_sta_bss *bss; |
1827 | struct sta_info *sta; | 2356 | struct sta_info *sta; |
1828 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 2357 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
1829 | u64 timestamp; | 2358 | u64 beacon_timestamp, rx_timestamp; |
1830 | DECLARE_MAC_BUF(mac); | 2359 | DECLARE_MAC_BUF(mac); |
1831 | DECLARE_MAC_BUF(mac2); | 2360 | DECLARE_MAC_BUF(mac2); |
1832 | 2361 | ||
@@ -1843,104 +2372,71 @@ static void ieee80211_rx_bss_info(struct net_device *dev, | |||
1843 | if (baselen > len) | 2372 | if (baselen > len) |
1844 | return; | 2373 | return; |
1845 | 2374 | ||
1846 | timestamp = le64_to_cpu(mgmt->u.beacon.timestamp); | 2375 | beacon_timestamp = le64_to_cpu(mgmt->u.beacon.timestamp); |
2376 | ieee802_11_parse_elems(mgmt->u.beacon.variable, len - baselen, &elems); | ||
1847 | 2377 | ||
1848 | if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS && beacon && | 2378 | if (ieee80211_vif_is_mesh(&sdata->vif) && elems.mesh_id && |
1849 | memcmp(mgmt->bssid, sdata->u.sta.bssid, ETH_ALEN) == 0) { | 2379 | elems.mesh_config && mesh_matches_local(&elems, dev)) { |
1850 | #ifdef CONFIG_MAC80211_IBSS_DEBUG | 2380 | u64 rates = ieee80211_sta_get_rates(local, &elems, |
1851 | static unsigned long last_tsf_debug = 0; | 2381 | rx_status->band); |
1852 | u64 tsf; | 2382 | |
1853 | if (local->ops->get_tsf) | 2383 | mesh_neighbour_update(mgmt->sa, rates, dev, |
1854 | tsf = local->ops->get_tsf(local_to_hw(local)); | 2384 | mesh_peer_accepts_plinks(&elems, dev)); |
1855 | else | ||
1856 | tsf = -1LLU; | ||
1857 | if (time_after(jiffies, last_tsf_debug + 5 * HZ)) { | ||
1858 | printk(KERN_DEBUG "RX beacon SA=%s BSSID=" | ||
1859 | "%s TSF=0x%llx BCN=0x%llx diff=%lld " | ||
1860 | "@%lu\n", | ||
1861 | print_mac(mac, mgmt->sa), print_mac(mac2, mgmt->bssid), | ||
1862 | (unsigned long long)tsf, | ||
1863 | (unsigned long long)timestamp, | ||
1864 | (unsigned long long)(tsf - timestamp), | ||
1865 | jiffies); | ||
1866 | last_tsf_debug = jiffies; | ||
1867 | } | ||
1868 | #endif /* CONFIG_MAC80211_IBSS_DEBUG */ | ||
1869 | } | 2385 | } |
1870 | 2386 | ||
1871 | ieee802_11_parse_elems(mgmt->u.beacon.variable, len - baselen, &elems); | 2387 | rcu_read_lock(); |
1872 | 2388 | ||
1873 | if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS && elems.supp_rates && | 2389 | if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS && elems.supp_rates && |
1874 | memcmp(mgmt->bssid, sdata->u.sta.bssid, ETH_ALEN) == 0 && | 2390 | memcmp(mgmt->bssid, sdata->u.sta.bssid, ETH_ALEN) == 0 && |
1875 | (sta = sta_info_get(local, mgmt->sa))) { | 2391 | (sta = sta_info_get(local, mgmt->sa))) { |
1876 | struct ieee80211_hw_mode *mode; | 2392 | u64 prev_rates; |
1877 | struct ieee80211_rate *rates; | 2393 | u64 supp_rates = ieee80211_sta_get_rates(local, &elems, |
1878 | size_t num_rates; | 2394 | rx_status->band); |
1879 | u32 supp_rates, prev_rates; | ||
1880 | int i, j; | ||
1881 | |||
1882 | mode = local->sta_sw_scanning ? | ||
1883 | local->scan_hw_mode : local->oper_hw_mode; | ||
1884 | |||
1885 | if (local->sta_hw_scanning) { | ||
1886 | /* search for the correct mode matches the beacon */ | ||
1887 | list_for_each_entry(mode, &local->modes_list, list) | ||
1888 | if (mode->mode == rx_status->phymode) | ||
1889 | break; | ||
1890 | |||
1891 | if (mode == NULL) | ||
1892 | mode = local->oper_hw_mode; | ||
1893 | } | ||
1894 | rates = mode->rates; | ||
1895 | num_rates = mode->num_rates; | ||
1896 | |||
1897 | supp_rates = 0; | ||
1898 | for (i = 0; i < elems.supp_rates_len + | ||
1899 | elems.ext_supp_rates_len; i++) { | ||
1900 | u8 rate = 0; | ||
1901 | int own_rate; | ||
1902 | if (i < elems.supp_rates_len) | ||
1903 | rate = elems.supp_rates[i]; | ||
1904 | else if (elems.ext_supp_rates) | ||
1905 | rate = elems.ext_supp_rates | ||
1906 | [i - elems.supp_rates_len]; | ||
1907 | own_rate = 5 * (rate & 0x7f); | ||
1908 | for (j = 0; j < num_rates; j++) | ||
1909 | if (rates[j].rate == own_rate) | ||
1910 | supp_rates |= BIT(j); | ||
1911 | } | ||
1912 | 2395 | ||
1913 | prev_rates = sta->supp_rates; | 2396 | prev_rates = sta->supp_rates[rx_status->band]; |
1914 | sta->supp_rates &= supp_rates; | 2397 | sta->supp_rates[rx_status->band] &= supp_rates; |
1915 | if (sta->supp_rates == 0) { | 2398 | if (sta->supp_rates[rx_status->band] == 0) { |
1916 | /* No matching rates - this should not really happen. | 2399 | /* No matching rates - this should not really happen. |
1917 | * Make sure that at least one rate is marked | 2400 | * Make sure that at least one rate is marked |
1918 | * supported to avoid issues with TX rate ctrl. */ | 2401 | * supported to avoid issues with TX rate ctrl. */ |
1919 | sta->supp_rates = sdata->u.sta.supp_rates_bits; | 2402 | sta->supp_rates[rx_status->band] = |
2403 | sdata->u.sta.supp_rates_bits[rx_status->band]; | ||
1920 | } | 2404 | } |
1921 | if (sta->supp_rates != prev_rates) { | 2405 | if (sta->supp_rates[rx_status->band] != prev_rates) { |
1922 | printk(KERN_DEBUG "%s: updated supp_rates set for " | 2406 | printk(KERN_DEBUG "%s: updated supp_rates set for " |
1923 | "%s based on beacon info (0x%x & 0x%x -> " | 2407 | "%s based on beacon info (0x%llx & 0x%llx -> " |
1924 | "0x%x)\n", | 2408 | "0x%llx)\n", |
1925 | dev->name, print_mac(mac, sta->addr), prev_rates, | 2409 | dev->name, print_mac(mac, sta->addr), |
1926 | supp_rates, sta->supp_rates); | 2410 | (unsigned long long) prev_rates, |
2411 | (unsigned long long) supp_rates, | ||
2412 | (unsigned long long) sta->supp_rates[rx_status->band]); | ||
1927 | } | 2413 | } |
1928 | sta_info_put(sta); | ||
1929 | } | 2414 | } |
1930 | 2415 | ||
1931 | if (!elems.ssid) | 2416 | rcu_read_unlock(); |
1932 | return; | ||
1933 | 2417 | ||
1934 | if (elems.ds_params && elems.ds_params_len == 1) | 2418 | if (elems.ds_params && elems.ds_params_len == 1) |
1935 | channel = elems.ds_params[0]; | 2419 | freq = ieee80211_channel_to_frequency(elems.ds_params[0]); |
1936 | else | 2420 | else |
1937 | channel = rx_status->channel; | 2421 | freq = rx_status->freq; |
1938 | 2422 | ||
1939 | bss = ieee80211_rx_bss_get(dev, mgmt->bssid, channel, | 2423 | #ifdef CONFIG_MAC80211_MESH |
1940 | elems.ssid, elems.ssid_len); | 2424 | if (elems.mesh_config) |
1941 | if (!bss) { | 2425 | bss = ieee80211_rx_mesh_bss_get(dev, elems.mesh_id, |
1942 | bss = ieee80211_rx_bss_add(dev, mgmt->bssid, channel, | 2426 | elems.mesh_id_len, elems.mesh_config, freq); |
2427 | else | ||
2428 | #endif | ||
2429 | bss = ieee80211_rx_bss_get(dev, mgmt->bssid, freq, | ||
1943 | elems.ssid, elems.ssid_len); | 2430 | elems.ssid, elems.ssid_len); |
2431 | if (!bss) { | ||
2432 | #ifdef CONFIG_MAC80211_MESH | ||
2433 | if (elems.mesh_config) | ||
2434 | bss = ieee80211_rx_mesh_bss_add(dev, elems.mesh_id, | ||
2435 | elems.mesh_id_len, elems.mesh_config, freq); | ||
2436 | else | ||
2437 | #endif | ||
2438 | bss = ieee80211_rx_bss_add(dev, mgmt->bssid, freq, | ||
2439 | elems.ssid, elems.ssid_len); | ||
1944 | if (!bss) | 2440 | if (!bss) |
1945 | return; | 2441 | return; |
1946 | } else { | 2442 | } else { |
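When a beacon carries a DS Parameter Set, the handler above now derives the BSS key from the channel number via ieee80211_channel_to_frequency() rather than trusting the channel the frame happened to be received on (802.11b/g radios routinely pick up beacons from neighbouring channels). For the 2.4 GHz band the mapping is simple enough to sketch standalone, channel 14 being the only irregular case:

#include <stdio.h>

/* 2.4 GHz only: channels 1..13 sit 5 MHz apart starting at 2412 MHz. */
static int channel_to_freq_2ghz(int chan)
{
	if (chan == 14)
		return 2484;
	if (chan >= 1 && chan <= 13)
		return 2407 + chan * 5;
	return -1;	/* not a 2.4 GHz channel number */
}

int main(void)
{
	for (int chan = 1; chan <= 14; chan++)
		printf("channel %2d -> %d MHz\n",
		       chan, channel_to_freq_2ghz(chan));
	return 0;
}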
@@ -1952,8 +2448,12 @@ static void ieee80211_rx_bss_info(struct net_device *dev, | |||
1952 | #endif | 2448 | #endif |
1953 | } | 2449 | } |
1954 | 2450 | ||
1955 | if (bss->probe_resp && beacon) { | 2451 | bss->band = rx_status->band; |
1956 | /* Do not allow beacon to override data from Probe Response. */ | 2452 | |
2453 | if (sdata->vif.type != IEEE80211_IF_TYPE_IBSS && | ||
2454 | bss->probe_resp && beacon) { | ||
2455 | /* STA mode: | ||
2456 | * Do not allow beacon to override data from Probe Response. */ | ||
1957 | ieee80211_rx_bss_put(dev, bss); | 2457 | ieee80211_rx_bss_put(dev, bss); |
1958 | return; | 2458 | return; |
1959 | } | 2459 | } |
@@ -2050,27 +2550,69 @@ static void ieee80211_rx_bss_info(struct net_device *dev, | |||
2050 | bss->ht_ie_len = 0; | 2550 | bss->ht_ie_len = 0; |
2051 | } | 2551 | } |
2052 | 2552 | ||
2053 | bss->hw_mode = rx_status->phymode; | 2553 | bss->timestamp = beacon_timestamp; |
2054 | bss->freq = rx_status->freq; | ||
2055 | if (channel != rx_status->channel && | ||
2056 | (bss->hw_mode == MODE_IEEE80211G || | ||
2057 | bss->hw_mode == MODE_IEEE80211B) && | ||
2058 | channel >= 1 && channel <= 14) { | ||
2059 | static const int freq_list[] = { | ||
2060 | 2412, 2417, 2422, 2427, 2432, 2437, 2442, | ||
2061 | 2447, 2452, 2457, 2462, 2467, 2472, 2484 | ||
2062 | }; | ||
2063 | /* IEEE 802.11g/b mode can receive packets from neighboring | ||
2064 | * channels, so map the channel into frequency. */ | ||
2065 | bss->freq = freq_list[channel - 1]; | ||
2066 | } | ||
2067 | bss->timestamp = timestamp; | ||
2068 | bss->last_update = jiffies; | 2554 | bss->last_update = jiffies; |
2069 | bss->rssi = rx_status->ssi; | 2555 | bss->rssi = rx_status->ssi; |
2070 | bss->signal = rx_status->signal; | 2556 | bss->signal = rx_status->signal; |
2071 | bss->noise = rx_status->noise; | 2557 | bss->noise = rx_status->noise; |
2072 | if (!beacon) | 2558 | if (!beacon) |
2073 | bss->probe_resp++; | 2559 | bss->probe_resp++; |
2560 | |||
2561 | /* check if we need to merge IBSS */ | ||
2562 | if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS && beacon && | ||
2563 | !local->sta_sw_scanning && !local->sta_hw_scanning && | ||
2564 | bss->capability & WLAN_CAPABILITY_IBSS && | ||
2565 | bss->freq == local->oper_channel->center_freq && | ||
2566 | elems.ssid_len == sdata->u.sta.ssid_len && | ||
2567 | memcmp(elems.ssid, sdata->u.sta.ssid, sdata->u.sta.ssid_len) == 0) { | ||
2568 | if (rx_status->flag & RX_FLAG_TSFT) { | ||
2569 | /* in order for correct IBSS merging we need mactime | ||
2570 | * | ||
2571 | * since mactime is defined as the time the first data | ||
2572 | * symbol of the frame hits the PHY, and the timestamp | ||
2573 | * of the beacon is defined as "the time that the data | ||
2574 | * symbol containing the first bit of the timestamp is | ||
2575 | * transmitted to the PHY plus the transmitting STA’s | ||
2576 | * delays through its local PHY from the MAC-PHY | ||
2577 | * interface to its interface with the WM" | ||
2578 | * (802.11 11.1.2) - equals the time this bit arrives at | ||
2579 | * the receiver - we have to take into account the | ||
2580 | * offset between the two. | ||
2581 | * e.g: at 1 MBit that means mactime is 192 usec earlier | ||
2582 | * (=24 bytes * 8 usecs/byte) than the beacon timestamp. | ||
2583 | */ | ||
2584 | int rate = local->hw.wiphy->bands[rx_status->band]-> | ||
2585 | bitrates[rx_status->rate_idx].bitrate; | ||
2586 | rx_timestamp = rx_status->mactime + (24 * 8 * 10 / rate); | ||
2587 | } else if (local && local->ops && local->ops->get_tsf) | ||
2588 | /* second best option: get current TSF */ | ||
2589 | rx_timestamp = local->ops->get_tsf(local_to_hw(local)); | ||
2590 | else | ||
2591 | /* can't merge without knowing the TSF */ | ||
2592 | rx_timestamp = -1LLU; | ||
2593 | #ifdef CONFIG_MAC80211_IBSS_DEBUG | ||
2594 | printk(KERN_DEBUG "RX beacon SA=%s BSSID=" | ||
2595 | "%s TSF=0x%llx BCN=0x%llx diff=%lld @%lu\n", | ||
2596 | print_mac(mac, mgmt->sa), | ||
2597 | print_mac(mac2, mgmt->bssid), | ||
2598 | (unsigned long long)rx_timestamp, | ||
2599 | (unsigned long long)beacon_timestamp, | ||
2600 | (unsigned long long)(rx_timestamp - beacon_timestamp), | ||
2601 | jiffies); | ||
2602 | #endif /* CONFIG_MAC80211_IBSS_DEBUG */ | ||
2603 | if (beacon_timestamp > rx_timestamp) { | ||
2604 | #ifndef CONFIG_MAC80211_IBSS_DEBUG | ||
2605 | if (net_ratelimit()) | ||
2606 | #endif | ||
2607 | printk(KERN_DEBUG "%s: beacon TSF higher than " | ||
2608 | "local TSF - IBSS merge with BSSID %s\n", | ||
2609 | dev->name, print_mac(mac, mgmt->bssid)); | ||
2610 | ieee80211_sta_join_ibss(dev, &sdata->u.sta, bss); | ||
2611 | ieee80211_ibss_add_sta(dev, NULL, | ||
2612 | mgmt->bssid, mgmt->sa); | ||
2613 | } | ||
2614 | } | ||
2615 | |||
2074 | ieee80211_rx_bss_put(dev, bss); | 2616 | ieee80211_rx_bss_put(dev, bss); |
2075 | } | 2617 | } |
2076 | 2618 | ||
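The merge logic above wants the receive-side TSF at the instant the beacon's timestamp field went over the air. rx_status->mactime marks the first data symbol of the frame, while the timestamp field starts 24 header bytes later, so the code adds the air time of those 24 bytes: 24 * 8 * 10 / rate with the rate in 100 kbps units, i.e. 192 usec at 1 Mbit/s. A quick standalone check of that arithmetic:

#include <stdio.h>

/* Air time of the 24 bytes that precede the timestamp field, in microseconds,
 * for a bitrate given in units of 100 kbps (as mac80211 stores bitrates). */
static int timestamp_offset_usec(int bitrate_100kbps)
{
	return 24 * 8 * 10 / bitrate_100kbps;
}

int main(void)
{
	const int rates[] = { 10, 20, 55, 110, 540 };	/* 1, 2, 5.5, 11, 54 Mbit/s */

	for (unsigned i = 0; i < sizeof(rates) / sizeof(rates[0]); i++)
		printf("%4.1f Mbit/s -> offset %d usec\n",
		       rates[i] / 10.0, timestamp_offset_usec(rates[i]));
	return 0;
}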
@@ -2221,8 +2763,11 @@ static void ieee80211_rx_mgmt_probe_req(struct net_device *dev, | |||
2221 | static void ieee80211_rx_mgmt_action(struct net_device *dev, | 2763 | static void ieee80211_rx_mgmt_action(struct net_device *dev, |
2222 | struct ieee80211_if_sta *ifsta, | 2764 | struct ieee80211_if_sta *ifsta, |
2223 | struct ieee80211_mgmt *mgmt, | 2765 | struct ieee80211_mgmt *mgmt, |
2224 | size_t len) | 2766 | size_t len, |
2767 | struct ieee80211_rx_status *rx_status) | ||
2225 | { | 2768 | { |
2769 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
2770 | |||
2226 | if (len < IEEE80211_MIN_ACTION_SIZE) | 2771 | if (len < IEEE80211_MIN_ACTION_SIZE) |
2227 | return; | 2772 | return; |
2228 | 2773 | ||
@@ -2235,6 +2780,12 @@ static void ieee80211_rx_mgmt_action(struct net_device *dev, | |||
2235 | break; | 2780 | break; |
2236 | ieee80211_sta_process_addba_request(dev, mgmt, len); | 2781 | ieee80211_sta_process_addba_request(dev, mgmt, len); |
2237 | break; | 2782 | break; |
2783 | case WLAN_ACTION_ADDBA_RESP: | ||
2784 | if (len < (IEEE80211_MIN_ACTION_SIZE + | ||
2785 | sizeof(mgmt->u.action.u.addba_resp))) | ||
2786 | break; | ||
2787 | ieee80211_sta_process_addba_resp(dev, mgmt, len); | ||
2788 | break; | ||
2238 | case WLAN_ACTION_DELBA: | 2789 | case WLAN_ACTION_DELBA: |
2239 | if (len < (IEEE80211_MIN_ACTION_SIZE + | 2790 | if (len < (IEEE80211_MIN_ACTION_SIZE + |
2240 | sizeof(mgmt->u.action.u.delba))) | 2791 | sizeof(mgmt->u.action.u.delba))) |
@@ -2248,7 +2799,18 @@ static void ieee80211_rx_mgmt_action(struct net_device *dev, | |||
2248 | break; | 2799 | break; |
2249 | } | 2800 | } |
2250 | break; | 2801 | break; |
2802 | case PLINK_CATEGORY: | ||
2803 | if (ieee80211_vif_is_mesh(&sdata->vif)) | ||
2804 | mesh_rx_plink_frame(dev, mgmt, len, rx_status); | ||
2805 | break; | ||
2806 | case MESH_PATH_SEL_CATEGORY: | ||
2807 | if (ieee80211_vif_is_mesh(&sdata->vif)) | ||
2808 | mesh_rx_path_sel_frame(dev, mgmt, len); | ||
2809 | break; | ||
2251 | default: | 2810 | default: |
2811 | if (net_ratelimit()) | ||
2812 | printk(KERN_DEBUG "%s: Rx unknown action frame - " | ||
2813 | "category=%d\n", dev->name, mgmt->u.action.category); | ||
2252 | break; | 2814 | break; |
2253 | } | 2815 | } |
2254 | } | 2816 | } |
@@ -2275,13 +2837,13 @@ void ieee80211_sta_rx_mgmt(struct net_device *dev, struct sk_buff *skb, | |||
2275 | case IEEE80211_STYPE_PROBE_REQ: | 2837 | case IEEE80211_STYPE_PROBE_REQ: |
2276 | case IEEE80211_STYPE_PROBE_RESP: | 2838 | case IEEE80211_STYPE_PROBE_RESP: |
2277 | case IEEE80211_STYPE_BEACON: | 2839 | case IEEE80211_STYPE_BEACON: |
2840 | case IEEE80211_STYPE_ACTION: | ||
2278 | memcpy(skb->cb, rx_status, sizeof(*rx_status)); | 2841 | memcpy(skb->cb, rx_status, sizeof(*rx_status)); |
2279 | case IEEE80211_STYPE_AUTH: | 2842 | case IEEE80211_STYPE_AUTH: |
2280 | case IEEE80211_STYPE_ASSOC_RESP: | 2843 | case IEEE80211_STYPE_ASSOC_RESP: |
2281 | case IEEE80211_STYPE_REASSOC_RESP: | 2844 | case IEEE80211_STYPE_REASSOC_RESP: |
2282 | case IEEE80211_STYPE_DEAUTH: | 2845 | case IEEE80211_STYPE_DEAUTH: |
2283 | case IEEE80211_STYPE_DISASSOC: | 2846 | case IEEE80211_STYPE_DISASSOC: |
2284 | case IEEE80211_STYPE_ACTION: | ||
2285 | skb_queue_tail(&ifsta->skb_queue, skb); | 2847 | skb_queue_tail(&ifsta->skb_queue, skb); |
2286 | queue_work(local->hw.workqueue, &ifsta->work); | 2848 | queue_work(local->hw.workqueue, &ifsta->work); |
2287 | return; | 2849 | return; |
@@ -2340,7 +2902,7 @@ static void ieee80211_sta_rx_queued_mgmt(struct net_device *dev, | |||
2340 | ieee80211_rx_mgmt_disassoc(dev, ifsta, mgmt, skb->len); | 2902 | ieee80211_rx_mgmt_disassoc(dev, ifsta, mgmt, skb->len); |
2341 | break; | 2903 | break; |
2342 | case IEEE80211_STYPE_ACTION: | 2904 | case IEEE80211_STYPE_ACTION: |
2343 | ieee80211_rx_mgmt_action(dev, ifsta, mgmt, skb->len); | 2905 | ieee80211_rx_mgmt_action(dev, ifsta, mgmt, skb->len, rx_status); |
2344 | break; | 2906 | break; |
2345 | } | 2907 | } |
2346 | 2908 | ||
@@ -2348,7 +2910,7 @@ static void ieee80211_sta_rx_queued_mgmt(struct net_device *dev, | |||
2348 | } | 2910 | } |
2349 | 2911 | ||
2350 | 2912 | ||
2351 | ieee80211_txrx_result | 2913 | ieee80211_rx_result |
2352 | ieee80211_sta_rx_scan(struct net_device *dev, struct sk_buff *skb, | 2914 | ieee80211_sta_rx_scan(struct net_device *dev, struct sk_buff *skb, |
2353 | struct ieee80211_rx_status *rx_status) | 2915 | struct ieee80211_rx_status *rx_status) |
2354 | { | 2916 | { |
@@ -2356,31 +2918,31 @@ ieee80211_sta_rx_scan(struct net_device *dev, struct sk_buff *skb, | |||
2356 | u16 fc; | 2918 | u16 fc; |
2357 | 2919 | ||
2358 | if (skb->len < 2) | 2920 | if (skb->len < 2) |
2359 | return TXRX_DROP; | 2921 | return RX_DROP_UNUSABLE; |
2360 | 2922 | ||
2361 | mgmt = (struct ieee80211_mgmt *) skb->data; | 2923 | mgmt = (struct ieee80211_mgmt *) skb->data; |
2362 | fc = le16_to_cpu(mgmt->frame_control); | 2924 | fc = le16_to_cpu(mgmt->frame_control); |
2363 | 2925 | ||
2364 | if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL) | 2926 | if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL) |
2365 | return TXRX_CONTINUE; | 2927 | return RX_CONTINUE; |
2366 | 2928 | ||
2367 | if (skb->len < 24) | 2929 | if (skb->len < 24) |
2368 | return TXRX_DROP; | 2930 | return RX_DROP_MONITOR; |
2369 | 2931 | ||
2370 | if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) { | 2932 | if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) { |
2371 | if ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP) { | 2933 | if ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP) { |
2372 | ieee80211_rx_mgmt_probe_resp(dev, mgmt, | 2934 | ieee80211_rx_mgmt_probe_resp(dev, mgmt, |
2373 | skb->len, rx_status); | 2935 | skb->len, rx_status); |
2374 | dev_kfree_skb(skb); | 2936 | dev_kfree_skb(skb); |
2375 | return TXRX_QUEUED; | 2937 | return RX_QUEUED; |
2376 | } else if ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_BEACON) { | 2938 | } else if ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_BEACON) { |
2377 | ieee80211_rx_mgmt_beacon(dev, mgmt, skb->len, | 2939 | ieee80211_rx_mgmt_beacon(dev, mgmt, skb->len, |
2378 | rx_status); | 2940 | rx_status); |
2379 | dev_kfree_skb(skb); | 2941 | dev_kfree_skb(skb); |
2380 | return TXRX_QUEUED; | 2942 | return RX_QUEUED; |
2381 | } | 2943 | } |
2382 | } | 2944 | } |
2383 | return TXRX_CONTINUE; | 2945 | return RX_CONTINUE; |
2384 | } | 2946 | } |
2385 | 2947 | ||
2386 | 2948 | ||
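This hunk is part of the rename from ieee80211_txrx_result/TXRX_* to ieee80211_rx_result/RX_*: RX_DROP_UNUSABLE is for frames too short to interpret at all, RX_DROP_MONITOR drops the frame from normal processing while still leaving it available to monitor interfaces, RX_QUEUED means the handler consumed the skb, and RX_CONTINUE passes it to the next handler. A compact sketch of the same decision ladder (the enum and the mask constants below are illustrative stand-ins, not the mac80211 definitions):

enum rx_result { RX_CONTINUE, RX_QUEUED, RX_DROP_UNUSABLE, RX_DROP_MONITOR };

#define FTYPE_MASK  0x000c
#define FTYPE_CTL   0x0004
#define FTYPE_MGMT  0x0000

enum rx_result classify_scan_frame(const unsigned char *buf, unsigned len)
{
	unsigned fc;

	if (len < 2)
		return RX_DROP_UNUSABLE;   /* not even a frame_control field */

	fc = buf[0] | (buf[1] << 8);       /* little-endian frame_control */

	if ((fc & FTYPE_MASK) == FTYPE_CTL)
		return RX_CONTINUE;        /* control frames: not handled here */

	if (len < 24)
		return RX_DROP_MONITOR;    /* header truncated, still worth logging */

	if ((fc & FTYPE_MASK) == FTYPE_MGMT)
		return RX_QUEUED;          /* beacon/probe resp would be consumed here */

	return RX_CONTINUE;
}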
@@ -2389,45 +2951,50 @@ static int ieee80211_sta_active_ibss(struct net_device *dev) | |||
2389 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 2951 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); |
2390 | int active = 0; | 2952 | int active = 0; |
2391 | struct sta_info *sta; | 2953 | struct sta_info *sta; |
2954 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
2955 | |||
2956 | rcu_read_lock(); | ||
2392 | 2957 | ||
2393 | read_lock_bh(&local->sta_lock); | 2958 | list_for_each_entry_rcu(sta, &local->sta_list, list) { |
2394 | list_for_each_entry(sta, &local->sta_list, list) { | 2959 | if (sta->sdata == sdata && |
2395 | if (sta->dev == dev && | ||
2396 | time_after(sta->last_rx + IEEE80211_IBSS_MERGE_INTERVAL, | 2960 | time_after(sta->last_rx + IEEE80211_IBSS_MERGE_INTERVAL, |
2397 | jiffies)) { | 2961 | jiffies)) { |
2398 | active++; | 2962 | active++; |
2399 | break; | 2963 | break; |
2400 | } | 2964 | } |
2401 | } | 2965 | } |
2402 | read_unlock_bh(&local->sta_lock); | 2966 | |
2967 | rcu_read_unlock(); | ||
2403 | 2968 | ||
2404 | return active; | 2969 | return active; |
2405 | } | 2970 | } |
2406 | 2971 | ||
2407 | 2972 | ||
2408 | static void ieee80211_sta_expire(struct net_device *dev) | 2973 | static void ieee80211_sta_expire(struct net_device *dev, unsigned long exp_time) |
2409 | { | 2974 | { |
2410 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 2975 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); |
2411 | struct sta_info *sta, *tmp; | 2976 | struct sta_info *sta, *tmp; |
2412 | LIST_HEAD(tmp_list); | 2977 | LIST_HEAD(tmp_list); |
2413 | DECLARE_MAC_BUF(mac); | 2978 | DECLARE_MAC_BUF(mac); |
2979 | unsigned long flags; | ||
2414 | 2980 | ||
2415 | write_lock_bh(&local->sta_lock); | 2981 | spin_lock_irqsave(&local->sta_lock, flags); |
2416 | list_for_each_entry_safe(sta, tmp, &local->sta_list, list) | 2982 | list_for_each_entry_safe(sta, tmp, &local->sta_list, list) |
2417 | if (time_after(jiffies, sta->last_rx + | 2983 | if (time_after(jiffies, sta->last_rx + exp_time)) { |
2418 | IEEE80211_IBSS_INACTIVITY_LIMIT)) { | ||
2419 | printk(KERN_DEBUG "%s: expiring inactive STA %s\n", | 2984 | printk(KERN_DEBUG "%s: expiring inactive STA %s\n", |
2420 | dev->name, print_mac(mac, sta->addr)); | 2985 | dev->name, print_mac(mac, sta->addr)); |
2421 | __sta_info_get(sta); | 2986 | sta_info_unlink(&sta); |
2422 | sta_info_remove(sta); | 2987 | if (sta) |
2423 | list_add(&sta->list, &tmp_list); | 2988 | list_add(&sta->list, &tmp_list); |
2424 | } | 2989 | } |
2425 | write_unlock_bh(&local->sta_lock); | 2990 | spin_unlock_irqrestore(&local->sta_lock, flags); |
2426 | 2991 | ||
2427 | list_for_each_entry_safe(sta, tmp, &tmp_list, list) { | 2992 | synchronize_rcu(); |
2428 | sta_info_free(sta); | 2993 | |
2429 | sta_info_put(sta); | 2994 | rtnl_lock(); |
2430 | } | 2995 | list_for_each_entry_safe(sta, tmp, &tmp_list, list) |
2996 | sta_info_destroy(sta); | ||
2997 | rtnl_unlock(); | ||
2431 | } | 2998 | } |
2432 | 2999 | ||
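The rewritten ieee80211_sta_expire() above replaces the sleeping rwlock with a spinlock plus RCU: stale entries are only unlinked while the lock is held, synchronize_rcu() then guarantees no reader still holds a pointer to them, and the actual destruction happens afterwards under the RTNL. A minimal userspace sketch of the same two-phase removal, with wait_for_readers() standing in for the grace period:

#include <pthread.h>
#include <stdlib.h>

struct sta {
	struct sta *next;
	long last_rx;
};

static pthread_mutex_t sta_lock = PTHREAD_MUTEX_INITIALIZER;
static struct sta *sta_list;

static void wait_for_readers(void) { /* grace period placeholder */ }

void expire_stations(long now, long exp_time)
{
	struct sta **pp, *sta, *doomed = NULL;

	/* phase 1: unlink while holding the lock, but do not free yet */
	pthread_mutex_lock(&sta_lock);
	for (pp = &sta_list; (sta = *pp) != NULL; ) {
		if (now - sta->last_rx > exp_time) {
			*pp = sta->next;
			sta->next = doomed;
			doomed = sta;
		} else {
			pp = &sta->next;
		}
	}
	pthread_mutex_unlock(&sta_lock);

	/* phase 2: once no reader can still see the entries, free them */
	wait_for_readers();
	while (doomed) {
		struct sta *next = doomed->next;
		free(doomed);
		doomed = next;
	}
}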
2433 | 3000 | ||
@@ -2436,7 +3003,7 @@ static void ieee80211_sta_merge_ibss(struct net_device *dev, | |||
2436 | { | 3003 | { |
2437 | mod_timer(&ifsta->timer, jiffies + IEEE80211_IBSS_MERGE_INTERVAL); | 3004 | mod_timer(&ifsta->timer, jiffies + IEEE80211_IBSS_MERGE_INTERVAL); |
2438 | 3005 | ||
2439 | ieee80211_sta_expire(dev); | 3006 | ieee80211_sta_expire(dev, IEEE80211_IBSS_INACTIVITY_LIMIT); |
2440 | if (ieee80211_sta_active_ibss(dev)) | 3007 | if (ieee80211_sta_active_ibss(dev)) |
2441 | return; | 3008 | return; |
2442 | 3009 | ||
@@ -2446,6 +3013,36 @@ static void ieee80211_sta_merge_ibss(struct net_device *dev, | |||
2446 | } | 3013 | } |
2447 | 3014 | ||
2448 | 3015 | ||
3016 | #ifdef CONFIG_MAC80211_MESH | ||
3017 | static void ieee80211_mesh_housekeeping(struct net_device *dev, | ||
3018 | struct ieee80211_if_sta *ifsta) | ||
3019 | { | ||
3020 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
3021 | bool free_plinks; | ||
3022 | |||
3023 | ieee80211_sta_expire(dev, IEEE80211_MESH_PEER_INACTIVITY_LIMIT); | ||
3024 | mesh_path_expire(dev); | ||
3025 | |||
3026 | free_plinks = mesh_plink_availables(sdata); | ||
3027 | if (free_plinks != sdata->u.sta.accepting_plinks) | ||
3028 | ieee80211_if_config_beacon(dev); | ||
3029 | |||
3030 | mod_timer(&ifsta->timer, jiffies + | ||
3031 | IEEE80211_MESH_HOUSEKEEPING_INTERVAL); | ||
3032 | } | ||
3033 | |||
3034 | |||
3035 | void ieee80211_start_mesh(struct net_device *dev) | ||
3036 | { | ||
3037 | struct ieee80211_if_sta *ifsta; | ||
3038 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
3039 | ifsta = &sdata->u.sta; | ||
3040 | ifsta->state = IEEE80211_MESH_UP; | ||
3041 | ieee80211_sta_timer((unsigned long)sdata); | ||
3042 | } | ||
3043 | #endif | ||
3044 | |||
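The new mesh housekeeping handler expires inactive peers and stale mesh paths, re-advertises the accepting-peer-links capability only when the number of free peer-link slots actually changes, and re-arms its own timer. A small sketch of that change-detection step (names are illustrative, not the mac80211 ones):

#include <stdbool.h>

struct mesh_if {
	bool accepting_plinks;   /* what the current beacon advertises */
	int  free_plink_slots;
};

static void rebuild_beacon(struct mesh_if *m)
{
	(void)m;                 /* rebuild the mesh config IE here */
}

void housekeeping_tick(struct mesh_if *m)
{
	bool free_plinks = m->free_plink_slots > 0;

	/* expire inactive peers and stale paths here ... */

	if (free_plinks != m->accepting_plinks) {
		m->accepting_plinks = free_plinks;
		rebuild_beacon(m);   /* only touch the beacon on a real change */
	}

	/* re-arm the housekeeping timer before returning */
}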
3045 | |||
2449 | void ieee80211_sta_timer(unsigned long data) | 3046 | void ieee80211_sta_timer(unsigned long data) |
2450 | { | 3047 | { |
2451 | struct ieee80211_sub_if_data *sdata = | 3048 | struct ieee80211_sub_if_data *sdata = |
@@ -2457,7 +3054,6 @@ void ieee80211_sta_timer(unsigned long data) | |||
2457 | queue_work(local->hw.workqueue, &ifsta->work); | 3054 | queue_work(local->hw.workqueue, &ifsta->work); |
2458 | } | 3055 | } |
2459 | 3056 | ||
2460 | |||
2461 | void ieee80211_sta_work(struct work_struct *work) | 3057 | void ieee80211_sta_work(struct work_struct *work) |
2462 | { | 3058 | { |
2463 | struct ieee80211_sub_if_data *sdata = | 3059 | struct ieee80211_sub_if_data *sdata = |
@@ -2474,7 +3070,8 @@ void ieee80211_sta_work(struct work_struct *work) | |||
2474 | return; | 3070 | return; |
2475 | 3071 | ||
2476 | if (sdata->vif.type != IEEE80211_IF_TYPE_STA && | 3072 | if (sdata->vif.type != IEEE80211_IF_TYPE_STA && |
2477 | sdata->vif.type != IEEE80211_IF_TYPE_IBSS) { | 3073 | sdata->vif.type != IEEE80211_IF_TYPE_IBSS && |
3074 | sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT) { | ||
2478 | printk(KERN_DEBUG "%s: ieee80211_sta_work: non-STA interface " | 3075 | printk(KERN_DEBUG "%s: ieee80211_sta_work: non-STA interface " |
2479 | "(type=%d)\n", dev->name, sdata->vif.type); | 3076 | "(type=%d)\n", dev->name, sdata->vif.type); |
2480 | return; | 3077 | return; |
@@ -2484,6 +3081,13 @@ void ieee80211_sta_work(struct work_struct *work) | |||
2484 | while ((skb = skb_dequeue(&ifsta->skb_queue))) | 3081 | while ((skb = skb_dequeue(&ifsta->skb_queue))) |
2485 | ieee80211_sta_rx_queued_mgmt(dev, skb); | 3082 | ieee80211_sta_rx_queued_mgmt(dev, skb); |
2486 | 3083 | ||
3084 | #ifdef CONFIG_MAC80211_MESH | ||
3085 | if (ifsta->preq_queue_len && | ||
3086 | time_after(jiffies, | ||
3087 | ifsta->last_preq + msecs_to_jiffies(ifsta->mshcfg.dot11MeshHWMPpreqMinInterval))) | ||
3088 | mesh_path_start_discovery(dev); | ||
3089 | #endif | ||
3090 | |||
2487 | if (ifsta->state != IEEE80211_AUTHENTICATE && | 3091 | if (ifsta->state != IEEE80211_AUTHENTICATE && |
2488 | ifsta->state != IEEE80211_ASSOCIATE && | 3092 | ifsta->state != IEEE80211_ASSOCIATE && |
2489 | test_and_clear_bit(IEEE80211_STA_REQ_SCAN, &ifsta->request)) { | 3093 | test_and_clear_bit(IEEE80211_STA_REQ_SCAN, &ifsta->request)) { |
@@ -2519,6 +3123,11 @@ void ieee80211_sta_work(struct work_struct *work) | |||
2519 | case IEEE80211_IBSS_JOINED: | 3123 | case IEEE80211_IBSS_JOINED: |
2520 | ieee80211_sta_merge_ibss(dev, ifsta); | 3124 | ieee80211_sta_merge_ibss(dev, ifsta); |
2521 | break; | 3125 | break; |
3126 | #ifdef CONFIG_MAC80211_MESH | ||
3127 | case IEEE80211_MESH_UP: | ||
3128 | ieee80211_mesh_housekeeping(dev, ifsta); | ||
3129 | break; | ||
3130 | #endif | ||
2522 | default: | 3131 | default: |
2523 | printk(KERN_DEBUG "ieee80211_sta_work: Unknown state %d\n", | 3132 | printk(KERN_DEBUG "ieee80211_sta_work: Unknown state %d\n", |
2524 | ifsta->state); | 3133 | ifsta->state); |
@@ -2629,7 +3238,7 @@ static int ieee80211_sta_config_auth(struct net_device *dev, | |||
2629 | } | 3238 | } |
2630 | 3239 | ||
2631 | spin_lock_bh(&local->sta_bss_lock); | 3240 | spin_lock_bh(&local->sta_bss_lock); |
2632 | freq = local->oper_channel->freq; | 3241 | freq = local->oper_channel->center_freq; |
2633 | list_for_each_entry(bss, &local->sta_bss_list, list) { | 3242 | list_for_each_entry(bss, &local->sta_bss_list, list) { |
2634 | if (!(bss->capability & WLAN_CAPABILITY_ESS)) | 3243 | if (!(bss->capability & WLAN_CAPABILITY_ESS)) |
2635 | continue; | 3244 | continue; |
@@ -2660,7 +3269,7 @@ static int ieee80211_sta_config_auth(struct net_device *dev, | |||
2660 | spin_unlock_bh(&local->sta_bss_lock); | 3269 | spin_unlock_bh(&local->sta_bss_lock); |
2661 | 3270 | ||
2662 | if (selected) { | 3271 | if (selected) { |
2663 | ieee80211_set_channel(local, -1, selected->freq); | 3272 | ieee80211_set_freq(local, selected->freq); |
2664 | if (!(ifsta->flags & IEEE80211_STA_SSID_SET)) | 3273 | if (!(ifsta->flags & IEEE80211_STA_SSID_SET)) |
2665 | ieee80211_sta_set_ssid(dev, selected->ssid, | 3274 | ieee80211_sta_set_ssid(dev, selected->ssid, |
2666 | selected->ssid_len); | 3275 | selected->ssid_len); |
@@ -2684,162 +3293,6 @@ static int ieee80211_sta_config_auth(struct net_device *dev, | |||
2684 | return -1; | 3293 | return -1; |
2685 | } | 3294 | } |
2686 | 3295 | ||
2687 | static int ieee80211_sta_join_ibss(struct net_device *dev, | ||
2688 | struct ieee80211_if_sta *ifsta, | ||
2689 | struct ieee80211_sta_bss *bss) | ||
2690 | { | ||
2691 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
2692 | int res, rates, i, j; | ||
2693 | struct sk_buff *skb; | ||
2694 | struct ieee80211_mgmt *mgmt; | ||
2695 | struct ieee80211_tx_control control; | ||
2696 | struct ieee80211_hw_mode *mode; | ||
2697 | struct rate_selection ratesel; | ||
2698 | u8 *pos; | ||
2699 | struct ieee80211_sub_if_data *sdata; | ||
2700 | |||
2701 | /* Remove possible STA entries from other IBSS networks. */ | ||
2702 | sta_info_flush(local, NULL); | ||
2703 | |||
2704 | if (local->ops->reset_tsf) { | ||
2705 | /* Reset own TSF to allow time synchronization work. */ | ||
2706 | local->ops->reset_tsf(local_to_hw(local)); | ||
2707 | } | ||
2708 | memcpy(ifsta->bssid, bss->bssid, ETH_ALEN); | ||
2709 | res = ieee80211_if_config(dev); | ||
2710 | if (res) | ||
2711 | return res; | ||
2712 | |||
2713 | local->hw.conf.beacon_int = bss->beacon_int >= 10 ? bss->beacon_int : 10; | ||
2714 | |||
2715 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
2716 | sdata->drop_unencrypted = bss->capability & | ||
2717 | WLAN_CAPABILITY_PRIVACY ? 1 : 0; | ||
2718 | |||
2719 | res = ieee80211_set_channel(local, -1, bss->freq); | ||
2720 | |||
2721 | if (!(local->oper_channel->flag & IEEE80211_CHAN_W_IBSS)) { | ||
2722 | printk(KERN_DEBUG "%s: IBSS not allowed on channel %d " | ||
2723 | "(%d MHz)\n", dev->name, local->hw.conf.channel, | ||
2724 | local->hw.conf.freq); | ||
2725 | return -1; | ||
2726 | } | ||
2727 | |||
2728 | /* Set beacon template based on scan results */ | ||
2729 | skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400); | ||
2730 | do { | ||
2731 | if (!skb) | ||
2732 | break; | ||
2733 | |||
2734 | skb_reserve(skb, local->hw.extra_tx_headroom); | ||
2735 | |||
2736 | mgmt = (struct ieee80211_mgmt *) | ||
2737 | skb_put(skb, 24 + sizeof(mgmt->u.beacon)); | ||
2738 | memset(mgmt, 0, 24 + sizeof(mgmt->u.beacon)); | ||
2739 | mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, | ||
2740 | IEEE80211_STYPE_BEACON); | ||
2741 | memset(mgmt->da, 0xff, ETH_ALEN); | ||
2742 | memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); | ||
2743 | memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN); | ||
2744 | mgmt->u.beacon.beacon_int = | ||
2745 | cpu_to_le16(local->hw.conf.beacon_int); | ||
2746 | mgmt->u.beacon.capab_info = cpu_to_le16(bss->capability); | ||
2747 | |||
2748 | pos = skb_put(skb, 2 + ifsta->ssid_len); | ||
2749 | *pos++ = WLAN_EID_SSID; | ||
2750 | *pos++ = ifsta->ssid_len; | ||
2751 | memcpy(pos, ifsta->ssid, ifsta->ssid_len); | ||
2752 | |||
2753 | rates = bss->supp_rates_len; | ||
2754 | if (rates > 8) | ||
2755 | rates = 8; | ||
2756 | pos = skb_put(skb, 2 + rates); | ||
2757 | *pos++ = WLAN_EID_SUPP_RATES; | ||
2758 | *pos++ = rates; | ||
2759 | memcpy(pos, bss->supp_rates, rates); | ||
2760 | |||
2761 | pos = skb_put(skb, 2 + 1); | ||
2762 | *pos++ = WLAN_EID_DS_PARAMS; | ||
2763 | *pos++ = 1; | ||
2764 | *pos++ = bss->channel; | ||
2765 | |||
2766 | pos = skb_put(skb, 2 + 2); | ||
2767 | *pos++ = WLAN_EID_IBSS_PARAMS; | ||
2768 | *pos++ = 2; | ||
2769 | /* FIX: set ATIM window based on scan results */ | ||
2770 | *pos++ = 0; | ||
2771 | *pos++ = 0; | ||
2772 | |||
2773 | if (bss->supp_rates_len > 8) { | ||
2774 | rates = bss->supp_rates_len - 8; | ||
2775 | pos = skb_put(skb, 2 + rates); | ||
2776 | *pos++ = WLAN_EID_EXT_SUPP_RATES; | ||
2777 | *pos++ = rates; | ||
2778 | memcpy(pos, &bss->supp_rates[8], rates); | ||
2779 | } | ||
2780 | |||
2781 | memset(&control, 0, sizeof(control)); | ||
2782 | rate_control_get_rate(dev, local->oper_hw_mode, skb, &ratesel); | ||
2783 | if (!ratesel.rate) { | ||
2784 | printk(KERN_DEBUG "%s: Failed to determine TX rate " | ||
2785 | "for IBSS beacon\n", dev->name); | ||
2786 | break; | ||
2787 | } | ||
2788 | control.vif = &sdata->vif; | ||
2789 | control.tx_rate = | ||
2790 | (sdata->bss_conf.use_short_preamble && | ||
2791 | (ratesel.rate->flags & IEEE80211_RATE_PREAMBLE2)) ? | ||
2792 | ratesel.rate->val2 : ratesel.rate->val; | ||
2793 | control.antenna_sel_tx = local->hw.conf.antenna_sel_tx; | ||
2794 | control.power_level = local->hw.conf.power_level; | ||
2795 | control.flags |= IEEE80211_TXCTL_NO_ACK; | ||
2796 | control.retry_limit = 1; | ||
2797 | |||
2798 | ifsta->probe_resp = skb_copy(skb, GFP_ATOMIC); | ||
2799 | if (ifsta->probe_resp) { | ||
2800 | mgmt = (struct ieee80211_mgmt *) | ||
2801 | ifsta->probe_resp->data; | ||
2802 | mgmt->frame_control = | ||
2803 | IEEE80211_FC(IEEE80211_FTYPE_MGMT, | ||
2804 | IEEE80211_STYPE_PROBE_RESP); | ||
2805 | } else { | ||
2806 | printk(KERN_DEBUG "%s: Could not allocate ProbeResp " | ||
2807 | "template for IBSS\n", dev->name); | ||
2808 | } | ||
2809 | |||
2810 | if (local->ops->beacon_update && | ||
2811 | local->ops->beacon_update(local_to_hw(local), | ||
2812 | skb, &control) == 0) { | ||
2813 | printk(KERN_DEBUG "%s: Configured IBSS beacon " | ||
2814 | "template based on scan results\n", dev->name); | ||
2815 | skb = NULL; | ||
2816 | } | ||
2817 | |||
2818 | rates = 0; | ||
2819 | mode = local->oper_hw_mode; | ||
2820 | for (i = 0; i < bss->supp_rates_len; i++) { | ||
2821 | int bitrate = (bss->supp_rates[i] & 0x7f) * 5; | ||
2822 | for (j = 0; j < mode->num_rates; j++) | ||
2823 | if (mode->rates[j].rate == bitrate) | ||
2824 | rates |= BIT(j); | ||
2825 | } | ||
2826 | ifsta->supp_rates_bits = rates; | ||
2827 | } while (0); | ||
2828 | |||
2829 | if (skb) { | ||
2830 | printk(KERN_DEBUG "%s: Failed to configure IBSS beacon " | ||
2831 | "template\n", dev->name); | ||
2832 | dev_kfree_skb(skb); | ||
2833 | } | ||
2834 | |||
2835 | ifsta->state = IEEE80211_IBSS_JOINED; | ||
2836 | mod_timer(&ifsta->timer, jiffies + IEEE80211_IBSS_MERGE_INTERVAL); | ||
2837 | |||
2838 | ieee80211_rx_bss_put(dev, bss); | ||
2839 | |||
2840 | return res; | ||
2841 | } | ||
2842 | |||
2843 | 3296 | ||
2844 | static int ieee80211_sta_create_ibss(struct net_device *dev, | 3297 | static int ieee80211_sta_create_ibss(struct net_device *dev, |
2845 | struct ieee80211_if_sta *ifsta) | 3298 | struct ieee80211_if_sta *ifsta) |
@@ -2847,7 +3300,7 @@ static int ieee80211_sta_create_ibss(struct net_device *dev, | |||
2847 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 3300 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); |
2848 | struct ieee80211_sta_bss *bss; | 3301 | struct ieee80211_sta_bss *bss; |
2849 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 3302 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
2850 | struct ieee80211_hw_mode *mode; | 3303 | struct ieee80211_supported_band *sband; |
2851 | u8 bssid[ETH_ALEN], *pos; | 3304 | u8 bssid[ETH_ALEN], *pos; |
2852 | int i; | 3305 | int i; |
2853 | DECLARE_MAC_BUF(mac); | 3306 | DECLARE_MAC_BUF(mac); |
@@ -2869,28 +3322,28 @@ static int ieee80211_sta_create_ibss(struct net_device *dev, | |||
2869 | printk(KERN_DEBUG "%s: Creating new IBSS network, BSSID %s\n", | 3322 | printk(KERN_DEBUG "%s: Creating new IBSS network, BSSID %s\n", |
2870 | dev->name, print_mac(mac, bssid)); | 3323 | dev->name, print_mac(mac, bssid)); |
2871 | 3324 | ||
2872 | bss = ieee80211_rx_bss_add(dev, bssid, local->hw.conf.channel, | 3325 | bss = ieee80211_rx_bss_add(dev, bssid, |
3326 | local->hw.conf.channel->center_freq, | ||
2873 | sdata->u.sta.ssid, sdata->u.sta.ssid_len); | 3327 | sdata->u.sta.ssid, sdata->u.sta.ssid_len); |
2874 | if (!bss) | 3328 | if (!bss) |
2875 | return -ENOMEM; | 3329 | return -ENOMEM; |
2876 | 3330 | ||
2877 | mode = local->oper_hw_mode; | 3331 | bss->band = local->hw.conf.channel->band; |
3332 | sband = local->hw.wiphy->bands[bss->band]; | ||
2878 | 3333 | ||
2879 | if (local->hw.conf.beacon_int == 0) | 3334 | if (local->hw.conf.beacon_int == 0) |
2880 | local->hw.conf.beacon_int = 100; | 3335 | local->hw.conf.beacon_int = 10000; |
2881 | bss->beacon_int = local->hw.conf.beacon_int; | 3336 | bss->beacon_int = local->hw.conf.beacon_int; |
2882 | bss->hw_mode = local->hw.conf.phymode; | ||
2883 | bss->freq = local->hw.conf.freq; | ||
2884 | bss->last_update = jiffies; | 3337 | bss->last_update = jiffies; |
2885 | bss->capability = WLAN_CAPABILITY_IBSS; | 3338 | bss->capability = WLAN_CAPABILITY_IBSS; |
2886 | if (sdata->default_key) { | 3339 | if (sdata->default_key) { |
2887 | bss->capability |= WLAN_CAPABILITY_PRIVACY; | 3340 | bss->capability |= WLAN_CAPABILITY_PRIVACY; |
2888 | } else | 3341 | } else |
2889 | sdata->drop_unencrypted = 0; | 3342 | sdata->drop_unencrypted = 0; |
2890 | bss->supp_rates_len = mode->num_rates; | 3343 | bss->supp_rates_len = sband->n_bitrates; |
2891 | pos = bss->supp_rates; | 3344 | pos = bss->supp_rates; |
2892 | for (i = 0; i < mode->num_rates; i++) { | 3345 | for (i = 0; i < sband->n_bitrates; i++) { |
2893 | int rate = mode->rates[i].rate; | 3346 | int rate = sband->bitrates[i].bitrate; |
2894 | *pos++ = (u8) (rate / 5); | 3347 | *pos++ = (u8) (rate / 5); |
2895 | } | 3348 | } |
2896 | 3349 | ||
@@ -2939,7 +3392,8 @@ static int ieee80211_sta_find_ibss(struct net_device *dev, | |||
2939 | "%s\n", print_mac(mac, bssid), print_mac(mac2, ifsta->bssid)); | 3392 | "%s\n", print_mac(mac, bssid), print_mac(mac2, ifsta->bssid)); |
2940 | #endif /* CONFIG_MAC80211_IBSS_DEBUG */ | 3393 | #endif /* CONFIG_MAC80211_IBSS_DEBUG */ |
2941 | if (found && memcmp(ifsta->bssid, bssid, ETH_ALEN) != 0 && | 3394 | if (found && memcmp(ifsta->bssid, bssid, ETH_ALEN) != 0 && |
2942 | (bss = ieee80211_rx_bss_get(dev, bssid, local->hw.conf.channel, | 3395 | (bss = ieee80211_rx_bss_get(dev, bssid, |
3396 | local->hw.conf.channel->center_freq, | ||
2943 | ifsta->ssid, ifsta->ssid_len))) { | 3397 | ifsta->ssid, ifsta->ssid_len))) { |
2944 | printk(KERN_DEBUG "%s: Selected IBSS BSSID %s" | 3398 | printk(KERN_DEBUG "%s: Selected IBSS BSSID %s" |
2945 | " based on configured SSID\n", | 3399 | " based on configured SSID\n", |
@@ -2967,13 +3421,13 @@ static int ieee80211_sta_find_ibss(struct net_device *dev, | |||
2967 | if (time_after(jiffies, ifsta->ibss_join_req + | 3421 | if (time_after(jiffies, ifsta->ibss_join_req + |
2968 | IEEE80211_IBSS_JOIN_TIMEOUT)) { | 3422 | IEEE80211_IBSS_JOIN_TIMEOUT)) { |
2969 | if ((ifsta->flags & IEEE80211_STA_CREATE_IBSS) && | 3423 | if ((ifsta->flags & IEEE80211_STA_CREATE_IBSS) && |
2970 | local->oper_channel->flag & IEEE80211_CHAN_W_IBSS) | 3424 | (!(local->oper_channel->flags & |
3425 | IEEE80211_CHAN_NO_IBSS))) | ||
2971 | return ieee80211_sta_create_ibss(dev, ifsta); | 3426 | return ieee80211_sta_create_ibss(dev, ifsta); |
2972 | if (ifsta->flags & IEEE80211_STA_CREATE_IBSS) { | 3427 | if (ifsta->flags & IEEE80211_STA_CREATE_IBSS) { |
2973 | printk(KERN_DEBUG "%s: IBSS not allowed on the" | 3428 | printk(KERN_DEBUG "%s: IBSS not allowed on" |
2974 | " configured channel %d (%d MHz)\n", | 3429 | " %d MHz\n", dev->name, |
2975 | dev->name, local->hw.conf.channel, | 3430 | local->hw.conf.channel->center_freq); |
2976 | local->hw.conf.freq); | ||
2977 | } | 3431 | } |
2978 | 3432 | ||
2979 | /* No IBSS found - decrease scan interval and continue | 3433 | /* No IBSS found - decrease scan interval and continue |
@@ -2992,7 +3446,7 @@ static int ieee80211_sta_find_ibss(struct net_device *dev, | |||
2992 | 3446 | ||
2993 | int ieee80211_sta_set_ssid(struct net_device *dev, char *ssid, size_t len) | 3447 | int ieee80211_sta_set_ssid(struct net_device *dev, char *ssid, size_t len) |
2994 | { | 3448 | { |
2995 | struct ieee80211_sub_if_data *sdata; | 3449 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
2996 | struct ieee80211_if_sta *ifsta; | 3450 | struct ieee80211_if_sta *ifsta; |
2997 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 3451 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); |
2998 | 3452 | ||
@@ -3006,18 +3460,23 @@ int ieee80211_sta_set_ssid(struct net_device *dev, char *ssid, size_t len) | |||
3006 | int i; | 3460 | int i; |
3007 | 3461 | ||
3008 | memset(&qparam, 0, sizeof(qparam)); | 3462 | memset(&qparam, 0, sizeof(qparam)); |
3009 | /* TODO: are these ok defaults for all hw_modes? */ | 3463 | |
3010 | qparam.aifs = 2; | 3464 | qparam.aifs = 2; |
3011 | qparam.cw_min = | 3465 | |
3012 | local->hw.conf.phymode == MODE_IEEE80211B ? 31 : 15; | 3466 | if (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ && |
3467 | !(sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)) | ||
3468 | qparam.cw_min = 31; | ||
3469 | else | ||
3470 | qparam.cw_min = 15; | ||
3471 | |||
3013 | qparam.cw_max = 1023; | 3472 | qparam.cw_max = 1023; |
3014 | qparam.burst_time = 0; | 3473 | qparam.txop = 0; |
3474 | |||
3015 | for (i = IEEE80211_TX_QUEUE_DATA0; i < NUM_TX_DATA_QUEUES; i++) | 3475 | for (i = IEEE80211_TX_QUEUE_DATA0; i < NUM_TX_DATA_QUEUES; i++) |
3016 | { | ||
3017 | local->ops->conf_tx(local_to_hw(local), | 3476 | local->ops->conf_tx(local_to_hw(local), |
3018 | i + IEEE80211_TX_QUEUE_DATA0, | 3477 | i + IEEE80211_TX_QUEUE_DATA0, |
3019 | &qparam); | 3478 | &qparam); |
3020 | } | 3479 | |
3021 | /* IBSS uses different parameters for Beacon sending */ | 3480 | /* IBSS uses different parameters for Beacon sending */ |
3022 | qparam.cw_min++; | 3481 | qparam.cw_min++; |
3023 | qparam.cw_min *= 2; | 3482 | qparam.cw_min *= 2; |
@@ -3026,7 +3485,6 @@ int ieee80211_sta_set_ssid(struct net_device *dev, char *ssid, size_t len) | |||
3026 | IEEE80211_TX_QUEUE_BEACON, &qparam); | 3485 | IEEE80211_TX_QUEUE_BEACON, &qparam); |
3027 | } | 3486 | } |
3028 | 3487 | ||
3029 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
3030 | ifsta = &sdata->u.sta; | 3488 | ifsta = &sdata->u.sta; |
3031 | 3489 | ||
3032 | if (ifsta->ssid_len != len || memcmp(ifsta->ssid, ssid, len) != 0) | 3490 | if (ifsta->ssid_len != len || memcmp(ifsta->ssid, ssid, len) != 0) |
@@ -3118,6 +3576,13 @@ static void ieee80211_send_nullfunc(struct ieee80211_local *local, | |||
3118 | } | 3576 | } |
3119 | 3577 | ||
3120 | 3578 | ||
3579 | static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata) | ||
3580 | { | ||
3581 | if (sdata->vif.type == IEEE80211_IF_TYPE_STA || | ||
3582 | ieee80211_vif_is_mesh(&sdata->vif)) | ||
3583 | ieee80211_sta_timer((unsigned long)sdata); | ||
3584 | } | ||
3585 | |||
3121 | void ieee80211_scan_completed(struct ieee80211_hw *hw) | 3586 | void ieee80211_scan_completed(struct ieee80211_hw *hw) |
3122 | { | 3587 | { |
3123 | struct ieee80211_local *local = hw_to_local(hw); | 3588 | struct ieee80211_local *local = hw_to_local(hw); |
@@ -3131,6 +3596,12 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw) | |||
3131 | 3596 | ||
3132 | if (local->sta_hw_scanning) { | 3597 | if (local->sta_hw_scanning) { |
3133 | local->sta_hw_scanning = 0; | 3598 | local->sta_hw_scanning = 0; |
3599 | /* Restart STA timer for HW scan case */ | ||
3600 | rcu_read_lock(); | ||
3601 | list_for_each_entry_rcu(sdata, &local->interfaces, list) | ||
3602 | ieee80211_restart_sta_timer(sdata); | ||
3603 | rcu_read_unlock(); | ||
3604 | |||
3134 | goto done; | 3605 | goto done; |
3135 | } | 3606 | } |
3136 | 3607 | ||
@@ -3157,11 +3628,12 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw) | |||
3157 | if (sdata->dev == local->mdev) | 3628 | if (sdata->dev == local->mdev) |
3158 | continue; | 3629 | continue; |
3159 | 3630 | ||
3160 | if (sdata->vif.type == IEEE80211_IF_TYPE_STA) { | 3631 | /* Tell AP we're back */ |
3161 | if (sdata->u.sta.flags & IEEE80211_STA_ASSOCIATED) | 3632 | if (sdata->vif.type == IEEE80211_IF_TYPE_STA && |
3162 | ieee80211_send_nullfunc(local, sdata, 0); | 3633 | sdata->u.sta.flags & IEEE80211_STA_ASSOCIATED) |
3163 | ieee80211_sta_timer((unsigned long)sdata); | 3634 | ieee80211_send_nullfunc(local, sdata, 0); |
3164 | } | 3635 | |
3636 | ieee80211_restart_sta_timer(sdata); | ||
3165 | 3637 | ||
3166 | netif_wake_queue(sdata->dev); | 3638 | netif_wake_queue(sdata->dev); |
3167 | } | 3639 | } |
@@ -3185,7 +3657,7 @@ void ieee80211_sta_scan_work(struct work_struct *work) | |||
3185 | container_of(work, struct ieee80211_local, scan_work.work); | 3657 | container_of(work, struct ieee80211_local, scan_work.work); |
3186 | struct net_device *dev = local->scan_dev; | 3658 | struct net_device *dev = local->scan_dev; |
3187 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 3659 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
3188 | struct ieee80211_hw_mode *mode; | 3660 | struct ieee80211_supported_band *sband; |
3189 | struct ieee80211_channel *chan; | 3661 | struct ieee80211_channel *chan; |
3190 | int skip; | 3662 | int skip; |
3191 | unsigned long next_delay = 0; | 3663 | unsigned long next_delay = 0; |
@@ -3195,44 +3667,59 @@ void ieee80211_sta_scan_work(struct work_struct *work) | |||
3195 | 3667 | ||
3196 | switch (local->scan_state) { | 3668 | switch (local->scan_state) { |
3197 | case SCAN_SET_CHANNEL: | 3669 | case SCAN_SET_CHANNEL: |
3198 | mode = local->scan_hw_mode; | 3670 | /* |
3199 | if (local->scan_hw_mode->list.next == &local->modes_list && | 3671 | * Get current scan band. scan_band may be IEEE80211_NUM_BANDS |
3200 | local->scan_channel_idx >= mode->num_channels) { | 3672 | * after we successfully scanned the last channel of the last |
3673 | * band (and the last band is supported by the hw) | ||
3674 | */ | ||
3675 | if (local->scan_band < IEEE80211_NUM_BANDS) | ||
3676 | sband = local->hw.wiphy->bands[local->scan_band]; | ||
3677 | else | ||
3678 | sband = NULL; | ||
3679 | |||
3680 | /* | ||
3681 | * If we are at an unsupported band and have more bands | ||
3682 | * left to scan, advance to the next supported one. | ||
3683 | */ | ||
3684 | while (!sband && local->scan_band < IEEE80211_NUM_BANDS - 1) { | ||
3685 | local->scan_band++; | ||
3686 | sband = local->hw.wiphy->bands[local->scan_band]; | ||
3687 | local->scan_channel_idx = 0; | ||
3688 | } | ||
3689 | |||
3690 | /* if no more bands/channels left, complete scan */ | ||
3691 | if (!sband || local->scan_channel_idx >= sband->n_channels) { | ||
3201 | ieee80211_scan_completed(local_to_hw(local)); | 3692 | ieee80211_scan_completed(local_to_hw(local)); |
3202 | return; | 3693 | return; |
3203 | } | 3694 | } |
3204 | skip = !(local->enabled_modes & (1 << mode->mode)); | 3695 | skip = 0; |
3205 | chan = &mode->channels[local->scan_channel_idx]; | 3696 | chan = &sband->channels[local->scan_channel_idx]; |
3206 | if (!(chan->flag & IEEE80211_CHAN_W_SCAN) || | 3697 | |
3698 | if (chan->flags & IEEE80211_CHAN_DISABLED || | ||
3207 | (sdata->vif.type == IEEE80211_IF_TYPE_IBSS && | 3699 | (sdata->vif.type == IEEE80211_IF_TYPE_IBSS && |
3208 | !(chan->flag & IEEE80211_CHAN_W_IBSS)) || | 3700 | chan->flags & IEEE80211_CHAN_NO_IBSS)) |
3209 | (local->hw_modes & local->enabled_modes & | ||
3210 | (1 << MODE_IEEE80211G) && mode->mode == MODE_IEEE80211B)) | ||
3211 | skip = 1; | 3701 | skip = 1; |
3212 | 3702 | ||
3213 | if (!skip) { | 3703 | if (!skip) { |
3214 | #if 0 | ||
3215 | printk(KERN_DEBUG "%s: scan channel %d (%d MHz)\n", | ||
3216 | dev->name, chan->chan, chan->freq); | ||
3217 | #endif | ||
3218 | |||
3219 | local->scan_channel = chan; | 3704 | local->scan_channel = chan; |
3220 | if (ieee80211_hw_config(local)) { | 3705 | if (ieee80211_hw_config(local)) { |
3221 | printk(KERN_DEBUG "%s: failed to set channel " | 3706 | printk(KERN_DEBUG "%s: failed to set freq to " |
3222 | "%d (%d MHz) for scan\n", dev->name, | 3707 | "%d MHz for scan\n", dev->name, |
3223 | chan->chan, chan->freq); | 3708 | chan->center_freq); |
3224 | skip = 1; | 3709 | skip = 1; |
3225 | } | 3710 | } |
3226 | } | 3711 | } |
3227 | 3712 | ||
3713 | /* advance state machine to next channel/band */ | ||
3228 | local->scan_channel_idx++; | 3714 | local->scan_channel_idx++; |
3229 | if (local->scan_channel_idx >= local->scan_hw_mode->num_channels) { | 3715 | if (local->scan_channel_idx >= sband->n_channels) { |
3230 | if (local->scan_hw_mode->list.next != &local->modes_list) { | 3716 | /* |
3231 | local->scan_hw_mode = list_entry(local->scan_hw_mode->list.next, | 3717 | * scan_band may end up == IEEE80211_NUM_BANDS, but |
3232 | struct ieee80211_hw_mode, | 3718 | * we'll catch that case above and complete the scan |
3233 | list); | 3719 | * if that is the case. |
3234 | local->scan_channel_idx = 0; | 3720 | */ |
3235 | } | 3721 | local->scan_band++; |
3722 | local->scan_channel_idx = 0; | ||
3236 | } | 3723 | } |
3237 | 3724 | ||
3238 | if (skip) | 3725 | if (skip) |
@@ -3243,13 +3730,14 @@ void ieee80211_sta_scan_work(struct work_struct *work) | |||
3243 | local->scan_state = SCAN_SEND_PROBE; | 3730 | local->scan_state = SCAN_SEND_PROBE; |
3244 | break; | 3731 | break; |
3245 | case SCAN_SEND_PROBE: | 3732 | case SCAN_SEND_PROBE: |
3246 | if (local->scan_channel->flag & IEEE80211_CHAN_W_ACTIVE_SCAN) { | 3733 | next_delay = IEEE80211_PASSIVE_CHANNEL_TIME; |
3247 | ieee80211_send_probe_req(dev, NULL, local->scan_ssid, | ||
3248 | local->scan_ssid_len); | ||
3249 | next_delay = IEEE80211_CHANNEL_TIME; | ||
3250 | } else | ||
3251 | next_delay = IEEE80211_PASSIVE_CHANNEL_TIME; | ||
3252 | local->scan_state = SCAN_SET_CHANNEL; | 3734 | local->scan_state = SCAN_SET_CHANNEL; |
3735 | |||
3736 | if (local->scan_channel->flags & IEEE80211_CHAN_PASSIVE_SCAN) | ||
3737 | break; | ||
3738 | ieee80211_send_probe_req(dev, NULL, local->scan_ssid, | ||
3739 | local->scan_ssid_len); | ||
3740 | next_delay = IEEE80211_CHANNEL_TIME; | ||
3253 | break; | 3741 | break; |
3254 | } | 3742 | } |
3255 | 3743 | ||
@@ -3324,10 +3812,8 @@ static int ieee80211_sta_start_scan(struct net_device *dev, | |||
3324 | } else | 3812 | } else |
3325 | local->scan_ssid_len = 0; | 3813 | local->scan_ssid_len = 0; |
3326 | local->scan_state = SCAN_SET_CHANNEL; | 3814 | local->scan_state = SCAN_SET_CHANNEL; |
3327 | local->scan_hw_mode = list_entry(local->modes_list.next, | ||
3328 | struct ieee80211_hw_mode, | ||
3329 | list); | ||
3330 | local->scan_channel_idx = 0; | 3815 | local->scan_channel_idx = 0; |
3816 | local->scan_band = IEEE80211_BAND_2GHZ; | ||
3331 | local->scan_dev = dev; | 3817 | local->scan_dev = dev; |
3332 | 3818 | ||
3333 | netif_tx_lock_bh(local->mdev); | 3819 | netif_tx_lock_bh(local->mdev); |
@@ -3382,9 +3868,6 @@ ieee80211_sta_scan_result(struct net_device *dev, | |||
3382 | bss->last_update + IEEE80211_SCAN_RESULT_EXPIRE)) | 3868 | bss->last_update + IEEE80211_SCAN_RESULT_EXPIRE)) |
3383 | return current_ev; | 3869 | return current_ev; |
3384 | 3870 | ||
3385 | if (!(local->enabled_modes & (1 << bss->hw_mode))) | ||
3386 | return current_ev; | ||
3387 | |||
3388 | memset(&iwe, 0, sizeof(iwe)); | 3871 | memset(&iwe, 0, sizeof(iwe)); |
3389 | iwe.cmd = SIOCGIWAP; | 3872 | iwe.cmd = SIOCGIWAP; |
3390 | iwe.u.ap_addr.sa_family = ARPHRD_ETHER; | 3873 | iwe.u.ap_addr.sa_family = ARPHRD_ETHER; |
@@ -3394,15 +3877,25 @@ ieee80211_sta_scan_result(struct net_device *dev, | |||
3394 | 3877 | ||
3395 | memset(&iwe, 0, sizeof(iwe)); | 3878 | memset(&iwe, 0, sizeof(iwe)); |
3396 | iwe.cmd = SIOCGIWESSID; | 3879 | iwe.cmd = SIOCGIWESSID; |
3397 | iwe.u.data.length = bss->ssid_len; | 3880 | if (bss_mesh_cfg(bss)) { |
3398 | iwe.u.data.flags = 1; | 3881 | iwe.u.data.length = bss_mesh_id_len(bss); |
3399 | current_ev = iwe_stream_add_point(current_ev, end_buf, &iwe, | 3882 | iwe.u.data.flags = 1; |
3400 | bss->ssid); | 3883 | current_ev = iwe_stream_add_point(current_ev, end_buf, &iwe, |
3884 | bss_mesh_id(bss)); | ||
3885 | } else { | ||
3886 | iwe.u.data.length = bss->ssid_len; | ||
3887 | iwe.u.data.flags = 1; | ||
3888 | current_ev = iwe_stream_add_point(current_ev, end_buf, &iwe, | ||
3889 | bss->ssid); | ||
3890 | } | ||
3401 | 3891 | ||
3402 | if (bss->capability & (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS)) { | 3892 | if (bss->capability & (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS) |
3893 | || bss_mesh_cfg(bss)) { | ||
3403 | memset(&iwe, 0, sizeof(iwe)); | 3894 | memset(&iwe, 0, sizeof(iwe)); |
3404 | iwe.cmd = SIOCGIWMODE; | 3895 | iwe.cmd = SIOCGIWMODE; |
3405 | if (bss->capability & WLAN_CAPABILITY_ESS) | 3896 | if (bss_mesh_cfg(bss)) |
3897 | iwe.u.mode = IW_MODE_MESH; | ||
3898 | else if (bss->capability & WLAN_CAPABILITY_ESS) | ||
3406 | iwe.u.mode = IW_MODE_MASTER; | 3899 | iwe.u.mode = IW_MODE_MASTER; |
3407 | else | 3900 | else |
3408 | iwe.u.mode = IW_MODE_ADHOC; | 3901 | iwe.u.mode = IW_MODE_ADHOC; |
@@ -3412,12 +3905,15 @@ ieee80211_sta_scan_result(struct net_device *dev, | |||
3412 | 3905 | ||
3413 | memset(&iwe, 0, sizeof(iwe)); | 3906 | memset(&iwe, 0, sizeof(iwe)); |
3414 | iwe.cmd = SIOCGIWFREQ; | 3907 | iwe.cmd = SIOCGIWFREQ; |
3415 | iwe.u.freq.m = bss->channel; | 3908 | iwe.u.freq.m = bss->freq; |
3416 | iwe.u.freq.e = 0; | 3909 | iwe.u.freq.e = 6; |
3417 | current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe, | 3910 | current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe, |
3418 | IW_EV_FREQ_LEN); | 3911 | IW_EV_FREQ_LEN); |
3419 | iwe.u.freq.m = bss->freq * 100000; | 3912 | |
3420 | iwe.u.freq.e = 1; | 3913 | memset(&iwe, 0, sizeof(iwe)); |
3914 | iwe.cmd = SIOCGIWFREQ; | ||
3915 | iwe.u.freq.m = ieee80211_frequency_to_channel(bss->freq); | ||
3916 | iwe.u.freq.e = 0; | ||
3421 | current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe, | 3917 | current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe, |
3422 | IW_EV_FREQ_LEN); | 3918 | IW_EV_FREQ_LEN); |
3423 | 3919 | ||
@@ -3488,6 +3984,45 @@ ieee80211_sta_scan_result(struct net_device *dev, | |||
3488 | } | 3984 | } |
3489 | } | 3985 | } |
3490 | 3986 | ||
3987 | if (bss_mesh_cfg(bss)) { | ||
3988 | char *buf; | ||
3989 | u8 *cfg = bss_mesh_cfg(bss); | ||
3990 | buf = kmalloc(50, GFP_ATOMIC); | ||
3991 | if (buf) { | ||
3992 | memset(&iwe, 0, sizeof(iwe)); | ||
3993 | iwe.cmd = IWEVCUSTOM; | ||
3994 | sprintf(buf, "Mesh network (version %d)", cfg[0]); | ||
3995 | iwe.u.data.length = strlen(buf); | ||
3996 | current_ev = iwe_stream_add_point(current_ev, end_buf, | ||
3997 | &iwe, buf); | ||
3998 | sprintf(buf, "Path Selection Protocol ID: " | ||
3999 | "0x%02X%02X%02X%02X", cfg[1], cfg[2], cfg[3], | ||
4000 | cfg[4]); | ||
4001 | iwe.u.data.length = strlen(buf); | ||
4002 | current_ev = iwe_stream_add_point(current_ev, end_buf, | ||
4003 | &iwe, buf); | ||
4004 | sprintf(buf, "Path Selection Metric ID: " | ||
4005 | "0x%02X%02X%02X%02X", cfg[5], cfg[6], cfg[7], | ||
4006 | cfg[8]); | ||
4007 | iwe.u.data.length = strlen(buf); | ||
4008 | current_ev = iwe_stream_add_point(current_ev, end_buf, | ||
4009 | &iwe, buf); | ||
4010 | sprintf(buf, "Congestion Control Mode ID: " | ||
4011 | "0x%02X%02X%02X%02X", cfg[9], cfg[10], | ||
4012 | cfg[11], cfg[12]); | ||
4013 | iwe.u.data.length = strlen(buf); | ||
4014 | current_ev = iwe_stream_add_point(current_ev, end_buf, | ||
4015 | &iwe, buf); | ||
4016 | sprintf(buf, "Channel Precedence: " | ||
4017 | "0x%02X%02X%02X%02X", cfg[13], cfg[14], | ||
4018 | cfg[15], cfg[16]); | ||
4019 | iwe.u.data.length = strlen(buf); | ||
4020 | current_ev = iwe_stream_add_point(current_ev, end_buf, | ||
4021 | &iwe, buf); | ||
4022 | kfree(buf); | ||
4023 | } | ||
4024 | } | ||
4025 | |||
3491 | return current_ev; | 4026 | return current_ev; |
3492 | } | 4027 | } |
3493 | 4028 | ||
@@ -3556,15 +4091,23 @@ struct sta_info * ieee80211_ibss_add_sta(struct net_device *dev, | |||
3556 | printk(KERN_DEBUG "%s: Adding new IBSS station %s (dev=%s)\n", | 4091 | printk(KERN_DEBUG "%s: Adding new IBSS station %s (dev=%s)\n", |
3557 | wiphy_name(local->hw.wiphy), print_mac(mac, addr), dev->name); | 4092 | wiphy_name(local->hw.wiphy), print_mac(mac, addr), dev->name); |
3558 | 4093 | ||
3559 | sta = sta_info_add(local, dev, addr, GFP_ATOMIC); | 4094 | sta = sta_info_alloc(sdata, addr, GFP_ATOMIC); |
3560 | if (!sta) | 4095 | if (!sta) |
3561 | return NULL; | 4096 | return NULL; |
3562 | 4097 | ||
3563 | sta->supp_rates = sdata->u.sta.supp_rates_bits; | 4098 | sta->flags |= WLAN_STA_AUTHORIZED; |
4099 | |||
4100 | sta->supp_rates[local->hw.conf.channel->band] = | ||
4101 | sdata->u.sta.supp_rates_bits[local->hw.conf.channel->band]; | ||
3564 | 4102 | ||
3565 | rate_control_rate_init(sta, local); | 4103 | rate_control_rate_init(sta, local); |
3566 | 4104 | ||
3567 | return sta; /* caller will call sta_info_put() */ | 4105 | if (sta_info_insert(sta)) { |
4106 | sta_info_destroy(sta); | ||
4107 | return NULL; | ||
4108 | } | ||
4109 | |||
4110 | return sta; | ||
3568 | } | 4111 | } |
3569 | 4112 | ||
3570 | 4113 | ||
diff --git a/net/mac80211/key.c b/net/mac80211/key.c index ed57fb8e82fc..f91fb4092652 100644 --- a/net/mac80211/key.c +++ b/net/mac80211/key.c | |||
@@ -13,14 +13,15 @@ | |||
13 | #include <linux/etherdevice.h> | 13 | #include <linux/etherdevice.h> |
14 | #include <linux/list.h> | 14 | #include <linux/list.h> |
15 | #include <linux/rcupdate.h> | 15 | #include <linux/rcupdate.h> |
16 | #include <linux/rtnetlink.h> | ||
16 | #include <net/mac80211.h> | 17 | #include <net/mac80211.h> |
17 | #include "ieee80211_i.h" | 18 | #include "ieee80211_i.h" |
18 | #include "debugfs_key.h" | 19 | #include "debugfs_key.h" |
19 | #include "aes_ccm.h" | 20 | #include "aes_ccm.h" |
20 | 21 | ||
21 | 22 | ||
22 | /* | 23 | /** |
23 | * Key handling basics | 24 | * DOC: Key handling basics |
24 | * | 25 | * |
25 | * Key handling in mac80211 is done based on per-interface (sub_if_data) | 26 | * Key handling in mac80211 is done based on per-interface (sub_if_data) |
26 | * keys and per-station keys. Since each station belongs to an interface, | 27 | * keys and per-station keys. Since each station belongs to an interface, |
@@ -34,6 +35,10 @@ | |||
34 | * | 35 | * |
35 | * All operations here are called under RTNL so no extra locking is | 36 | * All operations here are called under RTNL so no extra locking is |
36 | * required. | 37 | * required. |
38 | * | ||
39 | * NOTE: This code requires that sta info *destruction* is done under | ||
40 | * RTNL, otherwise it can try to access already freed STA structs | ||
41 | * when a STA key is being freed. | ||
37 | */ | 42 | */ |
38 | 43 | ||
39 | static const u8 bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; | 44 | static const u8 bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; |
@@ -84,16 +89,25 @@ static void ieee80211_key_enable_hw_accel(struct ieee80211_key *key) | |||
84 | key->conf.keyidx, print_mac(mac, addr), ret); | 89 | key->conf.keyidx, print_mac(mac, addr), ret); |
85 | } | 90 | } |
86 | 91 | ||
92 | static void ieee80211_key_mark_hw_accel_off(struct ieee80211_key *key) | ||
93 | { | ||
94 | if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) { | ||
95 | key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE; | ||
96 | key->flags |= KEY_FLAG_REMOVE_FROM_HARDWARE; | ||
97 | } | ||
98 | } | ||
99 | |||
87 | static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key) | 100 | static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key) |
88 | { | 101 | { |
89 | const u8 *addr; | 102 | const u8 *addr; |
90 | int ret; | 103 | int ret; |
91 | DECLARE_MAC_BUF(mac); | 104 | DECLARE_MAC_BUF(mac); |
92 | 105 | ||
93 | if (!key->local->ops->set_key) | 106 | if (!key || !key->local->ops->set_key) |
94 | return; | 107 | return; |
95 | 108 | ||
96 | if (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) | 109 | if (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) && |
110 | !(key->flags & KEY_FLAG_REMOVE_FROM_HARDWARE)) | ||
97 | return; | 111 | return; |
98 | 112 | ||
99 | addr = get_mac_for_key(key); | 113 | addr = get_mac_for_key(key); |
@@ -108,12 +122,11 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key) | |||
108 | wiphy_name(key->local->hw.wiphy), | 122 | wiphy_name(key->local->hw.wiphy), |
109 | key->conf.keyidx, print_mac(mac, addr), ret); | 123 | key->conf.keyidx, print_mac(mac, addr), ret); |
110 | 124 | ||
111 | key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE; | 125 | key->flags &= ~(KEY_FLAG_UPLOADED_TO_HARDWARE | |
126 | KEY_FLAG_REMOVE_FROM_HARDWARE); | ||
112 | } | 127 | } |
113 | 128 | ||
114 | struct ieee80211_key *ieee80211_key_alloc(struct ieee80211_sub_if_data *sdata, | 129 | struct ieee80211_key *ieee80211_key_alloc(enum ieee80211_key_alg alg, |
115 | struct sta_info *sta, | ||
116 | enum ieee80211_key_alg alg, | ||
117 | int idx, | 130 | int idx, |
118 | size_t key_len, | 131 | size_t key_len, |
119 | const u8 *key_data) | 132 | const u8 *key_data) |
@@ -137,10 +150,7 @@ struct ieee80211_key *ieee80211_key_alloc(struct ieee80211_sub_if_data *sdata, | |||
137 | key->conf.keyidx = idx; | 150 | key->conf.keyidx = idx; |
138 | key->conf.keylen = key_len; | 151 | key->conf.keylen = key_len; |
139 | memcpy(key->conf.key, key_data, key_len); | 152 | memcpy(key->conf.key, key_data, key_len); |
140 | 153 | INIT_LIST_HEAD(&key->list); | |
141 | key->local = sdata->local; | ||
142 | key->sdata = sdata; | ||
143 | key->sta = sta; | ||
144 | 154 | ||
145 | if (alg == ALG_CCMP) { | 155 | if (alg == ALG_CCMP) { |
146 | /* | 156 | /* |
@@ -154,13 +164,68 @@ struct ieee80211_key *ieee80211_key_alloc(struct ieee80211_sub_if_data *sdata, | |||
154 | } | 164 | } |
155 | } | 165 | } |
156 | 166 | ||
157 | ieee80211_debugfs_key_add(key->local, key); | 167 | return key; |
168 | } | ||
158 | 169 | ||
159 | /* remove key first */ | 170 | static void __ieee80211_key_replace(struct ieee80211_sub_if_data *sdata, |
160 | if (sta) | 171 | struct sta_info *sta, |
161 | ieee80211_key_free(sta->key); | 172 | struct ieee80211_key *key, |
162 | else | 173 | struct ieee80211_key *new) |
163 | ieee80211_key_free(sdata->keys[idx]); | 174 | { |
175 | int idx, defkey; | ||
176 | |||
177 | if (new) | ||
178 | list_add(&new->list, &sdata->key_list); | ||
179 | |||
180 | if (sta) { | ||
181 | rcu_assign_pointer(sta->key, new); | ||
182 | } else { | ||
183 | WARN_ON(new && key && new->conf.keyidx != key->conf.keyidx); | ||
184 | |||
185 | if (key) | ||
186 | idx = key->conf.keyidx; | ||
187 | else | ||
188 | idx = new->conf.keyidx; | ||
189 | |||
190 | defkey = key && sdata->default_key == key; | ||
191 | |||
192 | if (defkey && !new) | ||
193 | ieee80211_set_default_key(sdata, -1); | ||
194 | |||
195 | rcu_assign_pointer(sdata->keys[idx], new); | ||
196 | if (defkey && new) | ||
197 | ieee80211_set_default_key(sdata, new->conf.keyidx); | ||
198 | } | ||
199 | |||
200 | if (key) { | ||
201 | ieee80211_key_mark_hw_accel_off(key); | ||
202 | /* | ||
203 | * We'll use an empty list to indicate that the key | ||
204 | * has already been removed. | ||
205 | */ | ||
206 | list_del_init(&key->list); | ||
207 | } | ||
208 | } | ||
209 | |||
210 | void ieee80211_key_link(struct ieee80211_key *key, | ||
211 | struct ieee80211_sub_if_data *sdata, | ||
212 | struct sta_info *sta) | ||
213 | { | ||
214 | struct ieee80211_key *old_key; | ||
215 | int idx; | ||
216 | |||
217 | ASSERT_RTNL(); | ||
218 | might_sleep(); | ||
219 | |||
220 | BUG_ON(!sdata); | ||
221 | BUG_ON(!key); | ||
222 | |||
223 | idx = key->conf.keyidx; | ||
224 | key->local = sdata->local; | ||
225 | key->sdata = sdata; | ||
226 | key->sta = sta; | ||
227 | |||
228 | ieee80211_debugfs_key_add(key->local, key); | ||
164 | 229 | ||
165 | if (sta) { | 230 | if (sta) { |
166 | ieee80211_debugfs_key_sta_link(key, sta); | 231 | ieee80211_debugfs_key_sta_link(key, sta); |
@@ -175,61 +240,76 @@ struct ieee80211_key *ieee80211_key_alloc(struct ieee80211_sub_if_data *sdata, | |||
175 | if (sdata->vif.type == IEEE80211_IF_TYPE_STA) { | 240 | if (sdata->vif.type == IEEE80211_IF_TYPE_STA) { |
176 | struct sta_info *ap; | 241 | struct sta_info *ap; |
177 | 242 | ||
243 | rcu_read_lock(); | ||
244 | |||
178 | /* same here, the AP could be using QoS */ | 245 | /* same here, the AP could be using QoS */ |
179 | ap = sta_info_get(key->local, key->sdata->u.sta.bssid); | 246 | ap = sta_info_get(key->local, key->sdata->u.sta.bssid); |
180 | if (ap) { | 247 | if (ap) { |
181 | if (ap->flags & WLAN_STA_WME) | 248 | if (ap->flags & WLAN_STA_WME) |
182 | key->conf.flags |= | 249 | key->conf.flags |= |
183 | IEEE80211_KEY_FLAG_WMM_STA; | 250 | IEEE80211_KEY_FLAG_WMM_STA; |
184 | sta_info_put(ap); | ||
185 | } | 251 | } |
252 | |||
253 | rcu_read_unlock(); | ||
186 | } | 254 | } |
187 | } | 255 | } |
188 | 256 | ||
189 | /* enable hwaccel if appropriate */ | ||
190 | if (netif_running(key->sdata->dev)) | ||
191 | ieee80211_key_enable_hw_accel(key); | ||
192 | |||
193 | if (sta) | 257 | if (sta) |
194 | rcu_assign_pointer(sta->key, key); | 258 | old_key = sta->key; |
195 | else | 259 | else |
196 | rcu_assign_pointer(sdata->keys[idx], key); | 260 | old_key = sdata->keys[idx]; |
197 | 261 | ||
198 | list_add(&key->list, &sdata->key_list); | 262 | __ieee80211_key_replace(sdata, sta, old_key, key); |
199 | 263 | ||
200 | return key; | 264 | if (old_key) { |
265 | synchronize_rcu(); | ||
266 | ieee80211_key_free(old_key); | ||
267 | } | ||
268 | |||
269 | if (netif_running(sdata->dev)) | ||
270 | ieee80211_key_enable_hw_accel(key); | ||
201 | } | 271 | } |
202 | 272 | ||
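With this split, ieee80211_key_alloc() only builds the key and ieee80211_key_link() publishes it: the new key becomes visible to the RX/TX paths through rcu_assign_pointer(), and the old key is freed only after a grace period, so concurrent readers never see a partially torn-down key. A userspace model of that publish-then-free order, with wait_for_readers() again standing in for synchronize_rcu() and all names illustrative:

#include <stdatomic.h>
#include <stdlib.h>
#include <string.h>

struct key {
	unsigned char material[16];
};

static _Atomic(struct key *) current_key;

static void wait_for_readers(void) { /* grace period placeholder */ }

void key_link(const unsigned char *material)
{
	struct key *new = malloc(sizeof(*new));
	struct key *old;

	if (!new)
		return;
	memcpy(new->material, material, sizeof(new->material));

	/* publish: readers doing an acquire load see a fully built key */
	old = atomic_exchange_explicit(&current_key, new, memory_order_acq_rel);

	if (old) {
		wait_for_readers();   /* no reader may still hold 'old' */
		free(old);
	}
}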
203 | void ieee80211_key_free(struct ieee80211_key *key) | 273 | void ieee80211_key_free(struct ieee80211_key *key) |
204 | { | 274 | { |
275 | ASSERT_RTNL(); | ||
276 | might_sleep(); | ||
277 | |||
205 | if (!key) | 278 | if (!key) |
206 | return; | 279 | return; |
207 | 280 | ||
208 | if (key->sta) { | 281 | if (key->sdata) { |
209 | rcu_assign_pointer(key->sta->key, NULL); | 282 | /* |
210 | } else { | 283 | * Replace key with nothingness. |
211 | if (key->sdata->default_key == key) | 284 | * |
212 | ieee80211_set_default_key(key->sdata, -1); | 285 | * Because other code may have key reference (RCU protected) |
213 | if (key->conf.keyidx >= 0 && | 286 | * right now, we then wait for a grace period before freeing |
214 | key->conf.keyidx < NUM_DEFAULT_KEYS) | 287 | * it. |
215 | rcu_assign_pointer(key->sdata->keys[key->conf.keyidx], | 288 | * An empty list indicates it was never added to the key list |
216 | NULL); | 289 | * or has been removed already. It may, however, still be in |
217 | else | 290 | * hardware for acceleration. |
218 | WARN_ON(1); | 291 | */ |
219 | } | 292 | if (!list_empty(&key->list)) |
293 | __ieee80211_key_replace(key->sdata, key->sta, | ||
294 | key, NULL); | ||
220 | 295 | ||
221 | /* wait for all key users to complete */ | 296 | /* |
222 | synchronize_rcu(); | 297 | * Do NOT remove this without looking at sta_info_destroy() |
298 | */ | ||
299 | synchronize_rcu(); | ||
223 | 300 | ||
224 | /* remove from hwaccel if appropriate */ | 301 | /* |
225 | ieee80211_key_disable_hw_accel(key); | 302 | * Remove from hwaccel if appropriate, this will |
303 | * only happen when the key is actually unlinked, | ||
304 | * it will already be done when the key was replaced. | ||
305 | */ | ||
306 | ieee80211_key_disable_hw_accel(key); | ||
307 | } | ||
226 | 308 | ||
227 | if (key->conf.alg == ALG_CCMP) | 309 | if (key->conf.alg == ALG_CCMP) |
228 | ieee80211_aes_key_free(key->u.ccmp.tfm); | 310 | ieee80211_aes_key_free(key->u.ccmp.tfm); |
229 | ieee80211_debugfs_key_remove(key); | 311 | ieee80211_debugfs_key_remove(key); |
230 | 312 | ||
231 | list_del(&key->list); | ||
232 | |||
233 | kfree(key); | 313 | kfree(key); |
234 | } | 314 | } |
235 | 315 | ||
@@ -253,6 +333,10 @@ void ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, int idx) | |||
253 | void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata) | 333 | void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata) |
254 | { | 334 | { |
255 | struct ieee80211_key *key, *tmp; | 335 | struct ieee80211_key *key, *tmp; |
336 | LIST_HEAD(tmp_list); | ||
337 | |||
338 | ASSERT_RTNL(); | ||
339 | might_sleep(); | ||
256 | 340 | ||
257 | list_for_each_entry_safe(key, tmp, &sdata->key_list, list) | 341 | list_for_each_entry_safe(key, tmp, &sdata->key_list, list) |
258 | ieee80211_key_free(key); | 342 | ieee80211_key_free(key); |
@@ -262,8 +346,10 @@ void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata) | |||
262 | { | 346 | { |
263 | struct ieee80211_key *key; | 347 | struct ieee80211_key *key; |
264 | 348 | ||
265 | WARN_ON(!netif_running(sdata->dev)); | 349 | ASSERT_RTNL(); |
266 | if (!netif_running(sdata->dev)) | 350 | might_sleep(); |
351 | |||
352 | if (WARN_ON(!netif_running(sdata->dev))) | ||
267 | return; | 353 | return; |
268 | 354 | ||
269 | list_for_each_entry(key, &sdata->key_list, list) | 355 | list_for_each_entry(key, &sdata->key_list, list) |
@@ -274,6 +360,9 @@ void ieee80211_disable_keys(struct ieee80211_sub_if_data *sdata) | |||
274 | { | 360 | { |
275 | struct ieee80211_key *key; | 361 | struct ieee80211_key *key; |
276 | 362 | ||
363 | ASSERT_RTNL(); | ||
364 | might_sleep(); | ||
365 | |||
277 | list_for_each_entry(key, &sdata->key_list, list) | 366 | list_for_each_entry(key, &sdata->key_list, list) |
278 | ieee80211_key_disable_hw_accel(key); | 367 | ieee80211_key_disable_hw_accel(key); |
279 | } | 368 | } |
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c new file mode 100644 index 000000000000..594a3356a508 --- /dev/null +++ b/net/mac80211/mesh.c | |||
@@ -0,0 +1,449 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2008 open80211s Ltd. | ||
3 | * Authors: Luis Carlos Cobo <luisca@cozybit.com> | ||
4 | * Javier Cardona <javier@cozybit.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | |||
11 | #include "ieee80211_i.h" | ||
12 | #include "mesh.h" | ||
13 | |||
14 | #define PP_OFFSET 1 /* Path Selection Protocol */ | ||
15 | #define PM_OFFSET 5 /* Path Selection Metric */ | ||
16 | #define CC_OFFSET 9 /* Congestion Control Mode */ | ||
17 | #define CAPAB_OFFSET 17 | ||
18 | #define ACCEPT_PLINKS 0x80 | ||
19 | |||
20 | int mesh_allocated; | ||
21 | static struct kmem_cache *rm_cache; | ||
22 | |||
23 | void ieee80211s_init(void) | ||
24 | { | ||
25 | mesh_pathtbl_init(); | ||
26 | mesh_allocated = 1; | ||
27 | rm_cache = kmem_cache_create("mesh_rmc", sizeof(struct rmc_entry), | ||
28 | 0, 0, NULL); | ||
29 | } | ||
30 | |||
31 | void ieee80211s_stop(void) | ||
32 | { | ||
33 | mesh_pathtbl_unregister(); | ||
34 | kmem_cache_destroy(rm_cache); | ||
35 | } | ||
36 | |||
37 | /** | ||
38 | * mesh_matches_local - check if the config of a mesh point matches ours | ||
39 | * | ||
40 | * @ie: information elements of a management frame from the mesh peer | ||
41 | * @dev: local mesh interface | ||
42 | * | ||
43 | * This function checks if the mesh configuration of a mesh point matches the | ||
44 | * local mesh configuration, i.e. if both nodes belong to the same mesh network. | ||
45 | */ | ||
46 | bool mesh_matches_local(struct ieee802_11_elems *ie, struct net_device *dev) | ||
47 | { | ||
48 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
49 | struct ieee80211_if_sta *sta = &sdata->u.sta; | ||
50 | |||
51 | /* | ||
52 | * As support for each feature is added, check for matching | ||
53 | * - On mesh config capabilities | ||
54 | * - Power Save Support Enabled | ||
55 | * - Sync support enabled | ||
56 | * - Sync support active | ||
57 | * - Sync support required from peer | ||
58 | * - MDA enabled | ||
59 | * - Power management control on fc | ||
60 | */ | ||
61 | if (sta->mesh_id_len == ie->mesh_id_len && | ||
62 | memcmp(sta->mesh_id, ie->mesh_id, ie->mesh_id_len) == 0 && | ||
63 | memcmp(sta->mesh_pp_id, ie->mesh_config + PP_OFFSET, 4) == 0 && | ||
64 | memcmp(sta->mesh_pm_id, ie->mesh_config + PM_OFFSET, 4) == 0 && | ||
65 | memcmp(sta->mesh_cc_id, ie->mesh_config + CC_OFFSET, 4) == 0) | ||
66 | return true; | ||
67 | |||
68 | return false; | ||
69 | } | ||
70 | |||
71 | /** | ||
72 | * mesh_peer_accepts_plinks - check if an mp is willing to establish peer links | ||
73 | * | ||
74 | * @ie: information elements of a management frame from the mesh peer | ||
75 | * @dev: local mesh interface | ||
76 | */ | ||
77 | bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie, | ||
78 | struct net_device *dev) | ||
79 | { | ||
80 | return (*(ie->mesh_config + CAPAB_OFFSET) & ACCEPT_PLINKS) != 0; | ||
81 | } | ||
82 | |||
83 | /** | ||
84 | * mesh_accept_plinks_update: update accepting_plinks in local mesh beacons | ||
85 | * | ||
86 | * @sdata: mesh interface in which mesh beacons are going to be updated | ||
87 | */ | ||
88 | void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata) | ||
89 | { | ||
90 | bool free_plinks; | ||
91 | |||
92 | /* In case mesh_plink_free_count > 0 and mesh_plinktbl_capacity == 0, | ||
93 | * the mesh interface might be able to establish plinks with peers that | ||
94 | * are already on the table but are not on PLINK_ESTAB state. However, | ||
95 | * in general the mesh interface is not accepting peer link requests | ||
96 | * from new peers, and that must be reflected in the beacon | ||
97 | */ | ||
98 | free_plinks = mesh_plink_availables(sdata); | ||
99 | |||
100 | if (free_plinks != sdata->u.sta.accepting_plinks) | ||
101 | ieee80211_sta_timer((unsigned long) sdata); | ||
102 | } | ||
103 | |||
104 | void mesh_ids_set_default(struct ieee80211_if_sta *sta) | ||
105 | { | ||
106 | u8 def_id[4] = {0x00, 0x0F, 0xAC, 0xff}; | ||
107 | |||
108 | memcpy(sta->mesh_pp_id, def_id, 4); | ||
109 | memcpy(sta->mesh_pm_id, def_id, 4); | ||
110 | memcpy(sta->mesh_cc_id, def_id, 4); | ||
111 | } | ||
112 | |||
113 | int mesh_rmc_init(struct net_device *dev) | ||
114 | { | ||
115 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
116 | int i; | ||
117 | |||
118 | sdata->u.sta.rmc = kmalloc(sizeof(struct mesh_rmc), GFP_KERNEL); | ||
119 | if (!sdata->u.sta.rmc) | ||
120 | return -ENOMEM; | ||
121 | sdata->u.sta.rmc->idx_mask = RMC_BUCKETS - 1; | ||
122 | for (i = 0; i < RMC_BUCKETS; i++) | ||
123 | INIT_LIST_HEAD(&sdata->u.sta.rmc->bucket[i].list); | ||
124 | return 0; | ||
125 | } | ||
126 | |||
127 | void mesh_rmc_free(struct net_device *dev) | ||
128 | { | ||
129 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
130 | struct mesh_rmc *rmc = sdata->u.sta.rmc; | ||
131 | struct rmc_entry *p, *n; | ||
132 | int i; | ||
133 | |||
134 | if (!sdata->u.sta.rmc) | ||
135 | return; | ||
136 | |||
137 | for (i = 0; i < RMC_BUCKETS; i++) | ||
138 | list_for_each_entry_safe(p, n, &rmc->bucket[i].list, list) { | ||
139 | list_del(&p->list); | ||
140 | kmem_cache_free(rm_cache, p); | ||
141 | } | ||
142 | |||
143 | kfree(rmc); | ||
144 | sdata->u.sta.rmc = NULL; | ||
145 | } | ||
146 | |||
147 | /** | ||
148 | * mesh_rmc_check - Check frame in recent multicast cache and add if absent. | ||
149 | * | ||
150 | * @sa: source address | ||
151 | * @mesh_hdr: mesh_header | ||
152 | * | ||
153 | * Returns: 0 if the frame is not in the cache, nonzero otherwise. | ||
154 | * | ||
155 | * Checks using the source address and the mesh sequence number if we have | ||
156 | * received this frame lately. If the frame is not in the cache, it is added to | ||
157 | * it. | ||
158 | */ | ||
159 | int mesh_rmc_check(u8 *sa, struct ieee80211s_hdr *mesh_hdr, | ||
160 | struct net_device *dev) | ||
161 | { | ||
162 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
163 | struct mesh_rmc *rmc = sdata->u.sta.rmc; | ||
164 | u32 seqnum = 0; | ||
165 | int entries = 0; | ||
166 | u8 idx; | ||
167 | struct rmc_entry *p, *n; | ||
168 | |||
169 | /* Don't care about endianness since only match matters */ | ||
170 | memcpy(&seqnum, mesh_hdr->seqnum, sizeof(mesh_hdr->seqnum)); | ||
171 | idx = mesh_hdr->seqnum[0] & rmc->idx_mask; | ||
172 | list_for_each_entry_safe(p, n, &rmc->bucket[idx].list, list) { | ||
173 | ++entries; | ||
174 | if (time_after(jiffies, p->exp_time) || | ||
175 | (entries == RMC_QUEUE_MAX_LEN)) { | ||
176 | list_del(&p->list); | ||
177 | kmem_cache_free(rm_cache, p); | ||
178 | --entries; | ||
179 | } else if ((seqnum == p->seqnum) | ||
180 | && (memcmp(sa, p->sa, ETH_ALEN) == 0)) | ||
181 | return -1; | ||
182 | } | ||
183 | |||
184 | p = kmem_cache_alloc(rm_cache, GFP_ATOMIC); | ||
185 | if (!p) { | ||
186 | printk(KERN_DEBUG "o11s: could not allocate RMC entry\n"); | ||
187 | return 0; | ||
188 | } | ||
189 | p->seqnum = seqnum; | ||
190 | p->exp_time = jiffies + RMC_TIMEOUT; | ||
191 | memcpy(p->sa, sa, ETH_ALEN); | ||
192 | list_add(&p->list, &rmc->bucket[idx].list); | ||
193 | return 0; | ||
194 | } | ||
195 | |||
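For context, a hedged sketch of how an RX path could consult mesh_rmc_check() to drop duplicate multicast frames. The wrapper below is purely illustrative and not part of this patch; only the mesh_rmc_check() call and its return convention come from the code above.

/* Illustrative helper, not in the patch: returns true and frees the skb
 * if this (source address, mesh sequence number) pair was seen recently;
 * otherwise the pair is now cached and the frame should be processed.
 */
static bool mesh_rx_is_duplicate(struct sk_buff *skb, u8 *sa,
				 struct ieee80211s_hdr *mesh_hdr,
				 struct net_device *dev)
{
	if (mesh_rmc_check(sa, mesh_hdr, dev)) {
		dev_kfree_skb(skb);	/* duplicate: discard and stop */
		return true;
	}
	return false;			/* first sighting: keep processing */
}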
196 | void mesh_mgmt_ies_add(struct sk_buff *skb, struct net_device *dev) | ||
197 | { | ||
198 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
199 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
200 | struct ieee80211_supported_band *sband; | ||
201 | u8 *pos; | ||
202 | int len, i, rate; | ||
203 | |||
204 | sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; | ||
205 | len = sband->n_bitrates; | ||
206 | if (len > 8) | ||
207 | len = 8; | ||
208 | pos = skb_put(skb, len + 2); | ||
209 | *pos++ = WLAN_EID_SUPP_RATES; | ||
210 | *pos++ = len; | ||
211 | for (i = 0; i < len; i++) { | ||
212 | rate = sband->bitrates[i].bitrate; | ||
213 | *pos++ = (u8) (rate / 5); | ||
214 | } | ||
215 | |||
216 | if (sband->n_bitrates > len) { | ||
217 | pos = skb_put(skb, sband->n_bitrates - len + 2); | ||
218 | *pos++ = WLAN_EID_EXT_SUPP_RATES; | ||
219 | *pos++ = sband->n_bitrates - len; | ||
220 | for (i = len; i < sband->n_bitrates; i++) { | ||
221 | rate = sband->bitrates[i].bitrate; | ||
222 | *pos++ = (u8) (rate / 5); | ||
223 | } | ||
224 | } | ||
225 | |||
226 | pos = skb_put(skb, 2 + sdata->u.sta.mesh_id_len); | ||
227 | *pos++ = WLAN_EID_MESH_ID; | ||
228 | *pos++ = sdata->u.sta.mesh_id_len; | ||
229 | if (sdata->u.sta.mesh_id_len) | ||
230 | memcpy(pos, sdata->u.sta.mesh_id, sdata->u.sta.mesh_id_len); | ||
231 | |||
232 | pos = skb_put(skb, 21); | ||
233 | *pos++ = WLAN_EID_MESH_CONFIG; | ||
234 | *pos++ = MESH_CFG_LEN; | ||
235 | /* Version */ | ||
236 | *pos++ = 1; | ||
237 | |||
238 | /* Active path selection protocol ID */ | ||
239 | memcpy(pos, sdata->u.sta.mesh_pp_id, 4); | ||
240 | pos += 4; | ||
241 | |||
242 | /* Active path selection metric ID */ | ||
243 | memcpy(pos, sdata->u.sta.mesh_pm_id, 4); | ||
244 | pos += 4; | ||
245 | |||
246 | /* Congestion control mode identifier */ | ||
247 | memcpy(pos, sdata->u.sta.mesh_cc_id, 4); | ||
248 | pos += 4; | ||
249 | |||
250 | /* Channel precedence: | ||
251 | * Not running simple channel unification protocol | ||
252 | */ | ||
253 | memset(pos, 0x00, 4); | ||
254 | pos += 4; | ||
255 | |||
256 | /* Mesh capability */ | ||
257 | sdata->u.sta.accepting_plinks = mesh_plink_availables(sdata); | ||
258 | *pos++ = sdata->u.sta.accepting_plinks ? ACCEPT_PLINKS : 0x00; | ||
259 | *pos++ = 0x00; | ||
260 | |||
261 | return; | ||
262 | } | ||
263 | |||
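For readability, the 21 bytes reserved above for the mesh configuration element break down as follows; this is only a summary of the writes performed by the function.

/* WLAN_EID_MESH_CONFIG element, 2 + MESH_CFG_LEN = 21 bytes:
 *   element ID                    1 byte
 *   length (MESH_CFG_LEN = 19)    1 byte
 *   version                       1 byte
 *   path selection protocol ID    4 bytes
 *   path selection metric ID      4 bytes
 *   congestion control mode ID    4 bytes
 *   channel precedence            4 bytes
 *   mesh capability               2 bytes
 */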
264 | u32 mesh_table_hash(u8 *addr, struct net_device *dev, struct mesh_table *tbl) | ||
265 | { | ||
266 | /* Use last four bytes of hw addr and interface index as hash index */ | ||
267 | return jhash_2words(*(u32 *)(addr+2), dev->ifindex, tbl->hash_rnd) | ||
268 | & tbl->hash_mask; | ||
269 | } | ||
270 | |||
271 | u8 mesh_id_hash(u8 *mesh_id, int mesh_id_len) | ||
272 | { | ||
273 | if (!mesh_id_len) | ||
274 | return 1; | ||
275 | else if (mesh_id_len == 1) | ||
276 | return (u8) mesh_id[0]; | ||
277 | else | ||
278 | return (u8) (mesh_id[0] + 2 * mesh_id[1]); | ||
279 | } | ||
280 | |||
281 | struct mesh_table *mesh_table_alloc(int size_order) | ||
282 | { | ||
283 | int i; | ||
284 | struct mesh_table *newtbl; | ||
285 | |||
286 | newtbl = kmalloc(sizeof(struct mesh_table), GFP_KERNEL); | ||
287 | if (!newtbl) | ||
288 | return NULL; | ||
289 | |||
290 | newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) * | ||
291 | (1 << size_order), GFP_KERNEL); | ||
292 | |||
293 | if (!newtbl->hash_buckets) { | ||
294 | kfree(newtbl); | ||
295 | return NULL; | ||
296 | } | ||
297 | |||
298 | newtbl->hashwlock = kmalloc(sizeof(spinlock_t) * | ||
299 | (1 << size_order), GFP_KERNEL); | ||
300 | if (!newtbl->hashwlock) { | ||
301 | kfree(newtbl->hash_buckets); | ||
302 | kfree(newtbl); | ||
303 | return NULL; | ||
304 | } | ||
305 | |||
306 | newtbl->size_order = size_order; | ||
307 | newtbl->hash_mask = (1 << size_order) - 1; | ||
308 | atomic_set(&newtbl->entries, 0); | ||
309 | get_random_bytes(&newtbl->hash_rnd, | ||
310 | sizeof(newtbl->hash_rnd)); | ||
311 | for (i = 0; i <= newtbl->hash_mask; i++) | ||
312 | spin_lock_init(&newtbl->hashwlock[i]); | ||
313 | |||
314 | return newtbl; | ||
315 | } | ||
316 | |||
317 | void mesh_table_free(struct mesh_table *tbl, bool free_leafs) | ||
318 | { | ||
319 | struct hlist_head *mesh_hash; | ||
320 | struct hlist_node *p, *q; | ||
321 | int i; | ||
322 | |||
323 | mesh_hash = tbl->hash_buckets; | ||
324 | for (i = 0; i <= tbl->hash_mask; i++) { | ||
325 | spin_lock(&tbl->hashwlock[i]); | ||
326 | hlist_for_each_safe(p, q, &mesh_hash[i]) { | ||
327 | tbl->free_node(p, free_leafs); | ||
328 | atomic_dec(&tbl->entries); | ||
329 | } | ||
330 | spin_unlock(&tbl->hashwlock[i]); | ||
331 | } | ||
332 | kfree(tbl->hash_buckets); | ||
333 | kfree(tbl->hashwlock); | ||
334 | kfree(tbl); | ||
335 | } | ||
336 | |||
337 | static void ieee80211_mesh_path_timer(unsigned long data) | ||
338 | { | ||
339 | struct ieee80211_sub_if_data *sdata = | ||
340 | (struct ieee80211_sub_if_data *) data; | ||
341 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; | ||
342 | struct ieee80211_local *local = wdev_priv(&sdata->wdev); | ||
343 | |||
344 | queue_work(local->hw.workqueue, &ifsta->work); | ||
345 | } | ||
346 | |||
347 | struct mesh_table *mesh_table_grow(struct mesh_table *tbl) | ||
348 | { | ||
349 | struct mesh_table *newtbl; | ||
350 | struct hlist_head *oldhash; | ||
351 | struct hlist_node *p; | ||
352 | int err = 0; | ||
353 | int i; | ||
354 | |||
355 | if (atomic_read(&tbl->entries) | ||
356 | < tbl->mean_chain_len * (tbl->hash_mask + 1)) { | ||
357 | err = -EPERM; | ||
358 | goto endgrow; | ||
359 | } | ||
360 | |||
361 | newtbl = mesh_table_alloc(tbl->size_order + 1); | ||
362 | if (!newtbl) { | ||
363 | err = -ENOMEM; | ||
364 | goto endgrow; | ||
365 | } | ||
366 | |||
367 | newtbl->free_node = tbl->free_node; | ||
368 | newtbl->mean_chain_len = tbl->mean_chain_len; | ||
369 | newtbl->copy_node = tbl->copy_node; | ||
370 | atomic_set(&newtbl->entries, atomic_read(&tbl->entries)); | ||
371 | |||
372 | oldhash = tbl->hash_buckets; | ||
373 | for (i = 0; i <= tbl->hash_mask; i++) | ||
374 | hlist_for_each(p, &oldhash[i]) | ||
375 | tbl->copy_node(p, newtbl); | ||
376 | |||
377 | endgrow: | ||
378 | if (err) | ||
379 | return NULL; | ||
380 | else | ||
381 | return newtbl; | ||
382 | } | ||
383 | |||
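mesh_table_grow() only allocates and populates the larger table; publishing it is left to the caller. Below is a minimal sketch of that hand-off, assuming the RCU-visible mesh_paths pointer and the pathtbl_resize_lock introduced later in mesh_pathtbl.c; the surrounding caller shown here is hypothetical.

	struct mesh_table *oldtbl, *newtbl;

	newtbl = mesh_table_grow(mesh_paths);
	if (!newtbl)
		return;				/* not worth growing, or allocation failed */

	write_lock(&pathtbl_resize_lock);	/* block concurrent add/delete */
	oldtbl = mesh_paths;
	rcu_assign_pointer(mesh_paths, newtbl);	/* publish the new table */
	write_unlock(&pathtbl_resize_lock);

	synchronize_rcu();			/* wait out readers of the old table */
	mesh_table_free(oldtbl, false);		/* nodes were copied, keep the leafs */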
384 | /** | ||
385 | * ieee80211_new_mesh_header - create a new mesh header | ||
386 | * @meshhdr: uninitialized mesh header | ||
387 | * @sdata: mesh interface to be used | ||
388 | * | ||
389 | * Return the header length. | ||
390 | */ | ||
391 | int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr, | ||
392 | struct ieee80211_sub_if_data *sdata) | ||
393 | { | ||
394 | meshhdr->flags = 0; | ||
395 | meshhdr->ttl = sdata->u.sta.mshcfg.dot11MeshTTL; | ||
396 | |||
397 | meshhdr->seqnum[0] = sdata->u.sta.mesh_seqnum[0]++; | ||
398 | meshhdr->seqnum[1] = sdata->u.sta.mesh_seqnum[1]; | ||
399 | meshhdr->seqnum[2] = sdata->u.sta.mesh_seqnum[2]; | ||
400 | |||
401 | if (sdata->u.sta.mesh_seqnum[0] == 0) { | ||
402 | sdata->u.sta.mesh_seqnum[1]++; | ||
403 | if (sdata->u.sta.mesh_seqnum[1] == 0) | ||
404 | sdata->u.sta.mesh_seqnum[2]++; | ||
405 | } | ||
406 | |||
407 | return 5; | ||
408 | } | ||
409 | |||
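As a worked example of the sequence number handling above (values chosen for illustration): the counter is a 24-bit value stored least significant byte first, the header receives the current value, and the post-increment of byte 0 carries into the higher bytes.

/* Illustration only, not part of the patch:
 *   before the call:  mesh_seqnum     = { 0xff, 0x03, 0x00 }   (0x0003ff)
 *   header receives:  meshhdr->seqnum = { 0xff, 0x03, 0x00 }
 *   after the call:   mesh_seqnum     = { 0x00, 0x04, 0x00 }   (0x000400)
 * The returned length of 5 covers flags (1) + ttl (1) + seqnum (3).
 */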
410 | void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata) | ||
411 | { | ||
412 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; | ||
413 | |||
414 | ifsta->mshcfg.dot11MeshRetryTimeout = MESH_RET_T; | ||
415 | ifsta->mshcfg.dot11MeshConfirmTimeout = MESH_CONF_T; | ||
416 | ifsta->mshcfg.dot11MeshHoldingTimeout = MESH_HOLD_T; | ||
417 | ifsta->mshcfg.dot11MeshMaxRetries = MESH_MAX_RETR; | ||
418 | ifsta->mshcfg.dot11MeshTTL = MESH_TTL; | ||
419 | ifsta->mshcfg.auto_open_plinks = true; | ||
420 | ifsta->mshcfg.dot11MeshMaxPeerLinks = | ||
421 | MESH_MAX_ESTAB_PLINKS; | ||
422 | ifsta->mshcfg.dot11MeshHWMPactivePathTimeout = | ||
423 | MESH_PATH_TIMEOUT; | ||
424 | ifsta->mshcfg.dot11MeshHWMPpreqMinInterval = | ||
425 | MESH_PREQ_MIN_INT; | ||
426 | ifsta->mshcfg.dot11MeshHWMPnetDiameterTraversalTime = | ||
427 | MESH_DIAM_TRAVERSAL_TIME; | ||
428 | ifsta->mshcfg.dot11MeshHWMPmaxPREQretries = | ||
429 | MESH_MAX_PREQ_RETRIES; | ||
430 | ifsta->mshcfg.path_refresh_time = | ||
431 | MESH_PATH_REFRESH_TIME; | ||
432 | ifsta->mshcfg.min_discovery_timeout = | ||
433 | MESH_MIN_DISCOVERY_TIMEOUT; | ||
434 | ifsta->accepting_plinks = true; | ||
435 | ifsta->preq_id = 0; | ||
436 | ifsta->dsn = 0; | ||
437 | atomic_set(&ifsta->mpaths, 0); | ||
438 | mesh_rmc_init(sdata->dev); | ||
439 | ifsta->last_preq = jiffies; | ||
440 | /* Allocate all mesh structures when creating the first mesh interface. */ | ||
441 | if (!mesh_allocated) | ||
442 | ieee80211s_init(); | ||
443 | mesh_ids_set_default(ifsta); | ||
444 | setup_timer(&ifsta->mesh_path_timer, | ||
445 | ieee80211_mesh_path_timer, | ||
446 | (unsigned long) sdata); | ||
447 | INIT_LIST_HEAD(&ifsta->preq_queue.list); | ||
448 | spin_lock_init(&ifsta->mesh_preq_queue_lock); | ||
449 | } | ||
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h new file mode 100644 index 000000000000..742003d3a841 --- /dev/null +++ b/net/mac80211/mesh.h | |||
@@ -0,0 +1,290 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2008 open80211s Ltd. | ||
3 | * Authors: Luis Carlos Cobo <luisca@cozybit.com> | ||
4 | * Javier Cardona <javier@cozybit.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | |||
11 | #ifndef IEEE80211S_H | ||
12 | #define IEEE80211S_H | ||
13 | |||
14 | #include <linux/types.h> | ||
15 | #include <linux/jhash.h> | ||
16 | #include "ieee80211_i.h" | ||
17 | |||
18 | |||
19 | /* Data structures */ | ||
20 | |||
21 | /** | ||
22 | * enum mesh_path_flags - mac80211 mesh path flags | ||
23 | * | ||
24 | * | ||
25 | * | ||
26 | * @MESH_PATH_ACTIVE: the mesh path can be used for forwarding | ||
27 | * @MESH_PATH_RESOLVING: the discovery process is running for this mesh path | ||
28 | * @MESH_PATH_DSN_VALID: the mesh path contains a valid destination sequence | ||
29 | * number | ||
30 | * @MESH_PATH_FIXED: the mesh path has been manually set and should not be | ||
31 | * modified | ||
32 | * @MESH_PATH_RESOLVED: the mesh path has been resolved | ||
33 | * | ||
34 | * MESH_PATH_RESOLVING and MESH_PATH_RESOLVED are used by the mesh path timer | ||
35 | * to decide when to stop or cancel the mesh path discovery. | ||
36 | */ | ||
37 | enum mesh_path_flags { | ||
38 | MESH_PATH_ACTIVE = BIT(0), | ||
39 | MESH_PATH_RESOLVING = BIT(1), | ||
40 | MESH_PATH_DSN_VALID = BIT(2), | ||
41 | MESH_PATH_FIXED = BIT(3), | ||
42 | MESH_PATH_RESOLVED = BIT(4), | ||
43 | }; | ||
44 | |||
45 | /** | ||
46 | * struct mesh_path - mac80211 mesh path structure | ||
47 | * | ||
48 | * @dst: mesh path destination mac address | ||
49 | * @dev: mesh path device | ||
50 | * @next_hop: mesh neighbor to which frames for this destination will be | ||
51 | * forwarded | ||
52 | * @timer: mesh path discovery timer | ||
53 | * @frame_queue: pending queue for frames sent to this destination while the | ||
54 | * path is unresolved | ||
55 | * @dsn: destination sequence number of the destination | ||
56 | * @metric: current metric to this destination | ||
57 | * @hop_count: hops to destination | ||
58 | * @exp_time: in jiffies, when the path will expire or when it expired | ||
59 | * @discovery_timeout: timeout (lapse in jiffies) used for the last discovery | ||
60 | * retry | ||
61 | * @discovery_retries: number of discovery retries | ||
62 | * @flags: mesh path flags, as specified on &enum mesh_path_flags | ||
63 | * @state_lock: mesh path state lock | ||
64 | * | ||
65 | * | ||
66 | * The combination of dst and dev is unique in the mesh path table. Since the | ||
67 | * next_hop STA is only protected by RCU as well, deleting the STA must also | ||
68 | * remove/substitute the mesh_path structure and wait until that is no longer | ||
69 | * reachable before destroying the STA completely. | ||
70 | */ | ||
71 | struct mesh_path { | ||
72 | u8 dst[ETH_ALEN]; | ||
73 | struct net_device *dev; | ||
74 | struct sta_info *next_hop; | ||
75 | struct timer_list timer; | ||
76 | struct sk_buff_head frame_queue; | ||
77 | struct rcu_head rcu; | ||
78 | u32 dsn; | ||
79 | u32 metric; | ||
80 | u8 hop_count; | ||
81 | unsigned long exp_time; | ||
82 | u32 discovery_timeout; | ||
83 | u8 discovery_retries; | ||
84 | enum mesh_path_flags flags; | ||
85 | spinlock_t state_lock; | ||
86 | }; | ||
87 | |||
88 | /** | ||
89 | * struct mesh_table | ||
90 | * | ||
91 | * @hash_buckets: array of hash buckets of the table | ||
92 | * @hashwlock: array of locks to protect write operations, one per bucket | ||
93 | * @hash_mask: 2^size_order - 1, used to compute hash idx | ||
94 | * @hash_rnd: random value used for hash computations | ||
95 | * @entries: number of entries in the table | ||
96 | * @free_node: function to free nodes of the table | ||
97 | * @copy_node: function to copy nodes of the table | ||
98 | * @size_order: determines size of the table, there will be 2^size_order hash | ||
99 | * buckets | ||
100 | * @mean_chain_len: maximum average length for the hash buckets' list, if it is | ||
101 | * reached, the table will grow | ||
102 | */ | ||
103 | struct mesh_table { | ||
104 | /* Number of buckets will be 2^N */ | ||
105 | struct hlist_head *hash_buckets; | ||
106 | spinlock_t *hashwlock; /* One per bucket, for add/del */ | ||
107 | unsigned int hash_mask; /* (2^size_order) - 1 */ | ||
108 | __u32 hash_rnd; /* Used for hash generation */ | ||
109 | atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */ | ||
110 | void (*free_node) (struct hlist_node *p, bool free_leafs); | ||
111 | void (*copy_node) (struct hlist_node *p, struct mesh_table *newtbl); | ||
112 | int size_order; | ||
113 | int mean_chain_len; | ||
114 | }; | ||
115 | |||
116 | /* Recent multicast cache */ | ||
117 | /* RMC_BUCKETS must be a power of 2, maximum 256 */ | ||
118 | #define RMC_BUCKETS 256 | ||
119 | #define RMC_QUEUE_MAX_LEN 4 | ||
120 | #define RMC_TIMEOUT (3 * HZ) | ||
121 | |||
122 | /** | ||
123 | * struct rmc_entry - entry in the Recent Multicast Cache | ||
124 | * | ||
125 | * @seqnum: mesh sequence number of the frame | ||
126 | * @exp_time: expiration time of the entry, in jiffies | ||
127 | * @sa: source address of the frame | ||
128 | * | ||
129 | * The Recent Multicast Cache keeps track of the latest multicast frames | ||
130 | * received by a mesh interface; received multicast frames that are already | ||
131 | * in the cache are discarded as duplicates. | ||
132 | */ | ||
133 | struct rmc_entry { | ||
134 | struct list_head list; | ||
135 | u32 seqnum; | ||
136 | unsigned long exp_time; | ||
137 | u8 sa[ETH_ALEN]; | ||
138 | }; | ||
139 | |||
140 | struct mesh_rmc { | ||
141 | struct rmc_entry bucket[RMC_BUCKETS]; | ||
142 | u8 idx_mask; | ||
143 | }; | ||
144 | |||
145 | |||
146 | /* Mesh IEs constants */ | ||
147 | #define MESH_CFG_LEN 19 | ||
148 | |||
149 | /* | ||
150 | * MESH_CFG_CMP_LEN includes: | ||
151 | * - Active path selection protocol ID. | ||
152 | * - Active path selection metric ID. | ||
153 | * - Congestion control mode identifier. | ||
154 | * - Channel precedence. | ||
155 | * It does not include the mesh capabilities, which may vary across nodes in | ||
156 | * the same mesh. | ||
157 | */ | ||
158 | #define MESH_CFG_CMP_LEN 17 | ||
159 | |||
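Arithmetically, MESH_CFG_CMP_LEN = 17 appears to cover the version byte plus the four 4-byte identifiers written by mesh_mgmt_ies_add() (1 + 4 * 4 = 17); the two mesh capability bytes, which may legitimately differ between neighbours, bring the element body up to MESH_CFG_LEN = 19.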
160 | /* Default values, timeouts in ms */ | ||
161 | #define MESH_TTL 5 | ||
162 | #define MESH_MAX_RETR 3 | ||
163 | #define MESH_RET_T 100 | ||
164 | #define MESH_CONF_T 100 | ||
165 | #define MESH_HOLD_T 100 | ||
166 | |||
167 | #define MESH_PATH_TIMEOUT 5000 | ||
168 | /* Minimum interval between two consecutive PREQs originated by the same | ||
169 | * interface | ||
170 | */ | ||
171 | #define MESH_PREQ_MIN_INT 10 | ||
172 | #define MESH_DIAM_TRAVERSAL_TIME 50 | ||
173 | /* Paths will be refreshed if they are closer than PATH_REFRESH_TIME to their | ||
174 | * expiration | ||
175 | */ | ||
176 | #define MESH_PATH_REFRESH_TIME 1000 | ||
177 | #define MESH_MIN_DISCOVERY_TIMEOUT (2 * MESH_DIAM_TRAVERSAL_TIME) | ||
178 | |||
179 | #define MESH_MAX_PREQ_RETRIES 4 | ||
180 | #define MESH_PATH_EXPIRE (600 * HZ) | ||
181 | |||
182 | /* Default maximum number of established plinks per interface */ | ||
183 | #define MESH_MAX_ESTAB_PLINKS 32 | ||
184 | |||
185 | /* Default maximum number of plinks per interface */ | ||
186 | #define MESH_MAX_PLINKS 256 | ||
187 | |||
188 | /* Maximum number of paths per interface */ | ||
189 | #define MESH_MAX_MPATHS 1024 | ||
190 | |||
191 | /* Pending ANA approval */ | ||
192 | #define PLINK_CATEGORY 30 | ||
193 | #define MESH_PATH_SEL_CATEGORY 32 | ||
194 | |||
195 | /* Mesh Header Flags */ | ||
196 | #define IEEE80211S_FLAGS_AE 0x3 | ||
197 | |||
198 | /* Public interfaces */ | ||
199 | /* Various */ | ||
200 | u8 mesh_id_hash(u8 *mesh_id, int mesh_id_len); | ||
201 | int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr); | ||
202 | int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr, | ||
203 | struct ieee80211_sub_if_data *sdata); | ||
204 | int mesh_rmc_check(u8 *addr, struct ieee80211s_hdr *mesh_hdr, | ||
205 | struct net_device *dev); | ||
206 | bool mesh_matches_local(struct ieee802_11_elems *ie, struct net_device *dev); | ||
207 | void mesh_ids_set_default(struct ieee80211_if_sta *sta); | ||
208 | void mesh_mgmt_ies_add(struct sk_buff *skb, struct net_device *dev); | ||
209 | void mesh_rmc_free(struct net_device *dev); | ||
210 | int mesh_rmc_init(struct net_device *dev); | ||
211 | void ieee80211s_init(void); | ||
212 | void ieee80211s_stop(void); | ||
213 | void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata); | ||
214 | |||
215 | /* Mesh paths */ | ||
216 | int mesh_nexthop_lookup(u8 *next_hop, struct sk_buff *skb, | ||
217 | struct net_device *dev); | ||
218 | void mesh_path_start_discovery(struct net_device *dev); | ||
219 | struct mesh_path *mesh_path_lookup(u8 *dst, struct net_device *dev); | ||
220 | struct mesh_path *mesh_path_lookup_by_idx(int idx, struct net_device *dev); | ||
221 | void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop); | ||
222 | void mesh_path_expire(struct net_device *dev); | ||
223 | void mesh_path_flush(struct net_device *dev); | ||
224 | void mesh_rx_path_sel_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt, | ||
225 | size_t len); | ||
226 | int mesh_path_add(u8 *dst, struct net_device *dev); | ||
227 | /* Mesh plinks */ | ||
228 | void mesh_neighbour_update(u8 *hw_addr, u64 rates, struct net_device *dev, | ||
229 | bool add); | ||
230 | bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie, | ||
231 | struct net_device *dev); | ||
232 | void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata); | ||
233 | void mesh_plink_broken(struct sta_info *sta); | ||
234 | void mesh_plink_deactivate(struct sta_info *sta); | ||
235 | int mesh_plink_open(struct sta_info *sta); | ||
236 | int mesh_plink_close(struct sta_info *sta); | ||
237 | void mesh_plink_block(struct sta_info *sta); | ||
238 | void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt, | ||
239 | size_t len, struct ieee80211_rx_status *rx_status); | ||
240 | |||
241 | /* Private interfaces */ | ||
242 | /* Mesh tables */ | ||
243 | struct mesh_table *mesh_table_alloc(int size_order); | ||
244 | void mesh_table_free(struct mesh_table *tbl, bool free_leafs); | ||
245 | struct mesh_table *mesh_table_grow(struct mesh_table *tbl); | ||
246 | u32 mesh_table_hash(u8 *addr, struct net_device *dev, struct mesh_table *tbl); | ||
247 | /* Mesh paths */ | ||
248 | int mesh_path_error_tx(u8 *dest, __le32 dest_dsn, u8 *ra, | ||
249 | struct net_device *dev); | ||
250 | void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta); | ||
251 | void mesh_path_flush_pending(struct mesh_path *mpath); | ||
252 | void mesh_path_tx_pending(struct mesh_path *mpath); | ||
253 | int mesh_pathtbl_init(void); | ||
254 | void mesh_pathtbl_unregister(void); | ||
255 | int mesh_path_del(u8 *addr, struct net_device *dev); | ||
256 | void mesh_path_timer(unsigned long data); | ||
257 | void mesh_path_flush_by_nexthop(struct sta_info *sta); | ||
258 | void mesh_path_discard_frame(struct sk_buff *skb, struct net_device *dev); | ||
259 | |||
260 | #ifdef CONFIG_MAC80211_MESH | ||
261 | extern int mesh_allocated; | ||
262 | |||
263 | static inline int mesh_plink_free_count(struct ieee80211_sub_if_data *sdata) | ||
264 | { | ||
265 | return sdata->u.sta.mshcfg.dot11MeshMaxPeerLinks - | ||
266 | atomic_read(&sdata->u.sta.mshstats.estab_plinks); | ||
267 | } | ||
268 | |||
269 | static inline bool mesh_plink_availables(struct ieee80211_sub_if_data *sdata) | ||
270 | { | ||
271 | return (min_t(long, mesh_plink_free_count(sdata), | ||
272 | MESH_MAX_PLINKS - sdata->local->num_sta)) > 0; | ||
273 | } | ||
274 | |||
275 | static inline void mesh_path_activate(struct mesh_path *mpath) | ||
276 | { | ||
277 | mpath->flags |= MESH_PATH_ACTIVE | MESH_PATH_RESOLVED; | ||
278 | } | ||
279 | |||
280 | #define for_each_mesh_entry(x, p, node, i) \ | ||
281 | for (i = 0; i <= x->hash_mask; i++) \ | ||
282 | hlist_for_each_entry_rcu(node, p, &x->hash_buckets[i], list) | ||
283 | |||
284 | #else | ||
285 | #define mesh_allocated 0 | ||
286 | #endif | ||
287 | |||
288 | #define MESH_PREQ(skb) (skb->cb + 30) | ||
289 | |||
290 | #endif /* IEEE80211S_H */ | ||
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c new file mode 100644 index 000000000000..576a6e55323e --- /dev/null +++ b/net/mac80211/mesh_hwmp.c | |||
@@ -0,0 +1,857 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2008 open80211s Ltd. | ||
3 | * Author: Luis Carlos Cobo <luisca@cozybit.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License version 2 as | ||
7 | * published by the Free Software Foundation. | ||
8 | */ | ||
9 | |||
10 | #include <asm/unaligned.h> | ||
11 | #include "mesh.h" | ||
12 | |||
13 | #define IEEE80211_FC(type, stype) cpu_to_le16(type | stype) | ||
14 | |||
15 | #define TEST_FRAME_LEN 8192 | ||
16 | #define MAX_METRIC 0xffffffff | ||
17 | #define ARITH_SHIFT 8 | ||
18 | |||
19 | /* Number of frames buffered per destination for unresolved destinations */ | ||
20 | #define MESH_FRAME_QUEUE_LEN 10 | ||
21 | #define MAX_PREQ_QUEUE_LEN 64 | ||
22 | |||
23 | /* Destination only */ | ||
24 | #define MP_F_DO 0x1 | ||
25 | /* Reply and forward */ | ||
26 | #define MP_F_RF 0x2 | ||
27 | |||
28 | static inline u32 u32_field_get(u8 *preq_elem, int offset, bool ae) | ||
29 | { | ||
30 | if (ae) | ||
31 | offset += 6; | ||
32 | return le32_to_cpu(get_unaligned((__le32 *) (preq_elem + offset))); | ||
33 | } | ||
34 | |||
35 | /* HWMP IE processing macros */ | ||
36 | #define AE_F (1<<6) | ||
37 | #define AE_F_SET(x) (*x & AE_F) | ||
38 | #define PREQ_IE_FLAGS(x) (*(x)) | ||
39 | #define PREQ_IE_HOPCOUNT(x) (*(x + 1)) | ||
40 | #define PREQ_IE_TTL(x) (*(x + 2)) | ||
41 | #define PREQ_IE_PREQ_ID(x) u32_field_get(x, 3, 0) | ||
42 | #define PREQ_IE_ORIG_ADDR(x) (x + 7) | ||
43 | #define PREQ_IE_ORIG_DSN(x) u32_field_get(x, 13, 0) | ||
44 | #define PREQ_IE_LIFETIME(x) u32_field_get(x, 17, AE_F_SET(x)) | ||
45 | #define PREQ_IE_METRIC(x) u32_field_get(x, 21, AE_F_SET(x)) | ||
46 | #define PREQ_IE_DST_F(x) (*(AE_F_SET(x) ? x + 32 : x + 26)) | ||
47 | #define PREQ_IE_DST_ADDR(x) (AE_F_SET(x) ? x + 33 : x + 27) | ||
48 | #define PREQ_IE_DST_DSN(x) u32_field_get(x, 33, AE_F_SET(x)) | ||
49 | |||
50 | |||
51 | #define PREP_IE_FLAGS(x) PREQ_IE_FLAGS(x) | ||
52 | #define PREP_IE_HOPCOUNT(x) PREQ_IE_HOPCOUNT(x) | ||
53 | #define PREP_IE_TTL(x) PREQ_IE_TTL(x) | ||
54 | #define PREP_IE_ORIG_ADDR(x) (x + 3) | ||
55 | #define PREP_IE_ORIG_DSN(x) u32_field_get(x, 9, 0) | ||
56 | #define PREP_IE_LIFETIME(x) u32_field_get(x, 13, AE_F_SET(x)) | ||
57 | #define PREP_IE_METRIC(x) u32_field_get(x, 17, AE_F_SET(x)) | ||
58 | #define PREP_IE_DST_ADDR(x) (AE_F_SET(x) ? x + 27 : x + 21) | ||
59 | #define PREP_IE_DST_DSN(x) u32_field_get(x, 27, AE_F_SET(x)) | ||
60 | |||
61 | #define PERR_IE_DST_ADDR(x) (x + 2) | ||
62 | #define PERR_IE_DST_DSN(x) u32_field_get(x, 8, 0) | ||
63 | |||
64 | #define TU_TO_EXP_TIME(x) (jiffies + msecs_to_jiffies((x) * 1024 / 1000)) | ||
65 | #define MSEC_TO_TU(x) ((x) * 1000 / 1024) | ||
66 | #define DSN_GT(x, y) ((long) (y) - (long) (x) < 0) | ||
67 | #define DSN_LT(x, y) ((long) (x) - (long) (y) < 0) | ||
68 | |||
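A quick sanity check of the time-unit conversions above (1 TU = 1024 microseconds, hence the 1024/1000 scaling):

/* Worked example, illustrative values only:
 *   TU_TO_EXP_TIME(5000) == jiffies + msecs_to_jiffies(5000 * 1024 / 1000)
 *                        == jiffies + msecs_to_jiffies(5120)
 *   MSEC_TO_TU(5000)     == 5000 * 1000 / 1024 == 4882 TU
 */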
69 | #define net_traversal_jiffies(s) \ | ||
70 | msecs_to_jiffies(s->u.sta.mshcfg.dot11MeshHWMPnetDiameterTraversalTime) | ||
71 | #define default_lifetime(s) \ | ||
72 | MSEC_TO_TU(s->u.sta.mshcfg.dot11MeshHWMPactivePathTimeout) | ||
73 | #define min_preq_int_jiff(s) \ | ||
74 | (msecs_to_jiffies(s->u.sta.mshcfg.dot11MeshHWMPpreqMinInterval)) | ||
75 | #define max_preq_retries(s) (s->u.sta.mshcfg.dot11MeshHWMPmaxPREQretries) | ||
76 | #define disc_timeout_jiff(s) \ | ||
77 | msecs_to_jiffies(sdata->u.sta.mshcfg.min_discovery_timeout) | ||
78 | |||
79 | enum mpath_frame_type { | ||
80 | MPATH_PREQ = 0, | ||
81 | MPATH_PREP, | ||
82 | MPATH_PERR | ||
83 | }; | ||
84 | |||
85 | static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags, | ||
86 | u8 *orig_addr, __le32 orig_dsn, u8 dst_flags, u8 *dst, | ||
87 | __le32 dst_dsn, u8 *da, u8 hop_count, u8 ttl, __le32 lifetime, | ||
88 | __le32 metric, __le32 preq_id, struct net_device *dev) | ||
89 | { | ||
90 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
91 | struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400); | ||
92 | struct ieee80211_mgmt *mgmt; | ||
93 | u8 *pos; | ||
94 | int ie_len; | ||
95 | |||
96 | if (!skb) | ||
97 | return -1; | ||
98 | skb_reserve(skb, local->hw.extra_tx_headroom); | ||
99 | /* 25 is the size of the common mgmt part (24) plus the size of the | ||
100 | * common action part (1) | ||
101 | */ | ||
102 | mgmt = (struct ieee80211_mgmt *) | ||
103 | skb_put(skb, 25 + sizeof(mgmt->u.action.u.mesh_action)); | ||
104 | memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.mesh_action)); | ||
105 | mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, | ||
106 | IEEE80211_STYPE_ACTION); | ||
107 | |||
108 | memcpy(mgmt->da, da, ETH_ALEN); | ||
109 | memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); | ||
110 | /* BSSID is left zeroed, wildcard value */ | ||
111 | mgmt->u.action.category = MESH_PATH_SEL_CATEGORY; | ||
112 | mgmt->u.action.u.mesh_action.action_code = action; | ||
113 | |||
114 | switch (action) { | ||
115 | case MPATH_PREQ: | ||
116 | ie_len = 37; | ||
117 | pos = skb_put(skb, 2 + ie_len); | ||
118 | *pos++ = WLAN_EID_PREQ; | ||
119 | break; | ||
120 | case MPATH_PREP: | ||
121 | ie_len = 31; | ||
122 | pos = skb_put(skb, 2 + ie_len); | ||
123 | *pos++ = WLAN_EID_PREP; | ||
124 | break; | ||
125 | default: | ||
126 | kfree_skb(skb); | ||
127 | return -ENOTSUPP; | ||
128 | break; | ||
129 | } | ||
130 | *pos++ = ie_len; | ||
131 | *pos++ = flags; | ||
132 | *pos++ = hop_count; | ||
133 | *pos++ = ttl; | ||
134 | if (action == MPATH_PREQ) { | ||
135 | memcpy(pos, &preq_id, 4); | ||
136 | pos += 4; | ||
137 | } | ||
138 | memcpy(pos, orig_addr, ETH_ALEN); | ||
139 | pos += ETH_ALEN; | ||
140 | memcpy(pos, &orig_dsn, 4); | ||
141 | pos += 4; | ||
142 | memcpy(pos, &lifetime, 4); | ||
143 | pos += 4; | ||
144 | memcpy(pos, &metric, 4); | ||
145 | pos += 4; | ||
146 | if (action == MPATH_PREQ) { | ||
147 | /* destination count */ | ||
148 | *pos++ = 1; | ||
149 | *pos++ = dst_flags; | ||
150 | } | ||
151 | memcpy(pos, dst, ETH_ALEN); | ||
152 | pos += ETH_ALEN; | ||
153 | memcpy(pos, &dst_dsn, 4); | ||
154 | |||
155 | ieee80211_sta_tx(dev, skb, 0); | ||
156 | return 0; | ||
157 | } | ||
158 | |||
159 | /** | ||
160 | * mesh_path_error_tx - Sends a PERR mesh management frame | ||
161 | * | ||
162 | * @dst: broken destination | ||
163 | * @dst_dsn: dsn of the broken destination | ||
164 | * @ra: node this frame is addressed to | ||
165 | */ | ||
166 | int mesh_path_error_tx(u8 *dst, __le32 dst_dsn, u8 *ra, | ||
167 | struct net_device *dev) | ||
168 | { | ||
169 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
170 | struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400); | ||
171 | struct ieee80211_mgmt *mgmt; | ||
172 | u8 *pos; | ||
173 | int ie_len; | ||
174 | |||
175 | if (!skb) | ||
176 | return -1; | ||
177 | skb_reserve(skb, local->hw.extra_tx_headroom); | ||
178 | /* 25 is the size of the common mgmt part (24) plus the size of the | ||
179 | * common action part (1) | ||
180 | */ | ||
181 | mgmt = (struct ieee80211_mgmt *) | ||
182 | skb_put(skb, 25 + sizeof(mgmt->u.action.u.mesh_action)); | ||
183 | memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.mesh_action)); | ||
184 | mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, | ||
185 | IEEE80211_STYPE_ACTION); | ||
186 | |||
187 | memcpy(mgmt->da, ra, ETH_ALEN); | ||
188 | memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); | ||
189 | /* BSSID is left zeroed, wildcard value */ | ||
190 | mgmt->u.action.category = MESH_PATH_SEL_CATEGORY; | ||
191 | mgmt->u.action.u.mesh_action.action_code = MPATH_PERR; | ||
192 | ie_len = 12; | ||
193 | pos = skb_put(skb, 2 + ie_len); | ||
194 | *pos++ = WLAN_EID_PERR; | ||
195 | *pos++ = ie_len; | ||
196 | /* mode flags, reserved */ | ||
197 | *pos++ = 0; | ||
198 | /* number of destinations */ | ||
199 | *pos++ = 1; | ||
200 | memcpy(pos, dst, ETH_ALEN); | ||
201 | pos += ETH_ALEN; | ||
202 | memcpy(pos, &dst_dsn, 4); | ||
203 | |||
204 | ieee80211_sta_tx(dev, skb, 0); | ||
205 | return 0; | ||
206 | } | ||
207 | |||
208 | static u32 airtime_link_metric_get(struct ieee80211_local *local, | ||
209 | struct sta_info *sta) | ||
210 | { | ||
211 | struct ieee80211_supported_band *sband; | ||
212 | /* This should be adjusted for each device */ | ||
213 | int device_constant = 1 << ARITH_SHIFT; | ||
214 | int test_frame_len = TEST_FRAME_LEN << ARITH_SHIFT; | ||
215 | int s_unit = 1 << ARITH_SHIFT; | ||
216 | int rate, err; | ||
217 | u32 tx_time, estimated_retx; | ||
218 | u64 result; | ||
219 | |||
220 | sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; | ||
221 | |||
222 | if (sta->fail_avg >= 100) | ||
223 | return MAX_METRIC; | ||
224 | err = (sta->fail_avg << ARITH_SHIFT) / 100; | ||
225 | |||
226 | /* bitrate is in units of 100 Kbps, while we need rate in units of | ||
227 | * 1Mbps. This will be corrected on tx_time computation. | ||
228 | */ | ||
229 | rate = sband->bitrates[sta->txrate_idx].bitrate; | ||
230 | tx_time = (device_constant + 10 * test_frame_len / rate); | ||
231 | estimated_retx = ((1 << (2 * ARITH_SHIFT)) / (s_unit - err)); | ||
232 | result = (tx_time * estimated_retx) >> (2 * ARITH_SHIFT) ; | ||
233 | return (u32)result; | ||
234 | } | ||
235 | |||
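To make the fixed-point arithmetic above concrete, here is a worked example with illustrative inputs: fail_avg = 10 (percent) and a 54 Mb/s rate, i.e. bitrate = 540 in units of 100 kb/s.

/* Illustrative numbers only (ARITH_SHIFT = 8, TEST_FRAME_LEN = 8192):
 *   err            = (10 << 8) / 100               = 25
 *   tx_time        = 256 + 10 * (8192 << 8) / 540  = 39092
 *   estimated_retx = (1 << 16) / (256 - 25)        = 283
 *   metric         = (39092 * 283) >> 16           = 168
 * A lossier or slower link produces a larger (worse) airtime metric.
 */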
236 | /** | ||
237 | * hwmp_route_info_get - Update routing info to originator and transmitter | ||
238 | * | ||
239 | * @dev: local mesh interface | ||
240 | * @mgmt: mesh management frame | ||
241 | * @hwmp_ie: hwmp information element (PREP or PREQ) | ||
242 | * | ||
243 | * This function updates the path routing information to the originator and the | ||
244 | * transmitter of a HWMP PREQ or PREP frame. | ||
245 | * | ||
246 | * Returns: metric to frame originator or 0 if the frame should not be further | ||
247 | * processed | ||
248 | * | ||
249 | * Notes: this function is the only place (besides user-provided info) where | ||
250 | * path routing information is updated. | ||
251 | */ | ||
252 | static u32 hwmp_route_info_get(struct net_device *dev, | ||
253 | struct ieee80211_mgmt *mgmt, | ||
254 | u8 *hwmp_ie) | ||
255 | { | ||
256 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
257 | struct mesh_path *mpath; | ||
258 | struct sta_info *sta; | ||
259 | bool fresh_info; | ||
260 | u8 *orig_addr, *ta; | ||
261 | u32 orig_dsn, orig_metric; | ||
262 | unsigned long orig_lifetime, exp_time; | ||
263 | u32 last_hop_metric, new_metric; | ||
264 | bool process = true; | ||
265 | u8 action = mgmt->u.action.u.mesh_action.action_code; | ||
266 | |||
267 | rcu_read_lock(); | ||
268 | sta = sta_info_get(local, mgmt->sa); | ||
269 | if (!sta) { | ||
270 | rcu_read_unlock(); | ||
271 | return 0; | ||
272 | } | ||
273 | |||
274 | last_hop_metric = airtime_link_metric_get(local, sta); | ||
275 | /* Update and check originator routing info */ | ||
276 | fresh_info = true; | ||
277 | |||
278 | switch (action) { | ||
279 | case MPATH_PREQ: | ||
280 | orig_addr = PREQ_IE_ORIG_ADDR(hwmp_ie); | ||
281 | orig_dsn = PREQ_IE_ORIG_DSN(hwmp_ie); | ||
282 | orig_lifetime = PREQ_IE_LIFETIME(hwmp_ie); | ||
283 | orig_metric = PREQ_IE_METRIC(hwmp_ie); | ||
284 | break; | ||
285 | case MPATH_PREP: | ||
286 | /* Originator here refers to the MP that was the destination in | ||
287 | * the Path Request. The draft refers to that MP as the | ||
288 | * destination address, even though usually it is the origin of | ||
289 | * the PREP frame. We diverge from the nomenclature in the draft | ||
290 | * so that we can easily use a single function to gather path | ||
291 | * information from both PREQ and PREP frames. | ||
292 | */ | ||
293 | orig_addr = PREP_IE_ORIG_ADDR(hwmp_ie); | ||
294 | orig_dsn = PREP_IE_ORIG_DSN(hwmp_ie); | ||
295 | orig_lifetime = PREP_IE_LIFETIME(hwmp_ie); | ||
296 | orig_metric = PREP_IE_METRIC(hwmp_ie); | ||
297 | break; | ||
298 | default: | ||
299 | rcu_read_unlock(); | ||
300 | return 0; | ||
301 | } | ||
302 | new_metric = orig_metric + last_hop_metric; | ||
303 | if (new_metric < orig_metric) | ||
304 | new_metric = MAX_METRIC; | ||
305 | exp_time = TU_TO_EXP_TIME(orig_lifetime); | ||
306 | |||
307 | if (memcmp(orig_addr, dev->dev_addr, ETH_ALEN) == 0) { | ||
308 | /* This MP is the originator, we are not interested in this | ||
309 | * frame, except for updating transmitter's path info. | ||
310 | */ | ||
311 | process = false; | ||
312 | fresh_info = false; | ||
313 | } else { | ||
314 | mpath = mesh_path_lookup(orig_addr, dev); | ||
315 | if (mpath) { | ||
316 | spin_lock_bh(&mpath->state_lock); | ||
317 | if (mpath->flags & MESH_PATH_FIXED) | ||
318 | fresh_info = false; | ||
319 | else if ((mpath->flags & MESH_PATH_ACTIVE) && | ||
320 | (mpath->flags & MESH_PATH_DSN_VALID)) { | ||
321 | if (DSN_GT(mpath->dsn, orig_dsn) || | ||
322 | (mpath->dsn == orig_dsn && | ||
323 | action == MPATH_PREQ && | ||
324 | new_metric > mpath->metric)) { | ||
325 | process = false; | ||
326 | fresh_info = false; | ||
327 | } | ||
328 | } | ||
329 | } else { | ||
330 | mesh_path_add(orig_addr, dev); | ||
331 | mpath = mesh_path_lookup(orig_addr, dev); | ||
332 | if (!mpath) { | ||
333 | rcu_read_unlock(); | ||
334 | return 0; | ||
335 | } | ||
336 | spin_lock_bh(&mpath->state_lock); | ||
337 | } | ||
338 | |||
339 | if (fresh_info) { | ||
340 | mesh_path_assign_nexthop(mpath, sta); | ||
341 | mpath->flags |= MESH_PATH_DSN_VALID; | ||
342 | mpath->metric = new_metric; | ||
343 | mpath->dsn = orig_dsn; | ||
344 | mpath->exp_time = time_after(mpath->exp_time, exp_time) | ||
345 | ? mpath->exp_time : exp_time; | ||
346 | mesh_path_activate(mpath); | ||
347 | spin_unlock_bh(&mpath->state_lock); | ||
348 | mesh_path_tx_pending(mpath); | ||
349 | /* The draft says preq_id should be saved too, but there does | ||
350 | * not seem to be any use for it, so skip it for now. | ||
351 | */ | ||
352 | } else | ||
353 | spin_unlock_bh(&mpath->state_lock); | ||
354 | } | ||
355 | |||
356 | /* Update and check transmitter routing info */ | ||
357 | ta = mgmt->sa; | ||
358 | if (memcmp(orig_addr, ta, ETH_ALEN) == 0) | ||
359 | fresh_info = false; | ||
360 | else { | ||
361 | fresh_info = true; | ||
362 | |||
363 | mpath = mesh_path_lookup(ta, dev); | ||
364 | if (mpath) { | ||
365 | spin_lock_bh(&mpath->state_lock); | ||
366 | if ((mpath->flags & MESH_PATH_FIXED) || | ||
367 | ((mpath->flags & MESH_PATH_ACTIVE) && | ||
368 | (last_hop_metric > mpath->metric))) | ||
369 | fresh_info = false; | ||
370 | } else { | ||
371 | mesh_path_add(ta, dev); | ||
372 | mpath = mesh_path_lookup(ta, dev); | ||
373 | if (!mpath) { | ||
374 | rcu_read_unlock(); | ||
375 | return 0; | ||
376 | } | ||
377 | spin_lock_bh(&mpath->state_lock); | ||
378 | } | ||
379 | |||
380 | if (fresh_info) { | ||
381 | mesh_path_assign_nexthop(mpath, sta); | ||
382 | mpath->flags &= ~MESH_PATH_DSN_VALID; | ||
383 | mpath->metric = last_hop_metric; | ||
384 | mpath->exp_time = time_after(mpath->exp_time, exp_time) | ||
385 | ? mpath->exp_time : exp_time; | ||
386 | mesh_path_activate(mpath); | ||
387 | spin_unlock_bh(&mpath->state_lock); | ||
388 | mesh_path_tx_pending(mpath); | ||
389 | } else | ||
390 | spin_unlock_bh(&mpath->state_lock); | ||
391 | } | ||
392 | |||
393 | rcu_read_unlock(); | ||
394 | |||
395 | return process ? new_metric : 0; | ||
396 | } | ||
397 | |||
398 | static void hwmp_preq_frame_process(struct net_device *dev, | ||
399 | struct ieee80211_mgmt *mgmt, | ||
400 | u8 *preq_elem, u32 metric) { | ||
401 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
402 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; | ||
403 | struct mesh_path *mpath; | ||
404 | u8 *dst_addr, *orig_addr; | ||
405 | u8 dst_flags, ttl; | ||
406 | u32 orig_dsn, dst_dsn, lifetime; | ||
407 | bool reply = false; | ||
408 | bool forward = true; | ||
409 | |||
410 | /* Update destination DSN, if present */ | ||
411 | dst_addr = PREQ_IE_DST_ADDR(preq_elem); | ||
412 | orig_addr = PREQ_IE_ORIG_ADDR(preq_elem); | ||
413 | dst_dsn = PREQ_IE_DST_DSN(preq_elem); | ||
414 | orig_dsn = PREQ_IE_ORIG_DSN(preq_elem); | ||
415 | dst_flags = PREQ_IE_DST_F(preq_elem); | ||
416 | |||
417 | if (memcmp(dst_addr, dev->dev_addr, ETH_ALEN) == 0) { | ||
418 | forward = false; | ||
419 | reply = true; | ||
420 | metric = 0; | ||
421 | if (time_after(jiffies, ifsta->last_dsn_update + | ||
422 | net_traversal_jiffies(sdata)) || | ||
423 | time_before(jiffies, ifsta->last_dsn_update)) { | ||
424 | dst_dsn = ++ifsta->dsn; | ||
425 | ifsta->last_dsn_update = jiffies; | ||
426 | } | ||
427 | } else { | ||
428 | rcu_read_lock(); | ||
429 | mpath = mesh_path_lookup(dst_addr, dev); | ||
430 | if (mpath) { | ||
431 | if ((!(mpath->flags & MESH_PATH_DSN_VALID)) || | ||
432 | DSN_LT(mpath->dsn, dst_dsn)) { | ||
433 | mpath->dsn = dst_dsn; | ||
434 | mpath->flags |= MESH_PATH_DSN_VALID; | ||
435 | } else if ((!(dst_flags & MP_F_DO)) && | ||
436 | (mpath->flags & MESH_PATH_ACTIVE)) { | ||
437 | reply = true; | ||
438 | metric = mpath->metric; | ||
439 | dst_dsn = mpath->dsn; | ||
440 | if (dst_flags & MP_F_RF) | ||
441 | dst_flags |= MP_F_DO; | ||
442 | else | ||
443 | forward = false; | ||
444 | } | ||
445 | } | ||
446 | rcu_read_unlock(); | ||
447 | } | ||
448 | |||
449 | if (reply) { | ||
450 | lifetime = PREQ_IE_LIFETIME(preq_elem); | ||
451 | ttl = ifsta->mshcfg.dot11MeshTTL; | ||
452 | if (ttl != 0) | ||
453 | mesh_path_sel_frame_tx(MPATH_PREP, 0, dst_addr, | ||
454 | cpu_to_le32(dst_dsn), 0, orig_addr, | ||
455 | cpu_to_le32(orig_dsn), mgmt->sa, 0, ttl, | ||
456 | cpu_to_le32(lifetime), cpu_to_le32(metric), | ||
457 | 0, dev); | ||
458 | else | ||
459 | ifsta->mshstats.dropped_frames_ttl++; | ||
460 | } | ||
461 | |||
462 | if (forward) { | ||
463 | u32 preq_id; | ||
464 | u8 hopcount, flags; | ||
465 | |||
466 | ttl = PREQ_IE_TTL(preq_elem); | ||
467 | lifetime = PREQ_IE_LIFETIME(preq_elem); | ||
468 | if (ttl <= 1) { | ||
469 | ifsta->mshstats.dropped_frames_ttl++; | ||
470 | return; | ||
471 | } | ||
472 | --ttl; | ||
473 | flags = PREQ_IE_FLAGS(preq_elem); | ||
474 | preq_id = PREQ_IE_PREQ_ID(preq_elem); | ||
475 | hopcount = PREQ_IE_HOPCOUNT(preq_elem) + 1; | ||
476 | mesh_path_sel_frame_tx(MPATH_PREQ, flags, orig_addr, | ||
477 | cpu_to_le32(orig_dsn), dst_flags, dst_addr, | ||
478 | cpu_to_le32(dst_dsn), dev->broadcast, | ||
479 | hopcount, ttl, cpu_to_le32(lifetime), | ||
480 | cpu_to_le32(metric), cpu_to_le32(preq_id), | ||
481 | dev); | ||
482 | ifsta->mshstats.fwded_frames++; | ||
483 | } | ||
484 | } | ||
485 | |||
486 | |||
487 | static void hwmp_prep_frame_process(struct net_device *dev, | ||
488 | struct ieee80211_mgmt *mgmt, | ||
489 | u8 *prep_elem, u32 metric) | ||
490 | { | ||
491 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
492 | struct mesh_path *mpath; | ||
493 | u8 *dst_addr, *orig_addr; | ||
494 | u8 ttl, hopcount, flags; | ||
495 | u8 next_hop[ETH_ALEN]; | ||
496 | u32 dst_dsn, orig_dsn, lifetime; | ||
497 | |||
498 | /* Note that we diverge from the draft nomenclature and call | ||
499 | * "destination" what the draft refers to as the originator. So in this | ||
500 | * function, destination refers to the final destination of the PREP, | ||
501 | * which corresponds to the originator of the PREQ to which this PREP | ||
502 | * replies. | ||
503 | */ | ||
504 | dst_addr = PREP_IE_DST_ADDR(prep_elem); | ||
505 | if (memcmp(dst_addr, dev->dev_addr, ETH_ALEN) == 0) | ||
506 | /* destination, no forwarding required */ | ||
507 | return; | ||
508 | |||
509 | ttl = PREP_IE_TTL(prep_elem); | ||
510 | if (ttl <= 1) { | ||
511 | sdata->u.sta.mshstats.dropped_frames_ttl++; | ||
512 | return; | ||
513 | } | ||
514 | |||
515 | rcu_read_lock(); | ||
516 | mpath = mesh_path_lookup(dst_addr, dev); | ||
517 | if (mpath) | ||
518 | spin_lock_bh(&mpath->state_lock); | ||
519 | else | ||
520 | goto fail; | ||
521 | if (!(mpath->flags & MESH_PATH_ACTIVE)) { | ||
522 | spin_unlock_bh(&mpath->state_lock); | ||
523 | goto fail; | ||
524 | } | ||
525 | memcpy(next_hop, mpath->next_hop->addr, ETH_ALEN); | ||
526 | spin_unlock_bh(&mpath->state_lock); | ||
527 | --ttl; | ||
528 | flags = PREP_IE_FLAGS(prep_elem); | ||
529 | lifetime = PREP_IE_LIFETIME(prep_elem); | ||
530 | hopcount = PREP_IE_HOPCOUNT(prep_elem) + 1; | ||
531 | orig_addr = PREP_IE_ORIG_ADDR(prep_elem); | ||
532 | dst_dsn = PREP_IE_DST_DSN(prep_elem); | ||
533 | orig_dsn = PREP_IE_ORIG_DSN(prep_elem); | ||
534 | |||
535 | mesh_path_sel_frame_tx(MPATH_PREP, flags, orig_addr, | ||
536 | cpu_to_le32(orig_dsn), 0, dst_addr, | ||
537 | cpu_to_le32(dst_dsn), mpath->next_hop->addr, hopcount, ttl, | ||
538 | cpu_to_le32(lifetime), cpu_to_le32(metric), | ||
539 | 0, dev); | ||
540 | rcu_read_unlock(); | ||
541 | sdata->u.sta.mshstats.fwded_frames++; | ||
542 | return; | ||
543 | |||
544 | fail: | ||
545 | rcu_read_unlock(); | ||
546 | sdata->u.sta.mshstats.dropped_frames_no_route++; | ||
547 | return; | ||
548 | } | ||
549 | |||
550 | static void hwmp_perr_frame_process(struct net_device *dev, | ||
551 | struct ieee80211_mgmt *mgmt, u8 *perr_elem) | ||
552 | { | ||
553 | struct mesh_path *mpath; | ||
554 | u8 *ta, *dst_addr; | ||
555 | u32 dst_dsn; | ||
556 | |||
557 | ta = mgmt->sa; | ||
558 | dst_addr = PERR_IE_DST_ADDR(perr_elem); | ||
559 | dst_dsn = PERR_IE_DST_DSN(perr_elem); | ||
560 | rcu_read_lock(); | ||
561 | mpath = mesh_path_lookup(dst_addr, dev); | ||
562 | if (mpath) { | ||
563 | spin_lock_bh(&mpath->state_lock); | ||
564 | if (mpath->flags & MESH_PATH_ACTIVE && | ||
565 | memcmp(ta, mpath->next_hop->addr, ETH_ALEN) == 0 && | ||
566 | (!(mpath->flags & MESH_PATH_DSN_VALID) || | ||
567 | DSN_GT(dst_dsn, mpath->dsn))) { | ||
568 | mpath->flags &= ~MESH_PATH_ACTIVE; | ||
569 | mpath->dsn = dst_dsn; | ||
570 | spin_unlock_bh(&mpath->state_lock); | ||
571 | mesh_path_error_tx(dst_addr, cpu_to_le32(dst_dsn), | ||
572 | dev->broadcast, dev); | ||
573 | } else | ||
574 | spin_unlock_bh(&mpath->state_lock); | ||
575 | } | ||
576 | rcu_read_unlock(); | ||
577 | } | ||
578 | |||
579 | |||
580 | |||
581 | void mesh_rx_path_sel_frame(struct net_device *dev, | ||
582 | struct ieee80211_mgmt *mgmt, | ||
583 | size_t len) | ||
584 | { | ||
585 | struct ieee802_11_elems elems; | ||
586 | size_t baselen; | ||
587 | u32 last_hop_metric; | ||
588 | |||
589 | baselen = (u8 *) mgmt->u.action.u.mesh_action.variable - (u8 *) mgmt; | ||
590 | ieee802_11_parse_elems(mgmt->u.action.u.mesh_action.variable, | ||
591 | len - baselen, &elems); | ||
592 | |||
593 | switch (mgmt->u.action.u.mesh_action.action_code) { | ||
594 | case MPATH_PREQ: | ||
595 | if (!elems.preq || elems.preq_len != 37) | ||
596 | /* Right now we support just 1 destination and no AE */ | ||
597 | return; | ||
598 | last_hop_metric = hwmp_route_info_get(dev, mgmt, elems.preq); | ||
599 | if (!last_hop_metric) | ||
600 | return; | ||
601 | hwmp_preq_frame_process(dev, mgmt, elems.preq, last_hop_metric); | ||
602 | break; | ||
603 | case MPATH_PREP: | ||
604 | if (!elems.prep || elems.prep_len != 31) | ||
605 | /* Right now we support no AE */ | ||
606 | return; | ||
607 | last_hop_metric = hwmp_route_info_get(dev, mgmt, elems.prep); | ||
608 | if (!last_hop_metric) | ||
609 | return; | ||
610 | hwmp_prep_frame_process(dev, mgmt, elems.prep, last_hop_metric); | ||
611 | break; | ||
612 | case MPATH_PERR: | ||
613 | if (!elems.perr || elems.perr_len != 12) | ||
614 | /* Right now we support only one destination per PERR */ | ||
615 | return; | ||
616 | hwmp_perr_frame_process(dev, mgmt, elems.perr); | ||
617 | default: | ||
618 | return; | ||
619 | } | ||
620 | |||
621 | } | ||
622 | |||
623 | /** | ||
624 | * mesh_queue_preq - queue a PREQ to a given destination | ||
625 | * | ||
626 | * @mpath: mesh path to discover | ||
627 | * @flags: special attributes of the PREQ to be sent | ||
628 | * | ||
629 | * Locking: the function must be called from within an RCU read lock block. | ||
630 | * | ||
631 | */ | ||
632 | static void mesh_queue_preq(struct mesh_path *mpath, u8 flags) | ||
633 | { | ||
634 | struct ieee80211_sub_if_data *sdata = | ||
635 | IEEE80211_DEV_TO_SUB_IF(mpath->dev); | ||
636 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; | ||
637 | struct mesh_preq_queue *preq_node; | ||
638 | |||
639 | preq_node = kmalloc(sizeof(struct mesh_preq_queue), GFP_KERNEL); | ||
640 | if (!preq_node) { | ||
641 | printk(KERN_DEBUG "Mesh HWMP: could not allocate PREQ node\n"); | ||
642 | return; | ||
643 | } | ||
644 | |||
645 | spin_lock(&ifsta->mesh_preq_queue_lock); | ||
646 | if (ifsta->preq_queue_len == MAX_PREQ_QUEUE_LEN) { | ||
647 | spin_unlock(&ifsta->mesh_preq_queue_lock); | ||
648 | kfree(preq_node); | ||
649 | if (printk_ratelimit()) | ||
650 | printk(KERN_DEBUG "Mesh HWMP: PREQ node queue full\n"); | ||
651 | return; | ||
652 | } | ||
653 | |||
654 | memcpy(preq_node->dst, mpath->dst, ETH_ALEN); | ||
655 | preq_node->flags = flags; | ||
656 | |||
657 | list_add_tail(&preq_node->list, &ifsta->preq_queue.list); | ||
658 | ++ifsta->preq_queue_len; | ||
659 | spin_unlock(&ifsta->mesh_preq_queue_lock); | ||
660 | |||
661 | if (time_after(jiffies, ifsta->last_preq + min_preq_int_jiff(sdata))) | ||
662 | queue_work(sdata->local->hw.workqueue, &ifsta->work); | ||
663 | |||
664 | else if (time_before(jiffies, ifsta->last_preq)) { | ||
665 | /* avoid long wait if did not send preqs for a long time | ||
666 | * and jiffies wrapped around | ||
667 | */ | ||
668 | ifsta->last_preq = jiffies - min_preq_int_jiff(sdata) - 1; | ||
669 | queue_work(sdata->local->hw.workqueue, &ifsta->work); | ||
670 | } else | ||
671 | mod_timer(&ifsta->mesh_path_timer, ifsta->last_preq + | ||
672 | min_preq_int_jiff(sdata)); | ||
673 | } | ||
674 | |||
675 | /** | ||
676 | * mesh_path_start_discovery - launch a path discovery from the PREQ queue | ||
677 | * | ||
678 | * @dev: local mesh interface | ||
679 | */ | ||
680 | void mesh_path_start_discovery(struct net_device *dev) | ||
681 | { | ||
682 | struct ieee80211_sub_if_data *sdata = | ||
683 | IEEE80211_DEV_TO_SUB_IF(dev); | ||
684 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; | ||
685 | struct mesh_preq_queue *preq_node; | ||
686 | struct mesh_path *mpath; | ||
687 | u8 ttl, dst_flags; | ||
688 | u32 lifetime; | ||
689 | |||
690 | spin_lock(&ifsta->mesh_preq_queue_lock); | ||
691 | if (!ifsta->preq_queue_len || | ||
692 | time_before(jiffies, ifsta->last_preq + | ||
693 | min_preq_int_jiff(sdata))) { | ||
694 | spin_unlock(&ifsta->mesh_preq_queue_lock); | ||
695 | return; | ||
696 | } | ||
697 | |||
698 | preq_node = list_first_entry(&ifsta->preq_queue.list, | ||
699 | struct mesh_preq_queue, list); | ||
700 | list_del(&preq_node->list); | ||
701 | --ifsta->preq_queue_len; | ||
702 | spin_unlock(&ifsta->mesh_preq_queue_lock); | ||
703 | |||
704 | rcu_read_lock(); | ||
705 | mpath = mesh_path_lookup(preq_node->dst, dev); | ||
706 | if (!mpath) | ||
707 | goto enddiscovery; | ||
708 | |||
709 | spin_lock_bh(&mpath->state_lock); | ||
710 | if (preq_node->flags & PREQ_Q_F_START) { | ||
711 | if (mpath->flags & MESH_PATH_RESOLVING) { | ||
712 | spin_unlock_bh(&mpath->state_lock); | ||
713 | goto enddiscovery; | ||
714 | } else { | ||
715 | mpath->flags &= ~MESH_PATH_RESOLVED; | ||
716 | mpath->flags |= MESH_PATH_RESOLVING; | ||
717 | mpath->discovery_retries = 0; | ||
718 | mpath->discovery_timeout = disc_timeout_jiff(sdata); | ||
719 | } | ||
720 | } else if (!(mpath->flags & MESH_PATH_RESOLVING) || | ||
721 | mpath->flags & MESH_PATH_RESOLVED) { | ||
722 | mpath->flags &= ~MESH_PATH_RESOLVING; | ||
723 | spin_unlock_bh(&mpath->state_lock); | ||
724 | goto enddiscovery; | ||
725 | } | ||
726 | |||
727 | ifsta->last_preq = jiffies; | ||
728 | |||
729 | if (time_after(jiffies, ifsta->last_dsn_update + | ||
730 | net_traversal_jiffies(sdata)) || | ||
731 | time_before(jiffies, ifsta->last_dsn_update)) { | ||
732 | ++ifsta->dsn; | ||
733 | sdata->u.sta.last_dsn_update = jiffies; | ||
734 | } | ||
735 | lifetime = default_lifetime(sdata); | ||
736 | ttl = sdata->u.sta.mshcfg.dot11MeshTTL; | ||
737 | if (ttl == 0) { | ||
738 | sdata->u.sta.mshstats.dropped_frames_ttl++; | ||
739 | spin_unlock_bh(&mpath->state_lock); | ||
740 | goto enddiscovery; | ||
741 | } | ||
742 | |||
743 | if (preq_node->flags & PREQ_Q_F_REFRESH) | ||
744 | dst_flags = MP_F_DO; | ||
745 | else | ||
746 | dst_flags = MP_F_RF; | ||
747 | |||
748 | spin_unlock_bh(&mpath->state_lock); | ||
749 | mesh_path_sel_frame_tx(MPATH_PREQ, 0, dev->dev_addr, | ||
750 | cpu_to_le32(ifsta->dsn), dst_flags, mpath->dst, | ||
751 | cpu_to_le32(mpath->dsn), dev->broadcast, 0, | ||
752 | ttl, cpu_to_le32(lifetime), 0, | ||
753 | cpu_to_le32(ifsta->preq_id++), dev); | ||
754 | mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout); | ||
755 | |||
756 | enddiscovery: | ||
757 | rcu_read_unlock(); | ||
758 | kfree(preq_node); | ||
759 | } | ||
760 | |||
761 | /** | ||
762 | * mesh_nexthop_lookup - put the appropriate next hop on a mesh frame | ||
763 | * | ||
764 | * @next_hop: output argument for next hop address | ||
765 | * @skb: frame to be sent | ||
766 | * @dev: network device the frame will be sent through | ||
767 | * | ||
768 | * Returns: 0 if the next hop was found. Nonzero otherwise. If no next hop is | ||
769 | * found, the function will start a path discovery and queue the frame so it is | ||
770 | * sent when the path is resolved. This means the caller must not free the skb | ||
771 | * in this case. | ||
772 | */ | ||
773 | int mesh_nexthop_lookup(u8 *next_hop, struct sk_buff *skb, | ||
774 | struct net_device *dev) | ||
775 | { | ||
776 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
777 | struct sk_buff *skb_to_free = NULL; | ||
778 | struct mesh_path *mpath; | ||
779 | int err = 0; | ||
780 | |||
781 | rcu_read_lock(); | ||
782 | mpath = mesh_path_lookup(skb->data, dev); | ||
783 | |||
784 | if (!mpath) { | ||
785 | mesh_path_add(skb->data, dev); | ||
786 | mpath = mesh_path_lookup(skb->data, dev); | ||
787 | if (!mpath) { | ||
788 | dev_kfree_skb(skb); | ||
789 | sdata->u.sta.mshstats.dropped_frames_no_route++; | ||
790 | err = -ENOSPC; | ||
791 | goto endlookup; | ||
792 | } | ||
793 | } | ||
794 | |||
795 | if (mpath->flags & MESH_PATH_ACTIVE) { | ||
796 | if (time_after(jiffies, mpath->exp_time - | ||
797 | msecs_to_jiffies(sdata->u.sta.mshcfg.path_refresh_time)) | ||
798 | && skb->pkt_type != PACKET_OTHERHOST | ||
799 | && !(mpath->flags & MESH_PATH_RESOLVING) | ||
800 | && !(mpath->flags & MESH_PATH_FIXED)) { | ||
801 | mesh_queue_preq(mpath, | ||
802 | PREQ_Q_F_START | PREQ_Q_F_REFRESH); | ||
803 | } | ||
804 | memcpy(next_hop, mpath->next_hop->addr, | ||
805 | ETH_ALEN); | ||
806 | } else { | ||
807 | if (!(mpath->flags & MESH_PATH_RESOLVING)) { | ||
808 | /* Start discovery only if it is not running yet */ | ||
809 | mesh_queue_preq(mpath, PREQ_Q_F_START); | ||
810 | } | ||
811 | |||
812 | if (skb_queue_len(&mpath->frame_queue) >= | ||
813 | MESH_FRAME_QUEUE_LEN) { | ||
814 | skb_to_free = mpath->frame_queue.next; | ||
815 | skb_unlink(skb_to_free, &mpath->frame_queue); | ||
816 | } | ||
817 | |||
818 | skb_queue_tail(&mpath->frame_queue, skb); | ||
819 | if (skb_to_free) | ||
820 | mesh_path_discard_frame(skb_to_free, dev); | ||
821 | err = -ENOENT; | ||
822 | } | ||
823 | |||
824 | endlookup: | ||
825 | rcu_read_unlock(); | ||
826 | return err; | ||
827 | } | ||
828 | |||
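A hedged sketch of the contract documented above, from the point of view of a TX-path caller (the surrounding code is hypothetical): on a nonzero return the frame has either been queued until discovery completes or already been discarded, so the caller must not free or reuse it.

	u8 next_hop[ETH_ALEN];

	if (mesh_nexthop_lookup(next_hop, skb, dev))
		return;		/* skb is now owned by the mesh path code */
	/* route known: write next_hop into the frame header and transmit */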
829 | void mesh_path_timer(unsigned long data) | ||
830 | { | ||
831 | struct ieee80211_sub_if_data *sdata; | ||
832 | struct mesh_path *mpath; | ||
833 | |||
834 | rcu_read_lock(); | ||
835 | mpath = (struct mesh_path *) data; | ||
836 | mpath = rcu_dereference(mpath); | ||
837 | if (!mpath) | ||
838 | goto endmpathtimer; | ||
839 | spin_lock_bh(&mpath->state_lock); | ||
840 | sdata = IEEE80211_DEV_TO_SUB_IF(mpath->dev); | ||
841 | if (mpath->flags & MESH_PATH_RESOLVED || | ||
842 | (!(mpath->flags & MESH_PATH_RESOLVING))) | ||
843 | mpath->flags &= ~(MESH_PATH_RESOLVING | MESH_PATH_RESOLVED); | ||
844 | else if (mpath->discovery_retries < max_preq_retries(sdata)) { | ||
845 | ++mpath->discovery_retries; | ||
846 | mpath->discovery_timeout *= 2; | ||
847 | mesh_queue_preq(mpath, 0); | ||
848 | } else { | ||
849 | mpath->flags = 0; | ||
850 | mpath->exp_time = jiffies; | ||
851 | mesh_path_flush_pending(mpath); | ||
852 | } | ||
853 | |||
854 | spin_unlock_bh(&mpath->state_lock); | ||
855 | endmpathtimer: | ||
856 | rcu_read_unlock(); | ||
857 | } | ||
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c new file mode 100644 index 000000000000..5845dc21ce85 --- /dev/null +++ b/net/mac80211/mesh_pathtbl.c | |||
@@ -0,0 +1,516 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2008 open80211s Ltd. | ||
3 | * Author: Luis Carlos Cobo <luisca@cozybit.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License version 2 as | ||
7 | * published by the Free Software Foundation. | ||
8 | */ | ||
9 | |||
10 | #include <linux/etherdevice.h> | ||
11 | #include <linux/list.h> | ||
12 | #include <linux/netdevice.h> | ||
13 | #include <linux/random.h> | ||
14 | #include <linux/spinlock.h> | ||
15 | #include <linux/string.h> | ||
16 | #include <net/mac80211.h> | ||
17 | #include "ieee80211_i.h" | ||
18 | #include "mesh.h" | ||
19 | |||
20 | /* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */ | ||
21 | #define INIT_PATHS_SIZE_ORDER 2 | ||
22 | |||
23 | /* Keep the mean chain length below this constant */ | ||
24 | #define MEAN_CHAIN_LEN 2 | ||
25 | |||
26 | #define MPATH_EXPIRED(mpath) ((mpath->flags & MESH_PATH_ACTIVE) && \ | ||
27 | time_after(jiffies, mpath->exp_time) && \ | ||
28 | !(mpath->flags & MESH_PATH_FIXED)) | ||
29 | |||
30 | struct mpath_node { | ||
31 | struct hlist_node list; | ||
32 | struct rcu_head rcu; | ||
33 | /* This indirection allows two different tables to point to the same | ||
34 | * mesh_path structure, useful when resizing | ||
35 | */ | ||
36 | struct mesh_path *mpath; | ||
37 | }; | ||
38 | |||
39 | static struct mesh_table *mesh_paths; | ||
40 | |||
41 | /* The grow-table function takes this lock as a writer, and node add / delete | ||
42 | * take it as readers. Plain table reads (i.e. lookups) are protected by RCU | ||
43 | * instead. | ||
44 | */ | ||
45 | static DEFINE_RWLOCK(pathtbl_resize_lock); | ||
46 | |||
47 | /** | ||
48 | * | ||
49 | * mesh_path_assign_nexthop - update mesh path next hop | ||
50 | * | ||
51 | * @mpath: mesh path to update | ||
52 | * @sta: next hop to assign | ||
53 | * | ||
54 | * Locking: mpath->state_lock must be held when calling this function | ||
55 | */ | ||
56 | void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta) | ||
57 | { | ||
58 | rcu_assign_pointer(mpath->next_hop, sta); | ||
59 | } | ||
60 | |||
61 | |||
62 | /** | ||
63 | * mesh_path_lookup - look up a path in the mesh path table | ||
64 | * @dst: hardware address (ETH_ALEN length) of destination | ||
65 | * @dev: local interface | ||
66 | * | ||
67 | * Returns: pointer to the mesh path structure, or NULL if not found | ||
68 | * | ||
69 | * Locking: must be called within a read rcu section. | ||
70 | */ | ||
71 | struct mesh_path *mesh_path_lookup(u8 *dst, struct net_device *dev) | ||
72 | { | ||
73 | struct mesh_path *mpath; | ||
74 | struct hlist_node *n; | ||
75 | struct hlist_head *bucket; | ||
76 | struct mesh_table *tbl; | ||
77 | struct mpath_node *node; | ||
78 | |||
79 | tbl = rcu_dereference(mesh_paths); | ||
80 | |||
81 | bucket = &tbl->hash_buckets[mesh_table_hash(dst, dev, tbl)]; | ||
82 | hlist_for_each_entry_rcu(node, n, bucket, list) { | ||
83 | mpath = node->mpath; | ||
84 | if (mpath->dev == dev && | ||
85 | memcmp(dst, mpath->dst, ETH_ALEN) == 0) { | ||
86 | if (MPATH_EXPIRED(mpath)) { | ||
87 | spin_lock_bh(&mpath->state_lock); | ||
88 | if (MPATH_EXPIRED(mpath)) | ||
89 | mpath->flags &= ~MESH_PATH_ACTIVE; | ||
90 | spin_unlock_bh(&mpath->state_lock); | ||
91 | } | ||
92 | return mpath; | ||
93 | } | ||
94 | } | ||
95 | return NULL; | ||
96 | } | ||
97 | |||
98 | /** | ||
99 | * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index | ||
100 | * @idx: index | ||
101 | * @dev: local interface, or NULL for all entries | ||
102 | * | ||
103 | * Returns: pointer to the mesh path structure, or NULL if not found. | ||
104 | * | ||
105 | * Locking: must be called within a read rcu section. | ||
106 | */ | ||
107 | struct mesh_path *mesh_path_lookup_by_idx(int idx, struct net_device *dev) | ||
108 | { | ||
109 | struct mpath_node *node; | ||
110 | struct hlist_node *p; | ||
111 | int i; | ||
112 | int j = 0; | ||
113 | |||
114 | for_each_mesh_entry(mesh_paths, p, node, i) { | ||
115 | if (dev && node->mpath->dev != dev) | ||
116 | continue; | ||
117 | if (j++ == idx) { | ||
118 | if (MPATH_EXPIRED(node->mpath)) { | ||
119 | spin_lock_bh(&node->mpath->state_lock); | ||
120 | if (MPATH_EXPIRED(node->mpath)) | ||
121 | node->mpath->flags &= ~MESH_PATH_ACTIVE; | ||
122 | spin_unlock_bh(&node->mpath->state_lock); | ||
123 | } | ||
124 | return node->mpath; | ||
125 | } | ||
126 | } | ||
127 | |||
128 | return NULL; | ||
129 | } | ||
130 | |||
131 | /** | ||
132 | * mesh_path_add - allocate and add a new path to the mesh path table | ||
133 | * @dst: destination address of the path (ETH_ALEN length) | ||
134 | * @dev: local interface | ||
135 | * | ||
136 | * Returns: 0 on success | ||
137 | * | ||
138 | * State: the initial state of the new path is set to 0 | ||
139 | */ | ||
140 | int mesh_path_add(u8 *dst, struct net_device *dev) | ||
141 | { | ||
142 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
143 | struct mesh_path *mpath, *new_mpath; | ||
144 | struct mpath_node *node, *new_node; | ||
145 | struct hlist_head *bucket; | ||
146 | struct hlist_node *n; | ||
147 | int grow = 0; | ||
148 | int err = 0; | ||
149 | u32 hash_idx; | ||
150 | |||
151 | if (memcmp(dst, dev->dev_addr, ETH_ALEN) == 0) | ||
152 | /* never add ourselves as neighbours */ | ||
153 | return -ENOTSUPP; | ||
154 | |||
155 | if (is_multicast_ether_addr(dst)) | ||
156 | return -ENOTSUPP; | ||
157 | |||
158 | if (atomic_add_unless(&sdata->u.sta.mpaths, 1, MESH_MAX_MPATHS) == 0) | ||
159 | return -ENOSPC; | ||
160 | |||
161 | read_lock(&pathtbl_resize_lock); | ||
162 | |||
163 | new_mpath = kzalloc(sizeof(struct mesh_path), GFP_KERNEL); | ||
164 | if (!new_mpath) { | ||
165 | atomic_dec(&sdata->u.sta.mpaths); | ||
166 | err = -ENOMEM; | ||
167 | goto endadd2; | ||
168 | } | ||
169 | memcpy(new_mpath->dst, dst, ETH_ALEN); | ||
170 | new_mpath->dev = dev; | ||
171 | new_mpath->flags = 0; | ||
172 | skb_queue_head_init(&new_mpath->frame_queue); | ||
173 | new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL); | ||
174 | new_node->mpath = new_mpath; | ||
175 | new_mpath->timer.data = (unsigned long) new_mpath; | ||
176 | new_mpath->timer.function = mesh_path_timer; | ||
177 | new_mpath->exp_time = jiffies; | ||
178 | spin_lock_init(&new_mpath->state_lock); | ||
179 | init_timer(&new_mpath->timer); | ||
180 | |||
181 | hash_idx = mesh_table_hash(dst, dev, mesh_paths); | ||
182 | bucket = &mesh_paths->hash_buckets[hash_idx]; | ||
183 | |||
184 | spin_lock(&mesh_paths->hashwlock[hash_idx]); | ||
185 | |||
186 | hlist_for_each_entry(node, n, bucket, list) { | ||
187 | mpath = node->mpath; | ||
188 | if (mpath->dev == dev && memcmp(dst, mpath->dst, ETH_ALEN) | ||
189 | == 0) { | ||
190 | err = -EEXIST; | ||
191 | atomic_dec(&sdata->u.sta.mpaths); | ||
192 | kfree(new_node); | ||
193 | kfree(new_mpath); | ||
194 | goto endadd; | ||
195 | } | ||
196 | } | ||
197 | |||
198 | hlist_add_head_rcu(&new_node->list, bucket); | ||
199 | if (atomic_inc_return(&mesh_paths->entries) >= | ||
200 | mesh_paths->mean_chain_len * (mesh_paths->hash_mask + 1)) | ||
201 | grow = 1; | ||
202 | |||
203 | endadd: | ||
204 | spin_unlock(&mesh_paths->hashwlock[hash_idx]); | ||
205 | endadd2: | ||
206 | read_unlock(&pathtbl_resize_lock); | ||
207 | if (!err && grow) { | ||
208 | struct mesh_table *oldtbl, *newtbl; | ||
209 | |||
210 | write_lock(&pathtbl_resize_lock); | ||
211 | oldtbl = mesh_paths; | ||
212 | newtbl = mesh_table_grow(mesh_paths); | ||
213 | if (!newtbl) { | ||
214 | write_unlock(&pathtbl_resize_lock); | ||
215 | return -ENOMEM; | ||
216 | } | ||
217 | rcu_assign_pointer(mesh_paths, newtbl); | ||
218 | synchronize_rcu(); | ||
219 | mesh_table_free(oldtbl, false); | ||
220 | write_unlock(&pathtbl_resize_lock); | ||
221 | } | ||
222 | return err; | ||
223 | } | ||
224 | |||
225 | |||
226 | /** | ||
227 | * mesh_plink_broken - deactivates paths and sends perr when a link breaks | ||
228 | * | ||
229 | * @sta: broken peer link | ||
230 | * | ||
231 | * This function must be called from the rate control algorithm if enough | ||
232 | * delivery errors suggest that a peer link is no longer usable. | ||
233 | */ | ||
234 | void mesh_plink_broken(struct sta_info *sta) | ||
235 | { | ||
236 | struct mesh_path *mpath; | ||
237 | struct mpath_node *node; | ||
238 | struct hlist_node *p; | ||
239 | struct net_device *dev = sta->sdata->dev; | ||
240 | int i; | ||
241 | |||
242 | rcu_read_lock(); | ||
243 | for_each_mesh_entry(mesh_paths, p, node, i) { | ||
244 | mpath = node->mpath; | ||
245 | spin_lock_bh(&mpath->state_lock); | ||
246 | if (mpath->next_hop == sta && | ||
247 | mpath->flags & MESH_PATH_ACTIVE && | ||
248 | !(mpath->flags & MESH_PATH_FIXED)) { | ||
249 | mpath->flags &= ~MESH_PATH_ACTIVE; | ||
250 | ++mpath->dsn; | ||
251 | spin_unlock_bh(&mpath->state_lock); | ||
252 | mesh_path_error_tx(mpath->dst, | ||
253 | cpu_to_le32(mpath->dsn), | ||
254 | dev->broadcast, dev); | ||
255 | } else | ||
256 | spin_unlock_bh(&mpath->state_lock); | ||
257 | } | ||
258 | rcu_read_unlock(); | ||
259 | } | ||
260 | EXPORT_SYMBOL(mesh_plink_broken); | ||
261 | |||
262 | /** | ||
263 | * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches | ||
264 | * | ||
265 | * @sta: mesh peer to match | ||
266 | * | ||
267 | * RCU notes: this function is called when a mesh plink transitions from | ||
268 | * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that | ||
269 | * allows path creation. This will happen before the sta can be freed (because | ||
270 | * sta_info_destroy() calls this) so any reader in a rcu read block will be | ||
271 | * protected against the plink disappearing. | ||
272 | */ | ||
273 | void mesh_path_flush_by_nexthop(struct sta_info *sta) | ||
274 | { | ||
275 | struct mesh_path *mpath; | ||
276 | struct mpath_node *node; | ||
277 | struct hlist_node *p; | ||
278 | int i; | ||
279 | |||
280 | for_each_mesh_entry(mesh_paths, p, node, i) { | ||
281 | mpath = node->mpath; | ||
282 | if (mpath->next_hop == sta) | ||
283 | mesh_path_del(mpath->dst, mpath->dev); | ||
284 | } | ||
285 | } | ||
286 | |||
287 | void mesh_path_flush(struct net_device *dev) | ||
288 | { | ||
289 | struct mesh_path *mpath; | ||
290 | struct mpath_node *node; | ||
291 | struct hlist_node *p; | ||
292 | int i; | ||
293 | |||
294 | for_each_mesh_entry(mesh_paths, p, node, i) { | ||
295 | mpath = node->mpath; | ||
296 | if (mpath->dev == dev) | ||
297 | mesh_path_del(mpath->dst, mpath->dev); | ||
298 | } | ||
299 | } | ||
300 | |||
301 | static void mesh_path_node_reclaim(struct rcu_head *rp) | ||
302 | { | ||
303 | struct mpath_node *node = container_of(rp, struct mpath_node, rcu); | ||
304 | struct ieee80211_sub_if_data *sdata = | ||
305 | IEEE80211_DEV_TO_SUB_IF(node->mpath->dev); | ||
306 | |||
307 | del_timer_sync(&node->mpath->timer); | ||
308 | atomic_dec(&sdata->u.sta.mpaths); | ||
309 | kfree(node->mpath); | ||
310 | kfree(node); | ||
311 | } | ||
312 | |||
313 | /** | ||
314 | * mesh_path_del - delete a mesh path from the table | ||
315 | * | ||
316 | * @addr: dst address (ETH_ALEN length) | ||
317 | * @dev: local interface | ||
318 | * | ||
319 | * Returns: 0 if successful | ||
320 | */ | ||
321 | int mesh_path_del(u8 *addr, struct net_device *dev) | ||
322 | { | ||
323 | struct mesh_path *mpath; | ||
324 | struct mpath_node *node; | ||
325 | struct hlist_head *bucket; | ||
326 | struct hlist_node *n; | ||
327 | int hash_idx; | ||
328 | int err = 0; | ||
329 | |||
330 | read_lock(&pathtbl_resize_lock); | ||
331 | hash_idx = mesh_table_hash(addr, dev, mesh_paths); | ||
332 | bucket = &mesh_paths->hash_buckets[hash_idx]; | ||
333 | |||
334 | spin_lock(&mesh_paths->hashwlock[hash_idx]); | ||
335 | hlist_for_each_entry(node, n, bucket, list) { | ||
336 | mpath = node->mpath; | ||
337 | if (mpath->dev == dev && | ||
338 | memcmp(addr, mpath->dst, ETH_ALEN) == 0) { | ||
339 | spin_lock_bh(&mpath->state_lock); | ||
340 | mpath->flags |= MESH_PATH_RESOLVING; | ||
341 | hlist_del_rcu(&node->list); | ||
342 | call_rcu(&node->rcu, mesh_path_node_reclaim); | ||
343 | atomic_dec(&mesh_paths->entries); | ||
344 | spin_unlock_bh(&mpath->state_lock); | ||
345 | goto enddel; | ||
346 | } | ||
347 | } | ||
348 | |||
349 | err = -ENXIO; | ||
350 | enddel: | ||
351 | spin_unlock(&mesh_paths->hashwlock[hash_idx]); | ||
352 | read_unlock(&pathtbl_resize_lock); | ||
353 | return err; | ||
354 | } | ||
355 | |||
356 | /** | ||
357 | * mesh_path_tx_pending - sends pending frames in a mesh path queue | ||
358 | * | ||
359 | * @mpath: mesh path to activate | ||
360 | * | ||
361 | * Locking: the state_lock of the mpath structure must NOT be held when calling | ||
362 | * this function. | ||
363 | */ | ||
364 | void mesh_path_tx_pending(struct mesh_path *mpath) | ||
365 | { | ||
366 | struct sk_buff *skb; | ||
367 | |||
368 | while ((skb = skb_dequeue(&mpath->frame_queue)) && | ||
369 | (mpath->flags & MESH_PATH_ACTIVE)) | ||
370 | dev_queue_xmit(skb); | ||
371 | } | ||
372 | |||
373 | /** | ||
374 | * mesh_path_discard_frame - discard a frame whose path could not be resolved | ||
375 | * | ||
376 | * @skb: frame to discard | ||
377 | * @dev: network device the frame was to be sent through | ||
378 | * | ||
379 | * If the frame was being forwarded from another MP, a PERR frame will be sent | ||
380 | * to the precursor. | ||
381 | * | ||
382 | * Locking: the function must be called within a rcu_read_lock region | ||
383 | */ | ||
384 | void mesh_path_discard_frame(struct sk_buff *skb, struct net_device *dev) | ||
385 | { | ||
386 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
387 | struct mesh_path *mpath; | ||
388 | u32 dsn = 0; | ||
389 | |||
390 | if (skb->pkt_type == PACKET_OTHERHOST) { | ||
391 | struct ieee80211s_hdr *prev_meshhdr; | ||
392 | int mshhdrlen; | ||
393 | u8 *ra, *da; | ||
394 | |||
395 | prev_meshhdr = ((struct ieee80211s_hdr *)skb->cb); | ||
396 | mshhdrlen = ieee80211_get_mesh_hdrlen(prev_meshhdr); | ||
397 | da = skb->data; | ||
398 | ra = MESH_PREQ(skb); | ||
399 | mpath = mesh_path_lookup(da, dev); | ||
400 | if (mpath) | ||
401 | dsn = ++mpath->dsn; | ||
402 | mesh_path_error_tx(skb->data, cpu_to_le32(dsn), ra, dev); | ||
403 | } | ||
404 | |||
405 | kfree_skb(skb); | ||
406 | sdata->u.sta.mshstats.dropped_frames_no_route++; | ||
407 | } | ||
408 | |||
409 | /** | ||
410 | * mesh_path_flush_pending - free the pending queue of a mesh path | ||
411 | * | ||
412 | * @mpath: mesh path whose queue has to be freed | ||
413 | * | ||
414 | * Locking: the function must be called within a rcu_read_lock region | ||
415 | */ | ||
416 | void mesh_path_flush_pending(struct mesh_path *mpath) | ||
417 | { | ||
418 | struct ieee80211_sub_if_data *sdata; | ||
419 | struct sk_buff *skb; | ||
420 | |||
421 | sdata = IEEE80211_DEV_TO_SUB_IF(mpath->dev); | ||
422 | |||
423 | while ((skb = skb_dequeue(&mpath->frame_queue)) && | ||
424 | (mpath->flags & MESH_PATH_ACTIVE)) | ||
425 | mesh_path_discard_frame(skb, mpath->dev); | ||
426 | } | ||
427 | |||
428 | /** | ||
429 | * mesh_path_fix_nexthop - force a specific next hop for a mesh path | ||
430 | * | ||
431 | * @mpath: the mesh path to modify | ||
432 | * @next_hop: the next hop to force | ||
433 | * | ||
434 | * Locking: this function must be called holding mpath->state_lock | ||
435 | */ | ||
436 | void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop) | ||
437 | { | ||
438 | spin_lock_bh(&mpath->state_lock); | ||
439 | mesh_path_assign_nexthop(mpath, next_hop); | ||
440 | mpath->dsn = 0xffff; | ||
441 | mpath->metric = 0; | ||
442 | mpath->hop_count = 0; | ||
443 | mpath->exp_time = 0; | ||
444 | mpath->flags |= MESH_PATH_FIXED; | ||
445 | mesh_path_activate(mpath); | ||
446 | spin_unlock_bh(&mpath->state_lock); | ||
447 | mesh_path_tx_pending(mpath); | ||
448 | } | ||
449 | |||
450 | static void mesh_path_node_free(struct hlist_node *p, bool free_leafs) | ||
451 | { | ||
452 | struct mesh_path *mpath; | ||
453 | struct mpath_node *node = hlist_entry(p, struct mpath_node, list); | ||
454 | mpath = node->mpath; | ||
455 | hlist_del_rcu(p); | ||
456 | synchronize_rcu(); | ||
457 | if (free_leafs) | ||
458 | kfree(mpath); | ||
459 | kfree(node); | ||
460 | } | ||
461 | |||
462 | static void mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl) | ||
463 | { | ||
464 | struct mesh_path *mpath; | ||
465 | struct mpath_node *node, *new_node; | ||
466 | u32 hash_idx; | ||
467 | |||
468 | node = hlist_entry(p, struct mpath_node, list); | ||
469 | mpath = node->mpath; | ||
470 | new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL); | ||
471 | new_node->mpath = mpath; | ||
472 | hash_idx = mesh_table_hash(mpath->dst, mpath->dev, newtbl); | ||
473 | hlist_add_head(&new_node->list, | ||
474 | &newtbl->hash_buckets[hash_idx]); | ||
475 | } | ||
476 | |||
477 | int mesh_pathtbl_init(void) | ||
478 | { | ||
479 | mesh_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER); | ||
480 | if (!mesh_paths) | ||
481 | return -ENOMEM; | ||
482 | mesh_paths->free_node = &mesh_path_node_free; | ||
483 | mesh_paths->copy_node = &mesh_path_node_copy; | ||
484 | mesh_paths->mean_chain_len = MEAN_CHAIN_LEN; | ||
485 | return 0; | ||
486 | } | ||
487 | |||
488 | void mesh_path_expire(struct net_device *dev) | ||
489 | { | ||
490 | struct mesh_path *mpath; | ||
491 | struct mpath_node *node; | ||
492 | struct hlist_node *p; | ||
493 | int i; | ||
494 | |||
495 | read_lock(&pathtbl_resize_lock); | ||
496 | for_each_mesh_entry(mesh_paths, p, node, i) { | ||
497 | if (node->mpath->dev != dev) | ||
498 | continue; | ||
499 | mpath = node->mpath; | ||
500 | spin_lock_bh(&mpath->state_lock); | ||
501 | if ((!(mpath->flags & MESH_PATH_RESOLVING)) && | ||
502 | (!(mpath->flags & MESH_PATH_FIXED)) && | ||
503 | time_after(jiffies, | ||
504 | mpath->exp_time + MESH_PATH_EXPIRE)) { | ||
505 | spin_unlock_bh(&mpath->state_lock); | ||
506 | mesh_path_del(mpath->dst, mpath->dev); | ||
507 | } else | ||
508 | spin_unlock_bh(&mpath->state_lock); | ||
509 | } | ||
510 | read_unlock(&pathtbl_resize_lock); | ||
511 | } | ||
512 | |||
513 | void mesh_pathtbl_unregister(void) | ||
514 | { | ||
515 | mesh_table_free(mesh_paths, true); | ||
516 | } | ||
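mesh_pathtbl.c keeps the path table as an RCU-protected hash table: mesh_path_lookup() must run under rcu_read_lock() and the returned mesh_path may only be dereferenced inside that read-side section, while mesh_path_add() and mesh_path_del() take the resize rwlock and the per-bucket spinlock internally. A usage sketch under those rules follows; the helper name and the calling context are ours, not something the patch defines.

/* Usage sketch only: resolve dst to a next-hop address under the locking
 * rules documented above. */
static int resolve_next_hop_sketch(u8 *dst, struct net_device *dev,
				   u8 *next_hop)
{
	struct mesh_path *mpath;
	int err = -ENOENT;

	rcu_read_lock();
	mpath = mesh_path_lookup(dst, dev);	/* read side: RCU only */
	if (mpath && (mpath->flags & MESH_PATH_ACTIVE)) {
		memcpy(next_hop, mpath->next_hop->addr, ETH_ALEN);
		err = 0;
	}
	rcu_read_unlock();

	if (err == -ENOENT) {
		/* No usable path yet: create an empty one so discovery can
		 * fill it in. -EEXIST only means we raced with another CPU. */
		int ret = mesh_path_add(dst, dev);
		if (ret && ret != -EEXIST)
			err = ret;
	}
	return err;
}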
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c new file mode 100644 index 000000000000..18fe52436c47 --- /dev/null +++ b/net/mac80211/mesh_plink.c | |||
@@ -0,0 +1,761 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2008 open80211s Ltd. | ||
3 | * Author: Luis Carlos Cobo <luisca@cozybit.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License version 2 as | ||
7 | * published by the Free Software Foundation. | ||
8 | */ | ||
9 | #include <linux/kernel.h> | ||
10 | #include <linux/random.h> | ||
11 | #include "ieee80211_i.h" | ||
12 | #include "ieee80211_rate.h" | ||
13 | #include "mesh.h" | ||
14 | |||
15 | #ifdef CONFIG_MAC80211_VERBOSE_MPL_DEBUG | ||
16 | #define mpl_dbg(fmt, args...) printk(KERN_DEBUG fmt, ##args) | ||
17 | #else | ||
18 | #define mpl_dbg(fmt, args...) do { (void)(0); } while (0) | ||
19 | #endif | ||
20 | |||
21 | #define IEEE80211_FC(type, stype) cpu_to_le16(type | stype) | ||
22 | #define PLINK_GET_FRAME_SUBTYPE(p) (p) | ||
23 | #define PLINK_GET_LLID(p) (p + 1) | ||
24 | #define PLINK_GET_PLID(p) (p + 3) | ||
25 | |||
26 | #define mod_plink_timer(s, t) (mod_timer(&s->plink_timer, \ | ||
27 | jiffies + HZ * t / 1000)) | ||
28 | |||
29 | /* Peer link cancel reasons, all subject to ANA approval */ | ||
30 | #define MESH_LINK_CANCELLED 2 | ||
31 | #define MESH_MAX_NEIGHBORS 3 | ||
32 | #define MESH_CAPABILITY_POLICY_VIOLATION 4 | ||
33 | #define MESH_CLOSE_RCVD 5 | ||
34 | #define MESH_MAX_RETRIES 6 | ||
35 | #define MESH_CONFIRM_TIMEOUT 7 | ||
36 | #define MESH_SECURITY_ROLE_NEGOTIATION_DIFFERS 8 | ||
37 | #define MESH_SECURITY_AUTHENTICATION_IMPOSSIBLE 9 | ||
38 | #define MESH_SECURITY_FAILED_VERIFICATION 10 | ||
39 | |||
40 | #define dot11MeshMaxRetries(s) (s->u.sta.mshcfg.dot11MeshMaxRetries) | ||
41 | #define dot11MeshRetryTimeout(s) (s->u.sta.mshcfg.dot11MeshRetryTimeout) | ||
42 | #define dot11MeshConfirmTimeout(s) (s->u.sta.mshcfg.dot11MeshConfirmTimeout) | ||
43 | #define dot11MeshHoldingTimeout(s) (s->u.sta.mshcfg.dot11MeshHoldingTimeout) | ||
44 | #define dot11MeshMaxPeerLinks(s) (s->u.sta.mshcfg.dot11MeshMaxPeerLinks) | ||
45 | |||
46 | enum plink_frame_type { | ||
47 | PLINK_OPEN = 0, | ||
48 | PLINK_CONFIRM, | ||
49 | PLINK_CLOSE | ||
50 | }; | ||
51 | |||
52 | enum plink_event { | ||
53 | PLINK_UNDEFINED, | ||
54 | OPN_ACPT, | ||
55 | OPN_RJCT, | ||
56 | OPN_IGNR, | ||
57 | CNF_ACPT, | ||
58 | CNF_RJCT, | ||
59 | CNF_IGNR, | ||
60 | CLS_ACPT, | ||
61 | CLS_IGNR | ||
62 | }; | ||
63 | |||
64 | static inline | ||
65 | void mesh_plink_inc_estab_count(struct ieee80211_sub_if_data *sdata) | ||
66 | { | ||
67 | atomic_inc(&sdata->u.sta.mshstats.estab_plinks); | ||
68 | mesh_accept_plinks_update(sdata); | ||
69 | } | ||
70 | |||
71 | static inline | ||
72 | void mesh_plink_dec_estab_count(struct ieee80211_sub_if_data *sdata) | ||
73 | { | ||
74 | atomic_dec(&sdata->u.sta.mshstats.estab_plinks); | ||
75 | mesh_accept_plinks_update(sdata); | ||
76 | } | ||
77 | |||
78 | /** | ||
79 | * mesh_plink_fsm_restart - restart a mesh peer link finite state machine | ||
80 | * | ||
81 | * @sta: mesh peer link to restart | ||
82 | * | ||
83 | * Locking: this function must be called holding sta->plink_lock | ||
84 | */ | ||
85 | static inline void mesh_plink_fsm_restart(struct sta_info *sta) | ||
86 | { | ||
87 | sta->plink_state = PLINK_LISTEN; | ||
88 | sta->llid = sta->plid = sta->reason = 0; | ||
89 | sta->plink_retries = 0; | ||
90 | } | ||
91 | |||
92 | static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata, | ||
93 | u8 *hw_addr, u64 rates) | ||
94 | { | ||
95 | struct ieee80211_local *local = sdata->local; | ||
96 | struct sta_info *sta; | ||
97 | |||
98 | if (local->num_sta >= MESH_MAX_PLINKS) | ||
99 | return NULL; | ||
100 | |||
101 | sta = sta_info_alloc(sdata, hw_addr, GFP_ATOMIC); | ||
102 | if (!sta) | ||
103 | return NULL; | ||
104 | |||
105 | sta->flags |= WLAN_STA_AUTHORIZED; | ||
106 | sta->supp_rates[local->hw.conf.channel->band] = rates; | ||
107 | |||
108 | return sta; | ||
109 | } | ||
110 | |||
111 | /** | ||
112 | * __mesh_plink_deactivate - deactivate mesh peer link | ||
113 | * | ||
114 | * @sta: mesh peer link to deactivate | ||
115 | * | ||
116 | * All mesh paths with this peer as next hop will be flushed | ||
117 | * | ||
118 | * Locking: the caller must hold sta->plink_lock | ||
119 | */ | ||
120 | static void __mesh_plink_deactivate(struct sta_info *sta) | ||
121 | { | ||
122 | struct ieee80211_sub_if_data *sdata = sta->sdata; | ||
123 | |||
124 | if (sta->plink_state == PLINK_ESTAB) | ||
125 | mesh_plink_dec_estab_count(sdata); | ||
126 | sta->plink_state = PLINK_BLOCKED; | ||
127 | mesh_path_flush_by_nexthop(sta); | ||
128 | } | ||
129 | |||
130 | /** | ||
131 | * mesh_plink_deactivate - deactivate mesh peer link | ||
132 | * | ||
133 | * @sta: mesh peer link to deactivate | ||
134 | * | ||
135 | * All mesh paths with this peer as next hop will be flushed | ||
136 | */ | ||
137 | void mesh_plink_deactivate(struct sta_info *sta) | ||
138 | { | ||
139 | spin_lock_bh(&sta->plink_lock); | ||
140 | __mesh_plink_deactivate(sta); | ||
141 | spin_unlock_bh(&sta->plink_lock); | ||
142 | } | ||
143 | |||
144 | static int mesh_plink_frame_tx(struct net_device *dev, | ||
145 | enum plink_frame_type action, u8 *da, __le16 llid, __le16 plid, | ||
146 | __le16 reason) { | ||
147 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
148 | struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400); | ||
149 | struct ieee80211_mgmt *mgmt; | ||
150 | bool include_plid = false; | ||
151 | u8 *pos; | ||
152 | int ie_len; | ||
153 | |||
154 | if (!skb) | ||
155 | return -1; | ||
156 | skb_reserve(skb, local->hw.extra_tx_headroom); | ||
157 | /* 25 is the size of the common mgmt part (24) plus the size of the | ||
158 | * common action part (1) | ||
159 | */ | ||
160 | mgmt = (struct ieee80211_mgmt *) | ||
161 | skb_put(skb, 25 + sizeof(mgmt->u.action.u.plink_action)); | ||
162 | memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.plink_action)); | ||
163 | mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, | ||
164 | IEEE80211_STYPE_ACTION); | ||
165 | memcpy(mgmt->da, da, ETH_ALEN); | ||
166 | memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); | ||
167 | /* BSSID is left zeroed, wildcard value */ | ||
168 | mgmt->u.action.category = PLINK_CATEGORY; | ||
169 | mgmt->u.action.u.plink_action.action_code = action; | ||
170 | |||
171 | if (action == PLINK_CLOSE) | ||
172 | mgmt->u.action.u.plink_action.aux = reason; | ||
173 | else { | ||
174 | mgmt->u.action.u.plink_action.aux = cpu_to_le16(0x0); | ||
175 | if (action == PLINK_CONFIRM) { | ||
176 | pos = skb_put(skb, 4); | ||
177 | /* two-byte status code followed by two-byte AID */ | ||
178 | memset(pos, 0, 4); | ||
179 | } | ||
180 | mesh_mgmt_ies_add(skb, dev); | ||
181 | } | ||
182 | |||
183 | /* Add Peer Link Management element */ | ||
184 | switch (action) { | ||
185 | case PLINK_OPEN: | ||
186 | ie_len = 3; | ||
187 | break; | ||
188 | case PLINK_CONFIRM: | ||
189 | ie_len = 5; | ||
190 | include_plid = true; | ||
191 | break; | ||
192 | case PLINK_CLOSE: | ||
193 | default: | ||
194 | if (!plid) | ||
195 | ie_len = 5; | ||
196 | else { | ||
197 | ie_len = 7; | ||
198 | include_plid = true; | ||
199 | } | ||
200 | break; | ||
201 | } | ||
202 | |||
203 | pos = skb_put(skb, 2 + ie_len); | ||
204 | *pos++ = WLAN_EID_PEER_LINK; | ||
205 | *pos++ = ie_len; | ||
206 | *pos++ = action; | ||
207 | memcpy(pos, &llid, 2); | ||
208 | if (include_plid) { | ||
209 | pos += 2; | ||
210 | memcpy(pos, &plid, 2); | ||
211 | } | ||
212 | if (action == PLINK_CLOSE) { | ||
213 | pos += 2; | ||
214 | memcpy(pos, &reason, 2); | ||
215 | } | ||
216 | |||
217 | ieee80211_sta_tx(dev, skb, 0); | ||
218 | return 0; | ||
219 | } | ||
220 | |||
221 | void mesh_neighbour_update(u8 *hw_addr, u64 rates, struct net_device *dev, | ||
222 | bool peer_accepting_plinks) | ||
223 | { | ||
224 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
225 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
226 | struct sta_info *sta; | ||
227 | |||
228 | rcu_read_lock(); | ||
229 | |||
230 | sta = sta_info_get(local, hw_addr); | ||
231 | if (!sta) { | ||
232 | sta = mesh_plink_alloc(sdata, hw_addr, rates); | ||
233 | if (!sta) { | ||
234 | rcu_read_unlock(); | ||
235 | return; | ||
236 | } | ||
237 | if (sta_info_insert(sta)) { | ||
238 | sta_info_destroy(sta); | ||
239 | rcu_read_unlock(); | ||
240 | return; | ||
241 | } | ||
242 | } | ||
243 | |||
244 | sta->last_rx = jiffies; | ||
245 | sta->supp_rates[local->hw.conf.channel->band] = rates; | ||
246 | if (peer_accepting_plinks && sta->plink_state == PLINK_LISTEN && | ||
247 | sdata->u.sta.accepting_plinks && | ||
248 | sdata->u.sta.mshcfg.auto_open_plinks) | ||
249 | mesh_plink_open(sta); | ||
250 | |||
251 | rcu_read_unlock(); | ||
252 | } | ||
253 | |||
254 | static void mesh_plink_timer(unsigned long data) | ||
255 | { | ||
256 | struct sta_info *sta; | ||
257 | __le16 llid, plid, reason; | ||
258 | struct net_device *dev = NULL; | ||
259 | struct ieee80211_sub_if_data *sdata; | ||
260 | #ifdef CONFIG_MAC80211_VERBOSE_MPL_DEBUG | ||
261 | DECLARE_MAC_BUF(mac); | ||
262 | #endif | ||
263 | |||
264 | /* | ||
265 | * This STA is valid because sta_info_destroy() will | ||
266 | * del_timer_sync() this timer after having made sure | ||
267 | * it cannot be re-added (by deleting the plink). | ||
268 | */ | ||
269 | sta = (struct sta_info *) data; | ||
270 | |||
271 | spin_lock_bh(&sta->plink_lock); | ||
272 | if (sta->ignore_plink_timer) { | ||
273 | sta->ignore_plink_timer = false; | ||
274 | spin_unlock_bh(&sta->plink_lock); | ||
275 | return; | ||
276 | } | ||
277 | mpl_dbg("Mesh plink timer for %s fired on state %d\n", | ||
278 | print_mac(mac, sta->addr), sta->plink_state); | ||
279 | reason = 0; | ||
280 | llid = sta->llid; | ||
281 | plid = sta->plid; | ||
282 | sdata = sta->sdata; | ||
283 | dev = sdata->dev; | ||
284 | |||
285 | switch (sta->plink_state) { | ||
286 | case PLINK_OPN_RCVD: | ||
287 | case PLINK_OPN_SNT: | ||
288 | /* retry timer */ | ||
289 | if (sta->plink_retries < dot11MeshMaxRetries(sdata)) { | ||
290 | u32 rand; | ||
291 | mpl_dbg("Mesh plink for %s (retry, timeout): %d %d\n", | ||
292 | print_mac(mac, sta->addr), | ||
293 | sta->plink_retries, sta->plink_timeout); | ||
294 | get_random_bytes(&rand, sizeof(u32)); | ||
295 | sta->plink_timeout = sta->plink_timeout + | ||
296 | rand % sta->plink_timeout; | ||
297 | ++sta->plink_retries; | ||
298 | mod_plink_timer(sta, sta->plink_timeout); | ||
299 | spin_unlock_bh(&sta->plink_lock); | ||
300 | mesh_plink_frame_tx(dev, PLINK_OPEN, sta->addr, llid, | ||
301 | 0, 0); | ||
302 | break; | ||
303 | } | ||
304 | reason = cpu_to_le16(MESH_MAX_RETRIES); | ||
305 | /* fall through: retries exhausted */ | ||
306 | case PLINK_CNF_RCVD: | ||
307 | /* confirm timer */ | ||
308 | if (!reason) | ||
309 | reason = cpu_to_le16(MESH_CONFIRM_TIMEOUT); | ||
310 | sta->plink_state = PLINK_HOLDING; | ||
311 | mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata)); | ||
312 | spin_unlock_bh(&sta->plink_lock); | ||
313 | mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, plid, | ||
314 | reason); | ||
315 | break; | ||
316 | case PLINK_HOLDING: | ||
317 | /* holding timer */ | ||
318 | del_timer(&sta->plink_timer); | ||
319 | mesh_plink_fsm_restart(sta); | ||
320 | spin_unlock_bh(&sta->plink_lock); | ||
321 | break; | ||
322 | default: | ||
323 | spin_unlock_bh(&sta->plink_lock); | ||
324 | break; | ||
325 | } | ||
326 | } | ||
327 | |||
328 | static inline void mesh_plink_timer_set(struct sta_info *sta, int timeout) | ||
329 | { | ||
330 | sta->plink_timer.expires = jiffies + (HZ * timeout / 1000); | ||
331 | sta->plink_timer.data = (unsigned long) sta; | ||
332 | sta->plink_timer.function = mesh_plink_timer; | ||
333 | sta->plink_timeout = timeout; | ||
334 | add_timer(&sta->plink_timer); | ||
335 | } | ||
336 | |||
337 | int mesh_plink_open(struct sta_info *sta) | ||
338 | { | ||
339 | __le16 llid; | ||
340 | struct ieee80211_sub_if_data *sdata = sta->sdata; | ||
341 | #ifdef CONFIG_MAC80211_VERBOSE_MPL_DEBUG | ||
342 | DECLARE_MAC_BUF(mac); | ||
343 | #endif | ||
344 | |||
345 | spin_lock_bh(&sta->plink_lock); | ||
346 | get_random_bytes(&llid, 2); | ||
347 | sta->llid = llid; | ||
348 | if (sta->plink_state != PLINK_LISTEN) { | ||
349 | spin_unlock_bh(&sta->plink_lock); | ||
350 | return -EBUSY; | ||
351 | } | ||
352 | sta->plink_state = PLINK_OPN_SNT; | ||
353 | mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata)); | ||
354 | spin_unlock_bh(&sta->plink_lock); | ||
355 | mpl_dbg("Mesh plink: starting establishment with %s\n", | ||
356 | print_mac(mac, sta->addr)); | ||
357 | |||
358 | return mesh_plink_frame_tx(sdata->dev, PLINK_OPEN, | ||
359 | sta->addr, llid, 0, 0); | ||
360 | } | ||
361 | |||
362 | void mesh_plink_block(struct sta_info *sta) | ||
363 | { | ||
364 | #ifdef CONFIG_MAC80211_VERBOSE_MPL_DEBUG | ||
365 | DECLARE_MAC_BUF(mac); | ||
366 | #endif | ||
367 | |||
368 | spin_lock_bh(&sta->plink_lock); | ||
369 | __mesh_plink_deactivate(sta); | ||
370 | sta->plink_state = PLINK_BLOCKED; | ||
371 | spin_unlock_bh(&sta->plink_lock); | ||
372 | } | ||
373 | |||
374 | int mesh_plink_close(struct sta_info *sta) | ||
375 | { | ||
376 | struct ieee80211_sub_if_data *sdata = sta->sdata; | ||
377 | __le16 llid, plid, reason; | ||
378 | #ifdef CONFIG_MAC80211_VERBOSE_MPL_DEBUG | ||
379 | DECLARE_MAC_BUF(mac); | ||
380 | #endif | ||
381 | |||
382 | mpl_dbg("Mesh plink: closing link with %s\n", | ||
383 | print_mac(mac, sta->addr)); | ||
384 | spin_lock_bh(&sta->plink_lock); | ||
385 | sta->reason = cpu_to_le16(MESH_LINK_CANCELLED); | ||
386 | reason = sta->reason; | ||
387 | |||
388 | if (sta->plink_state == PLINK_LISTEN || | ||
389 | sta->plink_state == PLINK_BLOCKED) { | ||
390 | mesh_plink_fsm_restart(sta); | ||
391 | spin_unlock_bh(&sta->plink_lock); | ||
392 | return 0; | ||
393 | } else if (sta->plink_state == PLINK_ESTAB) { | ||
394 | __mesh_plink_deactivate(sta); | ||
395 | /* The timer should not be running */ | ||
396 | mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata)); | ||
397 | } else if (!mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata))) | ||
398 | sta->ignore_plink_timer = true; | ||
399 | |||
400 | sta->plink_state = PLINK_HOLDING; | ||
401 | llid = sta->llid; | ||
402 | plid = sta->plid; | ||
403 | spin_unlock_bh(&sta->plink_lock); | ||
404 | mesh_plink_frame_tx(sta->sdata->dev, PLINK_CLOSE, sta->addr, llid, | ||
405 | plid, reason); | ||
406 | return 0; | ||
407 | } | ||
408 | |||
409 | void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt, | ||
410 | size_t len, struct ieee80211_rx_status *rx_status) | ||
411 | { | ||
412 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
413 | struct ieee80211_local *local = sdata->local; | ||
414 | struct ieee802_11_elems elems; | ||
415 | struct sta_info *sta; | ||
416 | enum plink_event event; | ||
417 | enum plink_frame_type ftype; | ||
418 | size_t baselen; | ||
419 | u8 ie_len; | ||
420 | u8 *baseaddr; | ||
421 | __le16 plid, llid, reason; | ||
422 | #ifdef CONFIG_MAC80211_VERBOSE_MPL_DEBUG | ||
423 | DECLARE_MAC_BUF(mac); | ||
424 | #endif | ||
425 | |||
426 | if (is_multicast_ether_addr(mgmt->da)) { | ||
427 | mpl_dbg("Mesh plink: ignore frame from multicast address"); | ||
428 | return; | ||
429 | } | ||
430 | |||
431 | baseaddr = mgmt->u.action.u.plink_action.variable; | ||
432 | baselen = (u8 *) mgmt->u.action.u.plink_action.variable - (u8 *) mgmt; | ||
433 | if (mgmt->u.action.u.plink_action.action_code == PLINK_CONFIRM) { | ||
434 | baseaddr += 4; | ||
435 | baselen -= 4; | ||
436 | } | ||
437 | ieee802_11_parse_elems(baseaddr, len - baselen, &elems); | ||
438 | if (!elems.peer_link) { | ||
439 | mpl_dbg("Mesh plink: missing necessary peer link ie\n"); | ||
440 | return; | ||
441 | } | ||
442 | |||
443 | ftype = *((u8 *)PLINK_GET_FRAME_SUBTYPE(elems.peer_link)); | ||
444 | ie_len = elems.peer_link_len; | ||
445 | if ((ftype == PLINK_OPEN && ie_len != 3) || | ||
446 | (ftype == PLINK_CONFIRM && ie_len != 5) || | ||
447 | (ftype == PLINK_CLOSE && ie_len != 5 && ie_len != 7)) { | ||
448 | mpl_dbg("Mesh plink: incorrect plink ie length\n"); | ||
449 | return; | ||
450 | } | ||
451 | |||
452 | if (ftype != PLINK_CLOSE && (!elems.mesh_id || !elems.mesh_config)) { | ||
453 | mpl_dbg("Mesh plink: missing necessary ie\n"); | ||
454 | return; | ||
455 | } | ||
456 | /* Note the lines below are correct: the llid in the frame is the plid | ||
457 | * from the point of view of this host. | ||
458 | */ | ||
459 | memcpy(&plid, PLINK_GET_LLID(elems.peer_link), 2); | ||
460 | if (ftype == PLINK_CONFIRM || (ftype == PLINK_CLOSE && ie_len == 7)) | ||
461 | memcpy(&llid, PLINK_GET_PLID(elems.peer_link), 2); | ||
462 | |||
463 | rcu_read_lock(); | ||
464 | |||
465 | sta = sta_info_get(local, mgmt->sa); | ||
466 | if (!sta && ftype != PLINK_OPEN) { | ||
467 | mpl_dbg("Mesh plink: cls or cnf from unknown peer\n"); | ||
468 | rcu_read_unlock(); | ||
469 | return; | ||
470 | } | ||
471 | |||
472 | if (sta && sta->plink_state == PLINK_BLOCKED) { | ||
473 | rcu_read_unlock(); | ||
474 | return; | ||
475 | } | ||
476 | |||
477 | /* Now we will figure out the appropriate event... */ | ||
478 | event = PLINK_UNDEFINED; | ||
479 | if (ftype != PLINK_CLOSE && (!mesh_matches_local(&elems, dev))) { | ||
480 | switch (ftype) { | ||
481 | case PLINK_OPEN: | ||
482 | event = OPN_RJCT; | ||
483 | break; | ||
484 | case PLINK_CONFIRM: | ||
485 | event = CNF_RJCT; | ||
486 | break; | ||
487 | case PLINK_CLOSE: | ||
488 | /* avoid warning */ | ||
489 | break; | ||
490 | } | ||
491 | spin_lock_bh(&sta->plink_lock); | ||
492 | } else if (!sta) { | ||
493 | /* ftype == PLINK_OPEN */ | ||
494 | u64 rates; | ||
495 | if (!mesh_plink_free_count(sdata)) { | ||
496 | mpl_dbg("Mesh plink error: no more free plinks\n"); | ||
497 | rcu_read_unlock(); | ||
498 | return; | ||
499 | } | ||
500 | |||
501 | rates = ieee80211_sta_get_rates(local, &elems, rx_status->band); | ||
502 | sta = mesh_plink_alloc(sdata, mgmt->sa, rates); | ||
503 | if (!sta) { | ||
504 | mpl_dbg("Mesh plink error: plink table full\n"); | ||
505 | rcu_read_unlock(); | ||
506 | return; | ||
507 | } | ||
508 | if (sta_info_insert(sta)) { | ||
509 | sta_info_destroy(sta); | ||
510 | rcu_read_unlock(); | ||
511 | return; | ||
512 | } | ||
513 | event = OPN_ACPT; | ||
514 | spin_lock_bh(&sta->plink_lock); | ||
515 | } else { | ||
516 | spin_lock_bh(&sta->plink_lock); | ||
517 | switch (ftype) { | ||
518 | case PLINK_OPEN: | ||
519 | if (!mesh_plink_free_count(sdata) || | ||
520 | (sta->plid && sta->plid != plid)) | ||
521 | event = OPN_IGNR; | ||
522 | else | ||
523 | event = OPN_ACPT; | ||
524 | break; | ||
525 | case PLINK_CONFIRM: | ||
526 | if (!mesh_plink_free_count(sdata) || | ||
527 | (sta->llid != llid || sta->plid != plid)) | ||
528 | event = CNF_IGNR; | ||
529 | else | ||
530 | event = CNF_ACPT; | ||
531 | break; | ||
532 | case PLINK_CLOSE: | ||
533 | if (sta->plink_state == PLINK_ESTAB) | ||
534 | /* Do not check for llid or plid. This does not | ||
535 | * follow the standard but since multiple plinks | ||
536 | * per sta are not supported, it is necessary in | ||
537 | * order to avoid a livelock when MP A sees an | ||
538 | * established peer link to MP B but MP B does not | ||
539 | * see it. This can be caused by a timeout in | ||
540 | * B's peer link establishment or B being | ||
541 | * restarted. | ||
542 | */ | ||
543 | event = CLS_ACPT; | ||
544 | else if (sta->plid != plid) | ||
545 | event = CLS_IGNR; | ||
546 | else if (ie_len == 7 && sta->llid != llid) | ||
547 | event = CLS_IGNR; | ||
548 | else | ||
549 | event = CLS_ACPT; | ||
550 | break; | ||
551 | default: | ||
552 | mpl_dbg("Mesh plink: unknown frame subtype\n"); | ||
553 | spin_unlock_bh(&sta->plink_lock); | ||
554 | rcu_read_unlock(); | ||
555 | return; | ||
556 | } | ||
557 | } | ||
558 | |||
559 | mpl_dbg("Mesh plink (peer, state, llid, plid, event): %s %d %d %d %d\n", | ||
560 | print_mac(mac, mgmt->sa), sta->plink_state, | ||
561 | le16_to_cpu(sta->llid), le16_to_cpu(sta->plid), | ||
562 | event); | ||
563 | reason = 0; | ||
564 | switch (sta->plink_state) { | ||
565 | /* spin_unlock as soon as state is updated at each case */ | ||
566 | case PLINK_LISTEN: | ||
567 | switch (event) { | ||
568 | case CLS_ACPT: | ||
569 | mesh_plink_fsm_restart(sta); | ||
570 | spin_unlock_bh(&sta->plink_lock); | ||
571 | break; | ||
572 | case OPN_ACPT: | ||
573 | sta->plink_state = PLINK_OPN_RCVD; | ||
574 | sta->plid = plid; | ||
575 | get_random_bytes(&llid, 2); | ||
576 | sta->llid = llid; | ||
577 | mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata)); | ||
578 | spin_unlock_bh(&sta->plink_lock); | ||
579 | mesh_plink_frame_tx(dev, PLINK_OPEN, sta->addr, llid, | ||
580 | 0, 0); | ||
581 | mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, | ||
582 | llid, plid, 0); | ||
583 | break; | ||
584 | default: | ||
585 | spin_unlock_bh(&sta->plink_lock); | ||
586 | break; | ||
587 | } | ||
588 | break; | ||
589 | |||
590 | case PLINK_OPN_SNT: | ||
591 | switch (event) { | ||
592 | case OPN_RJCT: | ||
593 | case CNF_RJCT: | ||
594 | reason = cpu_to_le16(MESH_CAPABILITY_POLICY_VIOLATION); | ||
595 | case CLS_ACPT: | ||
596 | if (!reason) | ||
597 | reason = cpu_to_le16(MESH_CLOSE_RCVD); | ||
598 | sta->reason = reason; | ||
599 | sta->plink_state = PLINK_HOLDING; | ||
600 | if (!mod_plink_timer(sta, | ||
601 | dot11MeshHoldingTimeout(sdata))) | ||
602 | sta->ignore_plink_timer = true; | ||
603 | |||
604 | llid = sta->llid; | ||
605 | spin_unlock_bh(&sta->plink_lock); | ||
606 | mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, | ||
607 | plid, reason); | ||
608 | break; | ||
609 | case OPN_ACPT: | ||
610 | /* retry timer is left untouched */ | ||
611 | sta->plink_state = PLINK_OPN_RCVD; | ||
612 | sta->plid = plid; | ||
613 | llid = sta->llid; | ||
614 | spin_unlock_bh(&sta->plink_lock); | ||
615 | mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid, | ||
616 | plid, 0); | ||
617 | break; | ||
618 | case CNF_ACPT: | ||
619 | sta->plink_state = PLINK_CNF_RCVD; | ||
620 | if (!mod_plink_timer(sta, | ||
621 | dot11MeshConfirmTimeout(sdata))) | ||
622 | sta->ignore_plink_timer = true; | ||
623 | |||
624 | spin_unlock_bh(&sta->plink_lock); | ||
625 | break; | ||
626 | default: | ||
627 | spin_unlock_bh(&sta->plink_lock); | ||
628 | break; | ||
629 | } | ||
630 | break; | ||
631 | |||
632 | case PLINK_OPN_RCVD: | ||
633 | switch (event) { | ||
634 | case OPN_RJCT: | ||
635 | case CNF_RJCT: | ||
636 | reason = cpu_to_le16(MESH_CAPABILITY_POLICY_VIOLATION); | ||
637 | case CLS_ACPT: | ||
638 | if (!reason) | ||
639 | reason = cpu_to_le16(MESH_CLOSE_RCVD); | ||
640 | sta->reason = reason; | ||
641 | sta->plink_state = PLINK_HOLDING; | ||
642 | if (!mod_plink_timer(sta, | ||
643 | dot11MeshHoldingTimeout(sdata))) | ||
644 | sta->ignore_plink_timer = true; | ||
645 | |||
646 | llid = sta->llid; | ||
647 | spin_unlock_bh(&sta->plink_lock); | ||
648 | mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, | ||
649 | plid, reason); | ||
650 | break; | ||
651 | case OPN_ACPT: | ||
652 | llid = sta->llid; | ||
653 | spin_unlock_bh(&sta->plink_lock); | ||
654 | mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid, | ||
655 | plid, 0); | ||
656 | break; | ||
657 | case CNF_ACPT: | ||
658 | del_timer(&sta->plink_timer); | ||
659 | sta->plink_state = PLINK_ESTAB; | ||
660 | mesh_plink_inc_estab_count(sdata); | ||
661 | spin_unlock_bh(&sta->plink_lock); | ||
662 | mpl_dbg("Mesh plink with %s ESTABLISHED\n", | ||
663 | print_mac(mac, sta->addr)); | ||
664 | break; | ||
665 | default: | ||
666 | spin_unlock_bh(&sta->plink_lock); | ||
667 | break; | ||
668 | } | ||
669 | break; | ||
670 | |||
671 | case PLINK_CNF_RCVD: | ||
672 | switch (event) { | ||
673 | case OPN_RJCT: | ||
674 | case CNF_RJCT: | ||
675 | reason = cpu_to_le16(MESH_CAPABILITY_POLICY_VIOLATION); | ||
676 | case CLS_ACPT: | ||
677 | if (!reason) | ||
678 | reason = cpu_to_le16(MESH_CLOSE_RCVD); | ||
679 | sta->reason = reason; | ||
680 | sta->plink_state = PLINK_HOLDING; | ||
681 | if (!mod_plink_timer(sta, | ||
682 | dot11MeshHoldingTimeout(sdata))) | ||
683 | sta->ignore_plink_timer = true; | ||
684 | |||
685 | llid = sta->llid; | ||
686 | spin_unlock_bh(&sta->plink_lock); | ||
687 | mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, | ||
688 | plid, reason); | ||
689 | break; | ||
690 | case OPN_ACPT: | ||
691 | del_timer(&sta->plink_timer); | ||
692 | sta->plink_state = PLINK_ESTAB; | ||
693 | mesh_plink_inc_estab_count(sdata); | ||
694 | spin_unlock_bh(&sta->plink_lock); | ||
695 | mpl_dbg("Mesh plink with %s ESTABLISHED\n", | ||
696 | print_mac(mac, sta->addr)); | ||
697 | mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid, | ||
698 | plid, 0); | ||
699 | break; | ||
700 | default: | ||
701 | spin_unlock_bh(&sta->plink_lock); | ||
702 | break; | ||
703 | } | ||
704 | break; | ||
705 | |||
706 | case PLINK_ESTAB: | ||
707 | switch (event) { | ||
708 | case CLS_ACPT: | ||
709 | reason = cpu_to_le16(MESH_CLOSE_RCVD); | ||
710 | sta->reason = reason; | ||
711 | __mesh_plink_deactivate(sta); | ||
712 | sta->plink_state = PLINK_HOLDING; | ||
713 | llid = sta->llid; | ||
714 | mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata)); | ||
715 | spin_unlock_bh(&sta->plink_lock); | ||
716 | mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, | ||
717 | plid, reason); | ||
718 | break; | ||
719 | case OPN_ACPT: | ||
720 | llid = sta->llid; | ||
721 | spin_unlock_bh(&sta->plink_lock); | ||
722 | mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid, | ||
723 | plid, 0); | ||
724 | break; | ||
725 | default: | ||
726 | spin_unlock_bh(&sta->plink_lock); | ||
727 | break; | ||
728 | } | ||
729 | break; | ||
730 | case PLINK_HOLDING: | ||
731 | switch (event) { | ||
732 | case CLS_ACPT: | ||
733 | if (del_timer(&sta->plink_timer)) | ||
734 | sta->ignore_plink_timer = true; | ||
735 | mesh_plink_fsm_restart(sta); | ||
736 | spin_unlock_bh(&sta->plink_lock); | ||
737 | break; | ||
738 | case OPN_ACPT: | ||
739 | case CNF_ACPT: | ||
740 | case OPN_RJCT: | ||
741 | case CNF_RJCT: | ||
742 | llid = sta->llid; | ||
743 | reason = sta->reason; | ||
744 | spin_unlock_bh(&sta->plink_lock); | ||
745 | mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, | ||
746 | plid, reason); | ||
747 | break; | ||
748 | default: | ||
749 | spin_unlock_bh(&sta->plink_lock); | ||
750 | } | ||
751 | break; | ||
752 | default: | ||
753 | /* should not get here, PLINK_BLOCKED is dealt with at the | ||
754 | * beginning of the function | ||
755 | */ | ||
756 | spin_unlock_bh(&sta->plink_lock); | ||
757 | break; | ||
758 | } | ||
759 | |||
760 | rcu_read_unlock(); | ||
761 | } | ||
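mesh_plink_frame_tx() serializes the OPEN/CONFIRM/CLOSE exchanges of the peer link state machine into a variable-length Peer Link Management element, which mesh_rx_plink_frame() later decodes through the PLINK_GET_* macros; as the comment in the rx path notes, the roles swap on receive, so the llid carried in the frame is stored as the local plid. The struct below is only an illustration of the element body derived from those offsets; its name never appears in the patch.

/* Illustration only (the struct name is ours): body of the Peer Link
 * Management IE in its longest form, a PLINK_CLOSE carrying the peer's
 * link id, matching the PLINK_GET_* offsets above (0, 1, 3).
 *
 *   OPEN            (ie_len 3): subtype, llid
 *   CONFIRM         (ie_len 5): subtype, llid, plid
 *   CLOSE w/o plid  (ie_len 5): subtype, llid, reason
 *   CLOSE with plid (ie_len 7): subtype, llid, plid, reason
 */
struct plink_close_ie_sketch {
	u8     subtype;		/* PLINK_OPEN / PLINK_CONFIRM / PLINK_CLOSE */
	__le16 llid;		/* sender's link id; stored as our plid on rx */
	__le16 plid;		/* sender's copy of our link id */
	__le16 reason;		/* peer link cancel reason code */
} __attribute__((packed));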
diff --git a/net/mac80211/rc80211_pid_algo.c b/net/mac80211/rc80211_pid_algo.c index 3b77410588e7..a1993161de99 100644 --- a/net/mac80211/rc80211_pid_algo.c +++ b/net/mac80211/rc80211_pid_algo.c | |||
@@ -15,7 +15,7 @@ | |||
15 | #include <linux/debugfs.h> | 15 | #include <linux/debugfs.h> |
16 | #include <net/mac80211.h> | 16 | #include <net/mac80211.h> |
17 | #include "ieee80211_rate.h" | 17 | #include "ieee80211_rate.h" |
18 | 18 | #include "mesh.h" | |
19 | #include "rc80211_pid.h" | 19 | #include "rc80211_pid.h" |
20 | 20 | ||
21 | 21 | ||
@@ -63,6 +63,7 @@ | |||
63 | * RC_PID_ARITH_SHIFT. | 63 | * RC_PID_ARITH_SHIFT. |
64 | */ | 64 | */ |
65 | 65 | ||
66 | |||
66 | /* Adjust the rate while ensuring that we won't switch to a lower rate if it | 67 | /* Adjust the rate while ensuring that we won't switch to a lower rate if it |
67 | * exhibited a worse failed frames behaviour and we'll choose the highest rate | 68 | * exhibited a worse failed frames behaviour and we'll choose the highest rate |
68 | * whose failed frames behaviour is not worse than the one of the original rate | 69 | * whose failed frames behaviour is not worse than the one of the original rate |
@@ -72,14 +73,14 @@ static void rate_control_pid_adjust_rate(struct ieee80211_local *local, | |||
72 | struct rc_pid_rateinfo *rinfo) | 73 | struct rc_pid_rateinfo *rinfo) |
73 | { | 74 | { |
74 | struct ieee80211_sub_if_data *sdata; | 75 | struct ieee80211_sub_if_data *sdata; |
75 | struct ieee80211_hw_mode *mode; | 76 | struct ieee80211_supported_band *sband; |
76 | int cur_sorted, new_sorted, probe, tmp, n_bitrates; | 77 | int cur_sorted, new_sorted, probe, tmp, n_bitrates, band; |
77 | int cur = sta->txrate; | 78 | int cur = sta->txrate_idx; |
78 | |||
79 | sdata = IEEE80211_DEV_TO_SUB_IF(sta->dev); | ||
80 | 79 | ||
81 | mode = local->oper_hw_mode; | 80 | sdata = sta->sdata; |
82 | n_bitrates = mode->num_rates; | 81 | sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; |
82 | band = sband->band; | ||
83 | n_bitrates = sband->n_bitrates; | ||
83 | 84 | ||
84 | /* Map passed arguments to sorted values. */ | 85 | /* Map passed arguments to sorted values. */ |
85 | cur_sorted = rinfo[cur].rev_index; | 86 | cur_sorted = rinfo[cur].rev_index; |
@@ -97,20 +98,20 @@ static void rate_control_pid_adjust_rate(struct ieee80211_local *local, | |||
97 | /* Ensure that the rate decrease isn't disadvantageous. */ | 98 | /* Ensure that the rate decrease isn't disadvantageous. */ |
98 | for (probe = cur_sorted; probe >= new_sorted; probe--) | 99 | for (probe = cur_sorted; probe >= new_sorted; probe--) |
99 | if (rinfo[probe].diff <= rinfo[cur_sorted].diff && | 100 | if (rinfo[probe].diff <= rinfo[cur_sorted].diff && |
100 | rate_supported(sta, mode, rinfo[probe].index)) | 101 | rate_supported(sta, band, rinfo[probe].index)) |
101 | tmp = probe; | 102 | tmp = probe; |
102 | } else { | 103 | } else { |
103 | /* Look for rate increase with zero (or below) cost. */ | 104 | /* Look for rate increase with zero (or below) cost. */ |
104 | for (probe = new_sorted + 1; probe < n_bitrates; probe++) | 105 | for (probe = new_sorted + 1; probe < n_bitrates; probe++) |
105 | if (rinfo[probe].diff <= rinfo[new_sorted].diff && | 106 | if (rinfo[probe].diff <= rinfo[new_sorted].diff && |
106 | rate_supported(sta, mode, rinfo[probe].index)) | 107 | rate_supported(sta, band, rinfo[probe].index)) |
107 | tmp = probe; | 108 | tmp = probe; |
108 | } | 109 | } |
109 | 110 | ||
110 | /* Fit the rate found to the nearest supported rate. */ | 111 | /* Fit the rate found to the nearest supported rate. */ |
111 | do { | 112 | do { |
112 | if (rate_supported(sta, mode, rinfo[tmp].index)) { | 113 | if (rate_supported(sta, band, rinfo[tmp].index)) { |
113 | sta->txrate = rinfo[tmp].index; | 114 | sta->txrate_idx = rinfo[tmp].index; |
114 | break; | 115 | break; |
115 | } | 116 | } |
116 | if (adj < 0) | 117 | if (adj < 0) |
@@ -122,7 +123,7 @@ static void rate_control_pid_adjust_rate(struct ieee80211_local *local, | |||
122 | #ifdef CONFIG_MAC80211_DEBUGFS | 123 | #ifdef CONFIG_MAC80211_DEBUGFS |
123 | rate_control_pid_event_rate_change( | 124 | rate_control_pid_event_rate_change( |
124 | &((struct rc_pid_sta_info *)sta->rate_ctrl_priv)->events, | 125 | &((struct rc_pid_sta_info *)sta->rate_ctrl_priv)->events, |
125 | cur, mode->rates[cur].rate); | 126 | sta->txrate_idx, sband->bitrates[sta->txrate_idx].bitrate); |
126 | #endif | 127 | #endif |
127 | } | 128 | } |
128 | 129 | ||
@@ -147,9 +148,12 @@ static void rate_control_pid_sample(struct rc_pid_info *pinfo, | |||
147 | struct ieee80211_local *local, | 148 | struct ieee80211_local *local, |
148 | struct sta_info *sta) | 149 | struct sta_info *sta) |
149 | { | 150 | { |
151 | #ifdef CONFIG_MAC80211_MESH | ||
152 | struct ieee80211_sub_if_data *sdata = sta->sdata; | ||
153 | #endif | ||
150 | struct rc_pid_sta_info *spinfo = sta->rate_ctrl_priv; | 154 | struct rc_pid_sta_info *spinfo = sta->rate_ctrl_priv; |
151 | struct rc_pid_rateinfo *rinfo = pinfo->rinfo; | 155 | struct rc_pid_rateinfo *rinfo = pinfo->rinfo; |
152 | struct ieee80211_hw_mode *mode; | 156 | struct ieee80211_supported_band *sband; |
153 | u32 pf; | 157 | u32 pf; |
154 | s32 err_avg; | 158 | s32 err_avg; |
155 | u32 err_prop; | 159 | u32 err_prop; |
@@ -158,7 +162,7 @@ static void rate_control_pid_sample(struct rc_pid_info *pinfo, | |||
158 | int adj, i, j, tmp; | 162 | int adj, i, j, tmp; |
159 | unsigned long period; | 163 | unsigned long period; |
160 | 164 | ||
161 | mode = local->oper_hw_mode; | 165 | sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; |
162 | spinfo = sta->rate_ctrl_priv; | 166 | spinfo = sta->rate_ctrl_priv; |
163 | 167 | ||
164 | /* In case nothing happened during the previous control interval, turn | 168 | /* In case nothing happened during the previous control interval, turn |
@@ -177,25 +181,32 @@ static void rate_control_pid_sample(struct rc_pid_info *pinfo, | |||
177 | pf = spinfo->last_pf; | 181 | pf = spinfo->last_pf; |
178 | else { | 182 | else { |
179 | pf = spinfo->tx_num_failed * 100 / spinfo->tx_num_xmit; | 183 | pf = spinfo->tx_num_failed * 100 / spinfo->tx_num_xmit; |
184 | #ifdef CONFIG_MAC80211_MESH | ||
185 | if (pf == 100 && | ||
186 | sdata->vif.type == IEEE80211_IF_TYPE_MESH_POINT) | ||
187 | mesh_plink_broken(sta); | ||
188 | #endif | ||
180 | pf <<= RC_PID_ARITH_SHIFT; | 189 | pf <<= RC_PID_ARITH_SHIFT; |
190 | sta->fail_avg = ((pf + (spinfo->last_pf << 3)) / 9) | ||
191 | >> RC_PID_ARITH_SHIFT; | ||
181 | } | 192 | } |
182 | 193 | ||
183 | spinfo->tx_num_xmit = 0; | 194 | spinfo->tx_num_xmit = 0; |
184 | spinfo->tx_num_failed = 0; | 195 | spinfo->tx_num_failed = 0; |
185 | 196 | ||
186 | /* If we just switched rate, update the rate behaviour info. */ | 197 | /* If we just switched rate, update the rate behaviour info. */ |
187 | if (pinfo->oldrate != sta->txrate) { | 198 | if (pinfo->oldrate != sta->txrate_idx) { |
188 | 199 | ||
189 | i = rinfo[pinfo->oldrate].rev_index; | 200 | i = rinfo[pinfo->oldrate].rev_index; |
190 | j = rinfo[sta->txrate].rev_index; | 201 | j = rinfo[sta->txrate_idx].rev_index; |
191 | 202 | ||
192 | tmp = (pf - spinfo->last_pf); | 203 | tmp = (pf - spinfo->last_pf); |
193 | tmp = RC_PID_DO_ARITH_RIGHT_SHIFT(tmp, RC_PID_ARITH_SHIFT); | 204 | tmp = RC_PID_DO_ARITH_RIGHT_SHIFT(tmp, RC_PID_ARITH_SHIFT); |
194 | 205 | ||
195 | rinfo[j].diff = rinfo[i].diff + tmp; | 206 | rinfo[j].diff = rinfo[i].diff + tmp; |
196 | pinfo->oldrate = sta->txrate; | 207 | pinfo->oldrate = sta->txrate_idx; |
197 | } | 208 | } |
198 | rate_control_pid_normalize(pinfo, mode->num_rates); | 209 | rate_control_pid_normalize(pinfo, sband->n_bitrates); |
199 | 210 | ||
200 | /* Compute the proportional, integral and derivative errors. */ | 211 | /* Compute the proportional, integral and derivative errors. */ |
201 | err_prop = (pinfo->target << RC_PID_ARITH_SHIFT) - pf; | 212 | err_prop = (pinfo->target << RC_PID_ARITH_SHIFT) - pf; |
@@ -236,23 +247,27 @@ static void rate_control_pid_tx_status(void *priv, struct net_device *dev, | |||
236 | struct sta_info *sta; | 247 | struct sta_info *sta; |
237 | struct rc_pid_sta_info *spinfo; | 248 | struct rc_pid_sta_info *spinfo; |
238 | unsigned long period; | 249 | unsigned long period; |
250 | struct ieee80211_supported_band *sband; | ||
251 | |||
252 | rcu_read_lock(); | ||
239 | 253 | ||
240 | sta = sta_info_get(local, hdr->addr1); | 254 | sta = sta_info_get(local, hdr->addr1); |
255 | sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; | ||
241 | 256 | ||
242 | if (!sta) | 257 | if (!sta) |
243 | return; | 258 | goto unlock; |
244 | 259 | ||
245 | /* Don't update the state if we're not controlling the rate. */ | 260 | /* Don't update the state if we're not controlling the rate. */ |
246 | sdata = IEEE80211_DEV_TO_SUB_IF(sta->dev); | 261 | sdata = sta->sdata; |
247 | if (sdata->bss && sdata->bss->force_unicast_rateidx > -1) { | 262 | if (sdata->bss && sdata->bss->force_unicast_rateidx > -1) { |
248 | sta->txrate = sdata->bss->max_ratectrl_rateidx; | 263 | sta->txrate_idx = sdata->bss->max_ratectrl_rateidx; |
249 | return; | 264 | goto unlock; |
250 | } | 265 | } |
251 | 266 | ||
252 | /* Ignore all frames that were sent with a different rate than the rate | 267 | /* Ignore all frames that were sent with a different rate than the rate |
253 | * we currently advise mac80211 to use. */ | 268 | * we currently advise mac80211 to use. */ |
254 | if (status->control.rate != &local->oper_hw_mode->rates[sta->txrate]) | 269 | if (status->control.tx_rate != &sband->bitrates[sta->txrate_idx]) |
255 | goto ignore; | 270 | goto unlock; |
256 | 271 | ||
257 | spinfo = sta->rate_ctrl_priv; | 272 | spinfo = sta->rate_ctrl_priv; |
258 | spinfo->tx_num_xmit++; | 273 | spinfo->tx_num_xmit++; |
@@ -277,9 +292,6 @@ static void rate_control_pid_tx_status(void *priv, struct net_device *dev, | |||
277 | sta->tx_num_consecutive_failures++; | 292 | sta->tx_num_consecutive_failures++; |
278 | sta->tx_num_mpdu_fail++; | 293 | sta->tx_num_mpdu_fail++; |
279 | } else { | 294 | } else { |
280 | sta->last_ack_rssi[0] = sta->last_ack_rssi[1]; | ||
281 | sta->last_ack_rssi[1] = sta->last_ack_rssi[2]; | ||
282 | sta->last_ack_rssi[2] = status->ack_signal; | ||
283 | sta->tx_num_consecutive_failures = 0; | 295 | sta->tx_num_consecutive_failures = 0; |
284 | sta->tx_num_mpdu_ok++; | 296 | sta->tx_num_mpdu_ok++; |
285 | } | 297 | } |
@@ -293,12 +305,12 @@ static void rate_control_pid_tx_status(void *priv, struct net_device *dev, | |||
293 | if (time_after(jiffies, spinfo->last_sample + period)) | 305 | if (time_after(jiffies, spinfo->last_sample + period)) |
294 | rate_control_pid_sample(pinfo, local, sta); | 306 | rate_control_pid_sample(pinfo, local, sta); |
295 | 307 | ||
296 | ignore: | 308 | unlock: |
297 | sta_info_put(sta); | 309 | rcu_read_unlock(); |
298 | } | 310 | } |
299 | 311 | ||
300 | static void rate_control_pid_get_rate(void *priv, struct net_device *dev, | 312 | static void rate_control_pid_get_rate(void *priv, struct net_device *dev, |
301 | struct ieee80211_hw_mode *mode, | 313 | struct ieee80211_supported_band *sband, |
302 | struct sk_buff *skb, | 314 | struct sk_buff *skb, |
303 | struct rate_selection *sel) | 315 | struct rate_selection *sel) |
304 | { | 316 | { |
@@ -309,6 +321,8 @@ static void rate_control_pid_get_rate(void *priv, struct net_device *dev, | |||
309 | int rateidx; | 321 | int rateidx; |
310 | u16 fc; | 322 | u16 fc; |
311 | 323 | ||
324 | rcu_read_lock(); | ||
325 | |||
312 | sta = sta_info_get(local, hdr->addr1); | 326 | sta = sta_info_get(local, hdr->addr1); |
313 | 327 | ||
314 | /* Send management frames and broadcast/multicast data using lowest | 328 | /* Send management frames and broadcast/multicast data using lowest |
@@ -316,32 +330,31 @@ static void rate_control_pid_get_rate(void *priv, struct net_device *dev, | |||
316 | fc = le16_to_cpu(hdr->frame_control); | 330 | fc = le16_to_cpu(hdr->frame_control); |
317 | if ((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA || | 331 | if ((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA || |
318 | is_multicast_ether_addr(hdr->addr1) || !sta) { | 332 | is_multicast_ether_addr(hdr->addr1) || !sta) { |
319 | sel->rate = rate_lowest(local, mode, sta); | 333 | sel->rate = rate_lowest(local, sband, sta); |
320 | if (sta) | 334 | rcu_read_unlock(); |
321 | sta_info_put(sta); | ||
322 | return; | 335 | return; |
323 | } | 336 | } |
324 | 337 | ||
325 | /* If a forced rate is in effect, select it. */ | 338 | /* If a forced rate is in effect, select it. */ |
326 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 339 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
327 | if (sdata->bss && sdata->bss->force_unicast_rateidx > -1) | 340 | if (sdata->bss && sdata->bss->force_unicast_rateidx > -1) |
328 | sta->txrate = sdata->bss->force_unicast_rateidx; | 341 | sta->txrate_idx = sdata->bss->force_unicast_rateidx; |
329 | 342 | ||
330 | rateidx = sta->txrate; | 343 | rateidx = sta->txrate_idx; |
331 | 344 | ||
332 | if (rateidx >= mode->num_rates) | 345 | if (rateidx >= sband->n_bitrates) |
333 | rateidx = mode->num_rates - 1; | 346 | rateidx = sband->n_bitrates - 1; |
334 | 347 | ||
335 | sta->last_txrate = rateidx; | 348 | sta->last_txrate_idx = rateidx; |
336 | 349 | ||
337 | sta_info_put(sta); | 350 | rcu_read_unlock(); |
338 | 351 | ||
339 | sel->rate = &mode->rates[rateidx]; | 352 | sel->rate = &sband->bitrates[rateidx]; |
340 | 353 | ||
341 | #ifdef CONFIG_MAC80211_DEBUGFS | 354 | #ifdef CONFIG_MAC80211_DEBUGFS |
342 | rate_control_pid_event_tx_rate( | 355 | rate_control_pid_event_tx_rate( |
343 | &((struct rc_pid_sta_info *) sta->rate_ctrl_priv)->events, | 356 | &((struct rc_pid_sta_info *) sta->rate_ctrl_priv)->events, |
344 | rateidx, mode->rates[rateidx].rate); | 357 | rateidx, sband->bitrates[rateidx].bitrate); |
345 | #endif | 358 | #endif |
346 | } | 359 | } |
347 | 360 | ||
@@ -353,28 +366,33 @@ static void rate_control_pid_rate_init(void *priv, void *priv_sta, | |||
353 | * as we need to have IEEE 802.1X auth succeed immediately after assoc.. | 366 | * as we need to have IEEE 802.1X auth succeed immediately after assoc.. |
354 | * Until that method is implemented, we will use the lowest supported | 367 | * Until that method is implemented, we will use the lowest supported |
355 | * rate as a workaround. */ | 368 | * rate as a workaround. */ |
356 | sta->txrate = rate_lowest_index(local, local->oper_hw_mode, sta); | 369 | struct ieee80211_supported_band *sband; |
370 | |||
371 | sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; | ||
372 | sta->txrate_idx = rate_lowest_index(local, sband, sta); | ||
373 | sta->fail_avg = 0; | ||
357 | } | 374 | } |
358 | 375 | ||
359 | static void *rate_control_pid_alloc(struct ieee80211_local *local) | 376 | static void *rate_control_pid_alloc(struct ieee80211_local *local) |
360 | { | 377 | { |
361 | struct rc_pid_info *pinfo; | 378 | struct rc_pid_info *pinfo; |
362 | struct rc_pid_rateinfo *rinfo; | 379 | struct rc_pid_rateinfo *rinfo; |
363 | struct ieee80211_hw_mode *mode; | 380 | struct ieee80211_supported_band *sband; |
364 | int i, j, tmp; | 381 | int i, j, tmp; |
365 | bool s; | 382 | bool s; |
366 | #ifdef CONFIG_MAC80211_DEBUGFS | 383 | #ifdef CONFIG_MAC80211_DEBUGFS |
367 | struct rc_pid_debugfs_entries *de; | 384 | struct rc_pid_debugfs_entries *de; |
368 | #endif | 385 | #endif |
369 | 386 | ||
387 | sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; | ||
388 | |||
370 | pinfo = kmalloc(sizeof(*pinfo), GFP_ATOMIC); | 389 | pinfo = kmalloc(sizeof(*pinfo), GFP_ATOMIC); |
371 | if (!pinfo) | 390 | if (!pinfo) |
372 | return NULL; | 391 | return NULL; |
373 | 392 | ||
374 | /* We can safely assume that oper_hw_mode won't change unless we get | 393 | /* We can safely assume that sband won't change unless we get |
375 | * reinitialized. */ | 394 | * reinitialized. */ |
376 | mode = local->oper_hw_mode; | 395 | rinfo = kmalloc(sizeof(*rinfo) * sband->n_bitrates, GFP_ATOMIC); |
377 | rinfo = kmalloc(sizeof(*rinfo) * mode->num_rates, GFP_ATOMIC); | ||
378 | if (!rinfo) { | 396 | if (!rinfo) { |
379 | kfree(pinfo); | 397 | kfree(pinfo); |
380 | return NULL; | 398 | return NULL; |
@@ -383,7 +401,7 @@ static void *rate_control_pid_alloc(struct ieee80211_local *local) | |||
383 | /* Sort the rates. This is optimized for the most common case (i.e. | 401 | /* Sort the rates. This is optimized for the most common case (i.e. |
384 | * almost-sorted CCK+OFDM rates). Kind of bubble-sort with reversed | 402 | * almost-sorted CCK+OFDM rates). Kind of bubble-sort with reversed |
385 | * mapping too. */ | 403 | * mapping too. */ |
386 | for (i = 0; i < mode->num_rates; i++) { | 404 | for (i = 0; i < sband->n_bitrates; i++) { |
387 | rinfo[i].index = i; | 405 | rinfo[i].index = i; |
388 | rinfo[i].rev_index = i; | 406 | rinfo[i].rev_index = i; |
389 | if (pinfo->fast_start) | 407 | if (pinfo->fast_start) |
@@ -391,11 +409,11 @@ static void *rate_control_pid_alloc(struct ieee80211_local *local) | |||
391 | else | 409 | else |
392 | rinfo[i].diff = i * pinfo->norm_offset; | 410 | rinfo[i].diff = i * pinfo->norm_offset; |
393 | } | 411 | } |
394 | for (i = 1; i < mode->num_rates; i++) { | 412 | for (i = 1; i < sband->n_bitrates; i++) { |
395 | s = 0; | 413 | s = 0; |
396 | for (j = 0; j < mode->num_rates - i; j++) | 414 | for (j = 0; j < sband->n_bitrates - i; j++) |
397 | if (unlikely(mode->rates[rinfo[j].index].rate > | 415 | if (unlikely(sband->bitrates[rinfo[j].index].bitrate > |
398 | mode->rates[rinfo[j + 1].index].rate)) { | 416 | sband->bitrates[rinfo[j + 1].index].bitrate)) { |
399 | tmp = rinfo[j].index; | 417 | tmp = rinfo[j].index; |
400 | rinfo[j].index = rinfo[j + 1].index; | 418 | rinfo[j].index = rinfo[j + 1].index; |
401 | rinfo[j + 1].index = tmp; | 419 | rinfo[j + 1].index = tmp; |
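The sorting loop above keeps a forward mapping (sorted position -> hardware rate index) and a reverse mapping up to date while it bubble-sorts the per-band bitrate table. A minimal standalone sketch of that bookkeeping, using a stand-in struct and a made-up 802.11b/g bitrate table rather than the real struct rc_pid_rateinfo and struct ieee80211_supported_band; the early-exit flag (s) from the original is dropped for brevity:

/* Standalone model of the rate-sorting pass: sort rate indices by
 * bitrate (units of 100 kbps) while keeping a reverse mapping, as the
 * PID algorithm does after the hw_mode -> supported_band conversion. */
#include <stdio.h>

struct model_rateinfo {
	int index;      /* sorted position -> hardware rate index */
	int rev_index;  /* hardware rate index -> sorted position */
};

int main(void)
{
	/* CCK rates followed by OFDM rates, i.e. almost sorted already
	 * (the common case the comment in the patch refers to). */
	int bitrate[] = { 10, 20, 55, 110, 60, 90, 120, 180, 240, 360, 480, 540 };
	int n = sizeof(bitrate) / sizeof(bitrate[0]);
	struct model_rateinfo rinfo[12];
	int i, j, tmp;

	for (i = 0; i < n; i++) {
		rinfo[i].index = i;
		rinfo[i].rev_index = i;
	}

	/* Bubble sort keyed on bitrate; rev_index is patched up after each
	 * swap so both directions of the mapping stay consistent. */
	for (i = 1; i < n; i++)
		for (j = 0; j < n - i; j++)
			if (bitrate[rinfo[j].index] > bitrate[rinfo[j + 1].index]) {
				tmp = rinfo[j].index;
				rinfo[j].index = rinfo[j + 1].index;
				rinfo[j + 1].index = tmp;
				rinfo[rinfo[j].index].rev_index = j;
				rinfo[rinfo[j + 1].index].rev_index = j + 1;
			}

	for (i = 0; i < n; i++)
		printf("sorted[%2d] -> hw index %2d (%d x 100 kbps)\n",
		       i, rinfo[i].index, bitrate[rinfo[i].index]);
	return 0;
}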
diff --git a/net/mac80211/rc80211_simple.c b/net/mac80211/rc80211_simple.c deleted file mode 100644 index 9a78b116acff..000000000000 --- a/net/mac80211/rc80211_simple.c +++ /dev/null | |||
@@ -1,400 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright 2002-2005, Instant802 Networks, Inc. | ||
3 | * Copyright 2005, Devicescape Software, Inc. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License version 2 as | ||
7 | * published by the Free Software Foundation. | ||
8 | */ | ||
9 | |||
10 | #include <linux/init.h> | ||
11 | #include <linux/netdevice.h> | ||
12 | #include <linux/types.h> | ||
13 | #include <linux/slab.h> | ||
14 | #include <linux/skbuff.h> | ||
15 | #include <linux/compiler.h> | ||
16 | #include <linux/module.h> | ||
17 | |||
18 | #include <net/mac80211.h> | ||
19 | #include "ieee80211_i.h" | ||
20 | #include "ieee80211_rate.h" | ||
21 | #include "debugfs.h" | ||
22 | |||
23 | |||
24 | /* This is a minimal implementation of TX rate controlling that can be used | ||
25 | * as the default when no improved mechanisms are available. */ | ||
26 | |||
27 | #define RATE_CONTROL_NUM_DOWN 20 | ||
28 | #define RATE_CONTROL_NUM_UP 15 | ||
29 | |||
30 | #define RATE_CONTROL_EMERG_DEC 2 | ||
31 | #define RATE_CONTROL_INTERVAL (HZ / 20) | ||
32 | #define RATE_CONTROL_MIN_TX 10 | ||
33 | |||
34 | static void rate_control_rate_inc(struct ieee80211_local *local, | ||
35 | struct sta_info *sta) | ||
36 | { | ||
37 | struct ieee80211_sub_if_data *sdata; | ||
38 | struct ieee80211_hw_mode *mode; | ||
39 | int i = sta->txrate; | ||
40 | int maxrate; | ||
41 | |||
42 | sdata = IEEE80211_DEV_TO_SUB_IF(sta->dev); | ||
43 | if (sdata->bss && sdata->bss->force_unicast_rateidx > -1) { | ||
44 | /* forced unicast rate - do not change STA rate */ | ||
45 | return; | ||
46 | } | ||
47 | |||
48 | mode = local->oper_hw_mode; | ||
49 | maxrate = sdata->bss ? sdata->bss->max_ratectrl_rateidx : -1; | ||
50 | |||
51 | if (i > mode->num_rates) | ||
52 | i = mode->num_rates - 2; | ||
53 | |||
54 | while (i + 1 < mode->num_rates) { | ||
55 | i++; | ||
56 | if (sta->supp_rates & BIT(i) && | ||
57 | mode->rates[i].flags & IEEE80211_RATE_SUPPORTED && | ||
58 | (maxrate < 0 || i <= maxrate)) { | ||
59 | sta->txrate = i; | ||
60 | break; | ||
61 | } | ||
62 | } | ||
63 | } | ||
64 | |||
65 | |||
66 | static void rate_control_rate_dec(struct ieee80211_local *local, | ||
67 | struct sta_info *sta) | ||
68 | { | ||
69 | struct ieee80211_sub_if_data *sdata; | ||
70 | struct ieee80211_hw_mode *mode; | ||
71 | int i = sta->txrate; | ||
72 | |||
73 | sdata = IEEE80211_DEV_TO_SUB_IF(sta->dev); | ||
74 | if (sdata->bss && sdata->bss->force_unicast_rateidx > -1) { | ||
75 | /* forced unicast rate - do not change STA rate */ | ||
76 | return; | ||
77 | } | ||
78 | |||
79 | mode = local->oper_hw_mode; | ||
80 | if (i > mode->num_rates) | ||
81 | i = mode->num_rates; | ||
82 | |||
83 | while (i > 0) { | ||
84 | i--; | ||
85 | if (sta->supp_rates & BIT(i) && | ||
86 | mode->rates[i].flags & IEEE80211_RATE_SUPPORTED) { | ||
87 | sta->txrate = i; | ||
88 | break; | ||
89 | } | ||
90 | } | ||
91 | } | ||
92 | |||
93 | struct global_rate_control { | ||
94 | int dummy; | ||
95 | }; | ||
96 | |||
97 | struct sta_rate_control { | ||
98 | unsigned long last_rate_change; | ||
99 | u32 tx_num_failures; | ||
100 | u32 tx_num_xmit; | ||
101 | |||
102 | unsigned long avg_rate_update; | ||
103 | u32 tx_avg_rate_sum; | ||
104 | u32 tx_avg_rate_num; | ||
105 | |||
106 | #ifdef CONFIG_MAC80211_DEBUGFS | ||
107 | struct dentry *tx_avg_rate_sum_dentry; | ||
108 | struct dentry *tx_avg_rate_num_dentry; | ||
109 | #endif | ||
110 | }; | ||
111 | |||
112 | |||
113 | static void rate_control_simple_tx_status(void *priv, struct net_device *dev, | ||
114 | struct sk_buff *skb, | ||
115 | struct ieee80211_tx_status *status) | ||
116 | { | ||
117 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
118 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | ||
119 | struct sta_info *sta; | ||
120 | struct sta_rate_control *srctrl; | ||
121 | |||
122 | sta = sta_info_get(local, hdr->addr1); | ||
123 | |||
124 | if (!sta) | ||
125 | return; | ||
126 | |||
127 | srctrl = sta->rate_ctrl_priv; | ||
128 | srctrl->tx_num_xmit++; | ||
129 | if (status->excessive_retries) { | ||
130 | srctrl->tx_num_failures++; | ||
131 | sta->tx_retry_failed++; | ||
132 | sta->tx_num_consecutive_failures++; | ||
133 | sta->tx_num_mpdu_fail++; | ||
134 | } else { | ||
135 | sta->last_ack_rssi[0] = sta->last_ack_rssi[1]; | ||
136 | sta->last_ack_rssi[1] = sta->last_ack_rssi[2]; | ||
137 | sta->last_ack_rssi[2] = status->ack_signal; | ||
138 | sta->tx_num_consecutive_failures = 0; | ||
139 | sta->tx_num_mpdu_ok++; | ||
140 | } | ||
141 | sta->tx_retry_count += status->retry_count; | ||
142 | sta->tx_num_mpdu_fail += status->retry_count; | ||
143 | |||
144 | if (time_after(jiffies, | ||
145 | srctrl->last_rate_change + RATE_CONTROL_INTERVAL) && | ||
146 | srctrl->tx_num_xmit > RATE_CONTROL_MIN_TX) { | ||
147 | u32 per_failed; | ||
148 | srctrl->last_rate_change = jiffies; | ||
149 | |||
150 | per_failed = (100 * sta->tx_num_mpdu_fail) / | ||
151 | (sta->tx_num_mpdu_fail + sta->tx_num_mpdu_ok); | ||
152 | /* TODO: calculate average per_failed to make adjusting | ||
153 | * parameters easier */ | ||
154 | #if 0 | ||
155 | if (net_ratelimit()) { | ||
156 | printk(KERN_DEBUG "MPDU fail=%d ok=%d per_failed=%d\n", | ||
157 | sta->tx_num_mpdu_fail, sta->tx_num_mpdu_ok, | ||
158 | per_failed); | ||
159 | } | ||
160 | #endif | ||
161 | |||
162 | /* | ||
163 | * XXX: Make these configurable once we have an | ||
164 | * interface to the rate control algorithms | ||
165 | */ | ||
166 | if (per_failed > RATE_CONTROL_NUM_DOWN) { | ||
167 | rate_control_rate_dec(local, sta); | ||
168 | } else if (per_failed < RATE_CONTROL_NUM_UP) { | ||
169 | rate_control_rate_inc(local, sta); | ||
170 | } | ||
171 | srctrl->tx_avg_rate_sum += status->control.rate->rate; | ||
172 | srctrl->tx_avg_rate_num++; | ||
173 | srctrl->tx_num_failures = 0; | ||
174 | srctrl->tx_num_xmit = 0; | ||
175 | } else if (sta->tx_num_consecutive_failures >= | ||
176 | RATE_CONTROL_EMERG_DEC) { | ||
177 | rate_control_rate_dec(local, sta); | ||
178 | } | ||
179 | |||
180 | if (srctrl->avg_rate_update + 60 * HZ < jiffies) { | ||
181 | srctrl->avg_rate_update = jiffies; | ||
182 | if (srctrl->tx_avg_rate_num > 0) { | ||
183 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | ||
184 | DECLARE_MAC_BUF(mac); | ||
185 | printk(KERN_DEBUG "%s: STA %s Average rate: " | ||
186 | "%d (%d/%d)\n", | ||
187 | dev->name, print_mac(mac, sta->addr), | ||
188 | srctrl->tx_avg_rate_sum / | ||
189 | srctrl->tx_avg_rate_num, | ||
190 | srctrl->tx_avg_rate_sum, | ||
191 | srctrl->tx_avg_rate_num); | ||
192 | #endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ | ||
193 | srctrl->tx_avg_rate_sum = 0; | ||
194 | srctrl->tx_avg_rate_num = 0; | ||
195 | } | ||
196 | } | ||
197 | |||
198 | sta_info_put(sta); | ||
199 | } | ||
200 | |||
201 | |||
202 | static void | ||
203 | rate_control_simple_get_rate(void *priv, struct net_device *dev, | ||
204 | struct ieee80211_hw_mode *mode, | ||
205 | struct sk_buff *skb, | ||
206 | struct rate_selection *sel) | ||
207 | { | ||
208 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
209 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | ||
210 | struct ieee80211_sub_if_data *sdata; | ||
211 | struct sta_info *sta; | ||
212 | int rateidx; | ||
213 | u16 fc; | ||
214 | |||
215 | sta = sta_info_get(local, hdr->addr1); | ||
216 | |||
217 | /* Send management frames and broadcast/multicast data using lowest | ||
218 | * rate. */ | ||
219 | fc = le16_to_cpu(hdr->frame_control); | ||
220 | if ((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA || | ||
221 | is_multicast_ether_addr(hdr->addr1) || !sta) { | ||
222 | sel->rate = rate_lowest(local, mode, sta); | ||
223 | if (sta) | ||
224 | sta_info_put(sta); | ||
225 | return; | ||
226 | } | ||
227 | |||
228 | /* If a forced rate is in effect, select it. */ | ||
229 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
230 | if (sdata->bss && sdata->bss->force_unicast_rateidx > -1) | ||
231 | sta->txrate = sdata->bss->force_unicast_rateidx; | ||
232 | |||
233 | rateidx = sta->txrate; | ||
234 | |||
235 | if (rateidx >= mode->num_rates) | ||
236 | rateidx = mode->num_rates - 1; | ||
237 | |||
238 | sta->last_txrate = rateidx; | ||
239 | |||
240 | sta_info_put(sta); | ||
241 | |||
242 | sel->rate = &mode->rates[rateidx]; | ||
243 | } | ||
244 | |||
245 | |||
246 | static void rate_control_simple_rate_init(void *priv, void *priv_sta, | ||
247 | struct ieee80211_local *local, | ||
248 | struct sta_info *sta) | ||
249 | { | ||
250 | struct ieee80211_hw_mode *mode; | ||
251 | int i; | ||
252 | sta->txrate = 0; | ||
253 | mode = local->oper_hw_mode; | ||
254 | /* TODO: This routine should consider using RSSI from previous packets | ||
255 | * as we need to have IEEE 802.1X auth succeed immediately after assoc.. | ||
256 | * Until that method is implemented, we will use the lowest supported rate | ||
257 | * as a workaround, */ | ||
258 | for (i = 0; i < mode->num_rates; i++) { | ||
259 | if ((sta->supp_rates & BIT(i)) && | ||
260 | (mode->rates[i].flags & IEEE80211_RATE_SUPPORTED)) { | ||
261 | sta->txrate = i; | ||
262 | break; | ||
263 | } | ||
264 | } | ||
265 | } | ||
266 | |||
267 | |||
268 | static void * rate_control_simple_alloc(struct ieee80211_local *local) | ||
269 | { | ||
270 | struct global_rate_control *rctrl; | ||
271 | |||
272 | rctrl = kzalloc(sizeof(*rctrl), GFP_ATOMIC); | ||
273 | |||
274 | return rctrl; | ||
275 | } | ||
276 | |||
277 | |||
278 | static void rate_control_simple_free(void *priv) | ||
279 | { | ||
280 | struct global_rate_control *rctrl = priv; | ||
281 | kfree(rctrl); | ||
282 | } | ||
283 | |||
284 | |||
285 | static void rate_control_simple_clear(void *priv) | ||
286 | { | ||
287 | } | ||
288 | |||
289 | |||
290 | static void * rate_control_simple_alloc_sta(void *priv, gfp_t gfp) | ||
291 | { | ||
292 | struct sta_rate_control *rctrl; | ||
293 | |||
294 | rctrl = kzalloc(sizeof(*rctrl), gfp); | ||
295 | |||
296 | return rctrl; | ||
297 | } | ||
298 | |||
299 | |||
300 | static void rate_control_simple_free_sta(void *priv, void *priv_sta) | ||
301 | { | ||
302 | struct sta_rate_control *rctrl = priv_sta; | ||
303 | kfree(rctrl); | ||
304 | } | ||
305 | |||
306 | #ifdef CONFIG_MAC80211_DEBUGFS | ||
307 | |||
308 | static int open_file_generic(struct inode *inode, struct file *file) | ||
309 | { | ||
310 | file->private_data = inode->i_private; | ||
311 | return 0; | ||
312 | } | ||
313 | |||
314 | static ssize_t sta_tx_avg_rate_sum_read(struct file *file, | ||
315 | char __user *userbuf, | ||
316 | size_t count, loff_t *ppos) | ||
317 | { | ||
318 | struct sta_rate_control *srctrl = file->private_data; | ||
319 | char buf[20]; | ||
320 | |||
321 | sprintf(buf, "%d\n", srctrl->tx_avg_rate_sum); | ||
322 | return simple_read_from_buffer(userbuf, count, ppos, buf, strlen(buf)); | ||
323 | } | ||
324 | |||
325 | static const struct file_operations sta_tx_avg_rate_sum_ops = { | ||
326 | .read = sta_tx_avg_rate_sum_read, | ||
327 | .open = open_file_generic, | ||
328 | }; | ||
329 | |||
330 | static ssize_t sta_tx_avg_rate_num_read(struct file *file, | ||
331 | char __user *userbuf, | ||
332 | size_t count, loff_t *ppos) | ||
333 | { | ||
334 | struct sta_rate_control *srctrl = file->private_data; | ||
335 | char buf[20]; | ||
336 | |||
337 | sprintf(buf, "%d\n", srctrl->tx_avg_rate_num); | ||
338 | return simple_read_from_buffer(userbuf, count, ppos, buf, strlen(buf)); | ||
339 | } | ||
340 | |||
341 | static const struct file_operations sta_tx_avg_rate_num_ops = { | ||
342 | .read = sta_tx_avg_rate_num_read, | ||
343 | .open = open_file_generic, | ||
344 | }; | ||
345 | |||
346 | static void rate_control_simple_add_sta_debugfs(void *priv, void *priv_sta, | ||
347 | struct dentry *dir) | ||
348 | { | ||
349 | struct sta_rate_control *srctrl = priv_sta; | ||
350 | |||
351 | srctrl->tx_avg_rate_num_dentry = | ||
352 | debugfs_create_file("rc_simple_sta_tx_avg_rate_num", 0400, | ||
353 | dir, srctrl, &sta_tx_avg_rate_num_ops); | ||
354 | srctrl->tx_avg_rate_sum_dentry = | ||
355 | debugfs_create_file("rc_simple_sta_tx_avg_rate_sum", 0400, | ||
356 | dir, srctrl, &sta_tx_avg_rate_sum_ops); | ||
357 | } | ||
358 | |||
359 | static void rate_control_simple_remove_sta_debugfs(void *priv, void *priv_sta) | ||
360 | { | ||
361 | struct sta_rate_control *srctrl = priv_sta; | ||
362 | |||
363 | debugfs_remove(srctrl->tx_avg_rate_sum_dentry); | ||
364 | debugfs_remove(srctrl->tx_avg_rate_num_dentry); | ||
365 | } | ||
366 | #endif | ||
367 | |||
368 | static struct rate_control_ops mac80211_rcsimple = { | ||
369 | .name = "simple", | ||
370 | .tx_status = rate_control_simple_tx_status, | ||
371 | .get_rate = rate_control_simple_get_rate, | ||
372 | .rate_init = rate_control_simple_rate_init, | ||
373 | .clear = rate_control_simple_clear, | ||
374 | .alloc = rate_control_simple_alloc, | ||
375 | .free = rate_control_simple_free, | ||
376 | .alloc_sta = rate_control_simple_alloc_sta, | ||
377 | .free_sta = rate_control_simple_free_sta, | ||
378 | #ifdef CONFIG_MAC80211_DEBUGFS | ||
379 | .add_sta_debugfs = rate_control_simple_add_sta_debugfs, | ||
380 | .remove_sta_debugfs = rate_control_simple_remove_sta_debugfs, | ||
381 | #endif | ||
382 | }; | ||
383 | |||
384 | MODULE_LICENSE("GPL"); | ||
385 | MODULE_DESCRIPTION("Simple rate control algorithm"); | ||
386 | |||
387 | int __init rc80211_simple_init(void) | ||
388 | { | ||
389 | return ieee80211_rate_control_register(&mac80211_rcsimple); | ||
390 | } | ||
391 | |||
392 | void rc80211_simple_exit(void) | ||
393 | { | ||
394 | ieee80211_rate_control_unregister(&mac80211_rcsimple); | ||
395 | } | ||
396 | |||
397 | #ifdef CONFIG_MAC80211_RC_SIMPLE_MODULE | ||
398 | module_init(rc80211_simple_init); | ||
399 | module_exit(rc80211_simple_exit); | ||
400 | #endif | ||
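For reference, the core of the algorithm deleted above is a fixed-threshold controller: once at least RATE_CONTROL_MIN_TX frames have gone out in an interval, it compares the failure percentage against RATE_CONTROL_NUM_DOWN and RATE_CONTROL_NUM_UP and steps the rate accordingly. A standalone sketch of just that decision, with the thresholds taken from the removed file; the jiffies-based timing and the per-station counters are omitted, and simple_rate_step() is a name invented for this illustration:

#include <stdio.h>

#define RATE_CONTROL_NUM_DOWN 20
#define RATE_CONTROL_NUM_UP   15
#define RATE_CONTROL_MIN_TX   10

/* Returns -1 (step the rate down), +1 (step it up) or 0 (keep it). */
static int simple_rate_step(unsigned int mpdu_fail, unsigned int mpdu_ok)
{
	unsigned int per_failed;

	if (mpdu_fail + mpdu_ok < RATE_CONTROL_MIN_TX)
		return 0;	/* not enough samples yet */

	per_failed = (100 * mpdu_fail) / (mpdu_fail + mpdu_ok);

	if (per_failed > RATE_CONTROL_NUM_DOWN)
		return -1;
	if (per_failed < RATE_CONTROL_NUM_UP)
		return 1;
	return 0;
}

int main(void)
{
	printf("3 failed / 47 ok  -> step %d\n", simple_rate_step(3, 47));
	printf("15 failed / 35 ok -> step %d\n", simple_rate_step(15, 35));
	printf("9 failed / 41 ok  -> step %d\n", simple_rate_step(9, 41));
	return 0;
}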
diff --git a/net/mac80211/regdomain.c b/net/mac80211/regdomain.c deleted file mode 100644 index f42678fa62d1..000000000000 --- a/net/mac80211/regdomain.c +++ /dev/null | |||
@@ -1,152 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright 2002-2005, Instant802 Networks, Inc. | ||
3 | * Copyright 2005-2006, Devicescape Software, Inc. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License version 2 as | ||
7 | * published by the Free Software Foundation. | ||
8 | */ | ||
9 | |||
10 | /* | ||
11 | * This regulatory domain control implementation is known to be incomplete | ||
12 | * and confusing. mac80211 regulatory domain control will be significantly | ||
13 | * reworked in the not-too-distant future. | ||
14 | * | ||
15 | * For now, drivers wishing to control which channels are and aren't available | ||
16 | * are advised as follows: | ||
17 | * - set the IEEE80211_HW_DEFAULT_REG_DOMAIN_CONFIGURED flag | ||
18 | * - continue to include *ALL* possible channels in the modes registered | ||
19 | * through ieee80211_register_hwmode() | ||
20 | * - for each allowable ieee80211_channel structure registered in the above | ||
21 | * call, set the flag member to some meaningful value such as | ||
22 | * IEEE80211_CHAN_W_SCAN | IEEE80211_CHAN_W_ACTIVE_SCAN | | ||
23 | * IEEE80211_CHAN_W_IBSS. | ||
24 | * - leave flag as 0 for non-allowable channels | ||
25 | * | ||
26 | * The usual implementation is for a driver to read a device EEPROM to | ||
27 | * determine which regulatory domain it should be operating under, then | ||
28 | * looking up the allowable channels in a driver-local table, then performing | ||
29 | * the above. | ||
30 | */ | ||
31 | |||
32 | #include <linux/module.h> | ||
33 | #include <linux/netdevice.h> | ||
34 | #include <net/mac80211.h> | ||
35 | #include "ieee80211_i.h" | ||
36 | |||
37 | static int ieee80211_regdom = 0x10; /* FCC */ | ||
38 | module_param(ieee80211_regdom, int, 0444); | ||
39 | MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain; 64=MKK"); | ||
40 | |||
41 | /* | ||
42 | * If firmware is upgraded by the vendor, additional channels can be used based | ||
43 | * on the new Japanese regulatory rules. This is indicated by setting | ||
44 | * ieee80211_japan_5ghz module parameter to one when loading the 80211 kernel | ||
45 | * module. | ||
46 | */ | ||
47 | static int ieee80211_japan_5ghz /* = 0 */; | ||
48 | module_param(ieee80211_japan_5ghz, int, 0444); | ||
49 | MODULE_PARM_DESC(ieee80211_japan_5ghz, "Vendor-updated firmware for 5 GHz"); | ||
50 | |||
51 | |||
52 | struct ieee80211_channel_range { | ||
53 | short start_freq; | ||
54 | short end_freq; | ||
55 | unsigned char power_level; | ||
56 | unsigned char antenna_max; | ||
57 | }; | ||
58 | |||
59 | static const struct ieee80211_channel_range ieee80211_fcc_channels[] = { | ||
60 | { 2412, 2462, 27, 6 } /* IEEE 802.11b/g, channels 1..11 */, | ||
61 | { 5180, 5240, 17, 6 } /* IEEE 802.11a, channels 36..48 */, | ||
62 | { 5260, 5320, 23, 6 } /* IEEE 802.11a, channels 52..64 */, | ||
63 | { 5745, 5825, 30, 6 } /* IEEE 802.11a, channels 149..165, outdoor */, | ||
64 | { 0 } | ||
65 | }; | ||
66 | |||
67 | static const struct ieee80211_channel_range ieee80211_mkk_channels[] = { | ||
68 | { 2412, 2472, 20, 6 } /* IEEE 802.11b/g, channels 1..13 */, | ||
69 | { 5170, 5240, 20, 6 } /* IEEE 802.11a, channels 34..48 */, | ||
70 | { 5260, 5320, 20, 6 } /* IEEE 802.11a, channels 52..64 */, | ||
71 | { 0 } | ||
72 | }; | ||
73 | |||
74 | |||
75 | static const struct ieee80211_channel_range *channel_range = | ||
76 | ieee80211_fcc_channels; | ||
77 | |||
78 | |||
79 | static void ieee80211_unmask_channel(int mode, struct ieee80211_channel *chan) | ||
80 | { | ||
81 | int i; | ||
82 | |||
83 | chan->flag = 0; | ||
84 | |||
85 | for (i = 0; channel_range[i].start_freq; i++) { | ||
86 | const struct ieee80211_channel_range *r = &channel_range[i]; | ||
87 | if (r->start_freq <= chan->freq && r->end_freq >= chan->freq) { | ||
88 | if (ieee80211_regdom == 64 && !ieee80211_japan_5ghz && | ||
89 | chan->freq >= 5260 && chan->freq <= 5320) { | ||
90 | /* | ||
91 | * Skip new channels in Japan since the | ||
92 | * firmware was not marked having been upgraded | ||
93 | * by the vendor. | ||
94 | */ | ||
95 | continue; | ||
96 | } | ||
97 | |||
98 | if (ieee80211_regdom == 0x10 && | ||
99 | (chan->freq == 5190 || chan->freq == 5210 || | ||
100 | chan->freq == 5230)) { | ||
101 | /* Skip MKK channels when in FCC domain. */ | ||
102 | continue; | ||
103 | } | ||
104 | |||
105 | chan->flag |= IEEE80211_CHAN_W_SCAN | | ||
106 | IEEE80211_CHAN_W_ACTIVE_SCAN | | ||
107 | IEEE80211_CHAN_W_IBSS; | ||
108 | chan->power_level = r->power_level; | ||
109 | chan->antenna_max = r->antenna_max; | ||
110 | |||
111 | if (ieee80211_regdom == 64 && | ||
112 | (chan->freq == 5170 || chan->freq == 5190 || | ||
113 | chan->freq == 5210 || chan->freq == 5230)) { | ||
114 | /* | ||
115 | * New regulatory rules in Japan have backwards | ||
116 | * compatibility with old channels in 5.15-5.25 | ||
117 | * GHz band, but the station is not allowed to | ||
118 | * use active scan on these old channels. | ||
119 | */ | ||
120 | chan->flag &= ~IEEE80211_CHAN_W_ACTIVE_SCAN; | ||
121 | } | ||
122 | |||
123 | if (ieee80211_regdom == 64 && | ||
124 | (chan->freq == 5260 || chan->freq == 5280 || | ||
125 | chan->freq == 5300 || chan->freq == 5320)) { | ||
126 | /* | ||
127 | * IBSS is not allowed on 5.25-5.35 GHz band | ||
128 | * due to radar detection requirements. | ||
129 | */ | ||
130 | chan->flag &= ~IEEE80211_CHAN_W_IBSS; | ||
131 | } | ||
132 | |||
133 | break; | ||
134 | } | ||
135 | } | ||
136 | } | ||
137 | |||
138 | |||
139 | void ieee80211_set_default_regdomain(struct ieee80211_hw_mode *mode) | ||
140 | { | ||
141 | int c; | ||
142 | for (c = 0; c < mode->num_channels; c++) | ||
143 | ieee80211_unmask_channel(mode->mode, &mode->channels[c]); | ||
144 | } | ||
145 | |||
146 | |||
147 | void ieee80211_regdomain_init(void) | ||
148 | { | ||
149 | if (ieee80211_regdom == 0x40) | ||
150 | channel_range = ieee80211_mkk_channels; | ||
151 | } | ||
152 | |||
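The removed file above enables a channel only when its centre frequency falls inside one of the regulatory domain's allowed ranges, and then copies that range's power limits onto the channel. A compact standalone sketch of that range lookup, reproducing only the FCC table from the deleted file and leaving out the Japan (MKK) special cases; lookup_range() is an invented helper name:

#include <stdio.h>

struct chan_range {
	short start_freq;
	short end_freq;
	unsigned char power_level;
	unsigned char antenna_max;
};

static const struct chan_range fcc_channels[] = {
	{ 2412, 2462, 27, 6 },	/* 802.11b/g channels 1..11 */
	{ 5180, 5240, 17, 6 },	/* 802.11a channels 36..48 */
	{ 5260, 5320, 23, 6 },	/* 802.11a channels 52..64 */
	{ 5745, 5825, 30, 6 },	/* 802.11a channels 149..165 */
	{ 0 }
};

/* Returns the matching range, or NULL if the channel stays disabled. */
static const struct chan_range *lookup_range(int freq)
{
	int i;

	for (i = 0; fcc_channels[i].start_freq; i++)
		if (freq >= fcc_channels[i].start_freq &&
		    freq <= fcc_channels[i].end_freq)
			return &fcc_channels[i];
	return NULL;
}

int main(void)
{
	int freqs[] = { 2412, 2472, 5200, 5700 };
	int i;

	for (i = 0; i < 4; i++) {
		const struct chan_range *r = lookup_range(freqs[i]);
		if (r)
			printf("%d MHz allowed, max %d dBm\n",
			       freqs[i], r->power_level);
		else
			printf("%d MHz disabled in this domain\n", freqs[i]);
	}
	return 0;
}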
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 535407d07fa4..644d2774469d 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
@@ -9,6 +9,7 @@ | |||
9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/jiffies.h> | ||
12 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
13 | #include <linux/skbuff.h> | 14 | #include <linux/skbuff.h> |
14 | #include <linux/netdevice.h> | 15 | #include <linux/netdevice.h> |
@@ -19,6 +20,7 @@ | |||
19 | 20 | ||
20 | #include "ieee80211_i.h" | 21 | #include "ieee80211_i.h" |
21 | #include "ieee80211_led.h" | 22 | #include "ieee80211_led.h" |
23 | #include "mesh.h" | ||
22 | #include "wep.h" | 24 | #include "wep.h" |
23 | #include "wpa.h" | 25 | #include "wpa.h" |
24 | #include "tkip.h" | 26 | #include "tkip.h" |
@@ -82,10 +84,10 @@ static inline int should_drop_frame(struct ieee80211_rx_status *status, | |||
82 | */ | 84 | */ |
83 | static struct sk_buff * | 85 | static struct sk_buff * |
84 | ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb, | 86 | ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb, |
85 | struct ieee80211_rx_status *status) | 87 | struct ieee80211_rx_status *status, |
88 | struct ieee80211_rate *rate) | ||
86 | { | 89 | { |
87 | struct ieee80211_sub_if_data *sdata; | 90 | struct ieee80211_sub_if_data *sdata; |
88 | struct ieee80211_rate *rate; | ||
89 | int needed_headroom = 0; | 91 | int needed_headroom = 0; |
90 | struct ieee80211_radiotap_header *rthdr; | 92 | struct ieee80211_radiotap_header *rthdr; |
91 | __le64 *rttsft = NULL; | 93 | __le64 *rttsft = NULL; |
@@ -194,14 +196,11 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb, | |||
194 | rtfixed->rx_flags |= | 196 | rtfixed->rx_flags |= |
195 | cpu_to_le16(IEEE80211_RADIOTAP_F_RX_BADFCS); | 197 | cpu_to_le16(IEEE80211_RADIOTAP_F_RX_BADFCS); |
196 | 198 | ||
197 | rate = ieee80211_get_rate(local, status->phymode, | 199 | rtfixed->rate = rate->bitrate / 5; |
198 | status->rate); | ||
199 | if (rate) | ||
200 | rtfixed->rate = rate->rate / 5; | ||
201 | 200 | ||
202 | rtfixed->chan_freq = cpu_to_le16(status->freq); | 201 | rtfixed->chan_freq = cpu_to_le16(status->freq); |
203 | 202 | ||
204 | if (status->phymode == MODE_IEEE80211A) | 203 | if (status->band == IEEE80211_BAND_5GHZ) |
205 | rtfixed->chan_flags = | 204 | rtfixed->chan_flags = |
206 | cpu_to_le16(IEEE80211_CHAN_OFDM | | 205 | cpu_to_le16(IEEE80211_CHAN_OFDM | |
207 | IEEE80211_CHAN_5GHZ); | 206 | IEEE80211_CHAN_5GHZ); |
@@ -226,6 +225,9 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb, | |||
226 | if (sdata->vif.type != IEEE80211_IF_TYPE_MNTR) | 225 | if (sdata->vif.type != IEEE80211_IF_TYPE_MNTR) |
227 | continue; | 226 | continue; |
228 | 227 | ||
228 | if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) | ||
229 | continue; | ||
230 | |||
229 | if (prev_dev) { | 231 | if (prev_dev) { |
230 | skb2 = skb_clone(skb, GFP_ATOMIC); | 232 | skb2 = skb_clone(skb, GFP_ATOMIC); |
231 | if (skb2) { | 233 | if (skb2) { |
@@ -249,15 +251,7 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb, | |||
249 | } | 251 | } |
250 | 252 | ||
251 | 253 | ||
252 | /* pre-rx handlers | 254 | static void ieee80211_parse_qos(struct ieee80211_rx_data *rx) |
253 | * | ||
254 | * these don't have dev/sdata fields in the rx data | ||
255 | * The sta value should also not be used because it may | ||
256 | * be NULL even though a STA (in IBSS mode) will be added. | ||
257 | */ | ||
258 | |||
259 | static ieee80211_txrx_result | ||
260 | ieee80211_rx_h_parse_qos(struct ieee80211_txrx_data *rx) | ||
261 | { | 255 | { |
262 | u8 *data = rx->skb->data; | 256 | u8 *data = rx->skb->data; |
263 | int tid; | 257 | int tid; |
@@ -268,9 +262,9 @@ ieee80211_rx_h_parse_qos(struct ieee80211_txrx_data *rx) | |||
268 | /* frame has qos control */ | 262 | /* frame has qos control */ |
269 | tid = qc[0] & QOS_CONTROL_TID_MASK; | 263 | tid = qc[0] & QOS_CONTROL_TID_MASK; |
270 | if (qc[0] & IEEE80211_QOS_CONTROL_A_MSDU_PRESENT) | 264 | if (qc[0] & IEEE80211_QOS_CONTROL_A_MSDU_PRESENT) |
271 | rx->flags |= IEEE80211_TXRXD_RX_AMSDU; | 265 | rx->flags |= IEEE80211_RX_AMSDU; |
272 | else | 266 | else |
273 | rx->flags &= ~IEEE80211_TXRXD_RX_AMSDU; | 267 | rx->flags &= ~IEEE80211_RX_AMSDU; |
274 | } else { | 268 | } else { |
275 | if (unlikely((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)) { | 269 | if (unlikely((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)) { |
276 | /* Separate TID for management frames */ | 270 | /* Separate TID for management frames */ |
@@ -286,68 +280,19 @@ ieee80211_rx_h_parse_qos(struct ieee80211_txrx_data *rx) | |||
286 | if (rx->sta) | 280 | if (rx->sta) |
287 | I802_DEBUG_INC(rx->sta->wme_rx_queue[tid]); | 281 | I802_DEBUG_INC(rx->sta->wme_rx_queue[tid]); |
288 | 282 | ||
289 | rx->u.rx.queue = tid; | 283 | rx->queue = tid; |
290 | /* Set skb->priority to 1d tag if highest order bit of TID is not set. | 284 | /* Set skb->priority to 1d tag if highest order bit of TID is not set. |
291 | * For now, set skb->priority to 0 for other cases. */ | 285 | * For now, set skb->priority to 0 for other cases. */ |
292 | rx->skb->priority = (tid > 7) ? 0 : tid; | 286 | rx->skb->priority = (tid > 7) ? 0 : tid; |
293 | |||
294 | return TXRX_CONTINUE; | ||
295 | } | 287 | } |
296 | 288 | ||
297 | 289 | static void ieee80211_verify_ip_alignment(struct ieee80211_rx_data *rx) | |
298 | static u32 ieee80211_rx_load_stats(struct ieee80211_local *local, | ||
299 | struct sk_buff *skb, | ||
300 | struct ieee80211_rx_status *status) | ||
301 | { | 290 | { |
302 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | ||
303 | u32 load = 0, hdrtime; | ||
304 | struct ieee80211_rate *rate; | ||
305 | struct ieee80211_hw_mode *mode = local->hw.conf.mode; | ||
306 | int i; | ||
307 | |||
308 | /* Estimate total channel use caused by this frame */ | ||
309 | |||
310 | if (unlikely(mode->num_rates < 0)) | ||
311 | return TXRX_CONTINUE; | ||
312 | |||
313 | rate = &mode->rates[0]; | ||
314 | for (i = 0; i < mode->num_rates; i++) { | ||
315 | if (mode->rates[i].val == status->rate) { | ||
316 | rate = &mode->rates[i]; | ||
317 | break; | ||
318 | } | ||
319 | } | ||
320 | |||
321 | /* 1 bit at 1 Mbit/s takes 1 usec; in channel_use values, | ||
322 | * 1 usec = 1/8 * (1080 / 10) = 13.5 */ | ||
323 | |||
324 | if (mode->mode == MODE_IEEE80211A || | ||
325 | (mode->mode == MODE_IEEE80211G && | ||
326 | rate->flags & IEEE80211_RATE_ERP)) | ||
327 | hdrtime = CHAN_UTIL_HDR_SHORT; | ||
328 | else | ||
329 | hdrtime = CHAN_UTIL_HDR_LONG; | ||
330 | |||
331 | load = hdrtime; | ||
332 | if (!is_multicast_ether_addr(hdr->addr1)) | ||
333 | load += hdrtime; | ||
334 | |||
335 | load += skb->len * rate->rate_inv; | ||
336 | |||
337 | /* Divide channel_use by 8 to avoid wrapping around the counter */ | ||
338 | load >>= CHAN_UTIL_SHIFT; | ||
339 | |||
340 | return load; | ||
341 | } | ||
342 | |||
343 | #ifdef CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT | 291 | #ifdef CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT |
344 | static ieee80211_txrx_result | ||
345 | ieee80211_rx_h_verify_ip_alignment(struct ieee80211_txrx_data *rx) | ||
346 | { | ||
347 | int hdrlen; | 292 | int hdrlen; |
348 | 293 | ||
349 | if (!WLAN_FC_DATA_PRESENT(rx->fc)) | 294 | if (!WLAN_FC_DATA_PRESENT(rx->fc)) |
350 | return TXRX_CONTINUE; | 295 | return; |
351 | 296 | ||
352 | /* | 297 | /* |
353 | * Drivers are required to align the payload data in a way that | 298 | * Drivers are required to align the payload data in a way that |
@@ -369,83 +314,158 @@ ieee80211_rx_h_verify_ip_alignment(struct ieee80211_txrx_data *rx) | |||
369 | * to move the 802.11 header further back in that case. | 314 | * to move the 802.11 header further back in that case. |
370 | */ | 315 | */ |
371 | hdrlen = ieee80211_get_hdrlen(rx->fc); | 316 | hdrlen = ieee80211_get_hdrlen(rx->fc); |
372 | if (rx->flags & IEEE80211_TXRXD_RX_AMSDU) | 317 | if (rx->flags & IEEE80211_RX_AMSDU) |
373 | hdrlen += ETH_HLEN; | 318 | hdrlen += ETH_HLEN; |
374 | WARN_ON_ONCE(((unsigned long)(rx->skb->data + hdrlen)) & 3); | 319 | WARN_ON_ONCE(((unsigned long)(rx->skb->data + hdrlen)) & 3); |
375 | |||
376 | return TXRX_CONTINUE; | ||
377 | } | ||
378 | #endif | 320 | #endif |
321 | } | ||
379 | 322 | ||
380 | ieee80211_rx_handler ieee80211_rx_pre_handlers[] = | 323 | |
324 | static u32 ieee80211_rx_load_stats(struct ieee80211_local *local, | ||
325 | struct sk_buff *skb, | ||
326 | struct ieee80211_rx_status *status, | ||
327 | struct ieee80211_rate *rate) | ||
381 | { | 328 | { |
382 | ieee80211_rx_h_parse_qos, | 329 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; |
383 | #ifdef CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT | 330 | u32 load = 0, hdrtime; |
384 | ieee80211_rx_h_verify_ip_alignment, | 331 | |
385 | #endif | 332 | /* Estimate total channel use caused by this frame */ |
386 | NULL | 333 | |
387 | }; | 334 | /* 1 bit at 1 Mbit/s takes 1 usec; in channel_use values, |
335 | * 1 usec = 1/8 * (1080 / 10) = 13.5 */ | ||
336 | |||
337 | if (status->band == IEEE80211_BAND_5GHZ || | ||
338 | (status->band == IEEE80211_BAND_2GHZ && | ||
339 | rate->flags & IEEE80211_RATE_ERP_G)) | ||
340 | hdrtime = CHAN_UTIL_HDR_SHORT; | ||
341 | else | ||
342 | hdrtime = CHAN_UTIL_HDR_LONG; | ||
343 | |||
344 | load = hdrtime; | ||
345 | if (!is_multicast_ether_addr(hdr->addr1)) | ||
346 | load += hdrtime; | ||
347 | |||
348 | /* TODO: optimise again */ | ||
349 | load += skb->len * CHAN_UTIL_RATE_LCM / rate->bitrate; | ||
350 | |||
351 | /* Divide channel_use by 8 to avoid wrapping around the counter */ | ||
352 | load >>= CHAN_UTIL_SHIFT; | ||
353 | |||
354 | return load; | ||
355 | } | ||
388 | 356 | ||
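The reworked ieee80211_rx_load_stats() above still estimates channel use as a fixed header cost (doubled for unicast frames, which trigger an ACK exchange) plus payload bits divided by the bitrate, now taken from struct ieee80211_rate. A rough standalone sketch of the same estimate in plain microseconds; the HDR_US_* numbers below are placeholders for illustration, not the kernel's CHAN_UTIL_* fixed-point constants:

#include <stdio.h>
#include <stdbool.h>

/* Assumed preamble+header costs in microseconds (placeholders). */
#define HDR_US_SHORT  24	/* OFDM / ERP short header */
#define HDR_US_LONG  192	/* long (CCK) preamble */

static unsigned int estimate_airtime_us(unsigned int len_bytes,
					unsigned int bitrate_100kbps,
					bool ofdm, bool unicast)
{
	unsigned int us = ofdm ? HDR_US_SHORT : HDR_US_LONG;

	if (unicast)		/* account for the ACK exchange too */
		us += ofdm ? HDR_US_SHORT : HDR_US_LONG;

	/* len * 8 bits at (bitrate_100kbps / 10) Mbit/s:
	 * time[us] = bits * 10 / bitrate_100kbps */
	us += len_bytes * 8 * 10 / bitrate_100kbps;
	return us;
}

int main(void)
{
	/* 1500-byte frame at 54 Mbit/s (bitrate field 540) vs 1 Mbit/s (10). */
	printf("1500 B @ 54M: ~%u us\n",
	       estimate_airtime_us(1500, 540, true, true));
	printf("1500 B @ 1M:  ~%u us\n",
	       estimate_airtime_us(1500, 10, false, true));
	return 0;
}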
389 | /* rx handlers */ | 357 | /* rx handlers */ |
390 | 358 | ||
391 | static ieee80211_txrx_result | 359 | static ieee80211_rx_result |
392 | ieee80211_rx_h_if_stats(struct ieee80211_txrx_data *rx) | 360 | ieee80211_rx_h_if_stats(struct ieee80211_rx_data *rx) |
393 | { | 361 | { |
394 | if (rx->sta) | 362 | if (rx->sta) |
395 | rx->sta->channel_use_raw += rx->u.rx.load; | 363 | rx->sta->channel_use_raw += rx->load; |
396 | rx->sdata->channel_use_raw += rx->u.rx.load; | 364 | rx->sdata->channel_use_raw += rx->load; |
397 | return TXRX_CONTINUE; | 365 | return RX_CONTINUE; |
398 | } | 366 | } |
399 | 367 | ||
400 | static ieee80211_txrx_result | 368 | static ieee80211_rx_result |
401 | ieee80211_rx_h_passive_scan(struct ieee80211_txrx_data *rx) | 369 | ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx) |
402 | { | 370 | { |
403 | struct ieee80211_local *local = rx->local; | 371 | struct ieee80211_local *local = rx->local; |
404 | struct sk_buff *skb = rx->skb; | 372 | struct sk_buff *skb = rx->skb; |
405 | 373 | ||
406 | if (unlikely(local->sta_hw_scanning)) | 374 | if (unlikely(local->sta_hw_scanning)) |
407 | return ieee80211_sta_rx_scan(rx->dev, skb, rx->u.rx.status); | 375 | return ieee80211_sta_rx_scan(rx->dev, skb, rx->status); |
408 | 376 | ||
409 | if (unlikely(local->sta_sw_scanning)) { | 377 | if (unlikely(local->sta_sw_scanning)) { |
410 | /* drop all the other packets during a software scan anyway */ | 378 | /* drop all the other packets during a software scan anyway */ |
411 | if (ieee80211_sta_rx_scan(rx->dev, skb, rx->u.rx.status) | 379 | if (ieee80211_sta_rx_scan(rx->dev, skb, rx->status) |
412 | != TXRX_QUEUED) | 380 | != RX_QUEUED) |
413 | dev_kfree_skb(skb); | 381 | dev_kfree_skb(skb); |
414 | return TXRX_QUEUED; | 382 | return RX_QUEUED; |
415 | } | 383 | } |
416 | 384 | ||
417 | if (unlikely(rx->flags & IEEE80211_TXRXD_RXIN_SCAN)) { | 385 | if (unlikely(rx->flags & IEEE80211_RX_IN_SCAN)) { |
418 | /* scanning finished during invoking of handlers */ | 386 | /* scanning finished during invoking of handlers */ |
419 | I802_DEBUG_INC(local->rx_handlers_drop_passive_scan); | 387 | I802_DEBUG_INC(local->rx_handlers_drop_passive_scan); |
420 | return TXRX_DROP; | 388 | return RX_DROP_UNUSABLE; |
421 | } | 389 | } |
422 | 390 | ||
423 | return TXRX_CONTINUE; | 391 | return RX_CONTINUE; |
392 | } | ||
393 | |||
394 | static ieee80211_rx_result | ||
395 | ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx) | ||
396 | { | ||
397 | int hdrlen = ieee80211_get_hdrlen(rx->fc); | ||
398 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data; | ||
399 | |||
400 | #define msh_h_get(h, l) ((struct ieee80211s_hdr *) ((u8 *)h + l)) | ||
401 | |||
402 | if ((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) { | ||
403 | if (!((rx->fc & IEEE80211_FCTL_FROMDS) && | ||
404 | (rx->fc & IEEE80211_FCTL_TODS))) | ||
405 | return RX_DROP_MONITOR; | ||
406 | if (memcmp(hdr->addr4, rx->dev->dev_addr, ETH_ALEN) == 0) | ||
407 | return RX_DROP_MONITOR; | ||
408 | } | ||
409 | |||
410 | /* If there is not an established peer link and this is not a peer link | ||
411 | * establishment frame, beacon or probe, drop the frame. | ||
412 | */ | ||
413 | |||
414 | if (!rx->sta || sta_plink_state(rx->sta) != PLINK_ESTAB) { | ||
415 | struct ieee80211_mgmt *mgmt; | ||
416 | |||
417 | if ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_MGMT) | ||
418 | return RX_DROP_MONITOR; | ||
419 | |||
420 | switch (rx->fc & IEEE80211_FCTL_STYPE) { | ||
421 | case IEEE80211_STYPE_ACTION: | ||
422 | mgmt = (struct ieee80211_mgmt *)hdr; | ||
423 | if (mgmt->u.action.category != PLINK_CATEGORY) | ||
424 | return RX_DROP_MONITOR; | ||
425 | /* fall through on else */ | ||
426 | case IEEE80211_STYPE_PROBE_REQ: | ||
427 | case IEEE80211_STYPE_PROBE_RESP: | ||
428 | case IEEE80211_STYPE_BEACON: | ||
429 | return RX_CONTINUE; | ||
430 | break; | ||
431 | default: | ||
432 | return RX_DROP_MONITOR; | ||
433 | } | ||
434 | |||
435 | } else if ((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA && | ||
436 | is_broadcast_ether_addr(hdr->addr1) && | ||
437 | mesh_rmc_check(hdr->addr4, msh_h_get(hdr, hdrlen), rx->dev)) | ||
438 | return RX_DROP_MONITOR; | ||
439 | #undef msh_h_get | ||
440 | |||
441 | return RX_CONTINUE; | ||
424 | } | 442 | } |
425 | 443 | ||
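ieee80211_rx_mesh_check() above boils down to two rules: mesh data frames must be four-address (ToDS and FromDS both set) and must not be our own frames coming back, and a peer without an established peer link only gets peer-link action frames, probes and beacons through. A standalone sketch of that decision; the frame-control parsing is abstracted into an enum, the RMC duplicate check for broadcast data is left out, and frame_kind/mesh_check() are names invented for this illustration:

#include <stdio.h>
#include <stdbool.h>

enum rx_result { RX_CONTINUE, RX_DROP_MONITOR };

enum frame_kind { DATA, MGMT_PLINK_ACTION, MGMT_PROBE_OR_BEACON, MGMT_OTHER };

static enum rx_result mesh_check(enum frame_kind kind, bool tods, bool fromds,
				 bool from_ourselves, bool plink_established)
{
	if (kind == DATA) {
		if (!(tods && fromds))
			return RX_DROP_MONITOR;	/* mesh data needs a 4-addr header */
		if (from_ourselves)
			return RX_DROP_MONITOR;	/* our own frame forwarded back */
	}

	if (!plink_established) {
		/* Only frames that can set up or maintain the link get through. */
		if (kind == MGMT_PLINK_ACTION || kind == MGMT_PROBE_OR_BEACON)
			return RX_CONTINUE;
		return RX_DROP_MONITOR;
	}

	return RX_CONTINUE;
}

int main(void)
{
	printf("data, 4-addr, peer up:  %d\n",
	       mesh_check(DATA, true, true, false, true));
	printf("data, 3-addr, peer up:  %d\n",
	       mesh_check(DATA, true, false, false, true));
	printf("beacon, no peer link:   %d\n",
	       mesh_check(MGMT_PROBE_OR_BEACON, false, false, false, false));
	printf("other mgmt, no peer:    %d\n",
	       mesh_check(MGMT_OTHER, false, false, false, false));
	return 0;
}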
426 | static ieee80211_txrx_result | 444 | |
427 | ieee80211_rx_h_check(struct ieee80211_txrx_data *rx) | 445 | static ieee80211_rx_result |
446 | ieee80211_rx_h_check(struct ieee80211_rx_data *rx) | ||
428 | { | 447 | { |
429 | struct ieee80211_hdr *hdr; | 448 | struct ieee80211_hdr *hdr; |
449 | |||
430 | hdr = (struct ieee80211_hdr *) rx->skb->data; | 450 | hdr = (struct ieee80211_hdr *) rx->skb->data; |
431 | 451 | ||
432 | /* Drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.2.9) */ | 452 | /* Drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.2.9) */ |
433 | if (rx->sta && !is_multicast_ether_addr(hdr->addr1)) { | 453 | if (rx->sta && !is_multicast_ether_addr(hdr->addr1)) { |
434 | if (unlikely(rx->fc & IEEE80211_FCTL_RETRY && | 454 | if (unlikely(rx->fc & IEEE80211_FCTL_RETRY && |
435 | rx->sta->last_seq_ctrl[rx->u.rx.queue] == | 455 | rx->sta->last_seq_ctrl[rx->queue] == |
436 | hdr->seq_ctrl)) { | 456 | hdr->seq_ctrl)) { |
437 | if (rx->flags & IEEE80211_TXRXD_RXRA_MATCH) { | 457 | if (rx->flags & IEEE80211_RX_RA_MATCH) { |
438 | rx->local->dot11FrameDuplicateCount++; | 458 | rx->local->dot11FrameDuplicateCount++; |
439 | rx->sta->num_duplicates++; | 459 | rx->sta->num_duplicates++; |
440 | } | 460 | } |
441 | return TXRX_DROP; | 461 | return RX_DROP_MONITOR; |
442 | } else | 462 | } else |
443 | rx->sta->last_seq_ctrl[rx->u.rx.queue] = hdr->seq_ctrl; | 463 | rx->sta->last_seq_ctrl[rx->queue] = hdr->seq_ctrl; |
444 | } | 464 | } |
445 | 465 | ||
446 | if (unlikely(rx->skb->len < 16)) { | 466 | if (unlikely(rx->skb->len < 16)) { |
447 | I802_DEBUG_INC(rx->local->rx_handlers_drop_short); | 467 | I802_DEBUG_INC(rx->local->rx_handlers_drop_short); |
448 | return TXRX_DROP; | 468 | return RX_DROP_MONITOR; |
449 | } | 469 | } |
450 | 470 | ||
451 | /* Drop disallowed frame classes based on STA auth/assoc state; | 471 | /* Drop disallowed frame classes based on STA auth/assoc state; |
@@ -456,6 +476,10 @@ ieee80211_rx_h_check(struct ieee80211_txrx_data *rx) | |||
456 | * deauth/disassoc frames when needed. In addition, hostapd is | 476 | * deauth/disassoc frames when needed. In addition, hostapd is |
457 | * responsible for filtering on both auth and assoc states. | 477 | * responsible for filtering on both auth and assoc states. |
458 | */ | 478 | */ |
479 | |||
480 | if (ieee80211_vif_is_mesh(&rx->sdata->vif)) | ||
481 | return ieee80211_rx_mesh_check(rx); | ||
482 | |||
459 | if (unlikely(((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA || | 483 | if (unlikely(((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA || |
460 | ((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL && | 484 | ((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL && |
461 | (rx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PSPOLL)) && | 485 | (rx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PSPOLL)) && |
@@ -464,26 +488,26 @@ ieee80211_rx_h_check(struct ieee80211_txrx_data *rx) | |||
464 | if ((!(rx->fc & IEEE80211_FCTL_FROMDS) && | 488 | if ((!(rx->fc & IEEE80211_FCTL_FROMDS) && |
465 | !(rx->fc & IEEE80211_FCTL_TODS) && | 489 | !(rx->fc & IEEE80211_FCTL_TODS) && |
466 | (rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) | 490 | (rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) |
467 | || !(rx->flags & IEEE80211_TXRXD_RXRA_MATCH)) { | 491 | || !(rx->flags & IEEE80211_RX_RA_MATCH)) { |
468 | /* Drop IBSS frames and frames for other hosts | 492 | /* Drop IBSS frames and frames for other hosts |
469 | * silently. */ | 493 | * silently. */ |
470 | return TXRX_DROP; | 494 | return RX_DROP_MONITOR; |
471 | } | 495 | } |
472 | 496 | ||
473 | return TXRX_DROP; | 497 | return RX_DROP_MONITOR; |
474 | } | 498 | } |
475 | 499 | ||
476 | return TXRX_CONTINUE; | 500 | return RX_CONTINUE; |
477 | } | 501 | } |
478 | 502 | ||
479 | 503 | ||
480 | static ieee80211_txrx_result | 504 | static ieee80211_rx_result |
481 | ieee80211_rx_h_decrypt(struct ieee80211_txrx_data *rx) | 505 | ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) |
482 | { | 506 | { |
483 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data; | 507 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data; |
484 | int keyidx; | 508 | int keyidx; |
485 | int hdrlen; | 509 | int hdrlen; |
486 | ieee80211_txrx_result result = TXRX_DROP; | 510 | ieee80211_rx_result result = RX_DROP_UNUSABLE; |
487 | struct ieee80211_key *stakey = NULL; | 511 | struct ieee80211_key *stakey = NULL; |
488 | 512 | ||
489 | /* | 513 | /* |
@@ -513,14 +537,14 @@ ieee80211_rx_h_decrypt(struct ieee80211_txrx_data *rx) | |||
513 | */ | 537 | */ |
514 | 538 | ||
515 | if (!(rx->fc & IEEE80211_FCTL_PROTECTED)) | 539 | if (!(rx->fc & IEEE80211_FCTL_PROTECTED)) |
516 | return TXRX_CONTINUE; | 540 | return RX_CONTINUE; |
517 | 541 | ||
518 | /* | 542 | /* |
519 | * No point in finding a key and decrypting if the frame is neither | 543 | * No point in finding a key and decrypting if the frame is neither |
520 | * addressed to us nor a multicast frame. | 544 | * addressed to us nor a multicast frame. |
521 | */ | 545 | */ |
522 | if (!(rx->flags & IEEE80211_TXRXD_RXRA_MATCH)) | 546 | if (!(rx->flags & IEEE80211_RX_RA_MATCH)) |
523 | return TXRX_CONTINUE; | 547 | return RX_CONTINUE; |
524 | 548 | ||
525 | if (rx->sta) | 549 | if (rx->sta) |
526 | stakey = rcu_dereference(rx->sta->key); | 550 | stakey = rcu_dereference(rx->sta->key); |
@@ -537,14 +561,14 @@ ieee80211_rx_h_decrypt(struct ieee80211_txrx_data *rx) | |||
537 | * we somehow allow the driver to tell us which key | 561 | * we somehow allow the driver to tell us which key |
538 | * the hardware used if this flag is set? | 562 | * the hardware used if this flag is set? |
539 | */ | 563 | */ |
540 | if ((rx->u.rx.status->flag & RX_FLAG_DECRYPTED) && | 564 | if ((rx->status->flag & RX_FLAG_DECRYPTED) && |
541 | (rx->u.rx.status->flag & RX_FLAG_IV_STRIPPED)) | 565 | (rx->status->flag & RX_FLAG_IV_STRIPPED)) |
542 | return TXRX_CONTINUE; | 566 | return RX_CONTINUE; |
543 | 567 | ||
544 | hdrlen = ieee80211_get_hdrlen(rx->fc); | 568 | hdrlen = ieee80211_get_hdrlen(rx->fc); |
545 | 569 | ||
546 | if (rx->skb->len < 8 + hdrlen) | 570 | if (rx->skb->len < 8 + hdrlen) |
547 | return TXRX_DROP; /* TODO: count this? */ | 571 | return RX_DROP_UNUSABLE; /* TODO: count this? */ |
548 | 572 | ||
549 | /* | 573 | /* |
550 | * no need to call ieee80211_wep_get_keyidx, | 574 | * no need to call ieee80211_wep_get_keyidx, |
@@ -573,14 +597,14 @@ ieee80211_rx_h_decrypt(struct ieee80211_txrx_data *rx) | |||
573 | printk(KERN_DEBUG "%s: RX protected frame," | 597 | printk(KERN_DEBUG "%s: RX protected frame," |
574 | " but have no key\n", rx->dev->name); | 598 | " but have no key\n", rx->dev->name); |
575 | #endif /* CONFIG_MAC80211_DEBUG */ | 599 | #endif /* CONFIG_MAC80211_DEBUG */ |
576 | return TXRX_DROP; | 600 | return RX_DROP_MONITOR; |
577 | } | 601 | } |
578 | 602 | ||
579 | /* Check for weak IVs if possible */ | 603 | /* Check for weak IVs if possible */ |
580 | if (rx->sta && rx->key->conf.alg == ALG_WEP && | 604 | if (rx->sta && rx->key->conf.alg == ALG_WEP && |
581 | ((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) && | 605 | ((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) && |
582 | (!(rx->u.rx.status->flag & RX_FLAG_IV_STRIPPED) || | 606 | (!(rx->status->flag & RX_FLAG_IV_STRIPPED) || |
583 | !(rx->u.rx.status->flag & RX_FLAG_DECRYPTED)) && | 607 | !(rx->status->flag & RX_FLAG_DECRYPTED)) && |
584 | ieee80211_wep_is_weak_iv(rx->skb, rx->key)) | 608 | ieee80211_wep_is_weak_iv(rx->skb, rx->key)) |
585 | rx->sta->wep_weak_iv_count++; | 609 | rx->sta->wep_weak_iv_count++; |
586 | 610 | ||
@@ -597,7 +621,7 @@ ieee80211_rx_h_decrypt(struct ieee80211_txrx_data *rx) | |||
597 | } | 621 | } |
598 | 622 | ||
599 | /* either the frame has been decrypted or will be dropped */ | 623 | /* either the frame has been decrypted or will be dropped */ |
600 | rx->u.rx.status->flag |= RX_FLAG_DECRYPTED; | 624 | rx->status->flag |= RX_FLAG_DECRYPTED; |
601 | 625 | ||
602 | return result; | 626 | return result; |
603 | } | 627 | } |
@@ -607,12 +631,12 @@ static void ap_sta_ps_start(struct net_device *dev, struct sta_info *sta) | |||
607 | struct ieee80211_sub_if_data *sdata; | 631 | struct ieee80211_sub_if_data *sdata; |
608 | DECLARE_MAC_BUF(mac); | 632 | DECLARE_MAC_BUF(mac); |
609 | 633 | ||
610 | sdata = IEEE80211_DEV_TO_SUB_IF(sta->dev); | 634 | sdata = sta->sdata; |
611 | 635 | ||
612 | if (sdata->bss) | 636 | if (sdata->bss) |
613 | atomic_inc(&sdata->bss->num_sta_ps); | 637 | atomic_inc(&sdata->bss->num_sta_ps); |
614 | sta->flags |= WLAN_STA_PS; | 638 | sta->flags |= WLAN_STA_PS; |
615 | sta->pspoll = 0; | 639 | sta->flags &= ~WLAN_STA_PSPOLL; |
616 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG | 640 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG |
617 | printk(KERN_DEBUG "%s: STA %s aid %d enters power save mode\n", | 641 | printk(KERN_DEBUG "%s: STA %s aid %d enters power save mode\n", |
618 | dev->name, print_mac(mac, sta->addr), sta->aid); | 642 | dev->name, print_mac(mac, sta->addr), sta->aid); |
@@ -628,21 +652,21 @@ static int ap_sta_ps_end(struct net_device *dev, struct sta_info *sta) | |||
628 | struct ieee80211_tx_packet_data *pkt_data; | 652 | struct ieee80211_tx_packet_data *pkt_data; |
629 | DECLARE_MAC_BUF(mac); | 653 | DECLARE_MAC_BUF(mac); |
630 | 654 | ||
631 | sdata = IEEE80211_DEV_TO_SUB_IF(sta->dev); | 655 | sdata = sta->sdata; |
656 | |||
632 | if (sdata->bss) | 657 | if (sdata->bss) |
633 | atomic_dec(&sdata->bss->num_sta_ps); | 658 | atomic_dec(&sdata->bss->num_sta_ps); |
634 | sta->flags &= ~(WLAN_STA_PS | WLAN_STA_TIM); | 659 | |
635 | sta->pspoll = 0; | 660 | sta->flags &= ~(WLAN_STA_PS | WLAN_STA_PSPOLL); |
636 | if (!skb_queue_empty(&sta->ps_tx_buf)) { | 661 | |
637 | if (local->ops->set_tim) | 662 | if (!skb_queue_empty(&sta->ps_tx_buf)) |
638 | local->ops->set_tim(local_to_hw(local), sta->aid, 0); | 663 | sta_info_clear_tim_bit(sta); |
639 | if (sdata->bss) | 664 | |
640 | bss_tim_clear(local, sdata->bss, sta->aid); | ||
641 | } | ||
642 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG | 665 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG |
643 | printk(KERN_DEBUG "%s: STA %s aid %d exits power save mode\n", | 666 | printk(KERN_DEBUG "%s: STA %s aid %d exits power save mode\n", |
644 | dev->name, print_mac(mac, sta->addr), sta->aid); | 667 | dev->name, print_mac(mac, sta->addr), sta->aid); |
645 | #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ | 668 | #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ |
669 | |||
646 | /* Send all buffered frames to the station */ | 670 | /* Send all buffered frames to the station */ |
647 | while ((skb = skb_dequeue(&sta->tx_filtered)) != NULL) { | 671 | while ((skb = skb_dequeue(&sta->tx_filtered)) != NULL) { |
648 | pkt_data = (struct ieee80211_tx_packet_data *) skb->cb; | 672 | pkt_data = (struct ieee80211_tx_packet_data *) skb->cb; |
@@ -666,15 +690,15 @@ static int ap_sta_ps_end(struct net_device *dev, struct sta_info *sta) | |||
666 | return sent; | 690 | return sent; |
667 | } | 691 | } |
668 | 692 | ||
669 | static ieee80211_txrx_result | 693 | static ieee80211_rx_result |
670 | ieee80211_rx_h_sta_process(struct ieee80211_txrx_data *rx) | 694 | ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) |
671 | { | 695 | { |
672 | struct sta_info *sta = rx->sta; | 696 | struct sta_info *sta = rx->sta; |
673 | struct net_device *dev = rx->dev; | 697 | struct net_device *dev = rx->dev; |
674 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data; | 698 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data; |
675 | 699 | ||
676 | if (!sta) | 700 | if (!sta) |
677 | return TXRX_CONTINUE; | 701 | return RX_CONTINUE; |
678 | 702 | ||
679 | /* Update last_rx only for IBSS packets which are for the current | 703 | /* Update last_rx only for IBSS packets which are for the current |
680 | * BSSID to avoid keeping the current IBSS network alive in cases where | 704 | * BSSID to avoid keeping the current IBSS network alive in cases where |
@@ -690,24 +714,26 @@ ieee80211_rx_h_sta_process(struct ieee80211_txrx_data *rx) | |||
690 | /* Update last_rx only for unicast frames in order to prevent | 714 | /* Update last_rx only for unicast frames in order to prevent |
691 | * the Probe Request frames (the only broadcast frames from a | 715 | * the Probe Request frames (the only broadcast frames from a |
692 | * STA in infrastructure mode) from keeping a connection alive. | 716 | * STA in infrastructure mode) from keeping a connection alive. |
717 | * Mesh beacons will update last_rx if they are found to | ||
718 | * match the current local configuration when processed. | ||
693 | */ | 719 | */ |
694 | sta->last_rx = jiffies; | 720 | sta->last_rx = jiffies; |
695 | } | 721 | } |
696 | 722 | ||
697 | if (!(rx->flags & IEEE80211_TXRXD_RXRA_MATCH)) | 723 | if (!(rx->flags & IEEE80211_RX_RA_MATCH)) |
698 | return TXRX_CONTINUE; | 724 | return RX_CONTINUE; |
699 | 725 | ||
700 | sta->rx_fragments++; | 726 | sta->rx_fragments++; |
701 | sta->rx_bytes += rx->skb->len; | 727 | sta->rx_bytes += rx->skb->len; |
702 | sta->last_rssi = rx->u.rx.status->ssi; | 728 | sta->last_rssi = rx->status->ssi; |
703 | sta->last_signal = rx->u.rx.status->signal; | 729 | sta->last_signal = rx->status->signal; |
704 | sta->last_noise = rx->u.rx.status->noise; | 730 | sta->last_noise = rx->status->noise; |
705 | 731 | ||
706 | if (!(rx->fc & IEEE80211_FCTL_MOREFRAGS)) { | 732 | if (!(rx->fc & IEEE80211_FCTL_MOREFRAGS)) { |
707 | /* Change STA power saving mode only in the end of a frame | 733 | /* Change STA power saving mode only in the end of a frame |
708 | * exchange sequence */ | 734 | * exchange sequence */ |
709 | if ((sta->flags & WLAN_STA_PS) && !(rx->fc & IEEE80211_FCTL_PM)) | 735 | if ((sta->flags & WLAN_STA_PS) && !(rx->fc & IEEE80211_FCTL_PM)) |
710 | rx->u.rx.sent_ps_buffered += ap_sta_ps_end(dev, sta); | 736 | rx->sent_ps_buffered += ap_sta_ps_end(dev, sta); |
711 | else if (!(sta->flags & WLAN_STA_PS) && | 737 | else if (!(sta->flags & WLAN_STA_PS) && |
712 | (rx->fc & IEEE80211_FCTL_PM)) | 738 | (rx->fc & IEEE80211_FCTL_PM)) |
713 | ap_sta_ps_start(dev, sta); | 739 | ap_sta_ps_start(dev, sta); |
@@ -722,10 +748,10 @@ ieee80211_rx_h_sta_process(struct ieee80211_txrx_data *rx) | |||
722 | * as a dropped packet. */ | 748 | * as a dropped packet. */ |
723 | sta->rx_packets++; | 749 | sta->rx_packets++; |
724 | dev_kfree_skb(rx->skb); | 750 | dev_kfree_skb(rx->skb); |
725 | return TXRX_QUEUED; | 751 | return RX_QUEUED; |
726 | } | 752 | } |
727 | 753 | ||
728 | return TXRX_CONTINUE; | 754 | return RX_CONTINUE; |
729 | } /* ieee80211_rx_h_sta_process */ | 755 | } /* ieee80211_rx_h_sta_process */ |
730 | 756 | ||
731 | static inline struct ieee80211_fragment_entry * | 757 | static inline struct ieee80211_fragment_entry * |
@@ -801,7 +827,7 @@ ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata, | |||
801 | compare_ether_addr(hdr->addr2, f_hdr->addr2) != 0) | 827 | compare_ether_addr(hdr->addr2, f_hdr->addr2) != 0) |
802 | continue; | 828 | continue; |
803 | 829 | ||
804 | if (entry->first_frag_time + 2 * HZ < jiffies) { | 830 | if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) { |
805 | __skb_queue_purge(&entry->skb_list); | 831 | __skb_queue_purge(&entry->skb_list); |
806 | continue; | 832 | continue; |
807 | } | 833 | } |
@@ -811,8 +837,8 @@ ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata, | |||
811 | return NULL; | 837 | return NULL; |
812 | } | 838 | } |
813 | 839 | ||
814 | static ieee80211_txrx_result | 840 | static ieee80211_rx_result |
815 | ieee80211_rx_h_defragment(struct ieee80211_txrx_data *rx) | 841 | ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) |
816 | { | 842 | { |
817 | struct ieee80211_hdr *hdr; | 843 | struct ieee80211_hdr *hdr; |
818 | u16 sc; | 844 | u16 sc; |
@@ -838,27 +864,27 @@ ieee80211_rx_h_defragment(struct ieee80211_txrx_data *rx) | |||
838 | if (frag == 0) { | 864 | if (frag == 0) { |
839 | /* This is the first fragment of a new frame. */ | 865 | /* This is the first fragment of a new frame. */ |
840 | entry = ieee80211_reassemble_add(rx->sdata, frag, seq, | 866 | entry = ieee80211_reassemble_add(rx->sdata, frag, seq, |
841 | rx->u.rx.queue, &(rx->skb)); | 867 | rx->queue, &(rx->skb)); |
842 | if (rx->key && rx->key->conf.alg == ALG_CCMP && | 868 | if (rx->key && rx->key->conf.alg == ALG_CCMP && |
843 | (rx->fc & IEEE80211_FCTL_PROTECTED)) { | 869 | (rx->fc & IEEE80211_FCTL_PROTECTED)) { |
844 | /* Store CCMP PN so that we can verify that the next | 870 | /* Store CCMP PN so that we can verify that the next |
845 | * fragment has a sequential PN value. */ | 871 | * fragment has a sequential PN value. */ |
846 | entry->ccmp = 1; | 872 | entry->ccmp = 1; |
847 | memcpy(entry->last_pn, | 873 | memcpy(entry->last_pn, |
848 | rx->key->u.ccmp.rx_pn[rx->u.rx.queue], | 874 | rx->key->u.ccmp.rx_pn[rx->queue], |
849 | CCMP_PN_LEN); | 875 | CCMP_PN_LEN); |
850 | } | 876 | } |
851 | return TXRX_QUEUED; | 877 | return RX_QUEUED; |
852 | } | 878 | } |
853 | 879 | ||
854 | /* This is a fragment for a frame that should already be pending in | 880 | /* This is a fragment for a frame that should already be pending in |
855 | * fragment cache. Add this fragment to the end of the pending entry. | 881 | * fragment cache. Add this fragment to the end of the pending entry. |
856 | */ | 882 | */ |
857 | entry = ieee80211_reassemble_find(rx->sdata, rx->fc, frag, seq, | 883 | entry = ieee80211_reassemble_find(rx->sdata, rx->fc, frag, seq, |
858 | rx->u.rx.queue, hdr); | 884 | rx->queue, hdr); |
859 | if (!entry) { | 885 | if (!entry) { |
860 | I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag); | 886 | I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag); |
861 | return TXRX_DROP; | 887 | return RX_DROP_MONITOR; |
862 | } | 888 | } |
863 | 889 | ||
864 | /* Verify that MPDUs within one MSDU have sequential PN values. | 890 | /* Verify that MPDUs within one MSDU have sequential PN values. |
@@ -867,14 +893,14 @@ ieee80211_rx_h_defragment(struct ieee80211_txrx_data *rx) | |||
867 | int i; | 893 | int i; |
868 | u8 pn[CCMP_PN_LEN], *rpn; | 894 | u8 pn[CCMP_PN_LEN], *rpn; |
869 | if (!rx->key || rx->key->conf.alg != ALG_CCMP) | 895 | if (!rx->key || rx->key->conf.alg != ALG_CCMP) |
870 | return TXRX_DROP; | 896 | return RX_DROP_UNUSABLE; |
871 | memcpy(pn, entry->last_pn, CCMP_PN_LEN); | 897 | memcpy(pn, entry->last_pn, CCMP_PN_LEN); |
872 | for (i = CCMP_PN_LEN - 1; i >= 0; i--) { | 898 | for (i = CCMP_PN_LEN - 1; i >= 0; i--) { |
873 | pn[i]++; | 899 | pn[i]++; |
874 | if (pn[i]) | 900 | if (pn[i]) |
875 | break; | 901 | break; |
876 | } | 902 | } |
877 | rpn = rx->key->u.ccmp.rx_pn[rx->u.rx.queue]; | 903 | rpn = rx->key->u.ccmp.rx_pn[rx->queue]; |
878 | if (memcmp(pn, rpn, CCMP_PN_LEN) != 0) { | 904 | if (memcmp(pn, rpn, CCMP_PN_LEN) != 0) { |
879 | if (net_ratelimit()) | 905 | if (net_ratelimit()) |
880 | printk(KERN_DEBUG "%s: defrag: CCMP PN not " | 906 | printk(KERN_DEBUG "%s: defrag: CCMP PN not " |
@@ -885,7 +911,7 @@ ieee80211_rx_h_defragment(struct ieee80211_txrx_data *rx) | |||
885 | rpn[0], rpn[1], rpn[2], rpn[3], rpn[4], | 911 | rpn[0], rpn[1], rpn[2], rpn[3], rpn[4], |
886 | rpn[5], pn[0], pn[1], pn[2], pn[3], | 912 | rpn[5], pn[0], pn[1], pn[2], pn[3], |
887 | pn[4], pn[5]); | 913 | pn[4], pn[5]); |
888 | return TXRX_DROP; | 914 | return RX_DROP_UNUSABLE; |
889 | } | 915 | } |
890 | memcpy(entry->last_pn, pn, CCMP_PN_LEN); | 916 | memcpy(entry->last_pn, pn, CCMP_PN_LEN); |
891 | } | 917 | } |
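The check above expects every CCMP-protected fragment after the first to carry the previous packet number plus one. A stand-alone sketch of that byte-wise increment and compare, assuming the 6-byte PN is stored most-significant byte first as in the loop shown:

    #include <stdio.h>
    #include <string.h>

    #define CCMP_PN_LEN 6

    /* increment a big-endian packet number in place, like the loop above */
    static void pn_inc(unsigned char pn[CCMP_PN_LEN])
    {
            int i;

            for (i = CCMP_PN_LEN - 1; i >= 0; i--) {
                    pn[i]++;
                    if (pn[i])      /* no carry out of this byte */
                            break;
            }
    }

    int main(void)
    {
            unsigned char last[CCMP_PN_LEN] = { 0, 0, 0, 0, 0x01, 0xff };
            unsigned char rx[CCMP_PN_LEN]   = { 0, 0, 0, 0, 0x02, 0x00 };
            unsigned char expect[CCMP_PN_LEN];

            memcpy(expect, last, CCMP_PN_LEN);
            pn_inc(expect);

            /* a fragment whose PN is not last+1 is dropped as unusable */
            printf("sequential: %s\n",
                   memcmp(expect, rx, CCMP_PN_LEN) == 0 ? "yes" : "no");
            return 0;
    }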
@@ -896,7 +922,7 @@ ieee80211_rx_h_defragment(struct ieee80211_txrx_data *rx) | |||
896 | entry->extra_len += rx->skb->len; | 922 | entry->extra_len += rx->skb->len; |
897 | if (rx->fc & IEEE80211_FCTL_MOREFRAGS) { | 923 | if (rx->fc & IEEE80211_FCTL_MOREFRAGS) { |
898 | rx->skb = NULL; | 924 | rx->skb = NULL; |
899 | return TXRX_QUEUED; | 925 | return RX_QUEUED; |
900 | } | 926 | } |
901 | 927 | ||
902 | rx->skb = __skb_dequeue(&entry->skb_list); | 928 | rx->skb = __skb_dequeue(&entry->skb_list); |
@@ -906,7 +932,7 @@ ieee80211_rx_h_defragment(struct ieee80211_txrx_data *rx) | |||
906 | GFP_ATOMIC))) { | 932 | GFP_ATOMIC))) { |
907 | I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag); | 933 | I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag); |
908 | __skb_queue_purge(&entry->skb_list); | 934 | __skb_queue_purge(&entry->skb_list); |
909 | return TXRX_DROP; | 935 | return RX_DROP_UNUSABLE; |
910 | } | 936 | } |
911 | } | 937 | } |
912 | while ((skb = __skb_dequeue(&entry->skb_list))) { | 938 | while ((skb = __skb_dequeue(&entry->skb_list))) { |
@@ -915,7 +941,7 @@ ieee80211_rx_h_defragment(struct ieee80211_txrx_data *rx) | |||
915 | } | 941 | } |
916 | 942 | ||
917 | /* Complete frame has been reassembled - process it now */ | 943 | /* Complete frame has been reassembled - process it now */ |
918 | rx->flags |= IEEE80211_TXRXD_FRAGMENTED; | 944 | rx->flags |= IEEE80211_RX_FRAGMENTED; |
919 | 945 | ||
920 | out: | 946 | out: |
921 | if (rx->sta) | 947 | if (rx->sta) |
@@ -924,11 +950,11 @@ ieee80211_rx_h_defragment(struct ieee80211_txrx_data *rx) | |||
924 | rx->local->dot11MulticastReceivedFrameCount++; | 950 | rx->local->dot11MulticastReceivedFrameCount++; |
925 | else | 951 | else |
926 | ieee80211_led_rx(rx->local); | 952 | ieee80211_led_rx(rx->local); |
927 | return TXRX_CONTINUE; | 953 | return RX_CONTINUE; |
928 | } | 954 | } |
929 | 955 | ||
930 | static ieee80211_txrx_result | 956 | static ieee80211_rx_result |
931 | ieee80211_rx_h_ps_poll(struct ieee80211_txrx_data *rx) | 957 | ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx) |
932 | { | 958 | { |
933 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev); | 959 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev); |
934 | struct sk_buff *skb; | 960 | struct sk_buff *skb; |
@@ -938,12 +964,12 @@ ieee80211_rx_h_ps_poll(struct ieee80211_txrx_data *rx) | |||
938 | if (likely(!rx->sta || | 964 | if (likely(!rx->sta || |
939 | (rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_CTL || | 965 | (rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_CTL || |
940 | (rx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_PSPOLL || | 966 | (rx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_PSPOLL || |
941 | !(rx->flags & IEEE80211_TXRXD_RXRA_MATCH))) | 967 | !(rx->flags & IEEE80211_RX_RA_MATCH))) |
942 | return TXRX_CONTINUE; | 968 | return RX_CONTINUE; |
943 | 969 | ||
944 | if ((sdata->vif.type != IEEE80211_IF_TYPE_AP) && | 970 | if ((sdata->vif.type != IEEE80211_IF_TYPE_AP) && |
945 | (sdata->vif.type != IEEE80211_IF_TYPE_VLAN)) | 971 | (sdata->vif.type != IEEE80211_IF_TYPE_VLAN)) |
946 | return TXRX_DROP; | 972 | return RX_DROP_UNUSABLE; |
947 | 973 | ||
948 | skb = skb_dequeue(&rx->sta->tx_filtered); | 974 | skb = skb_dequeue(&rx->sta->tx_filtered); |
949 | if (!skb) { | 975 | if (!skb) { |
@@ -958,9 +984,11 @@ ieee80211_rx_h_ps_poll(struct ieee80211_txrx_data *rx) | |||
958 | struct ieee80211_hdr *hdr = | 984 | struct ieee80211_hdr *hdr = |
959 | (struct ieee80211_hdr *) skb->data; | 985 | (struct ieee80211_hdr *) skb->data; |
960 | 986 | ||
961 | /* tell TX path to send one frame even though the STA may | 987 | /* |
962 | * still remain in PS mode after this frame exchange */ | 988 | * Tell TX path to send one frame even though the STA may |
963 | rx->sta->pspoll = 1; | 989 | * still remain in PS mode after this frame exchange. |
990 | */ | ||
991 | rx->sta->flags |= WLAN_STA_PSPOLL; | ||
964 | 992 | ||
965 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG | 993 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG |
966 | printk(KERN_DEBUG "STA %s aid %d: PS Poll (entries after %d)\n", | 994 | printk(KERN_DEBUG "STA %s aid %d: PS Poll (entries after %d)\n", |
@@ -970,46 +998,45 @@ ieee80211_rx_h_ps_poll(struct ieee80211_txrx_data *rx) | |||
970 | 998 | ||
971 | /* Use MoreData flag to indicate whether there are more | 999 | /* Use MoreData flag to indicate whether there are more |
972 | * buffered frames for this STA */ | 1000 | * buffered frames for this STA */ |
973 | if (no_pending_pkts) { | 1001 | if (no_pending_pkts) |
974 | hdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_MOREDATA); | 1002 | hdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_MOREDATA); |
975 | rx->sta->flags &= ~WLAN_STA_TIM; | 1003 | else |
976 | } else | ||
977 | hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA); | 1004 | hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA); |
978 | 1005 | ||
979 | dev_queue_xmit(skb); | 1006 | dev_queue_xmit(skb); |
980 | 1007 | ||
981 | if (no_pending_pkts) { | 1008 | if (no_pending_pkts) |
982 | if (rx->local->ops->set_tim) | 1009 | sta_info_clear_tim_bit(rx->sta); |
983 | rx->local->ops->set_tim(local_to_hw(rx->local), | ||
984 | rx->sta->aid, 0); | ||
985 | if (rx->sdata->bss) | ||
986 | bss_tim_clear(rx->local, rx->sdata->bss, rx->sta->aid); | ||
987 | } | ||
988 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG | 1010 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG |
989 | } else if (!rx->u.rx.sent_ps_buffered) { | 1011 | } else if (!rx->sent_ps_buffered) { |
1012 | /* | ||
1013 | * FIXME: This can be the result of a race condition between | ||
1014 | * us expiring a frame and the station polling for it. | ||
1015 | * Should we send it a null-func frame indicating we | ||
1016 | * have nothing buffered for it? | ||
1017 | */ | ||
990 | printk(KERN_DEBUG "%s: STA %s sent PS Poll even " | 1018 | printk(KERN_DEBUG "%s: STA %s sent PS Poll even " |
991 | "though there is no buffered frames for it\n", | 1019 | "though there is no buffered frames for it\n", |
992 | rx->dev->name, print_mac(mac, rx->sta->addr)); | 1020 | rx->dev->name, print_mac(mac, rx->sta->addr)); |
993 | #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ | 1021 | #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ |
994 | |||
995 | } | 1022 | } |
996 | 1023 | ||
997 | /* Free PS Poll skb here instead of returning TXRX_DROP that would | 1024 | /* Free PS Poll skb here instead of returning RX_DROP that would |
998 | * count as a dropped frame. */ | 1025 | * count as a dropped frame. */ |
999 | dev_kfree_skb(rx->skb); | 1026 | dev_kfree_skb(rx->skb); |
1000 | 1027 | ||
1001 | return TXRX_QUEUED; | 1028 | return RX_QUEUED; |
1002 | } | 1029 | } |
1003 | 1030 | ||
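Whether more frames remain buffered for the polling station is signalled through the MoreData bit of the frame control field, which is what the branch above toggles on the dequeued skb. A tiny sketch of that bit manipulation (the constant matches the 802.11 definition; endianness handling is omitted):

    #include <stdio.h>
    #include <stdint.h>

    #define IEEE80211_FCTL_MOREDATA 0x2000

    /* set or clear MoreData in a frame control word (endianness ignored) */
    static uint16_t set_moredata(uint16_t fc, int more_buffered)
    {
            if (more_buffered)
                    return fc | IEEE80211_FCTL_MOREDATA;
            return fc & (uint16_t)~IEEE80211_FCTL_MOREDATA;
    }

    int main(void)
    {
            uint16_t fc = 0x0108;   /* arbitrary data-frame control value */

            printf("queue empty:    0x%04x\n", (unsigned)set_moredata(fc, 0));
            printf("frames pending: 0x%04x\n", (unsigned)set_moredata(fc, 1));
            return 0;
    }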
1004 | static ieee80211_txrx_result | 1031 | static ieee80211_rx_result |
1005 | ieee80211_rx_h_remove_qos_control(struct ieee80211_txrx_data *rx) | 1032 | ieee80211_rx_h_remove_qos_control(struct ieee80211_rx_data *rx) |
1006 | { | 1033 | { |
1007 | u16 fc = rx->fc; | 1034 | u16 fc = rx->fc; |
1008 | u8 *data = rx->skb->data; | 1035 | u8 *data = rx->skb->data; |
1009 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) data; | 1036 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) data; |
1010 | 1037 | ||
1011 | if (!WLAN_FC_IS_QOS_DATA(fc)) | 1038 | if (!WLAN_FC_IS_QOS_DATA(fc)) |
1012 | return TXRX_CONTINUE; | 1039 | return RX_CONTINUE; |
1013 | 1040 | ||
1014 | /* remove the qos control field, update frame type and meta-data */ | 1041 | /* remove the qos control field, update frame type and meta-data */ |
1015 | memmove(data + 2, data, ieee80211_get_hdrlen(fc) - 2); | 1042 | memmove(data + 2, data, ieee80211_get_hdrlen(fc) - 2); |
@@ -1018,17 +1045,17 @@ ieee80211_rx_h_remove_qos_control(struct ieee80211_txrx_data *rx) | |||
1018 | rx->fc = fc &= ~IEEE80211_STYPE_QOS_DATA; | 1045 | rx->fc = fc &= ~IEEE80211_STYPE_QOS_DATA; |
1019 | hdr->frame_control = cpu_to_le16(fc); | 1046 | hdr->frame_control = cpu_to_le16(fc); |
1020 | 1047 | ||
1021 | return TXRX_CONTINUE; | 1048 | return RX_CONTINUE; |
1022 | } | 1049 | } |
1023 | 1050 | ||
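Removing the QoS control field amounts to sliding the first hdrlen-2 bytes of the 802.11 header forward by two and then trimming two bytes off the front, which is what the memmove()/skb_pull() pair above does. A buffer-only sketch of the same move (sizes chosen for the example, no skb involved):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            /* toy frame: 26-byte QoS data header (last 2 bytes are the QoS
             * control field) followed by payload; values are placeholders */
            unsigned char buf[32];
            unsigned char *data = buf;
            size_t hdrlen = 26, len = sizeof(buf), i;

            for (i = 0; i < len; i++)
                    buf[i] = (unsigned char)i;

            /* slide the header forward over the QoS control field ... */
            memmove(data + 2, data, hdrlen - 2);
            /* ... then drop the duplicate two bytes at the front, which the
             * handler does with skb_pull(rx->skb, 2) */
            data += 2;
            len -= 2;

            printf("len=%zu, first header byte=%d, first payload byte=%d\n",
                   len, data[0], data[hdrlen - 2]);
            return 0;
    }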
1024 | static int | 1051 | static int |
1025 | ieee80211_802_1x_port_control(struct ieee80211_txrx_data *rx) | 1052 | ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx) |
1026 | { | 1053 | { |
1027 | if (unlikely(rx->sdata->ieee802_1x_pac && | 1054 | if (unlikely(!rx->sta || !(rx->sta->flags & WLAN_STA_AUTHORIZED))) { |
1028 | (!rx->sta || !(rx->sta->flags & WLAN_STA_AUTHORIZED)))) { | ||
1029 | #ifdef CONFIG_MAC80211_DEBUG | 1055 | #ifdef CONFIG_MAC80211_DEBUG |
1030 | printk(KERN_DEBUG "%s: dropped frame " | 1056 | if (net_ratelimit()) |
1031 | "(unauthorized port)\n", rx->dev->name); | 1057 | printk(KERN_DEBUG "%s: dropped frame " |
1058 | "(unauthorized port)\n", rx->dev->name); | ||
1032 | #endif /* CONFIG_MAC80211_DEBUG */ | 1059 | #endif /* CONFIG_MAC80211_DEBUG */ |
1033 | return -EACCES; | 1060 | return -EACCES; |
1034 | } | 1061 | } |
@@ -1037,13 +1064,13 @@ ieee80211_802_1x_port_control(struct ieee80211_txrx_data *rx) | |||
1037 | } | 1064 | } |
1038 | 1065 | ||
1039 | static int | 1066 | static int |
1040 | ieee80211_drop_unencrypted(struct ieee80211_txrx_data *rx) | 1067 | ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx) |
1041 | { | 1068 | { |
1042 | /* | 1069 | /* |
1043 | * Pass through unencrypted frames if the hardware has | 1070 | * Pass through unencrypted frames if the hardware has |
1044 | * decrypted them already. | 1071 | * decrypted them already. |
1045 | */ | 1072 | */ |
1046 | if (rx->u.rx.status->flag & RX_FLAG_DECRYPTED) | 1073 | if (rx->status->flag & RX_FLAG_DECRYPTED) |
1047 | return 0; | 1074 | return 0; |
1048 | 1075 | ||
1049 | /* Drop unencrypted frames if key is set. */ | 1076 | /* Drop unencrypted frames if key is set. */ |
@@ -1060,7 +1087,7 @@ ieee80211_drop_unencrypted(struct ieee80211_txrx_data *rx) | |||
1060 | } | 1087 | } |
1061 | 1088 | ||
1062 | static int | 1089 | static int |
1063 | ieee80211_data_to_8023(struct ieee80211_txrx_data *rx) | 1090 | ieee80211_data_to_8023(struct ieee80211_rx_data *rx) |
1064 | { | 1091 | { |
1065 | struct net_device *dev = rx->dev; | 1092 | struct net_device *dev = rx->dev; |
1066 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data; | 1093 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data; |
@@ -1082,6 +1109,21 @@ ieee80211_data_to_8023(struct ieee80211_txrx_data *rx) | |||
1082 | 1109 | ||
1083 | hdrlen = ieee80211_get_hdrlen(fc); | 1110 | hdrlen = ieee80211_get_hdrlen(fc); |
1084 | 1111 | ||
1112 | if (ieee80211_vif_is_mesh(&sdata->vif)) { | ||
1113 | int meshhdrlen = ieee80211_get_mesh_hdrlen( | ||
1114 | (struct ieee80211s_hdr *) (skb->data + hdrlen)); | ||
1115 | /* Copy on cb: | ||
1116 | * - mesh header: to be used for mesh forwarding | ||
1117 | * decision. It will also be used as mesh header template at | ||
1118 | * tx.c:ieee80211_subif_start_xmit() if interface | ||
1119 | * type is mesh and skb->pkt_type == PACKET_OTHERHOST | ||
1120 | * - ta: to be used if a RERR needs to be sent. | ||
1121 | */ | ||
1122 | memcpy(skb->cb, skb->data + hdrlen, meshhdrlen); | ||
1123 | memcpy(MESH_PREQ(skb), hdr->addr2, ETH_ALEN); | ||
1124 | hdrlen += meshhdrlen; | ||
1125 | } | ||
1126 | |||
1085 | /* convert IEEE 802.11 header + possible LLC headers into Ethernet | 1127 | /* convert IEEE 802.11 header + possible LLC headers into Ethernet |
1086 | * header | 1128 | * header |
1087 | * IEEE 802.11 address fields: | 1129 | * IEEE 802.11 address fields: |
@@ -1115,9 +1157,10 @@ ieee80211_data_to_8023(struct ieee80211_txrx_data *rx) | |||
1115 | memcpy(dst, hdr->addr3, ETH_ALEN); | 1157 | memcpy(dst, hdr->addr3, ETH_ALEN); |
1116 | memcpy(src, hdr->addr4, ETH_ALEN); | 1158 | memcpy(src, hdr->addr4, ETH_ALEN); |
1117 | 1159 | ||
1118 | if (unlikely(sdata->vif.type != IEEE80211_IF_TYPE_WDS)) { | 1160 | if (unlikely(sdata->vif.type != IEEE80211_IF_TYPE_WDS && |
1119 | if (net_ratelimit()) | 1161 | sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT)) { |
1120 | printk(KERN_DEBUG "%s: dropped FromDS&ToDS " | 1162 | if (net_ratelimit()) |
1163 | printk(KERN_DEBUG "%s: dropped FromDS&ToDS " | ||
1121 | "frame (RA=%s TA=%s DA=%s SA=%s)\n", | 1164 | "frame (RA=%s TA=%s DA=%s SA=%s)\n", |
1122 | rx->dev->name, | 1165 | rx->dev->name, |
1123 | print_mac(mac, hdr->addr1), | 1166 | print_mac(mac, hdr->addr1), |
@@ -1192,7 +1235,7 @@ ieee80211_data_to_8023(struct ieee80211_txrx_data *rx) | |||
1192 | /* | 1235 | /* |
1193 | * requires that rx->skb is a frame with ethernet header | 1236 | * requires that rx->skb is a frame with ethernet header |
1194 | */ | 1237 | */ |
1195 | static bool ieee80211_frame_allowed(struct ieee80211_txrx_data *rx) | 1238 | static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx) |
1196 | { | 1239 | { |
1197 | static const u8 pae_group_addr[ETH_ALEN] | 1240 | static const u8 pae_group_addr[ETH_ALEN] |
1198 | = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 }; | 1241 | = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 }; |
@@ -1218,7 +1261,7 @@ static bool ieee80211_frame_allowed(struct ieee80211_txrx_data *rx) | |||
1218 | * requires that rx->skb is a frame with ethernet header | 1261 | * requires that rx->skb is a frame with ethernet header |
1219 | */ | 1262 | */ |
1220 | static void | 1263 | static void |
1221 | ieee80211_deliver_skb(struct ieee80211_txrx_data *rx) | 1264 | ieee80211_deliver_skb(struct ieee80211_rx_data *rx) |
1222 | { | 1265 | { |
1223 | struct net_device *dev = rx->dev; | 1266 | struct net_device *dev = rx->dev; |
1224 | struct ieee80211_local *local = rx->local; | 1267 | struct ieee80211_local *local = rx->local; |
@@ -1232,7 +1275,7 @@ ieee80211_deliver_skb(struct ieee80211_txrx_data *rx) | |||
1232 | 1275 | ||
1233 | if (local->bridge_packets && (sdata->vif.type == IEEE80211_IF_TYPE_AP || | 1276 | if (local->bridge_packets && (sdata->vif.type == IEEE80211_IF_TYPE_AP || |
1234 | sdata->vif.type == IEEE80211_IF_TYPE_VLAN) && | 1277 | sdata->vif.type == IEEE80211_IF_TYPE_VLAN) && |
1235 | (rx->flags & IEEE80211_TXRXD_RXRA_MATCH)) { | 1278 | (rx->flags & IEEE80211_RX_RA_MATCH)) { |
1236 | if (is_multicast_ether_addr(ehdr->h_dest)) { | 1279 | if (is_multicast_ether_addr(ehdr->h_dest)) { |
1237 | /* | 1280 | /* |
1238 | * send multicast frames both to higher layers in | 1281 | * send multicast frames both to higher layers in |
@@ -1244,7 +1287,7 @@ ieee80211_deliver_skb(struct ieee80211_txrx_data *rx) | |||
1244 | "multicast frame\n", dev->name); | 1287 | "multicast frame\n", dev->name); |
1245 | } else { | 1288 | } else { |
1246 | dsta = sta_info_get(local, skb->data); | 1289 | dsta = sta_info_get(local, skb->data); |
1247 | if (dsta && dsta->dev == dev) { | 1290 | if (dsta && dsta->sdata->dev == dev) { |
1248 | /* | 1291 | /* |
1249 | * The destination station is associated to | 1292 | * The destination station is associated to |
1250 | * this AP (in this VLAN), so send the frame | 1293 | * this AP (in this VLAN), so send the frame |
@@ -1254,8 +1297,38 @@ ieee80211_deliver_skb(struct ieee80211_txrx_data *rx) | |||
1254 | xmit_skb = skb; | 1297 | xmit_skb = skb; |
1255 | skb = NULL; | 1298 | skb = NULL; |
1256 | } | 1299 | } |
1257 | if (dsta) | 1300 | } |
1258 | sta_info_put(dsta); | 1301 | } |
1302 | |||
1303 | /* Mesh forwarding */ | ||
1304 | if (ieee80211_vif_is_mesh(&sdata->vif)) { | ||
1305 | u8 *mesh_ttl = &((struct ieee80211s_hdr *)skb->cb)->ttl; | ||
1306 | (*mesh_ttl)--; | ||
1307 | |||
1308 | if (is_multicast_ether_addr(skb->data)) { | ||
1309 | if (*mesh_ttl > 0) { | ||
1310 | xmit_skb = skb_copy(skb, GFP_ATOMIC); | ||
1311 | if (!xmit_skb && net_ratelimit()) | ||
1312 | printk(KERN_DEBUG "%s: failed to clone " | ||
1313 | "multicast frame\n", dev->name); | ||
1314 | else | ||
1315 | xmit_skb->pkt_type = PACKET_OTHERHOST; | ||
1316 | } else | ||
1317 | IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.sta, | ||
1318 | dropped_frames_ttl); | ||
1319 | } else if (skb->pkt_type != PACKET_OTHERHOST && | ||
1320 | compare_ether_addr(dev->dev_addr, skb->data) != 0) { | ||
1321 | if (*mesh_ttl == 0) { | ||
1322 | IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.sta, | ||
1323 | dropped_frames_ttl); | ||
1324 | dev_kfree_skb(skb); | ||
1325 | skb = NULL; | ||
1326 | } else { | ||
1327 | xmit_skb = skb; | ||
1328 | xmit_skb->pkt_type = PACKET_OTHERHOST; | ||
1329 | if (!(dev->flags & IFF_PROMISC)) | ||
1330 | skb = NULL; | ||
1331 | } | ||
1259 | } | 1332 | } |
1260 | } | 1333 | } |
1261 | 1334 | ||
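The mesh branch above decrements the TTL held in the mesh header copy on skb->cb and then chooses between local delivery, forwarding as PACKET_OTHERHOST, or dropping the frame. A compact model of that decision for the unicast case (types and names invented for the sketch, not taken from the kernel):

    #include <stdio.h>
    #include <stdbool.h>

    enum fwd { DELIVER_LOCAL, FORWARD_ONLY, FORWARD_AND_DELIVER, DROP_TTL };

    /* mirrors the unicast branch above: deliver to us, forward, or drop */
    static enum fwd mesh_decision(unsigned char *ttl, bool for_us, bool promisc)
    {
            (*ttl)--;
            if (for_us)
                    return DELIVER_LOCAL;
            if (*ttl == 0)
                    return DROP_TTL;        /* counted as dropped_frames_ttl */
            return promisc ? FORWARD_AND_DELIVER : FORWARD_ONLY;
    }

    int main(void)
    {
            unsigned char ttl = 1;

            printf("%d\n", mesh_decision(&ttl, false, false)); /* DROP_TTL */
            ttl = 5;
            printf("%d\n", mesh_decision(&ttl, false, false)); /* FORWARD_ONLY */
            return 0;
    }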
@@ -1275,8 +1348,8 @@ ieee80211_deliver_skb(struct ieee80211_txrx_data *rx) | |||
1275 | } | 1348 | } |
1276 | } | 1349 | } |
1277 | 1350 | ||
1278 | static ieee80211_txrx_result | 1351 | static ieee80211_rx_result |
1279 | ieee80211_rx_h_amsdu(struct ieee80211_txrx_data *rx) | 1352 | ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx) |
1280 | { | 1353 | { |
1281 | struct net_device *dev = rx->dev; | 1354 | struct net_device *dev = rx->dev; |
1282 | struct ieee80211_local *local = rx->local; | 1355 | struct ieee80211_local *local = rx->local; |
@@ -1291,17 +1364,17 @@ ieee80211_rx_h_amsdu(struct ieee80211_txrx_data *rx) | |||
1291 | 1364 | ||
1292 | fc = rx->fc; | 1365 | fc = rx->fc; |
1293 | if (unlikely((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA)) | 1366 | if (unlikely((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA)) |
1294 | return TXRX_CONTINUE; | 1367 | return RX_CONTINUE; |
1295 | 1368 | ||
1296 | if (unlikely(!WLAN_FC_DATA_PRESENT(fc))) | 1369 | if (unlikely(!WLAN_FC_DATA_PRESENT(fc))) |
1297 | return TXRX_DROP; | 1370 | return RX_DROP_MONITOR; |
1298 | 1371 | ||
1299 | if (!(rx->flags & IEEE80211_TXRXD_RX_AMSDU)) | 1372 | if (!(rx->flags & IEEE80211_RX_AMSDU)) |
1300 | return TXRX_CONTINUE; | 1373 | return RX_CONTINUE; |
1301 | 1374 | ||
1302 | err = ieee80211_data_to_8023(rx); | 1375 | err = ieee80211_data_to_8023(rx); |
1303 | if (unlikely(err)) | 1376 | if (unlikely(err)) |
1304 | return TXRX_DROP; | 1377 | return RX_DROP_UNUSABLE; |
1305 | 1378 | ||
1306 | skb->dev = dev; | 1379 | skb->dev = dev; |
1307 | 1380 | ||
@@ -1311,7 +1384,7 @@ ieee80211_rx_h_amsdu(struct ieee80211_txrx_data *rx) | |||
1311 | /* skip the wrapping header */ | 1384 | /* skip the wrapping header */ |
1312 | eth = (struct ethhdr *) skb_pull(skb, sizeof(struct ethhdr)); | 1385 | eth = (struct ethhdr *) skb_pull(skb, sizeof(struct ethhdr)); |
1313 | if (!eth) | 1386 | if (!eth) |
1314 | return TXRX_DROP; | 1387 | return RX_DROP_UNUSABLE; |
1315 | 1388 | ||
1316 | while (skb != frame) { | 1389 | while (skb != frame) { |
1317 | u8 padding; | 1390 | u8 padding; |
@@ -1326,7 +1399,7 @@ ieee80211_rx_h_amsdu(struct ieee80211_txrx_data *rx) | |||
1326 | /* the last MSDU has no padding */ | 1399 | /* the last MSDU has no padding */ |
1327 | if (subframe_len > remaining) { | 1400 | if (subframe_len > remaining) { |
1328 | printk(KERN_DEBUG "%s: wrong buffer size", dev->name); | 1401 | printk(KERN_DEBUG "%s: wrong buffer size", dev->name); |
1329 | return TXRX_DROP; | 1402 | return RX_DROP_UNUSABLE; |
1330 | } | 1403 | } |
1331 | 1404 | ||
1332 | skb_pull(skb, sizeof(struct ethhdr)); | 1405 | skb_pull(skb, sizeof(struct ethhdr)); |
@@ -1338,7 +1411,7 @@ ieee80211_rx_h_amsdu(struct ieee80211_txrx_data *rx) | |||
1338 | subframe_len); | 1411 | subframe_len); |
1339 | 1412 | ||
1340 | if (frame == NULL) | 1413 | if (frame == NULL) |
1341 | return TXRX_DROP; | 1414 | return RX_DROP_UNUSABLE; |
1342 | 1415 | ||
1343 | skb_reserve(frame, local->hw.extra_tx_headroom + | 1416 | skb_reserve(frame, local->hw.extra_tx_headroom + |
1344 | sizeof(struct ethhdr)); | 1417 | sizeof(struct ethhdr)); |
@@ -1351,7 +1424,7 @@ ieee80211_rx_h_amsdu(struct ieee80211_txrx_data *rx) | |||
1351 | printk(KERN_DEBUG "%s: wrong buffer size ", | 1424 | printk(KERN_DEBUG "%s: wrong buffer size ", |
1352 | dev->name); | 1425 | dev->name); |
1353 | dev_kfree_skb(frame); | 1426 | dev_kfree_skb(frame); |
1354 | return TXRX_DROP; | 1427 | return RX_DROP_UNUSABLE; |
1355 | } | 1428 | } |
1356 | } | 1429 | } |
1357 | 1430 | ||
@@ -1381,7 +1454,7 @@ ieee80211_rx_h_amsdu(struct ieee80211_txrx_data *rx) | |||
1381 | 1454 | ||
1382 | if (!ieee80211_frame_allowed(rx)) { | 1455 | if (!ieee80211_frame_allowed(rx)) { |
1383 | if (skb == frame) /* last frame */ | 1456 | if (skb == frame) /* last frame */ |
1384 | return TXRX_DROP; | 1457 | return RX_DROP_UNUSABLE; |
1385 | dev_kfree_skb(frame); | 1458 | dev_kfree_skb(frame); |
1386 | continue; | 1459 | continue; |
1387 | } | 1460 | } |
@@ -1389,11 +1462,11 @@ ieee80211_rx_h_amsdu(struct ieee80211_txrx_data *rx) | |||
1389 | ieee80211_deliver_skb(rx); | 1462 | ieee80211_deliver_skb(rx); |
1390 | } | 1463 | } |
1391 | 1464 | ||
1392 | return TXRX_QUEUED; | 1465 | return RX_QUEUED; |
1393 | } | 1466 | } |
1394 | 1467 | ||
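An A-MSDU packs several subframes, each with a DA/SA/length header and padding to a 4-byte boundary except for the last one, and the loop above peels them off one by one. A simplified walk over such an aggregate, assuming the usual 14-byte subframe header and big-endian length field:

    #include <stdio.h>

    /* walk an A-MSDU: 14-byte DA/SA/length header per subframe, padded to a
     * multiple of four except for the last subframe */
    static void walk_amsdu(const unsigned char *buf, unsigned int remaining)
    {
            while (remaining) {
                    unsigned int msdu_len = (buf[12] << 8) | buf[13];
                    unsigned int subframe_len = 14 + msdu_len;
                    unsigned int padding = (4 - subframe_len % 4) % 4;

                    if (subframe_len > remaining) {
                            printf("wrong buffer size\n");
                            return;
                    }

                    printf("subframe: %u payload bytes, %u padding\n",
                           msdu_len, padding);

                    if (subframe_len + padding >= remaining)
                            break;          /* last subframe, no padding */
                    remaining -= subframe_len + padding;
                    buf += subframe_len + padding;
            }
    }

    int main(void)
    {
            /* two toy subframes with 3-byte and 6-byte payloads */
            unsigned char agg[14 + 3 + 3 + 14 + 6] = { 0 };

            agg[13] = 3;                    /* length of the first payload */
            agg[14 + 3 + 3 + 13] = 6;       /* length of the second payload */
            walk_amsdu(agg, sizeof(agg));
            return 0;
    }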
1395 | static ieee80211_txrx_result | 1468 | static ieee80211_rx_result |
1396 | ieee80211_rx_h_data(struct ieee80211_txrx_data *rx) | 1469 | ieee80211_rx_h_data(struct ieee80211_rx_data *rx) |
1397 | { | 1470 | { |
1398 | struct net_device *dev = rx->dev; | 1471 | struct net_device *dev = rx->dev; |
1399 | u16 fc; | 1472 | u16 fc; |
@@ -1401,17 +1474,17 @@ ieee80211_rx_h_data(struct ieee80211_txrx_data *rx) | |||
1401 | 1474 | ||
1402 | fc = rx->fc; | 1475 | fc = rx->fc; |
1403 | if (unlikely((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA)) | 1476 | if (unlikely((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA)) |
1404 | return TXRX_CONTINUE; | 1477 | return RX_CONTINUE; |
1405 | 1478 | ||
1406 | if (unlikely(!WLAN_FC_DATA_PRESENT(fc))) | 1479 | if (unlikely(!WLAN_FC_DATA_PRESENT(fc))) |
1407 | return TXRX_DROP; | 1480 | return RX_DROP_MONITOR; |
1408 | 1481 | ||
1409 | err = ieee80211_data_to_8023(rx); | 1482 | err = ieee80211_data_to_8023(rx); |
1410 | if (unlikely(err)) | 1483 | if (unlikely(err)) |
1411 | return TXRX_DROP; | 1484 | return RX_DROP_UNUSABLE; |
1412 | 1485 | ||
1413 | if (!ieee80211_frame_allowed(rx)) | 1486 | if (!ieee80211_frame_allowed(rx)) |
1414 | return TXRX_DROP; | 1487 | return RX_DROP_MONITOR; |
1415 | 1488 | ||
1416 | rx->skb->dev = dev; | 1489 | rx->skb->dev = dev; |
1417 | 1490 | ||
@@ -1420,11 +1493,11 @@ ieee80211_rx_h_data(struct ieee80211_txrx_data *rx) | |||
1420 | 1493 | ||
1421 | ieee80211_deliver_skb(rx); | 1494 | ieee80211_deliver_skb(rx); |
1422 | 1495 | ||
1423 | return TXRX_QUEUED; | 1496 | return RX_QUEUED; |
1424 | } | 1497 | } |
1425 | 1498 | ||
1426 | static ieee80211_txrx_result | 1499 | static ieee80211_rx_result |
1427 | ieee80211_rx_h_ctrl(struct ieee80211_txrx_data *rx) | 1500 | ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx) |
1428 | { | 1501 | { |
1429 | struct ieee80211_local *local = rx->local; | 1502 | struct ieee80211_local *local = rx->local; |
1430 | struct ieee80211_hw *hw = &local->hw; | 1503 | struct ieee80211_hw *hw = &local->hw; |
@@ -1435,15 +1508,15 @@ ieee80211_rx_h_ctrl(struct ieee80211_txrx_data *rx) | |||
1435 | u16 tid; | 1508 | u16 tid; |
1436 | 1509 | ||
1437 | if (likely((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_CTL)) | 1510 | if (likely((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_CTL)) |
1438 | return TXRX_CONTINUE; | 1511 | return RX_CONTINUE; |
1439 | 1512 | ||
1440 | if ((rx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_BACK_REQ) { | 1513 | if ((rx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_BACK_REQ) { |
1441 | if (!rx->sta) | 1514 | if (!rx->sta) |
1442 | return TXRX_CONTINUE; | 1515 | return RX_CONTINUE; |
1443 | tid = le16_to_cpu(bar->control) >> 12; | 1516 | tid = le16_to_cpu(bar->control) >> 12; |
1444 | tid_agg_rx = &(rx->sta->ampdu_mlme.tid_rx[tid]); | 1517 | tid_agg_rx = &(rx->sta->ampdu_mlme.tid_rx[tid]); |
1445 | if (tid_agg_rx->state != HT_AGG_STATE_OPERATIONAL) | 1518 | if (tid_agg_rx->state != HT_AGG_STATE_OPERATIONAL) |
1446 | return TXRX_CONTINUE; | 1519 | return RX_CONTINUE; |
1447 | 1520 | ||
1448 | start_seq_num = le16_to_cpu(bar->start_seq_num) >> 4; | 1521 | start_seq_num = le16_to_cpu(bar->start_seq_num) >> 4; |
1449 | 1522 | ||
@@ -1460,77 +1533,35 @@ ieee80211_rx_h_ctrl(struct ieee80211_txrx_data *rx) | |||
1460 | ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, NULL, | 1533 | ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, NULL, |
1461 | start_seq_num, 1); | 1534 | start_seq_num, 1); |
1462 | rcu_read_unlock(); | 1535 | rcu_read_unlock(); |
1463 | return TXRX_DROP; | 1536 | return RX_DROP_UNUSABLE; |
1464 | } | 1537 | } |
1465 | 1538 | ||
1466 | return TXRX_CONTINUE; | 1539 | return RX_CONTINUE; |
1467 | } | 1540 | } |
1468 | 1541 | ||
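For a Block Ack Request the handler reads the TID from the top four bits of the BAR control field and the starting sequence number from the upper twelve bits of the sequence control field, matching the shifts above. A small sketch of that unpacking (byte-order conversion omitted):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint16_t bar_control   = 0x5004;   /* example value, TID 5 */
            uint16_t start_seq_ctl = 0x07b0;   /* example value, SSN 123 */

            unsigned int tid = bar_control >> 12;   /* top four bits */
            unsigned int ssn = start_seq_ctl >> 4;  /* upper twelve bits */

            printf("tid=%u ssn=%u\n", tid, ssn);    /* tid=5 ssn=123 */
            return 0;
    }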
1469 | static ieee80211_txrx_result | 1542 | static ieee80211_rx_result |
1470 | ieee80211_rx_h_mgmt(struct ieee80211_txrx_data *rx) | 1543 | ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx) |
1471 | { | 1544 | { |
1472 | struct ieee80211_sub_if_data *sdata; | 1545 | struct ieee80211_sub_if_data *sdata; |
1473 | 1546 | ||
1474 | if (!(rx->flags & IEEE80211_TXRXD_RXRA_MATCH)) | 1547 | if (!(rx->flags & IEEE80211_RX_RA_MATCH)) |
1475 | return TXRX_DROP; | 1548 | return RX_DROP_MONITOR; |
1476 | 1549 | ||
1477 | sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev); | 1550 | sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev); |
1478 | if ((sdata->vif.type == IEEE80211_IF_TYPE_STA || | 1551 | if ((sdata->vif.type == IEEE80211_IF_TYPE_STA || |
1479 | sdata->vif.type == IEEE80211_IF_TYPE_IBSS) && | 1552 | sdata->vif.type == IEEE80211_IF_TYPE_IBSS || |
1553 | sdata->vif.type == IEEE80211_IF_TYPE_MESH_POINT) && | ||
1480 | !(sdata->flags & IEEE80211_SDATA_USERSPACE_MLME)) | 1554 | !(sdata->flags & IEEE80211_SDATA_USERSPACE_MLME)) |
1481 | ieee80211_sta_rx_mgmt(rx->dev, rx->skb, rx->u.rx.status); | 1555 | ieee80211_sta_rx_mgmt(rx->dev, rx->skb, rx->status); |
1482 | else | 1556 | else |
1483 | return TXRX_DROP; | 1557 | return RX_DROP_MONITOR; |
1484 | |||
1485 | return TXRX_QUEUED; | ||
1486 | } | ||
1487 | |||
1488 | static inline ieee80211_txrx_result __ieee80211_invoke_rx_handlers( | ||
1489 | struct ieee80211_local *local, | ||
1490 | ieee80211_rx_handler *handlers, | ||
1491 | struct ieee80211_txrx_data *rx, | ||
1492 | struct sta_info *sta) | ||
1493 | { | ||
1494 | ieee80211_rx_handler *handler; | ||
1495 | ieee80211_txrx_result res = TXRX_DROP; | ||
1496 | |||
1497 | for (handler = handlers; *handler != NULL; handler++) { | ||
1498 | res = (*handler)(rx); | ||
1499 | |||
1500 | switch (res) { | ||
1501 | case TXRX_CONTINUE: | ||
1502 | continue; | ||
1503 | case TXRX_DROP: | ||
1504 | I802_DEBUG_INC(local->rx_handlers_drop); | ||
1505 | if (sta) | ||
1506 | sta->rx_dropped++; | ||
1507 | break; | ||
1508 | case TXRX_QUEUED: | ||
1509 | I802_DEBUG_INC(local->rx_handlers_queued); | ||
1510 | break; | ||
1511 | } | ||
1512 | break; | ||
1513 | } | ||
1514 | 1558 | ||
1515 | if (res == TXRX_DROP) | 1559 | return RX_QUEUED; |
1516 | dev_kfree_skb(rx->skb); | ||
1517 | return res; | ||
1518 | } | ||
1519 | |||
1520 | static inline void ieee80211_invoke_rx_handlers(struct ieee80211_local *local, | ||
1521 | ieee80211_rx_handler *handlers, | ||
1522 | struct ieee80211_txrx_data *rx, | ||
1523 | struct sta_info *sta) | ||
1524 | { | ||
1525 | if (__ieee80211_invoke_rx_handlers(local, handlers, rx, sta) == | ||
1526 | TXRX_CONTINUE) | ||
1527 | dev_kfree_skb(rx->skb); | ||
1528 | } | 1560 | } |
1529 | 1561 | ||
1530 | static void ieee80211_rx_michael_mic_report(struct net_device *dev, | 1562 | static void ieee80211_rx_michael_mic_report(struct net_device *dev, |
1531 | struct ieee80211_hdr *hdr, | 1563 | struct ieee80211_hdr *hdr, |
1532 | struct sta_info *sta, | 1564 | struct ieee80211_rx_data *rx) |
1533 | struct ieee80211_txrx_data *rx) | ||
1534 | { | 1565 | { |
1535 | int keyidx, hdrlen; | 1566 | int keyidx, hdrlen; |
1536 | DECLARE_MAC_BUF(mac); | 1567 | DECLARE_MAC_BUF(mac); |
@@ -1548,7 +1579,7 @@ static void ieee80211_rx_michael_mic_report(struct net_device *dev, | |||
1548 | dev->name, print_mac(mac, hdr->addr2), | 1579 | dev->name, print_mac(mac, hdr->addr2), |
1549 | print_mac(mac2, hdr->addr1), keyidx); | 1580 | print_mac(mac2, hdr->addr1), keyidx); |
1550 | 1581 | ||
1551 | if (!sta) { | 1582 | if (!rx->sta) { |
1552 | /* | 1583 | /* |
1553 | * Some hardware seem to generate incorrect Michael MIC | 1584 | * Some hardware seem to generate incorrect Michael MIC |
1554 | * reports; ignore them to avoid triggering countermeasures. | 1585 | * reports; ignore them to avoid triggering countermeasures. |
@@ -1600,7 +1631,89 @@ static void ieee80211_rx_michael_mic_report(struct net_device *dev, | |||
1600 | rx->skb = NULL; | 1631 | rx->skb = NULL; |
1601 | } | 1632 | } |
1602 | 1633 | ||
1603 | ieee80211_rx_handler ieee80211_rx_handlers[] = | 1634 | /* TODO: use IEEE80211_RX_FRAGMENTED */ |
1635 | static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx) | ||
1636 | { | ||
1637 | struct ieee80211_sub_if_data *sdata; | ||
1638 | struct ieee80211_local *local = rx->local; | ||
1639 | struct ieee80211_rtap_hdr { | ||
1640 | struct ieee80211_radiotap_header hdr; | ||
1641 | u8 flags; | ||
1642 | u8 rate; | ||
1643 | __le16 chan_freq; | ||
1644 | __le16 chan_flags; | ||
1645 | } __attribute__ ((packed)) *rthdr; | ||
1646 | struct sk_buff *skb = rx->skb, *skb2; | ||
1647 | struct net_device *prev_dev = NULL; | ||
1648 | struct ieee80211_rx_status *status = rx->status; | ||
1649 | |||
1650 | if (rx->flags & IEEE80211_RX_CMNTR_REPORTED) | ||
1651 | goto out_free_skb; | ||
1652 | |||
1653 | if (skb_headroom(skb) < sizeof(*rthdr) && | ||
1654 | pskb_expand_head(skb, sizeof(*rthdr), 0, GFP_ATOMIC)) | ||
1655 | goto out_free_skb; | ||
1656 | |||
1657 | rthdr = (void *)skb_push(skb, sizeof(*rthdr)); | ||
1658 | memset(rthdr, 0, sizeof(*rthdr)); | ||
1659 | rthdr->hdr.it_len = cpu_to_le16(sizeof(*rthdr)); | ||
1660 | rthdr->hdr.it_present = | ||
1661 | cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) | | ||
1662 | (1 << IEEE80211_RADIOTAP_RATE) | | ||
1663 | (1 << IEEE80211_RADIOTAP_CHANNEL)); | ||
1664 | |||
1665 | rthdr->rate = rx->rate->bitrate / 5; | ||
1666 | rthdr->chan_freq = cpu_to_le16(status->freq); | ||
1667 | |||
1668 | if (status->band == IEEE80211_BAND_5GHZ) | ||
1669 | rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_OFDM | | ||
1670 | IEEE80211_CHAN_5GHZ); | ||
1671 | else | ||
1672 | rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_DYN | | ||
1673 | IEEE80211_CHAN_2GHZ); | ||
1674 | |||
1675 | skb_set_mac_header(skb, 0); | ||
1676 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
1677 | skb->pkt_type = PACKET_OTHERHOST; | ||
1678 | skb->protocol = htons(ETH_P_802_2); | ||
1679 | |||
1680 | list_for_each_entry_rcu(sdata, &local->interfaces, list) { | ||
1681 | if (!netif_running(sdata->dev)) | ||
1682 | continue; | ||
1683 | |||
1684 | if (sdata->vif.type != IEEE80211_IF_TYPE_MNTR || | ||
1685 | !(sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)) | ||
1686 | continue; | ||
1687 | |||
1688 | if (prev_dev) { | ||
1689 | skb2 = skb_clone(skb, GFP_ATOMIC); | ||
1690 | if (skb2) { | ||
1691 | skb2->dev = prev_dev; | ||
1692 | netif_rx(skb2); | ||
1693 | } | ||
1694 | } | ||
1695 | |||
1696 | prev_dev = sdata->dev; | ||
1697 | sdata->dev->stats.rx_packets++; | ||
1698 | sdata->dev->stats.rx_bytes += skb->len; | ||
1699 | } | ||
1700 | |||
1701 | if (prev_dev) { | ||
1702 | skb->dev = prev_dev; | ||
1703 | netif_rx(skb); | ||
1704 | skb = NULL; | ||
1705 | } else | ||
1706 | goto out_free_skb; | ||
1707 | |||
1708 | rx->flags |= IEEE80211_RX_CMNTR_REPORTED; | ||
1709 | return; | ||
1710 | |||
1711 | out_free_skb: | ||
1712 | dev_kfree_skb(skb); | ||
1713 | } | ||
1714 | |||
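The cooked-monitor header built above advertises only the FLAGS, RATE and CHANNEL radiotap fields. A host-side sketch of that fixed 14-byte layout (field order follows the radiotap convention; a little-endian host is assumed, whereas the kernel uses cpu_to_le16/cpu_to_le32):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    /* minimal radiotap header carrying flags, rate and channel */
    struct rtap_min {
            uint8_t  version;       /* always 0 */
            uint8_t  pad;
            uint16_t it_len;        /* total header length */
            uint32_t it_present;    /* bitmap of fields that follow */
            uint8_t  flags;
            uint8_t  rate;          /* in 500 kbit/s units */
            uint16_t chan_freq;     /* MHz */
            uint16_t chan_flags;
    } __attribute__((packed));

    enum { RT_FLAGS = 1, RT_RATE = 2, RT_CHANNEL = 3 };

    int main(void)
    {
            struct rtap_min h;

            memset(&h, 0, sizeof(h));
            h.it_len = sizeof(h);       /* 14 bytes */
            h.it_present = (1u << RT_FLAGS) | (1u << RT_RATE) |
                           (1u << RT_CHANNEL);
            h.rate = 540 / 5;           /* 54 Mb/s, bitrate is in 100 kb/s */
            h.chan_freq = 2412;         /* channel 1 */

            printf("header: %zu bytes, present=0x%08x\n",
                   sizeof(h), (unsigned)h.it_present);
            return 0;
    }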
1715 | typedef ieee80211_rx_result (*ieee80211_rx_handler)(struct ieee80211_rx_data *); | ||
1716 | static ieee80211_rx_handler ieee80211_rx_handlers[] = | ||
1604 | { | 1717 | { |
1605 | ieee80211_rx_h_if_stats, | 1718 | ieee80211_rx_h_if_stats, |
1606 | ieee80211_rx_h_passive_scan, | 1719 | ieee80211_rx_h_passive_scan, |
@@ -1622,10 +1735,51 @@ ieee80211_rx_handler ieee80211_rx_handlers[] = | |||
1622 | NULL | 1735 | NULL |
1623 | }; | 1736 | }; |
1624 | 1737 | ||
1738 | static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata, | ||
1739 | struct ieee80211_rx_data *rx, | ||
1740 | struct sk_buff *skb) | ||
1741 | { | ||
1742 | ieee80211_rx_handler *handler; | ||
1743 | ieee80211_rx_result res = RX_DROP_MONITOR; | ||
1744 | |||
1745 | rx->skb = skb; | ||
1746 | rx->sdata = sdata; | ||
1747 | rx->dev = sdata->dev; | ||
1748 | |||
1749 | for (handler = ieee80211_rx_handlers; *handler != NULL; handler++) { | ||
1750 | res = (*handler)(rx); | ||
1751 | |||
1752 | switch (res) { | ||
1753 | case RX_CONTINUE: | ||
1754 | continue; | ||
1755 | case RX_DROP_UNUSABLE: | ||
1756 | case RX_DROP_MONITOR: | ||
1757 | I802_DEBUG_INC(sdata->local->rx_handlers_drop); | ||
1758 | if (rx->sta) | ||
1759 | rx->sta->rx_dropped++; | ||
1760 | break; | ||
1761 | case RX_QUEUED: | ||
1762 | I802_DEBUG_INC(sdata->local->rx_handlers_queued); | ||
1763 | break; | ||
1764 | } | ||
1765 | break; | ||
1766 | } | ||
1767 | |||
1768 | switch (res) { | ||
1769 | case RX_CONTINUE: | ||
1770 | case RX_DROP_MONITOR: | ||
1771 | ieee80211_rx_cooked_monitor(rx); | ||
1772 | break; | ||
1773 | case RX_DROP_UNUSABLE: | ||
1774 | dev_kfree_skb(rx->skb); | ||
1775 | break; | ||
1776 | } | ||
1777 | } | ||
1778 | |||
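The new RX path is a NULL-terminated array of handlers; the first handler that does not return RX_CONTINUE ends the walk, RX_DROP_UNUSABLE frees the frame, and RX_CONTINUE or RX_DROP_MONITOR hands it to the cooked-monitor path. A self-contained model of that dispatch pattern (handler names and the frame struct are invented for the example):

    #include <stdio.h>

    enum rx_res { RX_CONTINUE, RX_QUEUED, RX_DROP_MONITOR, RX_DROP_UNUSABLE };

    struct frame { int bad; int is_data; };

    typedef enum rx_res (*rx_handler)(struct frame *);

    static enum rx_res h_check(struct frame *f)
    {
            return f->bad ? RX_DROP_UNUSABLE : RX_CONTINUE;
    }

    static enum rx_res h_data(struct frame *f)
    {
            return f->is_data ? RX_QUEUED : RX_DROP_MONITOR;
    }

    static rx_handler handlers[] = { h_check, h_data, NULL };

    static void invoke(struct frame *f)
    {
            rx_handler *h;
            enum rx_res res = RX_DROP_MONITOR;

            for (h = handlers; *h; h++) {
                    res = (*h)(f);
                    if (res != RX_CONTINUE)
                            break;
            }

            switch (res) {
            case RX_CONTINUE:
            case RX_DROP_MONITOR:
                    printf("-> cooked monitor\n");
                    break;
            case RX_DROP_UNUSABLE:
                    printf("-> freed\n");
                    break;
            case RX_QUEUED:
                    printf("-> consumed by a handler\n");
                    break;
            }
    }

    int main(void)
    {
            struct frame data = { 0, 1 }, mgmt = { 0, 0 }, broken = { 1, 0 };

            invoke(&data);
            invoke(&mgmt);
            invoke(&broken);
            return 0;
    }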
1625 | /* main receive path */ | 1779 | /* main receive path */ |
1626 | 1780 | ||
1627 | static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata, | 1781 | static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata, |
1628 | u8 *bssid, struct ieee80211_txrx_data *rx, | 1782 | u8 *bssid, struct ieee80211_rx_data *rx, |
1629 | struct ieee80211_hdr *hdr) | 1783 | struct ieee80211_hdr *hdr) |
1630 | { | 1784 | { |
1631 | int multicast = is_multicast_ether_addr(hdr->addr1); | 1785 | int multicast = is_multicast_ether_addr(hdr->addr1); |
@@ -1635,34 +1789,47 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata, | |||
1635 | if (!bssid) | 1789 | if (!bssid) |
1636 | return 0; | 1790 | return 0; |
1637 | if (!ieee80211_bssid_match(bssid, sdata->u.sta.bssid)) { | 1791 | if (!ieee80211_bssid_match(bssid, sdata->u.sta.bssid)) { |
1638 | if (!(rx->flags & IEEE80211_TXRXD_RXIN_SCAN)) | 1792 | if (!(rx->flags & IEEE80211_RX_IN_SCAN)) |
1639 | return 0; | 1793 | return 0; |
1640 | rx->flags &= ~IEEE80211_TXRXD_RXRA_MATCH; | 1794 | rx->flags &= ~IEEE80211_RX_RA_MATCH; |
1641 | } else if (!multicast && | 1795 | } else if (!multicast && |
1642 | compare_ether_addr(sdata->dev->dev_addr, | 1796 | compare_ether_addr(sdata->dev->dev_addr, |
1643 | hdr->addr1) != 0) { | 1797 | hdr->addr1) != 0) { |
1644 | if (!(sdata->dev->flags & IFF_PROMISC)) | 1798 | if (!(sdata->dev->flags & IFF_PROMISC)) |
1645 | return 0; | 1799 | return 0; |
1646 | rx->flags &= ~IEEE80211_TXRXD_RXRA_MATCH; | 1800 | rx->flags &= ~IEEE80211_RX_RA_MATCH; |
1647 | } | 1801 | } |
1648 | break; | 1802 | break; |
1649 | case IEEE80211_IF_TYPE_IBSS: | 1803 | case IEEE80211_IF_TYPE_IBSS: |
1650 | if (!bssid) | 1804 | if (!bssid) |
1651 | return 0; | 1805 | return 0; |
1652 | if (!ieee80211_bssid_match(bssid, sdata->u.sta.bssid)) { | 1806 | if ((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT && |
1653 | if (!(rx->flags & IEEE80211_TXRXD_RXIN_SCAN)) | 1807 | (rx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_BEACON) |
1808 | return 1; | ||
1809 | else if (!ieee80211_bssid_match(bssid, sdata->u.sta.bssid)) { | ||
1810 | if (!(rx->flags & IEEE80211_RX_IN_SCAN)) | ||
1654 | return 0; | 1811 | return 0; |
1655 | rx->flags &= ~IEEE80211_TXRXD_RXRA_MATCH; | 1812 | rx->flags &= ~IEEE80211_RX_RA_MATCH; |
1656 | } else if (!multicast && | 1813 | } else if (!multicast && |
1657 | compare_ether_addr(sdata->dev->dev_addr, | 1814 | compare_ether_addr(sdata->dev->dev_addr, |
1658 | hdr->addr1) != 0) { | 1815 | hdr->addr1) != 0) { |
1659 | if (!(sdata->dev->flags & IFF_PROMISC)) | 1816 | if (!(sdata->dev->flags & IFF_PROMISC)) |
1660 | return 0; | 1817 | return 0; |
1661 | rx->flags &= ~IEEE80211_TXRXD_RXRA_MATCH; | 1818 | rx->flags &= ~IEEE80211_RX_RA_MATCH; |
1662 | } else if (!rx->sta) | 1819 | } else if (!rx->sta) |
1663 | rx->sta = ieee80211_ibss_add_sta(sdata->dev, rx->skb, | 1820 | rx->sta = ieee80211_ibss_add_sta(sdata->dev, rx->skb, |
1664 | bssid, hdr->addr2); | 1821 | bssid, hdr->addr2); |
1665 | break; | 1822 | break; |
1823 | case IEEE80211_IF_TYPE_MESH_POINT: | ||
1824 | if (!multicast && | ||
1825 | compare_ether_addr(sdata->dev->dev_addr, | ||
1826 | hdr->addr1) != 0) { | ||
1827 | if (!(sdata->dev->flags & IFF_PROMISC)) | ||
1828 | return 0; | ||
1829 | |||
1830 | rx->flags &= ~IEEE80211_RX_RA_MATCH; | ||
1831 | } | ||
1832 | break; | ||
1666 | case IEEE80211_IF_TYPE_VLAN: | 1833 | case IEEE80211_IF_TYPE_VLAN: |
1667 | case IEEE80211_IF_TYPE_AP: | 1834 | case IEEE80211_IF_TYPE_AP: |
1668 | if (!bssid) { | 1835 | if (!bssid) { |
@@ -1671,12 +1838,12 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata, | |||
1671 | return 0; | 1838 | return 0; |
1672 | } else if (!ieee80211_bssid_match(bssid, | 1839 | } else if (!ieee80211_bssid_match(bssid, |
1673 | sdata->dev->dev_addr)) { | 1840 | sdata->dev->dev_addr)) { |
1674 | if (!(rx->flags & IEEE80211_TXRXD_RXIN_SCAN)) | 1841 | if (!(rx->flags & IEEE80211_RX_IN_SCAN)) |
1675 | return 0; | 1842 | return 0; |
1676 | rx->flags &= ~IEEE80211_TXRXD_RXRA_MATCH; | 1843 | rx->flags &= ~IEEE80211_RX_RA_MATCH; |
1677 | } | 1844 | } |
1678 | if (sdata->dev == sdata->local->mdev && | 1845 | if (sdata->dev == sdata->local->mdev && |
1679 | !(rx->flags & IEEE80211_TXRXD_RXIN_SCAN)) | 1846 | !(rx->flags & IEEE80211_RX_IN_SCAN)) |
1680 | /* do not receive anything via | 1847 | /* do not receive anything via |
1681 | * master device when not scanning */ | 1848 | * master device when not scanning */ |
1682 | return 0; | 1849 | return 0; |
@@ -1707,13 +1874,13 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata, | |||
1707 | static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, | 1874 | static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, |
1708 | struct sk_buff *skb, | 1875 | struct sk_buff *skb, |
1709 | struct ieee80211_rx_status *status, | 1876 | struct ieee80211_rx_status *status, |
1710 | u32 load) | 1877 | u32 load, |
1878 | struct ieee80211_rate *rate) | ||
1711 | { | 1879 | { |
1712 | struct ieee80211_local *local = hw_to_local(hw); | 1880 | struct ieee80211_local *local = hw_to_local(hw); |
1713 | struct ieee80211_sub_if_data *sdata; | 1881 | struct ieee80211_sub_if_data *sdata; |
1714 | struct sta_info *sta; | ||
1715 | struct ieee80211_hdr *hdr; | 1882 | struct ieee80211_hdr *hdr; |
1716 | struct ieee80211_txrx_data rx; | 1883 | struct ieee80211_rx_data rx; |
1717 | u16 type; | 1884 | u16 type; |
1718 | int prepares; | 1885 | int prepares; |
1719 | struct ieee80211_sub_if_data *prev = NULL; | 1886 | struct ieee80211_sub_if_data *prev = NULL; |
@@ -1725,42 +1892,33 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, | |||
1725 | rx.skb = skb; | 1892 | rx.skb = skb; |
1726 | rx.local = local; | 1893 | rx.local = local; |
1727 | 1894 | ||
1728 | rx.u.rx.status = status; | 1895 | rx.status = status; |
1729 | rx.u.rx.load = load; | 1896 | rx.load = load; |
1897 | rx.rate = rate; | ||
1730 | rx.fc = le16_to_cpu(hdr->frame_control); | 1898 | rx.fc = le16_to_cpu(hdr->frame_control); |
1731 | type = rx.fc & IEEE80211_FCTL_FTYPE; | 1899 | type = rx.fc & IEEE80211_FCTL_FTYPE; |
1732 | 1900 | ||
1733 | if (type == IEEE80211_FTYPE_DATA || type == IEEE80211_FTYPE_MGMT) | 1901 | if (type == IEEE80211_FTYPE_DATA || type == IEEE80211_FTYPE_MGMT) |
1734 | local->dot11ReceivedFragmentCount++; | 1902 | local->dot11ReceivedFragmentCount++; |
1735 | 1903 | ||
1736 | sta = rx.sta = sta_info_get(local, hdr->addr2); | 1904 | rx.sta = sta_info_get(local, hdr->addr2); |
1737 | if (sta) { | 1905 | if (rx.sta) { |
1738 | rx.dev = rx.sta->dev; | 1906 | rx.sdata = rx.sta->sdata; |
1739 | rx.sdata = IEEE80211_DEV_TO_SUB_IF(rx.dev); | 1907 | rx.dev = rx.sta->sdata->dev; |
1740 | } | 1908 | } |
1741 | 1909 | ||
1742 | if ((status->flag & RX_FLAG_MMIC_ERROR)) { | 1910 | if ((status->flag & RX_FLAG_MMIC_ERROR)) { |
1743 | ieee80211_rx_michael_mic_report(local->mdev, hdr, sta, &rx); | 1911 | ieee80211_rx_michael_mic_report(local->mdev, hdr, &rx); |
1744 | goto end; | 1912 | return; |
1745 | } | 1913 | } |
1746 | 1914 | ||
1747 | if (unlikely(local->sta_sw_scanning || local->sta_hw_scanning)) | 1915 | if (unlikely(local->sta_sw_scanning || local->sta_hw_scanning)) |
1748 | rx.flags |= IEEE80211_TXRXD_RXIN_SCAN; | 1916 | rx.flags |= IEEE80211_RX_IN_SCAN; |
1749 | 1917 | ||
1750 | if (__ieee80211_invoke_rx_handlers(local, local->rx_pre_handlers, &rx, | 1918 | ieee80211_parse_qos(&rx); |
1751 | sta) != TXRX_CONTINUE) | 1919 | ieee80211_verify_ip_alignment(&rx); |
1752 | goto end; | ||
1753 | skb = rx.skb; | ||
1754 | 1920 | ||
1755 | if (sta && !(sta->flags & (WLAN_STA_WDS | WLAN_STA_ASSOC_AP)) && | 1921 | skb = rx.skb; |
1756 | !atomic_read(&local->iff_promiscs) && | ||
1757 | !is_multicast_ether_addr(hdr->addr1)) { | ||
1758 | rx.flags |= IEEE80211_TXRXD_RXRA_MATCH; | ||
1759 | ieee80211_invoke_rx_handlers(local, local->rx_handlers, &rx, | ||
1760 | rx.sta); | ||
1761 | sta_info_put(sta); | ||
1762 | return; | ||
1763 | } | ||
1764 | 1922 | ||
1765 | list_for_each_entry_rcu(sdata, &local->interfaces, list) { | 1923 | list_for_each_entry_rcu(sdata, &local->interfaces, list) { |
1766 | if (!netif_running(sdata->dev)) | 1924 | if (!netif_running(sdata->dev)) |
@@ -1770,10 +1928,8 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, | |||
1770 | continue; | 1928 | continue; |
1771 | 1929 | ||
1772 | bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type); | 1930 | bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type); |
1773 | rx.flags |= IEEE80211_TXRXD_RXRA_MATCH; | 1931 | rx.flags |= IEEE80211_RX_RA_MATCH; |
1774 | prepares = prepare_for_handlers(sdata, bssid, &rx, hdr); | 1932 | prepares = prepare_for_handlers(sdata, bssid, &rx, hdr); |
1775 | /* prepare_for_handlers can change sta */ | ||
1776 | sta = rx.sta; | ||
1777 | 1933 | ||
1778 | if (!prepares) | 1934 | if (!prepares) |
1779 | continue; | 1935 | continue; |
@@ -1804,26 +1960,14 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, | |||
1804 | continue; | 1960 | continue; |
1805 | } | 1961 | } |
1806 | rx.fc = le16_to_cpu(hdr->frame_control); | 1962 | rx.fc = le16_to_cpu(hdr->frame_control); |
1807 | rx.skb = skb_new; | 1963 | ieee80211_invoke_rx_handlers(prev, &rx, skb_new); |
1808 | rx.dev = prev->dev; | ||
1809 | rx.sdata = prev; | ||
1810 | ieee80211_invoke_rx_handlers(local, local->rx_handlers, | ||
1811 | &rx, sta); | ||
1812 | prev = sdata; | 1964 | prev = sdata; |
1813 | } | 1965 | } |
1814 | if (prev) { | 1966 | if (prev) { |
1815 | rx.fc = le16_to_cpu(hdr->frame_control); | 1967 | rx.fc = le16_to_cpu(hdr->frame_control); |
1816 | rx.skb = skb; | 1968 | ieee80211_invoke_rx_handlers(prev, &rx, skb); |
1817 | rx.dev = prev->dev; | ||
1818 | rx.sdata = prev; | ||
1819 | ieee80211_invoke_rx_handlers(local, local->rx_handlers, | ||
1820 | &rx, sta); | ||
1821 | } else | 1969 | } else |
1822 | dev_kfree_skb(skb); | 1970 | dev_kfree_skb(skb); |
1823 | |||
1824 | end: | ||
1825 | if (sta) | ||
1826 | sta_info_put(sta); | ||
1827 | } | 1971 | } |
1828 | 1972 | ||
1829 | #define SEQ_MODULO 0x1000 | 1973 | #define SEQ_MODULO 0x1000 |
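Reordering operates on 12-bit sequence numbers, so every comparison and increment is taken modulo SEQ_MODULO. The helpers below are written from the usual definitions of this pattern and are meant as an illustration rather than a verbatim copy of the kernel's seq_inc()/seq_sub()/seq_less():

    #include <stdio.h>
    #include <stdint.h>

    #define SEQ_MODULO 0x1000
    #define SEQ_MASK   0xfff

    static uint16_t seq_inc(uint16_t sq)
    {
            return (sq + 1) & SEQ_MASK;
    }

    static uint16_t seq_sub(uint16_t sq1, uint16_t sq2)
    {
            return (sq1 - sq2) & SEQ_MASK;
    }

    static int seq_less(uint16_t sq1, uint16_t sq2)
    {
            return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
    }

    int main(void)
    {
            printf("%d\n", seq_inc(0xfff));     /* wraps to 0 */
            printf("%d\n", seq_sub(3, 0xffe));  /* distance across the wrap: 5 */
            printf("%d\n", seq_less(0xffe, 3)); /* 0xffe comes before 3: 1 */
            return 0;
    }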
@@ -1859,6 +2003,8 @@ u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw, | |||
1859 | u16 head_seq_num, buf_size; | 2003 | u16 head_seq_num, buf_size; |
1860 | int index; | 2004 | int index; |
1861 | u32 pkt_load; | 2005 | u32 pkt_load; |
2006 | struct ieee80211_supported_band *sband; | ||
2007 | struct ieee80211_rate *rate; | ||
1862 | 2008 | ||
1863 | buf_size = tid_agg_rx->buf_size; | 2009 | buf_size = tid_agg_rx->buf_size; |
1864 | head_seq_num = tid_agg_rx->head_seq_num; | 2010 | head_seq_num = tid_agg_rx->head_seq_num; |
@@ -1889,12 +2035,14 @@ u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw, | |||
1889 | memcpy(&status, | 2035 | memcpy(&status, |
1890 | tid_agg_rx->reorder_buf[index]->cb, | 2036 | tid_agg_rx->reorder_buf[index]->cb, |
1891 | sizeof(status)); | 2037 | sizeof(status)); |
2038 | sband = local->hw.wiphy->bands[status.band]; | ||
2039 | rate = &sband->bitrates[status.rate_idx]; | ||
1892 | pkt_load = ieee80211_rx_load_stats(local, | 2040 | pkt_load = ieee80211_rx_load_stats(local, |
1893 | tid_agg_rx->reorder_buf[index], | 2041 | tid_agg_rx->reorder_buf[index], |
1894 | &status); | 2042 | &status, rate); |
1895 | __ieee80211_rx_handle_packet(hw, | 2043 | __ieee80211_rx_handle_packet(hw, |
1896 | tid_agg_rx->reorder_buf[index], | 2044 | tid_agg_rx->reorder_buf[index], |
1897 | &status, pkt_load); | 2045 | &status, pkt_load, rate); |
1898 | tid_agg_rx->stored_mpdu_num--; | 2046 | tid_agg_rx->stored_mpdu_num--; |
1899 | tid_agg_rx->reorder_buf[index] = NULL; | 2047 | tid_agg_rx->reorder_buf[index] = NULL; |
1900 | } | 2048 | } |
@@ -1934,11 +2082,13 @@ u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw, | |||
1934 | /* release the reordered frame back to stack */ | 2082 | /* release the reordered frame back to stack */ |
1935 | memcpy(&status, tid_agg_rx->reorder_buf[index]->cb, | 2083 | memcpy(&status, tid_agg_rx->reorder_buf[index]->cb, |
1936 | sizeof(status)); | 2084 | sizeof(status)); |
2085 | sband = local->hw.wiphy->bands[status.band]; | ||
2086 | rate = &sband->bitrates[status.rate_idx]; | ||
1937 | pkt_load = ieee80211_rx_load_stats(local, | 2087 | pkt_load = ieee80211_rx_load_stats(local, |
1938 | tid_agg_rx->reorder_buf[index], | 2088 | tid_agg_rx->reorder_buf[index], |
1939 | &status); | 2089 | &status, rate); |
1940 | __ieee80211_rx_handle_packet(hw, tid_agg_rx->reorder_buf[index], | 2090 | __ieee80211_rx_handle_packet(hw, tid_agg_rx->reorder_buf[index], |
1941 | &status, pkt_load); | 2091 | &status, pkt_load, rate); |
1942 | tid_agg_rx->stored_mpdu_num--; | 2092 | tid_agg_rx->stored_mpdu_num--; |
1943 | tid_agg_rx->reorder_buf[index] = NULL; | 2093 | tid_agg_rx->reorder_buf[index] = NULL; |
1944 | tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num); | 2094 | tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num); |
@@ -1994,7 +2144,7 @@ static u8 ieee80211_rx_reorder_ampdu(struct ieee80211_local *local, | |||
1994 | /* if this mpdu is fragmented - terminate rx aggregation session */ | 2144 | /* if this mpdu is fragmented - terminate rx aggregation session */ |
1995 | sc = le16_to_cpu(hdr->seq_ctrl); | 2145 | sc = le16_to_cpu(hdr->seq_ctrl); |
1996 | if (sc & IEEE80211_SCTL_FRAG) { | 2146 | if (sc & IEEE80211_SCTL_FRAG) { |
1997 | ieee80211_sta_stop_rx_ba_session(sta->dev, sta->addr, | 2147 | ieee80211_sta_stop_rx_ba_session(sta->sdata->dev, sta->addr, |
1998 | tid, 0, WLAN_REASON_QSTA_REQUIRE_SETUP); | 2148 | tid, 0, WLAN_REASON_QSTA_REQUIRE_SETUP); |
1999 | ret = 1; | 2149 | ret = 1; |
2000 | goto end_reorder; | 2150 | goto end_reorder; |
@@ -2004,9 +2154,7 @@ static u8 ieee80211_rx_reorder_ampdu(struct ieee80211_local *local, | |||
2004 | mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4; | 2154 | mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4; |
2005 | ret = ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, | 2155 | ret = ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, |
2006 | mpdu_seq_num, 0); | 2156 | mpdu_seq_num, 0); |
2007 | end_reorder: | 2157 | end_reorder: |
2008 | if (sta) | ||
2009 | sta_info_put(sta); | ||
2010 | return ret; | 2158 | return ret; |
2011 | } | 2159 | } |
2012 | 2160 | ||
@@ -2019,6 +2167,25 @@ void __ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb, | |||
2019 | { | 2167 | { |
2020 | struct ieee80211_local *local = hw_to_local(hw); | 2168 | struct ieee80211_local *local = hw_to_local(hw); |
2021 | u32 pkt_load; | 2169 | u32 pkt_load; |
2170 | struct ieee80211_rate *rate = NULL; | ||
2171 | struct ieee80211_supported_band *sband; | ||
2172 | |||
2173 | if (status->band < 0 || | ||
2174 | status->band >= IEEE80211_NUM_BANDS) { | ||
2175 | WARN_ON(1); | ||
2176 | return; | ||
2177 | } | ||
2178 | |||
2179 | sband = local->hw.wiphy->bands[status->band]; | ||
2180 | |||
2181 | if (!sband || | ||
2182 | status->rate_idx < 0 || | ||
2183 | status->rate_idx >= sband->n_bitrates) { | ||
2184 | WARN_ON(1); | ||
2185 | return; | ||
2186 | } | ||
2187 | |||
2188 | rate = &sband->bitrates[status->rate_idx]; | ||
2022 | 2189 | ||
2023 | /* | 2190 | /* |
2024 | * key references and virtual interfaces are protected using RCU | 2191 | * key references and virtual interfaces are protected using RCU |
@@ -2033,17 +2200,17 @@ void __ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb, | |||
2033 | * if it was previously present. | 2200 | * if it was previously present. |
2034 | * Also, frames with less than 16 bytes are dropped. | 2201 | * Also, frames with less than 16 bytes are dropped. |
2035 | */ | 2202 | */ |
2036 | skb = ieee80211_rx_monitor(local, skb, status); | 2203 | skb = ieee80211_rx_monitor(local, skb, status, rate); |
2037 | if (!skb) { | 2204 | if (!skb) { |
2038 | rcu_read_unlock(); | 2205 | rcu_read_unlock(); |
2039 | return; | 2206 | return; |
2040 | } | 2207 | } |
2041 | 2208 | ||
2042 | pkt_load = ieee80211_rx_load_stats(local, skb, status); | 2209 | pkt_load = ieee80211_rx_load_stats(local, skb, status, rate); |
2043 | local->channel_use_raw += pkt_load; | 2210 | local->channel_use_raw += pkt_load; |
2044 | 2211 | ||
2045 | if (!ieee80211_rx_reorder_ampdu(local, skb)) | 2212 | if (!ieee80211_rx_reorder_ampdu(local, skb)) |
2046 | __ieee80211_rx_handle_packet(hw, skb, status, pkt_load); | 2213 | __ieee80211_rx_handle_packet(hw, skb, status, pkt_load, rate); |
2047 | 2214 | ||
2048 | rcu_read_unlock(); | 2215 | rcu_read_unlock(); |
2049 | } | 2216 | } |
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 1f74bd296357..3b84c16cf054 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c | |||
@@ -15,21 +15,52 @@ | |||
15 | #include <linux/skbuff.h> | 15 | #include <linux/skbuff.h> |
16 | #include <linux/if_arp.h> | 16 | #include <linux/if_arp.h> |
17 | #include <linux/timer.h> | 17 | #include <linux/timer.h> |
18 | #include <linux/rtnetlink.h> | ||
18 | 19 | ||
19 | #include <net/mac80211.h> | 20 | #include <net/mac80211.h> |
20 | #include "ieee80211_i.h" | 21 | #include "ieee80211_i.h" |
21 | #include "ieee80211_rate.h" | 22 | #include "ieee80211_rate.h" |
22 | #include "sta_info.h" | 23 | #include "sta_info.h" |
23 | #include "debugfs_sta.h" | 24 | #include "debugfs_sta.h" |
25 | #include "mesh.h" | ||
24 | 26 | ||
25 | /* Caller must hold local->sta_lock */ | 27 | /** |
26 | static void sta_info_hash_add(struct ieee80211_local *local, | 28 | * DOC: STA information lifetime rules |
27 | struct sta_info *sta) | 29 | * |
28 | { | 30 | * STA info structures (&struct sta_info) are managed in a hash table |
29 | sta->hnext = local->sta_hash[STA_HASH(sta->addr)]; | 31 | * for faster lookup and a list for iteration. They are managed using |
30 | local->sta_hash[STA_HASH(sta->addr)] = sta; | 32 | * RCU, i.e. access to the list and hash table is protected by RCU. |
31 | } | 33 | * |
32 | 34 | * Upon allocating a STA info structure with sta_info_alloc(), the caller owns | |
35 | * that structure. It must then either destroy it using sta_info_destroy() | ||
36 | * (which is pretty useless) or insert it into the hash table using | ||
37 | * sta_info_insert() which demotes the reference from ownership to a regular | ||
38 | * RCU-protected reference; if the function is called without protection by an | ||
39 | * RCU critical section the reference is instantly invalidated. | ||
40 | * | ||
41 | * Because there are debugfs entries for each station, and adding those | ||
42 | * must be able to sleep, it is also possible to "pin" a station entry, | ||
43 | * which means it can be removed from the hash table but not be freed. ||
44 | * See the comment in __sta_info_unlink() for more information. | ||
45 | * | ||
46 | * In order to remove a STA info structure, the caller needs to first | ||
47 | * unlink it (sta_info_unlink()) from the list and hash tables and | ||
48 | * then wait for an RCU synchronisation before it can be freed. Due to | ||
49 | * the pinning and the possibility of multiple callers trying to remove | ||
50 | * the same STA info at the same time, sta_info_unlink() can clear the | ||
51 | * STA info pointer it is passed to indicate that the STA info is owned | ||
52 | * by somebody else now. | ||
53 | * | ||
54 | * If sta_info_unlink() did not clear the pointer then the caller owns | ||
55 | * the STA info structure now and is responsible for destroying it with ||
56 | * a call to sta_info_destroy(), not before RCU synchronisation, of | ||
57 | * course. Note that sta_info_destroy() must be protected by the RTNL. | ||
58 | * | ||
59 | * In all other cases, there is no concept of ownership on a STA entry, | ||
60 | * each structure is owned by the global hash table/list until it is | ||
61 | * removed. All users of the structure need to be RCU protected so that | ||
62 | * the structure won't be freed before they are done using it. | ||
63 | */ | ||
33 | 64 | ||
34 | /* Caller must hold local->sta_lock */ | 65 | /* Caller must hold local->sta_lock */ |
35 | static int sta_info_hash_del(struct ieee80211_local *local, | 66 | static int sta_info_hash_del(struct ieee80211_local *local, |
@@ -41,159 +72,238 @@ static int sta_info_hash_del(struct ieee80211_local *local, | |||
41 | if (!s) | 72 | if (!s) |
42 | return -ENOENT; | 73 | return -ENOENT; |
43 | if (s == sta) { | 74 | if (s == sta) { |
44 | local->sta_hash[STA_HASH(sta->addr)] = s->hnext; | 75 | rcu_assign_pointer(local->sta_hash[STA_HASH(sta->addr)], |
76 | s->hnext); | ||
45 | return 0; | 77 | return 0; |
46 | } | 78 | } |
47 | 79 | ||
48 | while (s->hnext && s->hnext != sta) | 80 | while (s->hnext && s->hnext != sta) |
49 | s = s->hnext; | 81 | s = s->hnext; |
50 | if (s->hnext) { | 82 | if (s->hnext) { |
51 | s->hnext = sta->hnext; | 83 | rcu_assign_pointer(s->hnext, sta->hnext); |
52 | return 0; | 84 | return 0; |
53 | } | 85 | } |
54 | 86 | ||
55 | return -ENOENT; | 87 | return -ENOENT; |
56 | } | 88 | } |
57 | 89 | ||
58 | struct sta_info *sta_info_get(struct ieee80211_local *local, u8 *addr) | 90 | /* protected by RCU */ |
91 | static struct sta_info *__sta_info_find(struct ieee80211_local *local, | ||
92 | u8 *addr) | ||
59 | { | 93 | { |
60 | struct sta_info *sta; | 94 | struct sta_info *sta; |
61 | 95 | ||
62 | read_lock_bh(&local->sta_lock); | 96 | sta = rcu_dereference(local->sta_hash[STA_HASH(addr)]); |
63 | sta = local->sta_hash[STA_HASH(addr)]; | ||
64 | while (sta) { | 97 | while (sta) { |
65 | if (memcmp(sta->addr, addr, ETH_ALEN) == 0) { | 98 | if (compare_ether_addr(sta->addr, addr) == 0) |
66 | __sta_info_get(sta); | ||
67 | break; | 99 | break; |
68 | } | 100 | sta = rcu_dereference(sta->hnext); |
69 | sta = sta->hnext; | ||
70 | } | 101 | } |
71 | read_unlock_bh(&local->sta_lock); | ||
72 | |||
73 | return sta; | 102 | return sta; |
74 | } | 103 | } |
104 | |||
105 | struct sta_info *sta_info_get(struct ieee80211_local *local, u8 *addr) | ||
106 | { | ||
107 | return __sta_info_find(local, addr); | ||
108 | } | ||
75 | EXPORT_SYMBOL(sta_info_get); | 109 | EXPORT_SYMBOL(sta_info_get); |
76 | 110 | ||
77 | int sta_info_min_txrate_get(struct ieee80211_local *local) | 111 | struct sta_info *sta_info_get_by_idx(struct ieee80211_local *local, int idx, |
112 | struct net_device *dev) | ||
78 | { | 113 | { |
79 | struct sta_info *sta; | 114 | struct sta_info *sta; |
80 | struct ieee80211_hw_mode *mode; | 115 | int i = 0; |
81 | int min_txrate = 9999999; | 116 | |
82 | int i; | 117 | list_for_each_entry_rcu(sta, &local->sta_list, list) { |
83 | 118 | if (dev && dev != sta->sdata->dev) | |
84 | read_lock_bh(&local->sta_lock); | 119 | continue; |
85 | mode = local->oper_hw_mode; | 120 | if (i < idx) { |
86 | for (i = 0; i < STA_HASH_SIZE; i++) { | 121 | ++i; |
87 | sta = local->sta_hash[i]; | 122 | continue; |
88 | while (sta) { | ||
89 | if (sta->txrate < min_txrate) | ||
90 | min_txrate = sta->txrate; | ||
91 | sta = sta->hnext; | ||
92 | } | 123 | } |
124 | return sta; | ||
93 | } | 125 | } |
94 | read_unlock_bh(&local->sta_lock); | ||
95 | if (min_txrate == 9999999) | ||
96 | min_txrate = 0; | ||
97 | 126 | ||
98 | return mode->rates[min_txrate].rate; | 127 | return NULL; |
99 | } | 128 | } |
100 | 129 | ||
101 | 130 | void sta_info_destroy(struct sta_info *sta) | |
102 | static void sta_info_release(struct kref *kref) | ||
103 | { | 131 | { |
104 | struct sta_info *sta = container_of(kref, struct sta_info, kref); | ||
105 | struct ieee80211_local *local = sta->local; | 132 | struct ieee80211_local *local = sta->local; |
106 | struct sk_buff *skb; | 133 | struct sk_buff *skb; |
107 | int i; | 134 | int i; |
135 | DECLARE_MAC_BUF(mbuf); | ||
136 | |||
137 | if (!sta) | ||
138 | return; | ||
139 | |||
140 | ASSERT_RTNL(); | ||
141 | might_sleep(); | ||
142 | |||
143 | rate_control_remove_sta_debugfs(sta); | ||
144 | ieee80211_sta_debugfs_remove(sta); | ||
145 | |||
146 | #ifdef CONFIG_MAC80211_MESH | ||
147 | if (ieee80211_vif_is_mesh(&sta->sdata->vif)) | ||
148 | mesh_plink_deactivate(sta); | ||
149 | #endif | ||
150 | |||
151 | /* | ||
152 | * NOTE: This will call synchronize_rcu() internally to | ||
153 | * make sure no key references can be in use. We rely on | ||
154 | * that here for the mesh code! | ||
155 | */ | ||
156 | ieee80211_key_free(sta->key); | ||
157 | WARN_ON(sta->key); | ||
158 | |||
159 | #ifdef CONFIG_MAC80211_MESH | ||
160 | if (ieee80211_vif_is_mesh(&sta->sdata->vif)) | ||
161 | del_timer_sync(&sta->plink_timer); | ||
162 | #endif | ||
108 | 163 | ||
109 | /* free sta structure; it has already been removed from | ||
110 | * hash table etc. external structures. Make sure that all | ||
111 | * buffered frames are release (one might have been added | ||
112 | * after sta_info_free() was called). */ | ||
113 | while ((skb = skb_dequeue(&sta->ps_tx_buf)) != NULL) { | 164 | while ((skb = skb_dequeue(&sta->ps_tx_buf)) != NULL) { |
114 | local->total_ps_buffered--; | 165 | local->total_ps_buffered--; |
115 | dev_kfree_skb_any(skb); | 166 | dev_kfree_skb_any(skb); |
116 | } | 167 | } |
117 | while ((skb = skb_dequeue(&sta->tx_filtered)) != NULL) { | 168 | |
169 | while ((skb = skb_dequeue(&sta->tx_filtered)) != NULL) | ||
118 | dev_kfree_skb_any(skb); | 170 | dev_kfree_skb_any(skb); |
119 | } | 171 | |
120 | for (i = 0; i < STA_TID_NUM; i++) | 172 | for (i = 0; i < STA_TID_NUM; i++) { |
121 | del_timer_sync(&sta->ampdu_mlme.tid_rx[i].session_timer); | 173 | del_timer_sync(&sta->ampdu_mlme.tid_rx[i].session_timer); |
174 | del_timer_sync(&sta->ampdu_mlme.tid_tx[i].addba_resp_timer); | ||
175 | } | ||
122 | rate_control_free_sta(sta->rate_ctrl, sta->rate_ctrl_priv); | 176 | rate_control_free_sta(sta->rate_ctrl, sta->rate_ctrl_priv); |
123 | rate_control_put(sta->rate_ctrl); | 177 | rate_control_put(sta->rate_ctrl); |
178 | |||
179 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | ||
180 | printk(KERN_DEBUG "%s: Destroyed STA %s\n", | ||
181 | wiphy_name(local->hw.wiphy), print_mac(mbuf, sta->addr)); | ||
182 | #endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ | ||
183 | |||
124 | kfree(sta); | 184 | kfree(sta); |
125 | } | 185 | } |
126 | 186 | ||
127 | 187 | ||
128 | void sta_info_put(struct sta_info *sta) | 188 | /* Caller must hold local->sta_lock */ |
189 | static void sta_info_hash_add(struct ieee80211_local *local, | ||
190 | struct sta_info *sta) | ||
129 | { | 191 | { |
130 | kref_put(&sta->kref, sta_info_release); | 192 | sta->hnext = local->sta_hash[STA_HASH(sta->addr)]; |
193 | rcu_assign_pointer(local->sta_hash[STA_HASH(sta->addr)], sta); | ||
131 | } | 194 | } |
132 | EXPORT_SYMBOL(sta_info_put); | ||
133 | 195 | ||
134 | 196 | struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, | |
135 | struct sta_info * sta_info_add(struct ieee80211_local *local, | 197 | u8 *addr, gfp_t gfp) |
136 | struct net_device *dev, u8 *addr, gfp_t gfp) | ||
137 | { | 198 | { |
199 | struct ieee80211_local *local = sdata->local; | ||
138 | struct sta_info *sta; | 200 | struct sta_info *sta; |
139 | int i; | 201 | int i; |
140 | DECLARE_MAC_BUF(mac); | 202 | DECLARE_MAC_BUF(mbuf); |
141 | 203 | ||
142 | sta = kzalloc(sizeof(*sta), gfp); | 204 | sta = kzalloc(sizeof(*sta), gfp); |
143 | if (!sta) | 205 | if (!sta) |
144 | return NULL; | 206 | return NULL; |
145 | 207 | ||
146 | kref_init(&sta->kref); | 208 | memcpy(sta->addr, addr, ETH_ALEN); |
209 | sta->local = local; | ||
210 | sta->sdata = sdata; | ||
147 | 211 | ||
148 | sta->rate_ctrl = rate_control_get(local->rate_ctrl); | 212 | sta->rate_ctrl = rate_control_get(local->rate_ctrl); |
149 | sta->rate_ctrl_priv = rate_control_alloc_sta(sta->rate_ctrl, gfp); | 213 | sta->rate_ctrl_priv = rate_control_alloc_sta(sta->rate_ctrl, |
214 | gfp); | ||
150 | if (!sta->rate_ctrl_priv) { | 215 | if (!sta->rate_ctrl_priv) { |
151 | rate_control_put(sta->rate_ctrl); | 216 | rate_control_put(sta->rate_ctrl); |
152 | kfree(sta); | 217 | kfree(sta); |
153 | return NULL; | 218 | return NULL; |
154 | } | 219 | } |
155 | 220 | ||
156 | memcpy(sta->addr, addr, ETH_ALEN); | ||
157 | sta->local = local; | ||
158 | sta->dev = dev; | ||
159 | spin_lock_init(&sta->ampdu_mlme.ampdu_rx); | 221 | spin_lock_init(&sta->ampdu_mlme.ampdu_rx); |
222 | spin_lock_init(&sta->ampdu_mlme.ampdu_tx); | ||
160 | for (i = 0; i < STA_TID_NUM; i++) { | 223 | for (i = 0; i < STA_TID_NUM; i++) { |
161 | /* timer_to_tid must be initialized with identity mapping to | 224 | /* timer_to_tid must be initialized with identity mapping to |
162 | * enable session_timer's data differentiation. Refer to | 225 | * enable session_timer's data differentiation. Refer to |
163 | * sta_rx_agg_session_timer_expired for usage */ | 226 | * sta_rx_agg_session_timer_expired for usage */ |
164 | sta->timer_to_tid[i] = i; | 227 | sta->timer_to_tid[i] = i; |
228 | /* tid to tx queue: initialize according to HW (0 is valid) */ | ||
229 | sta->tid_to_tx_q[i] = local->hw.queues; | ||
165 | /* rx timers */ | 230 | /* rx timers */ |
166 | sta->ampdu_mlme.tid_rx[i].session_timer.function = | 231 | sta->ampdu_mlme.tid_rx[i].session_timer.function = |
167 | sta_rx_agg_session_timer_expired; | 232 | sta_rx_agg_session_timer_expired; |
168 | sta->ampdu_mlme.tid_rx[i].session_timer.data = | 233 | sta->ampdu_mlme.tid_rx[i].session_timer.data = |
169 | (unsigned long)&sta->timer_to_tid[i]; | 234 | (unsigned long)&sta->timer_to_tid[i]; |
170 | init_timer(&sta->ampdu_mlme.tid_rx[i].session_timer); | 235 | init_timer(&sta->ampdu_mlme.tid_rx[i].session_timer); |
236 | /* tx timers */ | ||
237 | sta->ampdu_mlme.tid_tx[i].addba_resp_timer.function = | ||
238 | sta_addba_resp_timer_expired; | ||
239 | sta->ampdu_mlme.tid_tx[i].addba_resp_timer.data = | ||
240 | (unsigned long)&sta->timer_to_tid[i]; | ||
241 | init_timer(&sta->ampdu_mlme.tid_tx[i].addba_resp_timer); | ||
171 | } | 242 | } |
172 | skb_queue_head_init(&sta->ps_tx_buf); | 243 | skb_queue_head_init(&sta->ps_tx_buf); |
173 | skb_queue_head_init(&sta->tx_filtered); | 244 | skb_queue_head_init(&sta->tx_filtered); |
174 | __sta_info_get(sta); /* sta used by caller, decremented by | 245 | |
175 | * sta_info_put() */ | 246 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG |
176 | write_lock_bh(&local->sta_lock); | 247 | printk(KERN_DEBUG "%s: Allocated STA %s\n", |
248 | wiphy_name(local->hw.wiphy), print_mac(mbuf, sta->addr)); | ||
249 | #endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ | ||
250 | |||
251 | #ifdef CONFIG_MAC80211_MESH | ||
252 | sta->plink_state = PLINK_LISTEN; | ||
253 | spin_lock_init(&sta->plink_lock); | ||
254 | init_timer(&sta->plink_timer); | ||
255 | #endif | ||
256 | |||
257 | return sta; | ||
258 | } | ||
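The identity mapping initialised in the loop above is what lets a single-argument timer callback recover both the TID and the owning sta_info. A hedged sketch of that recovery, modelled on sta_rx_agg_session_timer_expired (the function name here is hypothetical):

static void example_tid_timer_expired(unsigned long data)
{
	/* data is &sta->timer_to_tid[tid], and *ptid == tid */
	u8 *ptid = (u8 *)data;
	u8 *timer_to_id = ptid - *ptid;		/* start of timer_to_tid[] */
	struct sta_info *sta = container_of(timer_to_id, struct sta_info,
					    timer_to_tid[0]);

	/* ... tear down the aggregation session for TID *ptid ... */
}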
259 | |||
260 | int sta_info_insert(struct sta_info *sta) | ||
261 | { | ||
262 | struct ieee80211_local *local = sta->local; | ||
263 | struct ieee80211_sub_if_data *sdata = sta->sdata; | ||
264 | unsigned long flags; | ||
265 | DECLARE_MAC_BUF(mac); | ||
266 | |||
267 | /* | ||
268 | * Can't be a WARN_ON because it can be triggered through a race: | ||
269 | * something inserts a STA (on one CPU) without holding the RTNL | ||
270 | * and another CPU turns off the net device. | ||
271 | */ | ||
272 | if (unlikely(!netif_running(sdata->dev))) | ||
273 | return -ENETDOWN; | ||
274 | |||
275 | if (WARN_ON(compare_ether_addr(sta->addr, sdata->dev->dev_addr) == 0)) | ||
276 | return -EINVAL; | ||
277 | |||
278 | if (WARN_ON(is_multicast_ether_addr(sta->addr))) | ||
279 | return -EINVAL; | ||
280 | |||
281 | spin_lock_irqsave(&local->sta_lock, flags); | ||
282 | /* check if STA exists already */ | ||
283 | if (__sta_info_find(local, sta->addr)) { | ||
284 | spin_unlock_irqrestore(&local->sta_lock, flags); | ||
285 | return -EEXIST; | ||
286 | } | ||
177 | list_add(&sta->list, &local->sta_list); | 287 | list_add(&sta->list, &local->sta_list); |
178 | local->num_sta++; | 288 | local->num_sta++; |
179 | sta_info_hash_add(local, sta); | 289 | sta_info_hash_add(local, sta); |
180 | if (local->ops->sta_notify) { | ||
181 | struct ieee80211_sub_if_data *sdata; | ||
182 | 290 | ||
183 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 291 | /* notify driver */ |
292 | if (local->ops->sta_notify) { | ||
184 | if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN) | 293 | if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN) |
185 | sdata = sdata->u.vlan.ap; | 294 | sdata = sdata->u.vlan.ap; |
186 | 295 | ||
187 | local->ops->sta_notify(local_to_hw(local), &sdata->vif, | 296 | local->ops->sta_notify(local_to_hw(local), &sdata->vif, |
188 | STA_NOTIFY_ADD, addr); | 297 | STA_NOTIFY_ADD, sta->addr); |
189 | } | 298 | } |
190 | write_unlock_bh(&local->sta_lock); | ||
191 | 299 | ||
192 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | 300 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG |
193 | printk(KERN_DEBUG "%s: Added STA %s\n", | 301 | printk(KERN_DEBUG "%s: Inserted STA %s\n", |
194 | wiphy_name(local->hw.wiphy), print_mac(mac, addr)); | 302 | wiphy_name(local->hw.wiphy), print_mac(mac, sta->addr)); |
195 | #endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ | 303 | #endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ |
196 | 304 | ||
305 | spin_unlock_irqrestore(&local->sta_lock, flags); | ||
306 | |||
197 | #ifdef CONFIG_MAC80211_DEBUGFS | 307 | #ifdef CONFIG_MAC80211_DEBUGFS |
198 | /* debugfs entry adding might sleep, so schedule process | 308 | /* debugfs entry adding might sleep, so schedule process |
199 | * context task for adding entry for STAs that do not yet | 309 | * context task for adding entry for STAs that do not yet |
@@ -201,77 +311,185 @@ struct sta_info * sta_info_add(struct ieee80211_local *local, | |||
201 | queue_work(local->hw.workqueue, &local->sta_debugfs_add); | 311 | queue_work(local->hw.workqueue, &local->sta_debugfs_add); |
202 | #endif | 312 | #endif |
203 | 313 | ||
204 | return sta; | 314 | if (ieee80211_vif_is_mesh(&sdata->vif)) |
315 | mesh_accept_plinks_update(sdata); | ||
316 | |||
317 | return 0; | ||
205 | } | 318 | } |
206 | 319 | ||
207 | /* Caller must hold local->sta_lock */ | 320 | static inline void __bss_tim_set(struct ieee80211_if_ap *bss, u16 aid) |
208 | void sta_info_remove(struct sta_info *sta) | ||
209 | { | 321 | { |
210 | struct ieee80211_local *local = sta->local; | 322 | /* |
211 | struct ieee80211_sub_if_data *sdata; | 323 | * This format has been mandated by the IEEE specifications, |
324 | * so this line may not be changed to use the __set_bit() format. | ||
325 | */ | ||
326 | bss->tim[aid / 8] |= (1 << (aid % 8)); | ||
327 | } | ||
212 | 328 | ||
213 | /* don't do anything if we've been removed already */ | 329 | static inline void __bss_tim_clear(struct ieee80211_if_ap *bss, u16 aid) |
214 | if (sta_info_hash_del(local, sta)) | 330 | { |
215 | return; | 331 | /* |
332 | * This format has been mandated by the IEEE specifications, | ||
333 | * so this line may not be changed to use the __clear_bit() format. | ||
334 | */ | ||
335 | bss->tim[aid / 8] &= ~(1 << (aid % 8)); | ||
336 | } | ||
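For concreteness, the octet/bit placement the two helpers above implement, worked through for an example AID (illustrative only):

/*
 * aid = 13:  octet index 13 / 8 = 1,  bit position 13 % 8 = 5
 *
 *   __bss_tim_set(bss, 13)    =>   bss->tim[1] |=  (1 << 5);
 *   __bss_tim_clear(bss, 13)  =>   bss->tim[1] &= ~(1 << 5);
 */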
216 | 337 | ||
217 | list_del(&sta->list); | 338 | static void __sta_info_set_tim_bit(struct ieee80211_if_ap *bss, |
218 | sdata = IEEE80211_DEV_TO_SUB_IF(sta->dev); | 339 | struct sta_info *sta) |
219 | if (sta->flags & WLAN_STA_PS) { | 340 | { |
220 | sta->flags &= ~WLAN_STA_PS; | 341 | if (bss) |
221 | if (sdata->bss) | 342 | __bss_tim_set(bss, sta->aid); |
222 | atomic_dec(&sdata->bss->num_sta_ps); | 343 | if (sta->local->ops->set_tim) { |
344 | sta->local->tim_in_locked_section = true; | ||
345 | sta->local->ops->set_tim(local_to_hw(sta->local), sta->aid, 1); | ||
346 | sta->local->tim_in_locked_section = false; | ||
223 | } | 347 | } |
224 | local->num_sta--; | 348 | } |
225 | sta_info_remove_aid_ptr(sta); | 349 | |
350 | void sta_info_set_tim_bit(struct sta_info *sta) | ||
351 | { | ||
352 | unsigned long flags; | ||
226 | 353 | ||
354 | spin_lock_irqsave(&sta->local->sta_lock, flags); | ||
355 | __sta_info_set_tim_bit(sta->sdata->bss, sta); | ||
356 | spin_unlock_irqrestore(&sta->local->sta_lock, flags); | ||
227 | } | 357 | } |
228 | 358 | ||
229 | void sta_info_free(struct sta_info *sta) | 359 | static void __sta_info_clear_tim_bit(struct ieee80211_if_ap *bss, |
360 | struct sta_info *sta) | ||
230 | { | 361 | { |
231 | struct sk_buff *skb; | 362 | if (bss) |
232 | struct ieee80211_local *local = sta->local; | 363 | __bss_tim_clear(bss, sta->aid); |
233 | DECLARE_MAC_BUF(mac); | 364 | if (sta->local->ops->set_tim) { |
365 | sta->local->tim_in_locked_section = true; | ||
366 | sta->local->ops->set_tim(local_to_hw(sta->local), sta->aid, 0); | ||
367 | sta->local->tim_in_locked_section = false; | ||
368 | } | ||
369 | } | ||
234 | 370 | ||
235 | might_sleep(); | 371 | void sta_info_clear_tim_bit(struct sta_info *sta) |
372 | { | ||
373 | unsigned long flags; | ||
236 | 374 | ||
237 | write_lock_bh(&local->sta_lock); | 375 | spin_lock_irqsave(&sta->local->sta_lock, flags); |
238 | sta_info_remove(sta); | 376 | __sta_info_clear_tim_bit(sta->sdata->bss, sta); |
239 | write_unlock_bh(&local->sta_lock); | 377 | spin_unlock_irqrestore(&sta->local->sta_lock, flags); |
378 | } | ||
240 | 379 | ||
241 | while ((skb = skb_dequeue(&sta->ps_tx_buf)) != NULL) { | 380 | /* |
242 | local->total_ps_buffered--; | 381 | * See comment in __sta_info_unlink, |
243 | dev_kfree_skb(skb); | 382 | * caller must hold local->sta_lock. |
244 | } | 383 | */ |
245 | while ((skb = skb_dequeue(&sta->tx_filtered)) != NULL) { | 384 | static void __sta_info_pin(struct sta_info *sta) |
246 | dev_kfree_skb(skb); | 385 | { |
247 | } | 386 | WARN_ON(sta->pin_status != STA_INFO_PIN_STAT_NORMAL); |
387 | sta->pin_status = STA_INFO_PIN_STAT_PINNED; | ||
388 | } | ||
389 | |||
390 | /* | ||
391 | * See comment in __sta_info_unlink, returns sta if it | ||
392 | * needs to be destroyed. | ||
393 | */ | ||
394 | static struct sta_info *__sta_info_unpin(struct sta_info *sta) | ||
395 | { | ||
396 | struct sta_info *ret = NULL; | ||
397 | unsigned long flags; | ||
248 | 398 | ||
399 | spin_lock_irqsave(&sta->local->sta_lock, flags); | ||
400 | WARN_ON(sta->pin_status != STA_INFO_PIN_STAT_DESTROY && | ||
401 | sta->pin_status != STA_INFO_PIN_STAT_PINNED); | ||
402 | if (sta->pin_status == STA_INFO_PIN_STAT_DESTROY) | ||
403 | ret = sta; | ||
404 | sta->pin_status = STA_INFO_PIN_STAT_NORMAL; | ||
405 | spin_unlock_irqrestore(&sta->local->sta_lock, flags); | ||
406 | |||
407 | return ret; | ||
408 | } | ||
409 | |||
410 | static void __sta_info_unlink(struct sta_info **sta) | ||
411 | { | ||
412 | struct ieee80211_local *local = (*sta)->local; | ||
413 | struct ieee80211_sub_if_data *sdata = (*sta)->sdata; | ||
249 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | 414 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG |
250 | printk(KERN_DEBUG "%s: Removed STA %s\n", | 415 | DECLARE_MAC_BUF(mbuf); |
251 | wiphy_name(local->hw.wiphy), print_mac(mac, sta->addr)); | 416 | #endif |
252 | #endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ | 417 | /* |
418 | * pull caller's reference if we're already gone. | ||
419 | */ | ||
420 | if (sta_info_hash_del(local, *sta)) { | ||
421 | *sta = NULL; | ||
422 | return; | ||
423 | } | ||
253 | 424 | ||
254 | ieee80211_key_free(sta->key); | 425 | /* |
255 | sta->key = NULL; | 426 | * Also pull caller's reference if the STA is pinned by the |
427 | * task that is adding the debugfs entries. In that case, we | ||
428 | * leave the STA "to be freed". | ||
429 | * | ||
430 | * The rules are not trivial, but not too complex either: | ||
431 | * (1) pin_status is only modified under the sta_lock | ||
432 | * (2) sta_info_debugfs_add_work() will set the status | ||
433 | * to PINNED when it found an item that needs a new | ||
434 | * debugfs directory created. In that case, that item | ||
435 | * must not be freed although all *RCU* users are done | ||
436 | * with it. Hence, we tell the caller of _unlink() | ||
437 | * that the item is already gone (as can happen when | ||
438 | * two tasks try to unlink/destroy at the same time) | ||
439 | * (3) We set the pin_status to DESTROY here when we | ||
440 | * find such an item. | ||
441 | * (4) sta_info_debugfs_add_work() will reset the pin_status | ||
442 | * from PINNED to NORMAL when it is done with the item, | ||
443 | * but will check for DESTROY before resetting it in | ||
444 | * which case it will free the item. | ||
445 | */ | ||
446 | if ((*sta)->pin_status == STA_INFO_PIN_STAT_PINNED) { | ||
447 | (*sta)->pin_status = STA_INFO_PIN_STAT_DESTROY; | ||
448 | *sta = NULL; | ||
449 | return; | ||
450 | } | ||
256 | 451 | ||
257 | if (local->ops->sta_notify) { | 452 | list_del(&(*sta)->list); |
258 | struct ieee80211_sub_if_data *sdata; | 453 | |
454 | if ((*sta)->flags & WLAN_STA_PS) { | ||
455 | (*sta)->flags &= ~WLAN_STA_PS; | ||
456 | if (sdata->bss) | ||
457 | atomic_dec(&sdata->bss->num_sta_ps); | ||
458 | __sta_info_clear_tim_bit(sdata->bss, *sta); | ||
459 | } | ||
259 | 460 | ||
260 | sdata = IEEE80211_DEV_TO_SUB_IF(sta->dev); | 461 | local->num_sta--; |
261 | 462 | ||
463 | if (local->ops->sta_notify) { | ||
262 | if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN) | 464 | if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN) |
263 | sdata = sdata->u.vlan.ap; | 465 | sdata = sdata->u.vlan.ap; |
264 | 466 | ||
265 | local->ops->sta_notify(local_to_hw(local), &sdata->vif, | 467 | local->ops->sta_notify(local_to_hw(local), &sdata->vif, |
266 | STA_NOTIFY_REMOVE, sta->addr); | 468 | STA_NOTIFY_REMOVE, (*sta)->addr); |
267 | } | 469 | } |
268 | 470 | ||
269 | rate_control_remove_sta_debugfs(sta); | 471 | if (ieee80211_vif_is_mesh(&sdata->vif)) { |
270 | ieee80211_sta_debugfs_remove(sta); | 472 | mesh_accept_plinks_update(sdata); |
473 | #ifdef CONFIG_MAC80211_MESH | ||
474 | del_timer(&(*sta)->plink_timer); | ||
475 | #endif | ||
476 | } | ||
271 | 477 | ||
272 | sta_info_put(sta); | 478 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG |
479 | printk(KERN_DEBUG "%s: Removed STA %s\n", | ||
480 | wiphy_name(local->hw.wiphy), print_mac(mbuf, (*sta)->addr)); | ||
481 | #endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ | ||
273 | } | 482 | } |
274 | 483 | ||
484 | void sta_info_unlink(struct sta_info **sta) | ||
485 | { | ||
486 | struct ieee80211_local *local = (*sta)->local; | ||
487 | unsigned long flags; | ||
488 | |||
489 | spin_lock_irqsave(&local->sta_lock, flags); | ||
490 | __sta_info_unlink(sta); | ||
491 | spin_unlock_irqrestore(&local->sta_lock, flags); | ||
492 | } | ||
275 | 493 | ||
276 | static inline int sta_info_buffer_expired(struct ieee80211_local *local, | 494 | static inline int sta_info_buffer_expired(struct ieee80211_local *local, |
277 | struct sta_info *sta, | 495 | struct sta_info *sta, |
@@ -299,6 +517,7 @@ static void sta_info_cleanup_expire_buffered(struct ieee80211_local *local, | |||
299 | { | 517 | { |
300 | unsigned long flags; | 518 | unsigned long flags; |
301 | struct sk_buff *skb; | 519 | struct sk_buff *skb; |
520 | struct ieee80211_sub_if_data *sdata; | ||
302 | DECLARE_MAC_BUF(mac); | 521 | DECLARE_MAC_BUF(mac); |
303 | 522 | ||
304 | if (skb_queue_empty(&sta->ps_tx_buf)) | 523 | if (skb_queue_empty(&sta->ps_tx_buf)) |
@@ -307,21 +526,23 @@ static void sta_info_cleanup_expire_buffered(struct ieee80211_local *local, | |||
307 | for (;;) { | 526 | for (;;) { |
308 | spin_lock_irqsave(&sta->ps_tx_buf.lock, flags); | 527 | spin_lock_irqsave(&sta->ps_tx_buf.lock, flags); |
309 | skb = skb_peek(&sta->ps_tx_buf); | 528 | skb = skb_peek(&sta->ps_tx_buf); |
310 | if (sta_info_buffer_expired(local, sta, skb)) { | 529 | if (sta_info_buffer_expired(local, sta, skb)) |
311 | skb = __skb_dequeue(&sta->ps_tx_buf); | 530 | skb = __skb_dequeue(&sta->ps_tx_buf); |
312 | if (skb_queue_empty(&sta->ps_tx_buf)) | 531 | else |
313 | sta->flags &= ~WLAN_STA_TIM; | ||
314 | } else | ||
315 | skb = NULL; | 532 | skb = NULL; |
316 | spin_unlock_irqrestore(&sta->ps_tx_buf.lock, flags); | 533 | spin_unlock_irqrestore(&sta->ps_tx_buf.lock, flags); |
317 | 534 | ||
318 | if (skb) { | 535 | if (!skb) |
319 | local->total_ps_buffered--; | ||
320 | printk(KERN_DEBUG "Buffered frame expired (STA " | ||
321 | "%s)\n", print_mac(mac, sta->addr)); | ||
322 | dev_kfree_skb(skb); | ||
323 | } else | ||
324 | break; | 536 | break; |
537 | |||
538 | sdata = sta->sdata; | ||
539 | local->total_ps_buffered--; | ||
540 | printk(KERN_DEBUG "Buffered frame expired (STA " | ||
541 | "%s)\n", print_mac(mac, sta->addr)); | ||
542 | dev_kfree_skb(skb); | ||
543 | |||
544 | if (skb_queue_empty(&sta->ps_tx_buf)) | ||
545 | sta_info_clear_tim_bit(sta); | ||
325 | } | 546 | } |
326 | } | 547 | } |
327 | 548 | ||
@@ -331,13 +552,10 @@ static void sta_info_cleanup(unsigned long data) | |||
331 | struct ieee80211_local *local = (struct ieee80211_local *) data; | 552 | struct ieee80211_local *local = (struct ieee80211_local *) data; |
332 | struct sta_info *sta; | 553 | struct sta_info *sta; |
333 | 554 | ||
334 | read_lock_bh(&local->sta_lock); | 555 | rcu_read_lock(); |
335 | list_for_each_entry(sta, &local->sta_list, list) { | 556 | list_for_each_entry_rcu(sta, &local->sta_list, list) |
336 | __sta_info_get(sta); | ||
337 | sta_info_cleanup_expire_buffered(local, sta); | 557 | sta_info_cleanup_expire_buffered(local, sta); |
338 | sta_info_put(sta); | 558 | rcu_read_unlock(); |
339 | } | ||
340 | read_unlock_bh(&local->sta_lock); | ||
341 | 559 | ||
342 | local->sta_cleanup.expires = | 560 | local->sta_cleanup.expires = |
343 | round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL); | 561 | round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL); |
@@ -345,37 +563,45 @@ static void sta_info_cleanup(unsigned long data) | |||
345 | } | 563 | } |
346 | 564 | ||
347 | #ifdef CONFIG_MAC80211_DEBUGFS | 565 | #ifdef CONFIG_MAC80211_DEBUGFS |
348 | static void sta_info_debugfs_add_task(struct work_struct *work) | 566 | static void sta_info_debugfs_add_work(struct work_struct *work) |
349 | { | 567 | { |
350 | struct ieee80211_local *local = | 568 | struct ieee80211_local *local = |
351 | container_of(work, struct ieee80211_local, sta_debugfs_add); | 569 | container_of(work, struct ieee80211_local, sta_debugfs_add); |
352 | struct sta_info *sta, *tmp; | 570 | struct sta_info *sta, *tmp; |
571 | unsigned long flags; | ||
353 | 572 | ||
354 | while (1) { | 573 | while (1) { |
355 | sta = NULL; | 574 | sta = NULL; |
356 | read_lock_bh(&local->sta_lock); | 575 | |
576 | spin_lock_irqsave(&local->sta_lock, flags); | ||
357 | list_for_each_entry(tmp, &local->sta_list, list) { | 577 | list_for_each_entry(tmp, &local->sta_list, list) { |
358 | if (!tmp->debugfs.dir) { | 578 | if (!tmp->debugfs.dir) { |
359 | sta = tmp; | 579 | sta = tmp; |
360 | __sta_info_get(sta); | 580 | __sta_info_pin(sta); |
361 | break; | 581 | break; |
362 | } | 582 | } |
363 | } | 583 | } |
364 | read_unlock_bh(&local->sta_lock); | 584 | spin_unlock_irqrestore(&local->sta_lock, flags); |
365 | 585 | ||
366 | if (!sta) | 586 | if (!sta) |
367 | break; | 587 | break; |
368 | 588 | ||
369 | ieee80211_sta_debugfs_add(sta); | 589 | ieee80211_sta_debugfs_add(sta); |
370 | rate_control_add_sta_debugfs(sta); | 590 | rate_control_add_sta_debugfs(sta); |
371 | sta_info_put(sta); | 591 | |
592 | sta = __sta_info_unpin(sta); | ||
593 | |||
594 | if (sta) { | ||
595 | synchronize_rcu(); | ||
596 | sta_info_destroy(sta); | ||
597 | } | ||
372 | } | 598 | } |
373 | } | 599 | } |
374 | #endif | 600 | #endif |
375 | 601 | ||
376 | void sta_info_init(struct ieee80211_local *local) | 602 | void sta_info_init(struct ieee80211_local *local) |
377 | { | 603 | { |
378 | rwlock_init(&local->sta_lock); | 604 | spin_lock_init(&local->sta_lock); |
379 | INIT_LIST_HEAD(&local->sta_list); | 605 | INIT_LIST_HEAD(&local->sta_list); |
380 | 606 | ||
381 | setup_timer(&local->sta_cleanup, sta_info_cleanup, | 607 | setup_timer(&local->sta_cleanup, sta_info_cleanup, |
@@ -384,7 +610,7 @@ void sta_info_init(struct ieee80211_local *local) | |||
384 | round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL); | 610 | round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL); |
385 | 611 | ||
386 | #ifdef CONFIG_MAC80211_DEBUGFS | 612 | #ifdef CONFIG_MAC80211_DEBUGFS |
387 | INIT_WORK(&local->sta_debugfs_add, sta_info_debugfs_add_task); | 613 | INIT_WORK(&local->sta_debugfs_add, sta_info_debugfs_add_work); |
388 | #endif | 614 | #endif |
389 | } | 615 | } |
390 | 616 | ||
@@ -400,44 +626,40 @@ void sta_info_stop(struct ieee80211_local *local) | |||
400 | sta_info_flush(local, NULL); | 626 | sta_info_flush(local, NULL); |
401 | } | 627 | } |
402 | 628 | ||
403 | void sta_info_remove_aid_ptr(struct sta_info *sta) | ||
404 | { | ||
405 | struct ieee80211_sub_if_data *sdata; | ||
406 | |||
407 | if (sta->aid <= 0) | ||
408 | return; | ||
409 | |||
410 | sdata = IEEE80211_DEV_TO_SUB_IF(sta->dev); | ||
411 | |||
412 | if (sdata->local->ops->set_tim) | ||
413 | sdata->local->ops->set_tim(local_to_hw(sdata->local), | ||
414 | sta->aid, 0); | ||
415 | if (sdata->bss) | ||
416 | __bss_tim_clear(sdata->bss, sta->aid); | ||
417 | } | ||
418 | |||
419 | |||
420 | /** | 629 | /** |
421 | * sta_info_flush - flush matching STA entries from the STA table | 630 | * sta_info_flush - flush matching STA entries from the STA table |
631 | * | ||
632 | * Returns the number of removed STA entries. | ||
633 | * | ||
422 | * @local: local interface data | 634 | * @local: local interface data |
423 | * @dev: matching rule for the net device (sta->dev) or %NULL to match all STAs | 635 | * @sdata: matching rule for the interface (sta->sdata) or %NULL to match all STAs |
424 | */ | 636 | */ |
425 | void sta_info_flush(struct ieee80211_local *local, struct net_device *dev) | 637 | int sta_info_flush(struct ieee80211_local *local, |
638 | struct ieee80211_sub_if_data *sdata) | ||
426 | { | 639 | { |
427 | struct sta_info *sta, *tmp; | 640 | struct sta_info *sta, *tmp; |
428 | LIST_HEAD(tmp_list); | 641 | LIST_HEAD(tmp_list); |
642 | int ret = 0; | ||
643 | unsigned long flags; | ||
429 | 644 | ||
430 | write_lock_bh(&local->sta_lock); | 645 | might_sleep(); |
431 | list_for_each_entry_safe(sta, tmp, &local->sta_list, list) | ||
432 | if (!dev || dev == sta->dev) { | ||
433 | __sta_info_get(sta); | ||
434 | sta_info_remove(sta); | ||
435 | list_add_tail(&sta->list, &tmp_list); | ||
436 | } | ||
437 | write_unlock_bh(&local->sta_lock); | ||
438 | 646 | ||
439 | list_for_each_entry_safe(sta, tmp, &tmp_list, list) { | 647 | spin_lock_irqsave(&local->sta_lock, flags); |
440 | sta_info_free(sta); | 648 | list_for_each_entry_safe(sta, tmp, &local->sta_list, list) { |
441 | sta_info_put(sta); | 649 | if (!sdata || sdata == sta->sdata) { |
650 | __sta_info_unlink(&sta); | ||
651 | if (sta) { | ||
652 | list_add_tail(&sta->list, &tmp_list); | ||
653 | ret++; | ||
654 | } | ||
655 | } | ||
442 | } | 656 | } |
657 | spin_unlock_irqrestore(&local->sta_lock, flags); | ||
658 | |||
659 | synchronize_rcu(); | ||
660 | |||
661 | list_for_each_entry_safe(sta, tmp, &tmp_list, list) | ||
662 | sta_info_destroy(sta); | ||
663 | |||
664 | return ret; | ||
443 | } | 665 | } |
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index 96fe3ed95038..f166c8039f2b 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h | |||
@@ -12,34 +12,74 @@ | |||
12 | #include <linux/list.h> | 12 | #include <linux/list.h> |
13 | #include <linux/types.h> | 13 | #include <linux/types.h> |
14 | #include <linux/if_ether.h> | 14 | #include <linux/if_ether.h> |
15 | #include <linux/kref.h> | ||
16 | #include "ieee80211_key.h" | 15 | #include "ieee80211_key.h" |
17 | 16 | ||
18 | /* Stations flags (struct sta_info::flags) */ | 17 | /** |
19 | #define WLAN_STA_AUTH BIT(0) | 18 | * enum ieee80211_sta_info_flags - Stations flags |
20 | #define WLAN_STA_ASSOC BIT(1) | 19 | * |
21 | #define WLAN_STA_PS BIT(2) | 20 | * These flags are used with &struct sta_info's @flags member. |
22 | #define WLAN_STA_TIM BIT(3) /* TIM bit is on for PS stations */ | 21 | * |
23 | #define WLAN_STA_PERM BIT(4) /* permanent; do not remove entry on expiration */ | 22 | * @WLAN_STA_AUTH: Station is authenticated. |
24 | #define WLAN_STA_AUTHORIZED BIT(5) /* If 802.1X is used, this flag is | 23 | * @WLAN_STA_ASSOC: Station is associated. |
25 | * controlling whether STA is authorized to | 24 | * @WLAN_STA_PS: Station is in power-save mode |
26 | * send and receive non-IEEE 802.1X frames | 25 | * @WLAN_STA_AUTHORIZED: Station is authorized to send/receive traffic. |
27 | */ | 26 | * This bit is always checked so needs to be enabled for all stations |
28 | #define WLAN_STA_SHORT_PREAMBLE BIT(7) | 27 | * when virtual port control is not in use. |
29 | /* whether this is an AP that we are associated with as a client */ | 28 | * @WLAN_STA_SHORT_PREAMBLE: Station is capable of receiving short-preamble |
30 | #define WLAN_STA_ASSOC_AP BIT(8) | 29 | * frames. |
31 | #define WLAN_STA_WME BIT(9) | 30 | * @WLAN_STA_ASSOC_AP: We're associated to that station, it is an AP. |
32 | #define WLAN_STA_WDS BIT(27) | 31 | * @WLAN_STA_WME: Station is a QoS-STA. |
32 | * @WLAN_STA_WDS: Station is one of our WDS peers. | ||
33 | * @WLAN_STA_PSPOLL: Station has just PS-polled us. | ||
34 | * @WLAN_STA_CLEAR_PS_FILT: Clear PS filter in hardware (using the | ||
35 | * IEEE80211_TXCTL_CLEAR_PS_FILT control flag) when the next | ||
36 | * frame to this station is transmitted. | ||
37 | */ | ||
38 | enum ieee80211_sta_info_flags { | ||
39 | WLAN_STA_AUTH = 1<<0, | ||
40 | WLAN_STA_ASSOC = 1<<1, | ||
41 | WLAN_STA_PS = 1<<2, | ||
42 | WLAN_STA_AUTHORIZED = 1<<3, | ||
43 | WLAN_STA_SHORT_PREAMBLE = 1<<4, | ||
44 | WLAN_STA_ASSOC_AP = 1<<5, | ||
45 | WLAN_STA_WME = 1<<6, | ||
46 | WLAN_STA_WDS = 1<<7, | ||
47 | WLAN_STA_PSPOLL = 1<<8, | ||
48 | WLAN_STA_CLEAR_PS_FILT = 1<<9, | ||
49 | }; | ||
33 | 50 | ||
34 | #define STA_TID_NUM 16 | 51 | #define STA_TID_NUM 16 |
35 | #define ADDBA_RESP_INTERVAL HZ | 52 | #define ADDBA_RESP_INTERVAL HZ |
53 | #define HT_AGG_MAX_RETRIES (0x3) | ||
36 | 54 | ||
37 | #define HT_AGG_STATE_INITIATOR_SHIFT (4) | 55 | #define HT_AGG_STATE_INITIATOR_SHIFT (4) |
38 | 56 | ||
57 | #define HT_ADDBA_REQUESTED_MSK BIT(0) | ||
58 | #define HT_ADDBA_DRV_READY_MSK BIT(1) | ||
59 | #define HT_ADDBA_RECEIVED_MSK BIT(2) | ||
39 | #define HT_AGG_STATE_REQ_STOP_BA_MSK BIT(3) | 60 | #define HT_AGG_STATE_REQ_STOP_BA_MSK BIT(3) |
40 | 61 | #define HT_AGG_STATE_INITIATOR_MSK BIT(HT_AGG_STATE_INITIATOR_SHIFT) | |
41 | #define HT_AGG_STATE_IDLE (0x0) | 62 | #define HT_AGG_STATE_IDLE (0x0) |
42 | #define HT_AGG_STATE_OPERATIONAL (0x7) | 63 | #define HT_AGG_STATE_OPERATIONAL (HT_ADDBA_REQUESTED_MSK | \ |
64 | HT_ADDBA_DRV_READY_MSK | \ | ||
65 | HT_ADDBA_RECEIVED_MSK) | ||
66 | |||
67 | /** | ||
68 | * struct tid_ampdu_tx - TID aggregation information (Tx). | ||
69 | * | ||
70 | * @state: TID's state in session state machine. | ||
71 | * @dialog_token: dialog token for aggregation session | ||
72 | * @ssn: Starting Sequence Number expected to be aggregated. | ||
73 | * @addba_resp_timer: timer for peer's response to addba request | ||
74 | * @addba_req_num: number of times addBA request has been sent. | ||
75 | */ | ||
76 | struct tid_ampdu_tx { | ||
77 | u8 state; | ||
78 | u8 dialog_token; | ||
79 | u16 ssn; | ||
80 | struct timer_list addba_resp_timer; | ||
81 | u8 addba_req_num; | ||
82 | }; | ||
43 | 83 | ||
44 | /** | 84 | /** |
45 | * struct tid_ampdu_rx - TID aggregation information (Rx). | 85 | * struct tid_ampdu_rx - TID aggregation information (Rx). |
@@ -67,105 +107,195 @@ struct tid_ampdu_rx { | |||
67 | }; | 107 | }; |
68 | 108 | ||
69 | /** | 109 | /** |
110 | * enum plink_state - state of a mesh peer link finite state machine | ||
111 | * | ||
112 | * @PLINK_LISTEN: initial state, considered the implicit state of non-existent | ||
113 | * mesh peer links | ||
114 | * @PLINK_OPN_SNT: mesh plink open frame has been sent to this mesh peer | ||
115 | * @PLINK_OPN_RCVD: mesh plink open frame has been received from this mesh peer | ||
116 | * @PLINK_CNF_RCVD: mesh plink confirm frame has been received from this mesh | ||
117 | * peer | ||
118 | * @PLINK_ESTAB: mesh peer link is established | ||
119 | * @PLINK_HOLDING: mesh peer link is being closed or cancelled | ||
120 | * @PLINK_BLOCKED: all frames transmitted from this mesh plink are discarded | ||
121 | */ | ||
122 | enum plink_state { | ||
123 | PLINK_LISTEN, | ||
124 | PLINK_OPN_SNT, | ||
125 | PLINK_OPN_RCVD, | ||
126 | PLINK_CNF_RCVD, | ||
127 | PLINK_ESTAB, | ||
128 | PLINK_HOLDING, | ||
129 | PLINK_BLOCKED | ||
130 | }; | ||
131 | |||
132 | /** | ||
70 | * struct sta_ampdu_mlme - STA aggregation information. | 133 | * struct sta_ampdu_mlme - STA aggregation information. |
71 | * | 134 | * |
72 | * @tid_agg_info_rx: aggregation info for Rx per TID | 135 | * @tid_rx: aggregation info for Rx per TID |
136 | * @tid_tx: aggregation info for Tx per TID | ||
73 | * @ampdu_rx: for locking sections in aggregation Rx flow | 137 | * @ampdu_rx: for locking sections in aggregation Rx flow |
138 | * @ampdu_tx: for locking sections in aggregation Tx flow | ||
139 | * @dialog_token_allocator: dialog token enumerator for each new session; | ||
74 | */ | 140 | */ |
75 | struct sta_ampdu_mlme { | 141 | struct sta_ampdu_mlme { |
76 | struct tid_ampdu_rx tid_rx[STA_TID_NUM]; | 142 | struct tid_ampdu_rx tid_rx[STA_TID_NUM]; |
143 | struct tid_ampdu_tx tid_tx[STA_TID_NUM]; | ||
77 | spinlock_t ampdu_rx; | 144 | spinlock_t ampdu_rx; |
145 | spinlock_t ampdu_tx; | ||
146 | u8 dialog_token_allocator; | ||
78 | }; | 147 | }; |
79 | 148 | ||
149 | |||
150 | /* see __sta_info_unlink */ | ||
151 | #define STA_INFO_PIN_STAT_NORMAL 0 | ||
152 | #define STA_INFO_PIN_STAT_PINNED 1 | ||
153 | #define STA_INFO_PIN_STAT_DESTROY 2 | ||
154 | |||
155 | /** | ||
156 | * struct sta_info - STA information | ||
157 | * | ||
158 | * This structure collects information about a station that | ||
159 | * mac80211 is communicating with. | ||
160 | * | ||
161 | * @list: global linked list entry | ||
162 | * @hnext: hash table linked list pointer | ||
163 | * @local: pointer to the global information | ||
164 | * @addr: MAC address of this STA | ||
165 | * @aid: STA's unique AID (1..2007, 0 = not assigned yet), | ||
166 | * only used in AP (and IBSS?) mode | ||
167 | * @flags: STA flags, see &enum ieee80211_sta_info_flags | ||
168 | * @ps_tx_buf: buffer of frames to transmit to this station | ||
169 | * when it leaves power saving state | ||
170 | * @tx_filtered: buffer of frames we already tried to transmit | ||
171 | * but were filtered by hardware due to STA having entered | ||
172 | * power saving state | ||
173 | * @rx_packets: Number of MSDUs received from this STA | ||
174 | * @rx_bytes: Number of bytes received from this STA | ||
175 | * @supp_rates: Bitmap of supported rates (per band) | ||
176 | * @ht_info: HT capabilities of this STA | ||
177 | */ | ||
80 | struct sta_info { | 178 | struct sta_info { |
81 | struct kref kref; | 179 | /* General information, mostly static */ |
82 | struct list_head list; | 180 | struct list_head list; |
83 | struct sta_info *hnext; /* next entry in hash table list */ | 181 | struct sta_info *hnext; |
84 | |||
85 | struct ieee80211_local *local; | 182 | struct ieee80211_local *local; |
86 | 183 | struct ieee80211_sub_if_data *sdata; | |
87 | u8 addr[ETH_ALEN]; | ||
88 | u16 aid; /* STA's unique AID (1..2007), 0 = not yet assigned */ | ||
89 | u32 flags; /* WLAN_STA_ */ | ||
90 | |||
91 | struct sk_buff_head ps_tx_buf; /* buffer of TX frames for station in | ||
92 | * power saving state */ | ||
93 | int pspoll; /* whether STA has send a PS Poll frame */ | ||
94 | struct sk_buff_head tx_filtered; /* buffer of TX frames that were | ||
95 | * already given to low-level driver, | ||
96 | * but were filtered */ | ||
97 | int clear_dst_mask; | ||
98 | |||
99 | unsigned long rx_packets, tx_packets; /* number of RX/TX MSDUs */ | ||
100 | unsigned long rx_bytes, tx_bytes; | ||
101 | unsigned long tx_retry_failed, tx_retry_count; | ||
102 | unsigned long tx_filtered_count; | ||
103 | |||
104 | unsigned int wep_weak_iv_count; /* number of RX frames with weak IV */ | ||
105 | |||
106 | unsigned long last_rx; | ||
107 | u32 supp_rates; /* bitmap of supported rates in local->curr_rates */ | ||
108 | int txrate; /* index in local->curr_rates */ | ||
109 | int last_txrate; /* last rate used to send a frame to this STA */ | ||
110 | int last_nonerp_idx; | ||
111 | |||
112 | struct net_device *dev; /* which net device is this station associated | ||
113 | * to */ | ||
114 | |||
115 | struct ieee80211_key *key; | 184 | struct ieee80211_key *key; |
116 | |||
117 | u32 tx_num_consecutive_failures; | ||
118 | u32 tx_num_mpdu_ok; | ||
119 | u32 tx_num_mpdu_fail; | ||
120 | |||
121 | struct rate_control_ref *rate_ctrl; | 185 | struct rate_control_ref *rate_ctrl; |
122 | void *rate_ctrl_priv; | 186 | void *rate_ctrl_priv; |
187 | struct ieee80211_ht_info ht_info; | ||
188 | u64 supp_rates[IEEE80211_NUM_BANDS]; | ||
189 | u8 addr[ETH_ALEN]; | ||
190 | u16 aid; | ||
191 | u16 listen_interval; | ||
123 | 192 | ||
124 | /* last received seq/frag number from this STA (per RX queue) */ | 193 | /* |
125 | __le16 last_seq_ctrl[NUM_RX_DATA_QUEUES]; | 194 | * for use by the internal lifetime management, |
195 | * see __sta_info_unlink | ||
196 | */ | ||
197 | u8 pin_status; | ||
198 | |||
199 | /* frequently updated information, needs locking? */ | ||
200 | u32 flags; | ||
201 | |||
202 | /* | ||
203 | * STA powersave frame queues, no more than the internal | ||
204 | * locking required. | ||
205 | */ | ||
206 | struct sk_buff_head ps_tx_buf; | ||
207 | struct sk_buff_head tx_filtered; | ||
208 | |||
209 | /* Updated from RX path only, no locking requirements */ | ||
210 | unsigned long rx_packets, rx_bytes; | ||
211 | unsigned long wep_weak_iv_count; | ||
212 | unsigned long last_rx; | ||
126 | unsigned long num_duplicates; /* number of duplicate frames received | 213 | unsigned long num_duplicates; /* number of duplicate frames received |
127 | * from this STA */ | 214 | * from this STA */ |
128 | unsigned long tx_fragments; /* number of transmitted MPDUs */ | ||
129 | unsigned long rx_fragments; /* number of received MPDUs */ | 215 | unsigned long rx_fragments; /* number of received MPDUs */ |
130 | unsigned long rx_dropped; /* number of dropped MPDUs from this STA */ | 216 | unsigned long rx_dropped; /* number of dropped MPDUs from this STA */ |
131 | |||
132 | int last_rssi; /* RSSI of last received frame from this STA */ | 217 | int last_rssi; /* RSSI of last received frame from this STA */ |
133 | int last_signal; /* signal of last received frame from this STA */ | 218 | int last_signal; /* signal of last received frame from this STA */ |
134 | int last_noise; /* noise of last received frame from this STA */ | 219 | int last_noise; /* noise of last received frame from this STA */ |
135 | int last_ack_rssi[3]; /* RSSI of last received ACKs from this STA */ | 220 | /* last received seq/frag number from this STA (per RX queue) */ |
136 | unsigned long last_ack; | 221 | __le16 last_seq_ctrl[NUM_RX_DATA_QUEUES]; |
137 | int channel_use; | ||
138 | int channel_use_raw; | ||
139 | |||
140 | #ifdef CONFIG_MAC80211_DEBUG_COUNTERS | 222 | #ifdef CONFIG_MAC80211_DEBUG_COUNTERS |
141 | unsigned int wme_rx_queue[NUM_RX_DATA_QUEUES]; | 223 | unsigned int wme_rx_queue[NUM_RX_DATA_QUEUES]; |
224 | #endif | ||
225 | |||
226 | /* Updated from TX status path only, no locking requirements */ | ||
227 | unsigned long tx_filtered_count; | ||
228 | unsigned long tx_retry_failed, tx_retry_count; | ||
229 | /* TODO: update in generic code not rate control? */ | ||
230 | u32 tx_num_consecutive_failures; | ||
231 | u32 tx_num_mpdu_ok; | ||
232 | u32 tx_num_mpdu_fail; | ||
233 | /* moving percentage of failed MSDUs */ | ||
234 | unsigned int fail_avg; | ||
235 | |||
236 | /* Updated from TX path only, no locking requirements */ | ||
237 | unsigned long tx_packets; /* number of RX/TX MSDUs */ | ||
238 | unsigned long tx_bytes; | ||
239 | unsigned long tx_fragments; /* number of transmitted MPDUs */ | ||
240 | int txrate_idx; | ||
241 | int last_txrate_idx; | ||
242 | #ifdef CONFIG_MAC80211_DEBUG_COUNTERS | ||
142 | unsigned int wme_tx_queue[NUM_RX_DATA_QUEUES]; | 243 | unsigned int wme_tx_queue[NUM_RX_DATA_QUEUES]; |
143 | #endif /* CONFIG_MAC80211_DEBUG_COUNTERS */ | 244 | #endif |
144 | 245 | ||
145 | u16 listen_interval; | 246 | /* Debug counters, unlocked (imprecision doesn't matter) */ |
247 | int channel_use; | ||
248 | int channel_use_raw; | ||
146 | 249 | ||
147 | struct ieee80211_ht_info ht_info; /* 802.11n HT capabilities | 250 | /* |
148 | of this STA */ | 251 | * Aggregation information, comes with own locking. |
252 | */ | ||
149 | struct sta_ampdu_mlme ampdu_mlme; | 253 | struct sta_ampdu_mlme ampdu_mlme; |
150 | u8 timer_to_tid[STA_TID_NUM]; /* convert timer id to tid */ | 254 | u8 timer_to_tid[STA_TID_NUM]; /* identity mapping to ID timers */ |
255 | u8 tid_to_tx_q[STA_TID_NUM]; /* map tid to tx queue */ | ||
256 | |||
257 | #ifdef CONFIG_MAC80211_MESH | ||
258 | /* | ||
259 | * Mesh peer link attributes | ||
260 | * TODO: move to a sub-structure that is referenced with pointer? | ||
261 | */ | ||
262 | __le16 llid; /* Local link ID */ | ||
263 | __le16 plid; /* Peer link ID */ | ||
264 | __le16 reason; /* Cancel reason on PLINK_HOLDING state */ | ||
265 | u8 plink_retries; /* Retries in establishment */ | ||
266 | bool ignore_plink_timer; | ||
267 | enum plink_state plink_state; | ||
268 | u32 plink_timeout; | ||
269 | struct timer_list plink_timer; | ||
270 | spinlock_t plink_lock; /* For peer_state reads / updates and other | ||
271 | updates in the structure. Ensures robust | ||
272 | transitions for the peerlink FSM */ | ||
273 | #endif | ||
151 | 274 | ||
152 | #ifdef CONFIG_MAC80211_DEBUGFS | 275 | #ifdef CONFIG_MAC80211_DEBUGFS |
153 | struct sta_info_debugfsdentries { | 276 | struct sta_info_debugfsdentries { |
154 | struct dentry *dir; | 277 | struct dentry *dir; |
155 | struct dentry *flags; | 278 | struct dentry *flags; |
156 | struct dentry *num_ps_buf_frames; | 279 | struct dentry *num_ps_buf_frames; |
157 | struct dentry *last_ack_rssi; | ||
158 | struct dentry *last_ack_ms; | ||
159 | struct dentry *inactive_ms; | 280 | struct dentry *inactive_ms; |
160 | struct dentry *last_seq_ctrl; | 281 | struct dentry *last_seq_ctrl; |
161 | #ifdef CONFIG_MAC80211_DEBUG_COUNTERS | 282 | #ifdef CONFIG_MAC80211_DEBUG_COUNTERS |
162 | struct dentry *wme_rx_queue; | 283 | struct dentry *wme_rx_queue; |
163 | struct dentry *wme_tx_queue; | 284 | struct dentry *wme_tx_queue; |
164 | #endif | 285 | #endif |
286 | struct dentry *agg_status; | ||
165 | } debugfs; | 287 | } debugfs; |
166 | #endif | 288 | #endif |
167 | }; | 289 | }; |
168 | 290 | ||
291 | static inline enum plink_state sta_plink_state(struct sta_info *sta) | ||
292 | { | ||
293 | #ifdef CONFIG_MAC80211_MESH | ||
294 | return sta->plink_state; | ||
295 | #endif | ||
296 | return PLINK_LISTEN; | ||
297 | } | ||
298 | |||
169 | 299 | ||
170 | /* Maximum number of concurrently registered stations */ | 300 | /* Maximum number of concurrently registered stations */ |
171 | #define MAX_STA_COUNT 2007 | 301 | #define MAX_STA_COUNT 2007 |
@@ -185,22 +315,44 @@ struct sta_info { | |||
185 | */ | 315 | */ |
186 | #define STA_INFO_CLEANUP_INTERVAL (10 * HZ) | 316 | #define STA_INFO_CLEANUP_INTERVAL (10 * HZ) |
187 | 317 | ||
188 | static inline void __sta_info_get(struct sta_info *sta) | 318 | /* |
189 | { | 319 | * Get a STA info, must have be under RCU read lock. |
190 | kref_get(&sta->kref); | 320 | */ |
191 | } | 321 | struct sta_info *sta_info_get(struct ieee80211_local *local, u8 *addr); |
322 | /* | ||
323 | * Get STA info by index, BROKEN! | ||
324 | */ | ||
325 | struct sta_info *sta_info_get_by_idx(struct ieee80211_local *local, int idx, | ||
326 | struct net_device *dev); | ||
327 | /* | ||
328 | * Create a new STA info, caller owns returned structure | ||
329 | * until sta_info_insert(). | ||
330 | */ | ||
331 | struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, | ||
332 | u8 *addr, gfp_t gfp); | ||
333 | /* | ||
334 | * Insert STA info into hash table/list; returns zero, or | ||
335 | * -EEXIST if the same MAC address is already present. | ||
336 | * | ||
337 | * Calling this without RCU protection makes the caller | ||
338 | * relinquish its reference to @sta. | ||
339 | */ | ||
340 | int sta_info_insert(struct sta_info *sta); | ||
341 | /* | ||
342 | * Unlink a STA info from the hash table/list. | ||
343 | * This can NULL the STA pointer if somebody else | ||
344 | * has already unlinked it. | ||
345 | */ | ||
346 | void sta_info_unlink(struct sta_info **sta); | ||
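A sketch of the removal side these prototypes describe, assuming the caller holds the RTNL; the wrapper name is hypothetical and mirrors what sta_info_flush() does internally:

static void example_remove_station(struct sta_info *sta)
{
	sta_info_unlink(&sta);
	if (!sta)
		return;		/* someone else now owns the teardown */

	synchronize_rcu();	/* wait out all RCU readers */
	sta_info_destroy(sta);	/* requires the RTNL, may sleep */
}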
347 | |||
348 | void sta_info_destroy(struct sta_info *sta); | ||
349 | void sta_info_set_tim_bit(struct sta_info *sta); | ||
350 | void sta_info_clear_tim_bit(struct sta_info *sta); | ||
192 | 351 | ||
193 | struct sta_info * sta_info_get(struct ieee80211_local *local, u8 *addr); | ||
194 | int sta_info_min_txrate_get(struct ieee80211_local *local); | ||
195 | void sta_info_put(struct sta_info *sta); | ||
196 | struct sta_info * sta_info_add(struct ieee80211_local *local, | ||
197 | struct net_device *dev, u8 *addr, gfp_t gfp); | ||
198 | void sta_info_remove(struct sta_info *sta); | ||
199 | void sta_info_free(struct sta_info *sta); | ||
200 | void sta_info_init(struct ieee80211_local *local); | 352 | void sta_info_init(struct ieee80211_local *local); |
201 | int sta_info_start(struct ieee80211_local *local); | 353 | int sta_info_start(struct ieee80211_local *local); |
202 | void sta_info_stop(struct ieee80211_local *local); | 354 | void sta_info_stop(struct ieee80211_local *local); |
203 | void sta_info_remove_aid_ptr(struct sta_info *sta); | 355 | int sta_info_flush(struct ieee80211_local *local, |
204 | void sta_info_flush(struct ieee80211_local *local, struct net_device *dev); | 356 | struct ieee80211_sub_if_data *sdata); |
205 | 357 | ||
206 | #endif /* STA_INFO_H */ | 358 | #endif /* STA_INFO_H */ |
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 67b509edd431..80f4343a3007 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c | |||
@@ -26,6 +26,7 @@ | |||
26 | 26 | ||
27 | #include "ieee80211_i.h" | 27 | #include "ieee80211_i.h" |
28 | #include "ieee80211_led.h" | 28 | #include "ieee80211_led.h" |
29 | #include "mesh.h" | ||
29 | #include "wep.h" | 30 | #include "wep.h" |
30 | #include "wpa.h" | 31 | #include "wpa.h" |
31 | #include "wme.h" | 32 | #include "wme.h" |
@@ -86,15 +87,19 @@ static inline void ieee80211_dump_frame(const char *ifname, const char *title, | |||
86 | } | 87 | } |
87 | #endif /* CONFIG_MAC80211_LOWTX_FRAME_DUMP */ | 88 | #endif /* CONFIG_MAC80211_LOWTX_FRAME_DUMP */ |
88 | 89 | ||
89 | static u16 ieee80211_duration(struct ieee80211_txrx_data *tx, int group_addr, | 90 | static u16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr, |
90 | int next_frag_len) | 91 | int next_frag_len) |
91 | { | 92 | { |
92 | int rate, mrate, erp, dur, i; | 93 | int rate, mrate, erp, dur, i; |
93 | struct ieee80211_rate *txrate = tx->u.tx.rate; | 94 | struct ieee80211_rate *txrate = tx->rate; |
94 | struct ieee80211_local *local = tx->local; | 95 | struct ieee80211_local *local = tx->local; |
95 | struct ieee80211_hw_mode *mode = tx->u.tx.mode; | 96 | struct ieee80211_supported_band *sband; |
96 | 97 | ||
97 | erp = txrate->flags & IEEE80211_RATE_ERP; | 98 | sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; |
99 | |||
100 | erp = 0; | ||
101 | if (tx->sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) | ||
102 | erp = txrate->flags & IEEE80211_RATE_ERP_G; | ||
98 | 103 | ||
99 | /* | 104 | /* |
100 | * data and mgmt (except PS Poll): | 105 | * data and mgmt (except PS Poll): |
@@ -150,20 +155,36 @@ static u16 ieee80211_duration(struct ieee80211_txrx_data *tx, int group_addr, | |||
150 | * Mandatory rates for IEEE 802.11g PHY: 1, 2, 5.5, 11, 6, 12, 24 Mbps | 155 | * Mandatory rates for IEEE 802.11g PHY: 1, 2, 5.5, 11, 6, 12, 24 Mbps |
151 | */ | 156 | */ |
152 | rate = -1; | 157 | rate = -1; |
153 | mrate = 10; /* use 1 Mbps if everything fails */ | 158 | /* use lowest available if everything fails */ |
154 | for (i = 0; i < mode->num_rates; i++) { | 159 | mrate = sband->bitrates[0].bitrate; |
155 | struct ieee80211_rate *r = &mode->rates[i]; | 160 | for (i = 0; i < sband->n_bitrates; i++) { |
156 | if (r->rate > txrate->rate) | 161 | struct ieee80211_rate *r = &sband->bitrates[i]; |
157 | break; | ||
158 | 162 | ||
159 | if (IEEE80211_RATE_MODULATION(txrate->flags) != | 163 | if (r->bitrate > txrate->bitrate) |
160 | IEEE80211_RATE_MODULATION(r->flags)) | 164 | break; |
161 | continue; | ||
162 | 165 | ||
163 | if (r->flags & IEEE80211_RATE_BASIC) | 166 | if (tx->sdata->basic_rates & BIT(i)) |
164 | rate = r->rate; | 167 | rate = r->bitrate; |
165 | else if (r->flags & IEEE80211_RATE_MANDATORY) | 168 | |
166 | mrate = r->rate; | 169 | switch (sband->band) { |
170 | case IEEE80211_BAND_2GHZ: { | ||
171 | u32 flag; | ||
172 | if (tx->sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) | ||
173 | flag = IEEE80211_RATE_MANDATORY_G; | ||
174 | else | ||
175 | flag = IEEE80211_RATE_MANDATORY_B; | ||
176 | if (r->flags & flag) | ||
177 | mrate = r->bitrate; | ||
178 | break; | ||
179 | } | ||
180 | case IEEE80211_BAND_5GHZ: | ||
181 | if (r->flags & IEEE80211_RATE_MANDATORY_A) | ||
182 | mrate = r->bitrate; | ||
183 | break; | ||
184 | case IEEE80211_NUM_BANDS: | ||
185 | WARN_ON(1); | ||
186 | break; | ||
187 | } | ||
167 | } | 188 | } |
168 | if (rate == -1) { | 189 | if (rate == -1) { |
169 | /* No matching basic rate found; use highest suitable mandatory | 190 | /* No matching basic rate found; use highest suitable mandatory |
@@ -184,7 +205,7 @@ static u16 ieee80211_duration(struct ieee80211_txrx_data *tx, int group_addr, | |||
184 | dur *= 2; /* ACK + SIFS */ | 205 | dur *= 2; /* ACK + SIFS */ |
185 | /* next fragment */ | 206 | /* next fragment */ |
186 | dur += ieee80211_frame_duration(local, next_frag_len, | 207 | dur += ieee80211_frame_duration(local, next_frag_len, |
187 | txrate->rate, erp, | 208 | txrate->bitrate, erp, |
188 | tx->sdata->bss_conf.use_short_preamble); | 209 | tx->sdata->bss_conf.use_short_preamble); |
189 | } | 210 | } |
190 | 211 | ||
@@ -212,8 +233,8 @@ static int inline is_ieee80211_device(struct net_device *dev, | |||
212 | 233 | ||
213 | /* tx handlers */ | 234 | /* tx handlers */ |
214 | 235 | ||
215 | static ieee80211_txrx_result | 236 | static ieee80211_tx_result |
216 | ieee80211_tx_h_check_assoc(struct ieee80211_txrx_data *tx) | 237 | ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx) |
217 | { | 238 | { |
218 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | 239 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG |
219 | struct sk_buff *skb = tx->skb; | 240 | struct sk_buff *skb = tx->skb; |
@@ -221,20 +242,23 @@ ieee80211_tx_h_check_assoc(struct ieee80211_txrx_data *tx) | |||
221 | #endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ | 242 | #endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ |
222 | u32 sta_flags; | 243 | u32 sta_flags; |
223 | 244 | ||
224 | if (unlikely(tx->flags & IEEE80211_TXRXD_TX_INJECTED)) | 245 | if (unlikely(tx->flags & IEEE80211_TX_INJECTED)) |
225 | return TXRX_CONTINUE; | 246 | return TX_CONTINUE; |
226 | 247 | ||
227 | if (unlikely(tx->local->sta_sw_scanning) && | 248 | if (unlikely(tx->local->sta_sw_scanning) && |
228 | ((tx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_MGMT || | 249 | ((tx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_MGMT || |
229 | (tx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_PROBE_REQ)) | 250 | (tx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_PROBE_REQ)) |
230 | return TXRX_DROP; | 251 | return TX_DROP; |
231 | 252 | ||
232 | if (tx->flags & IEEE80211_TXRXD_TXPS_BUFFERED) | 253 | if (tx->sdata->vif.type == IEEE80211_IF_TYPE_MESH_POINT) |
233 | return TXRX_CONTINUE; | 254 | return TX_CONTINUE; |
255 | |||
256 | if (tx->flags & IEEE80211_TX_PS_BUFFERED) | ||
257 | return TX_CONTINUE; | ||
234 | 258 | ||
235 | sta_flags = tx->sta ? tx->sta->flags : 0; | 259 | sta_flags = tx->sta ? tx->sta->flags : 0; |
236 | 260 | ||
237 | if (likely(tx->flags & IEEE80211_TXRXD_TXUNICAST)) { | 261 | if (likely(tx->flags & IEEE80211_TX_UNICAST)) { |
238 | if (unlikely(!(sta_flags & WLAN_STA_ASSOC) && | 262 | if (unlikely(!(sta_flags & WLAN_STA_ASSOC) && |
239 | tx->sdata->vif.type != IEEE80211_IF_TYPE_IBSS && | 263 | tx->sdata->vif.type != IEEE80211_IF_TYPE_IBSS && |
240 | (tx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)) { | 264 | (tx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)) { |
@@ -245,7 +269,7 @@ ieee80211_tx_h_check_assoc(struct ieee80211_txrx_data *tx) | |||
245 | tx->dev->name, print_mac(mac, hdr->addr1)); | 269 | tx->dev->name, print_mac(mac, hdr->addr1)); |
246 | #endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ | 270 | #endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ |
247 | I802_DEBUG_INC(tx->local->tx_handlers_drop_not_assoc); | 271 | I802_DEBUG_INC(tx->local->tx_handlers_drop_not_assoc); |
248 | return TXRX_DROP; | 272 | return TX_DROP; |
249 | } | 273 | } |
250 | } else { | 274 | } else { |
251 | if (unlikely((tx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA && | 275 | if (unlikely((tx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA && |
@@ -255,23 +279,23 @@ ieee80211_tx_h_check_assoc(struct ieee80211_txrx_data *tx) | |||
255 | * No associated STAs - no need to send multicast | 279 | * No associated STAs - no need to send multicast |
256 | * frames. | 280 | * frames. |
257 | */ | 281 | */ |
258 | return TXRX_DROP; | 282 | return TX_DROP; |
259 | } | 283 | } |
260 | return TXRX_CONTINUE; | 284 | return TX_CONTINUE; |
261 | } | 285 | } |
262 | 286 | ||
263 | return TXRX_CONTINUE; | 287 | return TX_CONTINUE; |
264 | } | 288 | } |
265 | 289 | ||
266 | static ieee80211_txrx_result | 290 | static ieee80211_tx_result |
267 | ieee80211_tx_h_sequence(struct ieee80211_txrx_data *tx) | 291 | ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx) |
268 | { | 292 | { |
269 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; | 293 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; |
270 | 294 | ||
271 | if (ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_control)) >= 24) | 295 | if (ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_control)) >= 24) |
272 | ieee80211_include_sequence(tx->sdata, hdr); | 296 | ieee80211_include_sequence(tx->sdata, hdr); |
273 | 297 | ||
274 | return TXRX_CONTINUE; | 298 | return TX_CONTINUE; |
275 | } | 299 | } |
276 | 300 | ||
277 | /* This function is called whenever the AP is about to exceed the maximum limit | 301 | /* This function is called whenever the AP is about to exceed the maximum limit |
@@ -303,10 +327,8 @@ static void purge_old_ps_buffers(struct ieee80211_local *local) | |||
303 | } | 327 | } |
304 | total += skb_queue_len(&ap->ps_bc_buf); | 328 | total += skb_queue_len(&ap->ps_bc_buf); |
305 | } | 329 | } |
306 | rcu_read_unlock(); | ||
307 | 330 | ||
308 | read_lock_bh(&local->sta_lock); | 331 | list_for_each_entry_rcu(sta, &local->sta_list, list) { |
309 | list_for_each_entry(sta, &local->sta_list, list) { | ||
310 | skb = skb_dequeue(&sta->ps_tx_buf); | 332 | skb = skb_dequeue(&sta->ps_tx_buf); |
311 | if (skb) { | 333 | if (skb) { |
312 | purged++; | 334 | purged++; |
@@ -314,15 +336,16 @@ static void purge_old_ps_buffers(struct ieee80211_local *local) | |||
314 | } | 336 | } |
315 | total += skb_queue_len(&sta->ps_tx_buf); | 337 | total += skb_queue_len(&sta->ps_tx_buf); |
316 | } | 338 | } |
317 | read_unlock_bh(&local->sta_lock); | 339 | |
340 | rcu_read_unlock(); | ||
318 | 341 | ||
319 | local->total_ps_buffered = total; | 342 | local->total_ps_buffered = total; |
320 | printk(KERN_DEBUG "%s: PS buffers full - purged %d frames\n", | 343 | printk(KERN_DEBUG "%s: PS buffers full - purged %d frames\n", |
321 | wiphy_name(local->hw.wiphy), purged); | 344 | wiphy_name(local->hw.wiphy), purged); |
322 | } | 345 | } |
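purge_old_ps_buffers() now covers both loops with a single RCU read-side section and walks the station list with list_for_each_entry_rcu() instead of taking sta_lock around the second walk. A rough sketch of that pattern, with a simplified station type standing in for struct sta_info:

/*
 * Sketch only (simplified types): count buffered PS frames while
 * iterating the station list under RCU, as the hunk above now does.
 */
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>

struct fake_sta {
	struct list_head list;
	struct sk_buff_head ps_tx_buf;
};

static int count_buffered(struct list_head *sta_list)
{
	struct fake_sta *sta;
	int total = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(sta, sta_list, list)
		total += skb_queue_len(&sta->ps_tx_buf);
	rcu_read_unlock();

	return total;
}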
323 | 346 | ||
324 | static ieee80211_txrx_result | 347 | static ieee80211_tx_result |
325 | ieee80211_tx_h_multicast_ps_buf(struct ieee80211_txrx_data *tx) | 348 | ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx) |
326 | { | 349 | { |
327 | /* | 350 | /* |
328 | * broadcast/multicast frame | 351 | * broadcast/multicast frame |
@@ -334,11 +357,11 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_txrx_data *tx) | |||
334 | 357 | ||
335 | /* not AP/IBSS or ordered frame */ | 358 | /* not AP/IBSS or ordered frame */ |
336 | if (!tx->sdata->bss || (tx->fc & IEEE80211_FCTL_ORDER)) | 359 | if (!tx->sdata->bss || (tx->fc & IEEE80211_FCTL_ORDER)) |
337 | return TXRX_CONTINUE; | 360 | return TX_CONTINUE; |
338 | 361 | ||
339 | /* no stations in PS mode */ | 362 | /* no stations in PS mode */ |
340 | if (!atomic_read(&tx->sdata->bss->num_sta_ps)) | 363 | if (!atomic_read(&tx->sdata->bss->num_sta_ps)) |
341 | return TXRX_CONTINUE; | 364 | return TX_CONTINUE; |
342 | 365 | ||
343 | /* buffered in mac80211 */ | 366 | /* buffered in mac80211 */ |
344 | if (tx->local->hw.flags & IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING) { | 367 | if (tx->local->hw.flags & IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING) { |
@@ -355,17 +378,17 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_txrx_data *tx) | |||
355 | } else | 378 | } else |
356 | tx->local->total_ps_buffered++; | 379 | tx->local->total_ps_buffered++; |
357 | skb_queue_tail(&tx->sdata->bss->ps_bc_buf, tx->skb); | 380 | skb_queue_tail(&tx->sdata->bss->ps_bc_buf, tx->skb); |
358 | return TXRX_QUEUED; | 381 | return TX_QUEUED; |
359 | } | 382 | } |
360 | 383 | ||
361 | /* buffered in hardware */ | 384 | /* buffered in hardware */ |
362 | tx->u.tx.control->flags |= IEEE80211_TXCTL_SEND_AFTER_DTIM; | 385 | tx->control->flags |= IEEE80211_TXCTL_SEND_AFTER_DTIM; |
363 | 386 | ||
364 | return TXRX_CONTINUE; | 387 | return TX_CONTINUE; |
365 | } | 388 | } |
366 | 389 | ||
367 | static ieee80211_txrx_result | 390 | static ieee80211_tx_result |
368 | ieee80211_tx_h_unicast_ps_buf(struct ieee80211_txrx_data *tx) | 391 | ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx) |
369 | { | 392 | { |
370 | struct sta_info *sta = tx->sta; | 393 | struct sta_info *sta = tx->sta; |
371 | DECLARE_MAC_BUF(mac); | 394 | DECLARE_MAC_BUF(mac); |
@@ -373,9 +396,10 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_txrx_data *tx) | |||
373 | if (unlikely(!sta || | 396 | if (unlikely(!sta || |
374 | ((tx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT && | 397 | ((tx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT && |
375 | (tx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP))) | 398 | (tx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP))) |
376 | return TXRX_CONTINUE; | 399 | return TX_CONTINUE; |
377 | 400 | ||
378 | if (unlikely((sta->flags & WLAN_STA_PS) && !sta->pspoll)) { | 401 | if (unlikely((sta->flags & WLAN_STA_PS) && |
402 | !(sta->flags & WLAN_STA_PSPOLL))) { | ||
379 | struct ieee80211_tx_packet_data *pkt_data; | 403 | struct ieee80211_tx_packet_data *pkt_data; |
380 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG | 404 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG |
381 | printk(KERN_DEBUG "STA %s aid %d: PS buffer (entries " | 405 | printk(KERN_DEBUG "STA %s aid %d: PS buffer (entries " |
@@ -383,7 +407,6 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_txrx_data *tx) | |||
383 | print_mac(mac, sta->addr), sta->aid, | 407 | print_mac(mac, sta->addr), sta->aid, |
384 | skb_queue_len(&sta->ps_tx_buf)); | 408 | skb_queue_len(&sta->ps_tx_buf)); |
385 | #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ | 409 | #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ |
386 | sta->flags |= WLAN_STA_TIM; | ||
387 | if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER) | 410 | if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER) |
388 | purge_old_ps_buffers(tx->local); | 411 | purge_old_ps_buffers(tx->local); |
389 | if (skb_queue_len(&sta->ps_tx_buf) >= STA_MAX_TX_BUFFER) { | 412 | if (skb_queue_len(&sta->ps_tx_buf) >= STA_MAX_TX_BUFFER) { |
@@ -396,18 +419,15 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_txrx_data *tx) | |||
396 | dev_kfree_skb(old); | 419 | dev_kfree_skb(old); |
397 | } else | 420 | } else |
398 | tx->local->total_ps_buffered++; | 421 | tx->local->total_ps_buffered++; |
422 | |||
399 | /* Queue frame to be sent after STA sends a PS Poll frame */ | 423 | /* Queue frame to be sent after STA sends a PS Poll frame */ |
400 | if (skb_queue_empty(&sta->ps_tx_buf)) { | 424 | if (skb_queue_empty(&sta->ps_tx_buf)) |
401 | if (tx->local->ops->set_tim) | 425 | sta_info_set_tim_bit(sta); |
402 | tx->local->ops->set_tim(local_to_hw(tx->local), | 426 | |
403 | sta->aid, 1); | ||
404 | if (tx->sdata->bss) | ||
405 | bss_tim_set(tx->local, tx->sdata->bss, sta->aid); | ||
406 | } | ||
407 | pkt_data = (struct ieee80211_tx_packet_data *)tx->skb->cb; | 427 | pkt_data = (struct ieee80211_tx_packet_data *)tx->skb->cb; |
408 | pkt_data->jiffies = jiffies; | 428 | pkt_data->jiffies = jiffies; |
409 | skb_queue_tail(&sta->ps_tx_buf, tx->skb); | 429 | skb_queue_tail(&sta->ps_tx_buf, tx->skb); |
410 | return TXRX_QUEUED; | 430 | return TX_QUEUED; |
411 | } | 431 | } |
412 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG | 432 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG |
413 | else if (unlikely(sta->flags & WLAN_STA_PS)) { | 433 | else if (unlikely(sta->flags & WLAN_STA_PS)) { |
@@ -416,40 +436,40 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_txrx_data *tx) | |||
416 | print_mac(mac, sta->addr)); | 436 | print_mac(mac, sta->addr)); |
417 | } | 437 | } |
418 | #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ | 438 | #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ |
419 | sta->pspoll = 0; | 439 | sta->flags &= ~WLAN_STA_PSPOLL; |
420 | 440 | ||
421 | return TXRX_CONTINUE; | 441 | return TX_CONTINUE; |
422 | } | 442 | } |
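In the unicast PS-buffering handler, the PS-poll state moves from a separate sta->pspoll field into a WLAN_STA_PSPOLL bit in sta->flags, and the TIM bit is now raised through sta_info_set_tim_bit() once the first frame lands in an empty ps_tx_buf. A compact sketch of the buffering predicate; the flag values below are made up for illustration:

/*
 * Sketch only: buffer a unicast frame while the STA sleeps and has not
 * yet sent a PS-Poll, mirroring the flag test in the hunk above.
 */
#define FAKE_WLAN_STA_PS	0x01
#define FAKE_WLAN_STA_PSPOLL	0x02

static int should_ps_buffer(unsigned int sta_flags)
{
	return (sta_flags & FAKE_WLAN_STA_PS) &&
	       !(sta_flags & FAKE_WLAN_STA_PSPOLL);
}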
423 | 443 | ||
424 | static ieee80211_txrx_result | 444 | static ieee80211_tx_result |
425 | ieee80211_tx_h_ps_buf(struct ieee80211_txrx_data *tx) | 445 | ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx) |
426 | { | 446 | { |
427 | if (unlikely(tx->flags & IEEE80211_TXRXD_TXPS_BUFFERED)) | 447 | if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED)) |
428 | return TXRX_CONTINUE; | 448 | return TX_CONTINUE; |
429 | 449 | ||
430 | if (tx->flags & IEEE80211_TXRXD_TXUNICAST) | 450 | if (tx->flags & IEEE80211_TX_UNICAST) |
431 | return ieee80211_tx_h_unicast_ps_buf(tx); | 451 | return ieee80211_tx_h_unicast_ps_buf(tx); |
432 | else | 452 | else |
433 | return ieee80211_tx_h_multicast_ps_buf(tx); | 453 | return ieee80211_tx_h_multicast_ps_buf(tx); |
434 | } | 454 | } |
435 | 455 | ||
436 | static ieee80211_txrx_result | 456 | static ieee80211_tx_result |
437 | ieee80211_tx_h_select_key(struct ieee80211_txrx_data *tx) | 457 | ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx) |
438 | { | 458 | { |
439 | struct ieee80211_key *key; | 459 | struct ieee80211_key *key; |
440 | u16 fc = tx->fc; | 460 | u16 fc = tx->fc; |
441 | 461 | ||
442 | if (unlikely(tx->u.tx.control->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT)) | 462 | if (unlikely(tx->control->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT)) |
443 | tx->key = NULL; | 463 | tx->key = NULL; |
444 | else if (tx->sta && (key = rcu_dereference(tx->sta->key))) | 464 | else if (tx->sta && (key = rcu_dereference(tx->sta->key))) |
445 | tx->key = key; | 465 | tx->key = key; |
446 | else if ((key = rcu_dereference(tx->sdata->default_key))) | 466 | else if ((key = rcu_dereference(tx->sdata->default_key))) |
447 | tx->key = key; | 467 | tx->key = key; |
448 | else if (tx->sdata->drop_unencrypted && | 468 | else if (tx->sdata->drop_unencrypted && |
449 | !(tx->u.tx.control->flags & IEEE80211_TXCTL_EAPOL_FRAME) && | 469 | !(tx->control->flags & IEEE80211_TXCTL_EAPOL_FRAME) && |
450 | !(tx->flags & IEEE80211_TXRXD_TX_INJECTED)) { | 470 | !(tx->flags & IEEE80211_TX_INJECTED)) { |
451 | I802_DEBUG_INC(tx->local->tx_handlers_drop_unencrypted); | 471 | I802_DEBUG_INC(tx->local->tx_handlers_drop_unencrypted); |
452 | return TXRX_DROP; | 472 | return TX_DROP; |
453 | } else | 473 | } else |
454 | tx->key = NULL; | 474 | tx->key = NULL; |
455 | 475 | ||
@@ -476,13 +496,13 @@ ieee80211_tx_h_select_key(struct ieee80211_txrx_data *tx) | |||
476 | } | 496 | } |
477 | 497 | ||
478 | if (!tx->key || !(tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) | 498 | if (!tx->key || !(tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) |
479 | tx->u.tx.control->flags |= IEEE80211_TXCTL_DO_NOT_ENCRYPT; | 499 | tx->control->flags |= IEEE80211_TXCTL_DO_NOT_ENCRYPT; |
480 | 500 | ||
481 | return TXRX_CONTINUE; | 501 | return TX_CONTINUE; |
482 | } | 502 | } |
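The key-selection handler keeps its precedence order while dropping the u.tx indirection: an explicit do-not-encrypt request wins, then the per-station key, then the interface default key; otherwise the frame is dropped when drop_unencrypted is set and the frame is neither EAPOL nor injected. Roughly, as a sketch with plain boolean inputs:

/*
 * Sketch only: the selection order encoded by ieee80211_tx_h_select_key()
 * above, reduced to plain flags.
 */
enum fake_key_choice { KEY_NONE, KEY_STA, KEY_DEFAULT, KEY_DROP_FRAME };

static enum fake_key_choice select_key(int no_encrypt, int have_sta_key,
				       int have_default_key,
				       int drop_unencrypted, int is_eapol,
				       int injected)
{
	if (no_encrypt)
		return KEY_NONE;
	if (have_sta_key)
		return KEY_STA;
	if (have_default_key)
		return KEY_DEFAULT;
	if (drop_unencrypted && !is_eapol && !injected)
		return KEY_DROP_FRAME;
	return KEY_NONE;
}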
483 | 503 | ||
484 | static ieee80211_txrx_result | 504 | static ieee80211_tx_result |
485 | ieee80211_tx_h_fragment(struct ieee80211_txrx_data *tx) | 505 | ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx) |
486 | { | 506 | { |
487 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx->skb->data; | 507 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx->skb->data; |
488 | size_t hdrlen, per_fragm, num_fragm, payload_len, left; | 508 | size_t hdrlen, per_fragm, num_fragm, payload_len, left; |
@@ -492,8 +512,8 @@ ieee80211_tx_h_fragment(struct ieee80211_txrx_data *tx) | |||
492 | u8 *pos; | 512 | u8 *pos; |
493 | int frag_threshold = tx->local->fragmentation_threshold; | 513 | int frag_threshold = tx->local->fragmentation_threshold; |
494 | 514 | ||
495 | if (!(tx->flags & IEEE80211_TXRXD_FRAGMENTED)) | 515 | if (!(tx->flags & IEEE80211_TX_FRAGMENTED)) |
496 | return TXRX_CONTINUE; | 516 | return TX_CONTINUE; |
497 | 517 | ||
498 | first = tx->skb; | 518 | first = tx->skb; |
499 | 519 | ||
@@ -544,10 +564,10 @@ ieee80211_tx_h_fragment(struct ieee80211_txrx_data *tx) | |||
544 | } | 564 | } |
545 | skb_trim(first, hdrlen + per_fragm); | 565 | skb_trim(first, hdrlen + per_fragm); |
546 | 566 | ||
547 | tx->u.tx.num_extra_frag = num_fragm - 1; | 567 | tx->num_extra_frag = num_fragm - 1; |
548 | tx->u.tx.extra_frag = frags; | 568 | tx->extra_frag = frags; |
549 | 569 | ||
550 | return TXRX_CONTINUE; | 570 | return TX_CONTINUE; |
551 | 571 | ||
552 | fail: | 572 | fail: |
553 | printk(KERN_DEBUG "%s: failed to fragment frame\n", tx->dev->name); | 573 | printk(KERN_DEBUG "%s: failed to fragment frame\n", tx->dev->name); |
@@ -558,14 +578,14 @@ ieee80211_tx_h_fragment(struct ieee80211_txrx_data *tx) | |||
558 | kfree(frags); | 578 | kfree(frags); |
559 | } | 579 | } |
560 | I802_DEBUG_INC(tx->local->tx_handlers_drop_fragment); | 580 | I802_DEBUG_INC(tx->local->tx_handlers_drop_fragment); |
561 | return TXRX_DROP; | 581 | return TX_DROP; |
562 | } | 582 | } |
563 | 583 | ||
564 | static ieee80211_txrx_result | 584 | static ieee80211_tx_result |
565 | ieee80211_tx_h_encrypt(struct ieee80211_txrx_data *tx) | 585 | ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx) |
566 | { | 586 | { |
567 | if (!tx->key) | 587 | if (!tx->key) |
568 | return TXRX_CONTINUE; | 588 | return TX_CONTINUE; |
569 | 589 | ||
570 | switch (tx->key->conf.alg) { | 590 | switch (tx->key->conf.alg) { |
571 | case ALG_WEP: | 591 | case ALG_WEP: |
@@ -578,59 +598,60 @@ ieee80211_tx_h_encrypt(struct ieee80211_txrx_data *tx) | |||
578 | 598 | ||
579 | /* not reached */ | 599 | /* not reached */ |
580 | WARN_ON(1); | 600 | WARN_ON(1); |
581 | return TXRX_DROP; | 601 | return TX_DROP; |
582 | } | 602 | } |
583 | 603 | ||
584 | static ieee80211_txrx_result | 604 | static ieee80211_tx_result |
585 | ieee80211_tx_h_rate_ctrl(struct ieee80211_txrx_data *tx) | 605 | ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx) |
586 | { | 606 | { |
587 | struct rate_selection rsel; | 607 | struct rate_selection rsel; |
608 | struct ieee80211_supported_band *sband; | ||
609 | |||
610 | sband = tx->local->hw.wiphy->bands[tx->local->hw.conf.channel->band]; | ||
588 | 611 | ||
589 | if (likely(!tx->u.tx.rate)) { | 612 | if (likely(!tx->rate)) { |
590 | rate_control_get_rate(tx->dev, tx->u.tx.mode, tx->skb, &rsel); | 613 | rate_control_get_rate(tx->dev, sband, tx->skb, &rsel); |
591 | tx->u.tx.rate = rsel.rate; | 614 | tx->rate = rsel.rate; |
592 | if (unlikely(rsel.probe != NULL)) { | 615 | if (unlikely(rsel.probe)) { |
593 | tx->u.tx.control->flags |= | 616 | tx->control->flags |= |
594 | IEEE80211_TXCTL_RATE_CTRL_PROBE; | 617 | IEEE80211_TXCTL_RATE_CTRL_PROBE; |
595 | tx->flags |= IEEE80211_TXRXD_TXPROBE_LAST_FRAG; | 618 | tx->flags |= IEEE80211_TX_PROBE_LAST_FRAG; |
596 | tx->u.tx.control->alt_retry_rate = tx->u.tx.rate->val; | 619 | tx->control->alt_retry_rate = tx->rate; |
597 | tx->u.tx.rate = rsel.probe; | 620 | tx->rate = rsel.probe; |
598 | } else | 621 | } else |
599 | tx->u.tx.control->alt_retry_rate = -1; | 622 | tx->control->alt_retry_rate = NULL; |
600 | 623 | ||
601 | if (!tx->u.tx.rate) | 624 | if (!tx->rate) |
602 | return TXRX_DROP; | 625 | return TX_DROP; |
603 | } else | 626 | } else |
604 | tx->u.tx.control->alt_retry_rate = -1; | 627 | tx->control->alt_retry_rate = NULL; |
605 | 628 | ||
606 | if (tx->u.tx.mode->mode == MODE_IEEE80211G && | 629 | if (tx->sdata->bss_conf.use_cts_prot && |
607 | tx->sdata->bss_conf.use_cts_prot && | 630 | (tx->flags & IEEE80211_TX_FRAGMENTED) && rsel.nonerp) { |
608 | (tx->flags & IEEE80211_TXRXD_FRAGMENTED) && rsel.nonerp) { | 631 | tx->last_frag_rate = tx->rate; |
609 | tx->u.tx.last_frag_rate = tx->u.tx.rate; | ||
610 | if (rsel.probe) | 632 | if (rsel.probe) |
611 | tx->flags &= ~IEEE80211_TXRXD_TXPROBE_LAST_FRAG; | 633 | tx->flags &= ~IEEE80211_TX_PROBE_LAST_FRAG; |
612 | else | 634 | else |
613 | tx->flags |= IEEE80211_TXRXD_TXPROBE_LAST_FRAG; | 635 | tx->flags |= IEEE80211_TX_PROBE_LAST_FRAG; |
614 | tx->u.tx.rate = rsel.nonerp; | 636 | tx->rate = rsel.nonerp; |
615 | tx->u.tx.control->rate = rsel.nonerp; | 637 | tx->control->tx_rate = rsel.nonerp; |
616 | tx->u.tx.control->flags &= ~IEEE80211_TXCTL_RATE_CTRL_PROBE; | 638 | tx->control->flags &= ~IEEE80211_TXCTL_RATE_CTRL_PROBE; |
617 | } else { | 639 | } else { |
618 | tx->u.tx.last_frag_rate = tx->u.tx.rate; | 640 | tx->last_frag_rate = tx->rate; |
619 | tx->u.tx.control->rate = tx->u.tx.rate; | 641 | tx->control->tx_rate = tx->rate; |
620 | } | 642 | } |
621 | tx->u.tx.control->tx_rate = tx->u.tx.rate->val; | 643 | tx->control->tx_rate = tx->rate; |
622 | 644 | ||
623 | return TXRX_CONTINUE; | 645 | return TX_CONTINUE; |
624 | } | 646 | } |
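The rate-control handler stops using the old hw_mode/val pairs and instead looks up the wiphy band table for the configured channel, passing struct ieee80211_rate pointers around (tx_rate, alt_retry_rate, the probe rate). The lookup the hunk introduces boils down to the helper below; it is a sketch that would live inside mac80211 and rely on its internal ieee80211_local type:

/* Sketch only: fetch the bitrate table for the currently tuned band. */
static struct ieee80211_supported_band *
current_band(struct ieee80211_local *local)
{
	return local->hw.wiphy->bands[local->hw.conf.channel->band];
}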
625 | 647 | ||
626 | static ieee80211_txrx_result | 648 | static ieee80211_tx_result |
627 | ieee80211_tx_h_misc(struct ieee80211_txrx_data *tx) | 649 | ieee80211_tx_h_misc(struct ieee80211_tx_data *tx) |
628 | { | 650 | { |
629 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx->skb->data; | 651 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx->skb->data; |
630 | u16 fc = le16_to_cpu(hdr->frame_control); | 652 | u16 fc = le16_to_cpu(hdr->frame_control); |
631 | u16 dur; | 653 | u16 dur; |
632 | struct ieee80211_tx_control *control = tx->u.tx.control; | 654 | struct ieee80211_tx_control *control = tx->control; |
633 | struct ieee80211_hw_mode *mode = tx->u.tx.mode; | ||
634 | 655 | ||
635 | if (!control->retry_limit) { | 656 | if (!control->retry_limit) { |
636 | if (!is_multicast_ether_addr(hdr->addr1)) { | 657 | if (!is_multicast_ether_addr(hdr->addr1)) { |
@@ -652,20 +673,20 @@ ieee80211_tx_h_misc(struct ieee80211_txrx_data *tx) | |||
652 | } | 673 | } |
653 | } | 674 | } |
654 | 675 | ||
655 | if (tx->flags & IEEE80211_TXRXD_FRAGMENTED) { | 676 | if (tx->flags & IEEE80211_TX_FRAGMENTED) { |
656 | /* Do not use multiple retry rates when sending fragmented | 677 | /* Do not use multiple retry rates when sending fragmented |
657 | * frames. | 678 | * frames. |
658 | * TODO: The last fragment could still use multiple retry | 679 | * TODO: The last fragment could still use multiple retry |
659 | * rates. */ | 680 | * rates. */ |
660 | control->alt_retry_rate = -1; | 681 | control->alt_retry_rate = NULL; |
661 | } | 682 | } |
662 | 683 | ||
663 | /* Use CTS protection for unicast frames sent using extended rates if | 684 | /* Use CTS protection for unicast frames sent using extended rates if |
664 | * there are associated non-ERP stations and RTS/CTS is not configured | 685 | * there are associated non-ERP stations and RTS/CTS is not configured |
665 | * for the frame. */ | 686 | * for the frame. */ |
666 | if (mode->mode == MODE_IEEE80211G && | 687 | if ((tx->sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) && |
667 | (tx->u.tx.rate->flags & IEEE80211_RATE_ERP) && | 688 | (tx->rate->flags & IEEE80211_RATE_ERP_G) && |
668 | (tx->flags & IEEE80211_TXRXD_TXUNICAST) && | 689 | (tx->flags & IEEE80211_TX_UNICAST) && |
669 | tx->sdata->bss_conf.use_cts_prot && | 690 | tx->sdata->bss_conf.use_cts_prot && |
670 | !(control->flags & IEEE80211_TXCTL_USE_RTS_CTS)) | 691 | !(control->flags & IEEE80211_TXCTL_USE_RTS_CTS)) |
671 | control->flags |= IEEE80211_TXCTL_USE_CTS_PROTECT; | 692 | control->flags |= IEEE80211_TXCTL_USE_CTS_PROTECT; |
@@ -674,62 +695,76 @@ ieee80211_tx_h_misc(struct ieee80211_txrx_data *tx) | |||
674 | * short preambles at the selected rate and short preambles are | 695 | * short preambles at the selected rate and short preambles are |
675 | * available on the network at the current point in time. */ | 696 | * available on the network at the current point in time. */ |
676 | if (((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) && | 697 | if (((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) && |
677 | (tx->u.tx.rate->flags & IEEE80211_RATE_PREAMBLE2) && | 698 | (tx->rate->flags & IEEE80211_RATE_SHORT_PREAMBLE) && |
678 | tx->sdata->bss_conf.use_short_preamble && | 699 | tx->sdata->bss_conf.use_short_preamble && |
679 | (!tx->sta || (tx->sta->flags & WLAN_STA_SHORT_PREAMBLE))) { | 700 | (!tx->sta || (tx->sta->flags & WLAN_STA_SHORT_PREAMBLE))) { |
680 | tx->u.tx.control->tx_rate = tx->u.tx.rate->val2; | 701 | tx->control->flags |= IEEE80211_TXCTL_SHORT_PREAMBLE; |
681 | } | 702 | } |
682 | 703 | ||
683 | /* Setup duration field for the first fragment of the frame. Duration | 704 | /* Setup duration field for the first fragment of the frame. Duration |
684 | * for remaining fragments will be updated when they are being sent | 705 | * for remaining fragments will be updated when they are being sent |
685 | * to low-level driver in ieee80211_tx(). */ | 706 | * to low-level driver in ieee80211_tx(). */ |
686 | dur = ieee80211_duration(tx, is_multicast_ether_addr(hdr->addr1), | 707 | dur = ieee80211_duration(tx, is_multicast_ether_addr(hdr->addr1), |
687 | (tx->flags & IEEE80211_TXRXD_FRAGMENTED) ? | 708 | (tx->flags & IEEE80211_TX_FRAGMENTED) ? |
688 | tx->u.tx.extra_frag[0]->len : 0); | 709 | tx->extra_frag[0]->len : 0); |
689 | hdr->duration_id = cpu_to_le16(dur); | 710 | hdr->duration_id = cpu_to_le16(dur); |
690 | 711 | ||
691 | if ((control->flags & IEEE80211_TXCTL_USE_RTS_CTS) || | 712 | if ((control->flags & IEEE80211_TXCTL_USE_RTS_CTS) || |
692 | (control->flags & IEEE80211_TXCTL_USE_CTS_PROTECT)) { | 713 | (control->flags & IEEE80211_TXCTL_USE_CTS_PROTECT)) { |
693 | struct ieee80211_rate *rate; | 714 | struct ieee80211_supported_band *sband; |
715 | struct ieee80211_rate *rate, *baserate; | ||
716 | int idx; | ||
717 | |||
718 | sband = tx->local->hw.wiphy->bands[ | ||
719 | tx->local->hw.conf.channel->band]; | ||
694 | 720 | ||
695 | /* Do not use multiple retry rates when using RTS/CTS */ | 721 | /* Do not use multiple retry rates when using RTS/CTS */ |
696 | control->alt_retry_rate = -1; | 722 | control->alt_retry_rate = NULL; |
697 | 723 | ||
698 | /* Use min(data rate, max base rate) as CTS/RTS rate */ | 724 | /* Use min(data rate, max base rate) as CTS/RTS rate */ |
699 | rate = tx->u.tx.rate; | 725 | rate = tx->rate; |
700 | while (rate > mode->rates && | 726 | baserate = NULL; |
701 | !(rate->flags & IEEE80211_RATE_BASIC)) | 727 | |
702 | rate--; | 728 | for (idx = 0; idx < sband->n_bitrates; idx++) { |
729 | if (sband->bitrates[idx].bitrate > rate->bitrate) | ||
730 | continue; | ||
731 | if (tx->sdata->basic_rates & BIT(idx) && | ||
732 | (!baserate || | ||
733 | (baserate->bitrate < sband->bitrates[idx].bitrate))) | ||
734 | baserate = &sband->bitrates[idx]; | ||
735 | } | ||
703 | 736 | ||
704 | control->rts_cts_rate = rate->val; | 737 | if (baserate) |
705 | control->rts_rate = rate; | 738 | control->rts_cts_rate = baserate; |
739 | else | ||
740 | control->rts_cts_rate = &sband->bitrates[0]; | ||
706 | } | 741 | } |
707 | 742 | ||
708 | if (tx->sta) { | 743 | if (tx->sta) { |
709 | tx->sta->tx_packets++; | 744 | tx->sta->tx_packets++; |
710 | tx->sta->tx_fragments++; | 745 | tx->sta->tx_fragments++; |
711 | tx->sta->tx_bytes += tx->skb->len; | 746 | tx->sta->tx_bytes += tx->skb->len; |
712 | if (tx->u.tx.extra_frag) { | 747 | if (tx->extra_frag) { |
713 | int i; | 748 | int i; |
714 | tx->sta->tx_fragments += tx->u.tx.num_extra_frag; | 749 | tx->sta->tx_fragments += tx->num_extra_frag; |
715 | for (i = 0; i < tx->u.tx.num_extra_frag; i++) { | 750 | for (i = 0; i < tx->num_extra_frag; i++) { |
716 | tx->sta->tx_bytes += | 751 | tx->sta->tx_bytes += |
717 | tx->u.tx.extra_frag[i]->len; | 752 | tx->extra_frag[i]->len; |
718 | } | 753 | } |
719 | } | 754 | } |
720 | } | 755 | } |
721 | 756 | ||
722 | return TXRX_CONTINUE; | 757 | return TX_CONTINUE; |
723 | } | 758 | } |
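The RTS/CTS rate is now chosen by scanning the band's bitrate table for the highest configured basic rate that does not exceed the data rate, falling back to the band's first entry when none qualifies. The same selection, sketched over a plain bitrate array:

/*
 * Sketch only: pick the RTS/CTS rate index as in the hunk above --
 * the fastest basic rate no faster than the data rate, else entry 0.
 */
static int pick_rts_cts_rate(const int *bitrates, int n_bitrates,
			     unsigned int basic_rate_mask, int data_bitrate)
{
	int idx, best = -1;

	for (idx = 0; idx < n_bitrates; idx++) {
		if (bitrates[idx] > data_bitrate)
			continue;
		if (!(basic_rate_mask & (1U << idx)))
			continue;
		if (best < 0 || bitrates[idx] > bitrates[best])
			best = idx;
	}

	/* No usable basic rate: fall back to the band's first entry. */
	return best >= 0 ? best : 0;
}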
724 | 759 | ||
725 | static ieee80211_txrx_result | 760 | static ieee80211_tx_result |
726 | ieee80211_tx_h_load_stats(struct ieee80211_txrx_data *tx) | 761 | ieee80211_tx_h_load_stats(struct ieee80211_tx_data *tx) |
727 | { | 762 | { |
728 | struct ieee80211_local *local = tx->local; | 763 | struct ieee80211_local *local = tx->local; |
729 | struct ieee80211_hw_mode *mode = tx->u.tx.mode; | ||
730 | struct sk_buff *skb = tx->skb; | 764 | struct sk_buff *skb = tx->skb; |
731 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | 765 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; |
732 | u32 load = 0, hdrtime; | 766 | u32 load = 0, hdrtime; |
767 | struct ieee80211_rate *rate = tx->rate; | ||
733 | 768 | ||
734 | /* TODO: this could be part of tx_status handling, so that the number | 769 | /* TODO: this could be part of tx_status handling, so that the number |
735 | * of retries would be known; TX rate should in that case be stored | 770 | * of retries would be known; TX rate should in that case be stored |
@@ -740,9 +775,9 @@ ieee80211_tx_h_load_stats(struct ieee80211_txrx_data *tx) | |||
740 | /* 1 bit at 1 Mbit/s takes 1 usec; in channel_use values, | 775 | /* 1 bit at 1 Mbit/s takes 1 usec; in channel_use values, |
741 | * 1 usec = 1/8 * (1080 / 10) = 13.5 */ | 776 | * 1 usec = 1/8 * (1080 / 10) = 13.5 */ |
742 | 777 | ||
743 | if (mode->mode == MODE_IEEE80211A || | 778 | if (tx->channel->band == IEEE80211_BAND_5GHZ || |
744 | (mode->mode == MODE_IEEE80211G && | 779 | (tx->channel->band == IEEE80211_BAND_2GHZ && |
745 | tx->u.tx.rate->flags & IEEE80211_RATE_ERP)) | 780 | rate->flags & IEEE80211_RATE_ERP_G)) |
746 | hdrtime = CHAN_UTIL_HDR_SHORT; | 781 | hdrtime = CHAN_UTIL_HDR_SHORT; |
747 | else | 782 | else |
748 | hdrtime = CHAN_UTIL_HDR_LONG; | 783 | hdrtime = CHAN_UTIL_HDR_LONG; |
@@ -751,19 +786,20 @@ ieee80211_tx_h_load_stats(struct ieee80211_txrx_data *tx) | |||
751 | if (!is_multicast_ether_addr(hdr->addr1)) | 786 | if (!is_multicast_ether_addr(hdr->addr1)) |
752 | load += hdrtime; | 787 | load += hdrtime; |
753 | 788 | ||
754 | if (tx->u.tx.control->flags & IEEE80211_TXCTL_USE_RTS_CTS) | 789 | if (tx->control->flags & IEEE80211_TXCTL_USE_RTS_CTS) |
755 | load += 2 * hdrtime; | 790 | load += 2 * hdrtime; |
756 | else if (tx->u.tx.control->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) | 791 | else if (tx->control->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) |
757 | load += hdrtime; | 792 | load += hdrtime; |
758 | 793 | ||
759 | load += skb->len * tx->u.tx.rate->rate_inv; | 794 | /* TODO: optimise again */ |
795 | load += skb->len * CHAN_UTIL_RATE_LCM / rate->bitrate; | ||
760 | 796 | ||
761 | if (tx->u.tx.extra_frag) { | 797 | if (tx->extra_frag) { |
762 | int i; | 798 | int i; |
763 | for (i = 0; i < tx->u.tx.num_extra_frag; i++) { | 799 | for (i = 0; i < tx->num_extra_frag; i++) { |
764 | load += 2 * hdrtime; | 800 | load += 2 * hdrtime; |
765 | load += tx->u.tx.extra_frag[i]->len * | 801 | load += tx->extra_frag[i]->len * |
766 | tx->u.tx.rate->rate; | 802 | tx->rate->bitrate; |
767 | } | 803 | } |
768 | } | 804 | } |
769 | 805 | ||
@@ -774,13 +810,12 @@ ieee80211_tx_h_load_stats(struct ieee80211_txrx_data *tx) | |||
774 | tx->sta->channel_use_raw += load; | 810 | tx->sta->channel_use_raw += load; |
775 | tx->sdata->channel_use_raw += load; | 811 | tx->sdata->channel_use_raw += load; |
776 | 812 | ||
777 | return TXRX_CONTINUE; | 813 | return TX_CONTINUE; |
778 | } | 814 | } |
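The load accounting now derives per-frame airtime from rate->bitrate (in 100 kbps units). Per the comment at the top of the handler, 1 us of airtime is worth 1/8 * (1080 / 10) = 13.5 channel-use units, so a frame's load falls as the bitrate rises. A hypothetical helper (names made up, not the patch's code) applying exactly that conversion:

/*
 * Sketch only: bytes at a given bitrate -> channel-use units.
 * time_us = len * 8 / (bitrate/10 Mbit/s); load = 13.5 * time_us,
 * kept in integer arithmetic as 27/2.
 */
static unsigned int frame_load(unsigned int frame_len_bytes,
			       unsigned int bitrate_100kbps)
{
	return (frame_len_bytes * 8u * 10u * 27u) /
	       (2u * bitrate_100kbps);
}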
779 | 815 | ||
780 | /* TODO: implement register/unregister functions for adding TX/RX handlers | ||
781 | * into ordered list */ | ||
782 | 816 | ||
783 | ieee80211_tx_handler ieee80211_tx_handlers[] = | 817 | typedef ieee80211_tx_result (*ieee80211_tx_handler)(struct ieee80211_tx_data *); |
818 | static ieee80211_tx_handler ieee80211_tx_handlers[] = | ||
784 | { | 819 | { |
785 | ieee80211_tx_h_check_assoc, | 820 | ieee80211_tx_h_check_assoc, |
786 | ieee80211_tx_h_sequence, | 821 | ieee80211_tx_h_sequence, |
@@ -801,8 +836,8 @@ ieee80211_tx_handler ieee80211_tx_handlers[] = | |||
801 | * deal with packet injection down monitor interface | 836 | * deal with packet injection down monitor interface |
802 | * with Radiotap Header -- only called for monitor mode interface | 837 | * with Radiotap Header -- only called for monitor mode interface |
803 | */ | 838 | */ |
804 | static ieee80211_txrx_result | 839 | static ieee80211_tx_result |
805 | __ieee80211_parse_tx_radiotap(struct ieee80211_txrx_data *tx, | 840 | __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx, |
806 | struct sk_buff *skb) | 841 | struct sk_buff *skb) |
807 | { | 842 | { |
808 | /* | 843 | /* |
@@ -816,13 +851,15 @@ __ieee80211_parse_tx_radiotap(struct ieee80211_txrx_data *tx, | |||
816 | struct ieee80211_radiotap_iterator iterator; | 851 | struct ieee80211_radiotap_iterator iterator; |
817 | struct ieee80211_radiotap_header *rthdr = | 852 | struct ieee80211_radiotap_header *rthdr = |
818 | (struct ieee80211_radiotap_header *) skb->data; | 853 | (struct ieee80211_radiotap_header *) skb->data; |
819 | struct ieee80211_hw_mode *mode = tx->local->hw.conf.mode; | 854 | struct ieee80211_supported_band *sband; |
820 | int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len); | 855 | int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len); |
821 | struct ieee80211_tx_control *control = tx->u.tx.control; | 856 | struct ieee80211_tx_control *control = tx->control; |
857 | |||
858 | sband = tx->local->hw.wiphy->bands[tx->local->hw.conf.channel->band]; | ||
822 | 859 | ||
823 | control->flags |= IEEE80211_TXCTL_DO_NOT_ENCRYPT; | 860 | control->flags |= IEEE80211_TXCTL_DO_NOT_ENCRYPT; |
824 | tx->flags |= IEEE80211_TXRXD_TX_INJECTED; | 861 | tx->flags |= IEEE80211_TX_INJECTED; |
825 | tx->flags &= ~IEEE80211_TXRXD_FRAGMENTED; | 862 | tx->flags &= ~IEEE80211_TX_FRAGMENTED; |
826 | 863 | ||
827 | /* | 864 | /* |
828 | * for every radiotap entry that is present | 865 | * for every radiotap entry that is present |
@@ -852,11 +889,13 @@ __ieee80211_parse_tx_radiotap(struct ieee80211_txrx_data *tx, | |||
852 | * ieee80211 rate int is in 100kbps units eg, 0x0a=1Mbps | 889 | * ieee80211 rate int is in 100kbps units eg, 0x0a=1Mbps |
853 | */ | 890 | */ |
854 | target_rate = (*iterator.this_arg) * 5; | 891 | target_rate = (*iterator.this_arg) * 5; |
855 | for (i = 0; i < mode->num_rates; i++) { | 892 | for (i = 0; i < sband->n_bitrates; i++) { |
856 | struct ieee80211_rate *r = &mode->rates[i]; | 893 | struct ieee80211_rate *r; |
894 | |||
895 | r = &sband->bitrates[i]; | ||
857 | 896 | ||
858 | if (r->rate == target_rate) { | 897 | if (r->bitrate == target_rate) { |
859 | tx->u.tx.rate = r; | 898 | tx->rate = r; |
860 | break; | 899 | break; |
861 | } | 900 | } |
862 | } | 901 | } |
@@ -870,9 +909,11 @@ __ieee80211_parse_tx_radiotap(struct ieee80211_txrx_data *tx, | |||
870 | control->antenna_sel_tx = (*iterator.this_arg) + 1; | 909 | control->antenna_sel_tx = (*iterator.this_arg) + 1; |
871 | break; | 910 | break; |
872 | 911 | ||
912 | #if 0 | ||
873 | case IEEE80211_RADIOTAP_DBM_TX_POWER: | 913 | case IEEE80211_RADIOTAP_DBM_TX_POWER: |
874 | control->power_level = *iterator.this_arg; | 914 | control->power_level = *iterator.this_arg; |
875 | break; | 915 | break; |
916 | #endif | ||
876 | 917 | ||
877 | case IEEE80211_RADIOTAP_FLAGS: | 918 | case IEEE80211_RADIOTAP_FLAGS: |
878 | if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FCS) { | 919 | if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FCS) { |
@@ -884,7 +925,7 @@ __ieee80211_parse_tx_radiotap(struct ieee80211_txrx_data *tx, | |||
884 | * on transmission | 925 | * on transmission |
885 | */ | 926 | */ |
886 | if (skb->len < (iterator.max_length + FCS_LEN)) | 927 | if (skb->len < (iterator.max_length + FCS_LEN)) |
887 | return TXRX_DROP; | 928 | return TX_DROP; |
888 | 929 | ||
889 | skb_trim(skb, skb->len - FCS_LEN); | 930 | skb_trim(skb, skb->len - FCS_LEN); |
890 | } | 931 | } |
@@ -892,7 +933,7 @@ __ieee80211_parse_tx_radiotap(struct ieee80211_txrx_data *tx, | |||
892 | control->flags &= | 933 | control->flags &= |
893 | ~IEEE80211_TXCTL_DO_NOT_ENCRYPT; | 934 | ~IEEE80211_TXCTL_DO_NOT_ENCRYPT; |
894 | if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FRAG) | 935 | if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FRAG) |
895 | tx->flags |= IEEE80211_TXRXD_FRAGMENTED; | 936 | tx->flags |= IEEE80211_TX_FRAGMENTED; |
896 | break; | 937 | break; |
897 | 938 | ||
898 | /* | 939 | /* |
@@ -907,7 +948,7 @@ __ieee80211_parse_tx_radiotap(struct ieee80211_txrx_data *tx, | |||
907 | } | 948 | } |
908 | 949 | ||
909 | if (ret != -ENOENT) /* ie, if we didn't simply run out of fields */ | 950 | if (ret != -ENOENT) /* ie, if we didn't simply run out of fields */ |
910 | return TXRX_DROP; | 951 | return TX_DROP; |
911 | 952 | ||
912 | /* | 953 | /* |
913 | * remove the radiotap header | 954 | * remove the radiotap header |
@@ -916,14 +957,14 @@ __ieee80211_parse_tx_radiotap(struct ieee80211_txrx_data *tx, | |||
916 | */ | 957 | */ |
917 | skb_pull(skb, iterator.max_length); | 958 | skb_pull(skb, iterator.max_length); |
918 | 959 | ||
919 | return TXRX_CONTINUE; | 960 | return TX_CONTINUE; |
920 | } | 961 | } |
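Radiotap TX-rate parsing now matches against the band's bitrate table. The radiotap rate byte is in 500 kbps units, so multiplying by 5 yields the 100 kbps units used by struct ieee80211_rate; a radiotap value of 0x16 (22), for example, would select the 11 Mbit/s entry (bitrate 110). Sketched over a plain array:

/*
 * Sketch only: map a radiotap rate byte onto an index into a bitrate
 * table expressed in 100 kbps units, as the parser above does.
 */
static int radiotap_rate_to_index(unsigned char radiotap_rate,
				  const int *bitrates, int n_bitrates)
{
	int target = radiotap_rate * 5;	/* 500 kbps -> 100 kbps units */
	int i;

	for (i = 0; i < n_bitrates; i++)
		if (bitrates[i] == target)
			return i;

	return -1;	/* no exact match: leave the rate unset */
}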
921 | 962 | ||
922 | /* | 963 | /* |
923 | * initialises @tx | 964 | * initialises @tx |
924 | */ | 965 | */ |
925 | static ieee80211_txrx_result | 966 | static ieee80211_tx_result |
926 | __ieee80211_tx_prepare(struct ieee80211_txrx_data *tx, | 967 | __ieee80211_tx_prepare(struct ieee80211_tx_data *tx, |
927 | struct sk_buff *skb, | 968 | struct sk_buff *skb, |
928 | struct net_device *dev, | 969 | struct net_device *dev, |
929 | struct ieee80211_tx_control *control) | 970 | struct ieee80211_tx_control *control) |
@@ -939,18 +980,18 @@ __ieee80211_tx_prepare(struct ieee80211_txrx_data *tx, | |||
939 | tx->dev = dev; /* use original interface */ | 980 | tx->dev = dev; /* use original interface */ |
940 | tx->local = local; | 981 | tx->local = local; |
941 | tx->sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 982 | tx->sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
942 | tx->u.tx.control = control; | 983 | tx->control = control; |
943 | /* | 984 | /* |
944 | * Set this flag (used below to indicate "automatic fragmentation"), | 985 | * Set this flag (used below to indicate "automatic fragmentation"), |
945 | * it will be cleared/left by radiotap as desired. | 986 | * it will be cleared/left by radiotap as desired. |
946 | */ | 987 | */ |
947 | tx->flags |= IEEE80211_TXRXD_FRAGMENTED; | 988 | tx->flags |= IEEE80211_TX_FRAGMENTED; |
948 | 989 | ||
949 | /* process and remove the injection radiotap header */ | 990 | /* process and remove the injection radiotap header */ |
950 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 991 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
951 | if (unlikely(sdata->vif.type == IEEE80211_IF_TYPE_MNTR)) { | 992 | if (unlikely(sdata->vif.type == IEEE80211_IF_TYPE_MNTR)) { |
952 | if (__ieee80211_parse_tx_radiotap(tx, skb) == TXRX_DROP) | 993 | if (__ieee80211_parse_tx_radiotap(tx, skb) == TX_DROP) |
953 | return TXRX_DROP; | 994 | return TX_DROP; |
954 | 995 | ||
955 | /* | 996 | /* |
956 | * __ieee80211_parse_tx_radiotap has now removed | 997 | * __ieee80211_parse_tx_radiotap has now removed |
@@ -965,27 +1006,27 @@ __ieee80211_tx_prepare(struct ieee80211_txrx_data *tx, | |||
965 | tx->fc = le16_to_cpu(hdr->frame_control); | 1006 | tx->fc = le16_to_cpu(hdr->frame_control); |
966 | 1007 | ||
967 | if (is_multicast_ether_addr(hdr->addr1)) { | 1008 | if (is_multicast_ether_addr(hdr->addr1)) { |
968 | tx->flags &= ~IEEE80211_TXRXD_TXUNICAST; | 1009 | tx->flags &= ~IEEE80211_TX_UNICAST; |
969 | control->flags |= IEEE80211_TXCTL_NO_ACK; | 1010 | control->flags |= IEEE80211_TXCTL_NO_ACK; |
970 | } else { | 1011 | } else { |
971 | tx->flags |= IEEE80211_TXRXD_TXUNICAST; | 1012 | tx->flags |= IEEE80211_TX_UNICAST; |
972 | control->flags &= ~IEEE80211_TXCTL_NO_ACK; | 1013 | control->flags &= ~IEEE80211_TXCTL_NO_ACK; |
973 | } | 1014 | } |
974 | 1015 | ||
975 | if (tx->flags & IEEE80211_TXRXD_FRAGMENTED) { | 1016 | if (tx->flags & IEEE80211_TX_FRAGMENTED) { |
976 | if ((tx->flags & IEEE80211_TXRXD_TXUNICAST) && | 1017 | if ((tx->flags & IEEE80211_TX_UNICAST) && |
977 | skb->len + FCS_LEN > local->fragmentation_threshold && | 1018 | skb->len + FCS_LEN > local->fragmentation_threshold && |
978 | !local->ops->set_frag_threshold) | 1019 | !local->ops->set_frag_threshold) |
979 | tx->flags |= IEEE80211_TXRXD_FRAGMENTED; | 1020 | tx->flags |= IEEE80211_TX_FRAGMENTED; |
980 | else | 1021 | else |
981 | tx->flags &= ~IEEE80211_TXRXD_FRAGMENTED; | 1022 | tx->flags &= ~IEEE80211_TX_FRAGMENTED; |
982 | } | 1023 | } |
983 | 1024 | ||
984 | if (!tx->sta) | 1025 | if (!tx->sta) |
985 | control->flags |= IEEE80211_TXCTL_CLEAR_DST_MASK; | 1026 | control->flags |= IEEE80211_TXCTL_CLEAR_PS_FILT; |
986 | else if (tx->sta->clear_dst_mask) { | 1027 | else if (tx->sta->flags & WLAN_STA_CLEAR_PS_FILT) { |
987 | control->flags |= IEEE80211_TXCTL_CLEAR_DST_MASK; | 1028 | control->flags |= IEEE80211_TXCTL_CLEAR_PS_FILT; |
988 | tx->sta->clear_dst_mask = 0; | 1029 | tx->sta->flags &= ~WLAN_STA_CLEAR_PS_FILT; |
989 | } | 1030 | } |
990 | 1031 | ||
991 | hdrlen = ieee80211_get_hdrlen(tx->fc); | 1032 | hdrlen = ieee80211_get_hdrlen(tx->fc); |
@@ -995,13 +1036,13 @@ __ieee80211_tx_prepare(struct ieee80211_txrx_data *tx, | |||
995 | } | 1036 | } |
996 | control->flags |= IEEE80211_TXCTL_FIRST_FRAGMENT; | 1037 | control->flags |= IEEE80211_TXCTL_FIRST_FRAGMENT; |
997 | 1038 | ||
998 | return TXRX_CONTINUE; | 1039 | return TX_CONTINUE; |
999 | } | 1040 | } |
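__ieee80211_tx_prepare() keeps the same software-fragmentation rule under the new flag names: only unicast frames whose length plus FCS exceeds the fragmentation threshold are fragmented, and only when the driver offers no set_frag_threshold callback. As a sketch with plain inputs:

/*
 * Sketch only: the fragmentation decision from the hunk above, with
 * the tx/local state flattened into parameters.
 */
static int needs_sw_fragmentation(int unicast, unsigned int frame_len,
				  unsigned int fcs_len,
				  unsigned int frag_threshold,
				  int driver_handles_frag)
{
	return unicast &&
	       frame_len + fcs_len > frag_threshold &&
	       !driver_handles_frag;
}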
1000 | 1041 | ||
1001 | /* | 1042 | /* |
1002 | * NB: @tx is uninitialised when passed in here | 1043 | * NB: @tx is uninitialised when passed in here |
1003 | */ | 1044 | */ |
1004 | static int ieee80211_tx_prepare(struct ieee80211_txrx_data *tx, | 1045 | static int ieee80211_tx_prepare(struct ieee80211_tx_data *tx, |
1005 | struct sk_buff *skb, | 1046 | struct sk_buff *skb, |
1006 | struct net_device *mdev, | 1047 | struct net_device *mdev, |
1007 | struct ieee80211_tx_control *control) | 1048 | struct ieee80211_tx_control *control) |
@@ -1024,9 +1065,9 @@ static int ieee80211_tx_prepare(struct ieee80211_txrx_data *tx, | |||
1024 | } | 1065 | } |
1025 | 1066 | ||
1026 | static int __ieee80211_tx(struct ieee80211_local *local, struct sk_buff *skb, | 1067 | static int __ieee80211_tx(struct ieee80211_local *local, struct sk_buff *skb, |
1027 | struct ieee80211_txrx_data *tx) | 1068 | struct ieee80211_tx_data *tx) |
1028 | { | 1069 | { |
1029 | struct ieee80211_tx_control *control = tx->u.tx.control; | 1070 | struct ieee80211_tx_control *control = tx->control; |
1030 | int ret, i; | 1071 | int ret, i; |
1031 | 1072 | ||
1032 | if (!ieee80211_qdisc_installed(local->mdev) && | 1073 | if (!ieee80211_qdisc_installed(local->mdev) && |
@@ -1043,20 +1084,20 @@ static int __ieee80211_tx(struct ieee80211_local *local, struct sk_buff *skb, | |||
1043 | local->mdev->trans_start = jiffies; | 1084 | local->mdev->trans_start = jiffies; |
1044 | ieee80211_led_tx(local, 1); | 1085 | ieee80211_led_tx(local, 1); |
1045 | } | 1086 | } |
1046 | if (tx->u.tx.extra_frag) { | 1087 | if (tx->extra_frag) { |
1047 | control->flags &= ~(IEEE80211_TXCTL_USE_RTS_CTS | | 1088 | control->flags &= ~(IEEE80211_TXCTL_USE_RTS_CTS | |
1048 | IEEE80211_TXCTL_USE_CTS_PROTECT | | 1089 | IEEE80211_TXCTL_USE_CTS_PROTECT | |
1049 | IEEE80211_TXCTL_CLEAR_DST_MASK | | 1090 | IEEE80211_TXCTL_CLEAR_PS_FILT | |
1050 | IEEE80211_TXCTL_FIRST_FRAGMENT); | 1091 | IEEE80211_TXCTL_FIRST_FRAGMENT); |
1051 | for (i = 0; i < tx->u.tx.num_extra_frag; i++) { | 1092 | for (i = 0; i < tx->num_extra_frag; i++) { |
1052 | if (!tx->u.tx.extra_frag[i]) | 1093 | if (!tx->extra_frag[i]) |
1053 | continue; | 1094 | continue; |
1054 | if (__ieee80211_queue_stopped(local, control->queue)) | 1095 | if (__ieee80211_queue_stopped(local, control->queue)) |
1055 | return IEEE80211_TX_FRAG_AGAIN; | 1096 | return IEEE80211_TX_FRAG_AGAIN; |
1056 | if (i == tx->u.tx.num_extra_frag) { | 1097 | if (i == tx->num_extra_frag) { |
1057 | control->tx_rate = tx->u.tx.last_frag_hwrate; | 1098 | control->tx_rate = tx->last_frag_rate; |
1058 | control->rate = tx->u.tx.last_frag_rate; | 1099 | |
1059 | if (tx->flags & IEEE80211_TXRXD_TXPROBE_LAST_FRAG) | 1100 | if (tx->flags & IEEE80211_TX_PROBE_LAST_FRAG) |
1060 | control->flags |= | 1101 | control->flags |= |
1061 | IEEE80211_TXCTL_RATE_CTRL_PROBE; | 1102 | IEEE80211_TXCTL_RATE_CTRL_PROBE; |
1062 | else | 1103 | else |
@@ -1066,18 +1107,18 @@ static int __ieee80211_tx(struct ieee80211_local *local, struct sk_buff *skb, | |||
1066 | 1107 | ||
1067 | ieee80211_dump_frame(wiphy_name(local->hw.wiphy), | 1108 | ieee80211_dump_frame(wiphy_name(local->hw.wiphy), |
1068 | "TX to low-level driver", | 1109 | "TX to low-level driver", |
1069 | tx->u.tx.extra_frag[i]); | 1110 | tx->extra_frag[i]); |
1070 | ret = local->ops->tx(local_to_hw(local), | 1111 | ret = local->ops->tx(local_to_hw(local), |
1071 | tx->u.tx.extra_frag[i], | 1112 | tx->extra_frag[i], |
1072 | control); | 1113 | control); |
1073 | if (ret) | 1114 | if (ret) |
1074 | return IEEE80211_TX_FRAG_AGAIN; | 1115 | return IEEE80211_TX_FRAG_AGAIN; |
1075 | local->mdev->trans_start = jiffies; | 1116 | local->mdev->trans_start = jiffies; |
1076 | ieee80211_led_tx(local, 1); | 1117 | ieee80211_led_tx(local, 1); |
1077 | tx->u.tx.extra_frag[i] = NULL; | 1118 | tx->extra_frag[i] = NULL; |
1078 | } | 1119 | } |
1079 | kfree(tx->u.tx.extra_frag); | 1120 | kfree(tx->extra_frag); |
1080 | tx->u.tx.extra_frag = NULL; | 1121 | tx->extra_frag = NULL; |
1081 | } | 1122 | } |
1082 | return IEEE80211_TX_OK; | 1123 | return IEEE80211_TX_OK; |
1083 | } | 1124 | } |
@@ -1088,8 +1129,8 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb, | |||
1088 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 1129 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); |
1089 | struct sta_info *sta; | 1130 | struct sta_info *sta; |
1090 | ieee80211_tx_handler *handler; | 1131 | ieee80211_tx_handler *handler; |
1091 | struct ieee80211_txrx_data tx; | 1132 | struct ieee80211_tx_data tx; |
1092 | ieee80211_txrx_result res = TXRX_DROP, res_prepare; | 1133 | ieee80211_tx_result res = TX_DROP, res_prepare; |
1093 | int ret, i; | 1134 | int ret, i; |
1094 | 1135 | ||
1095 | WARN_ON(__ieee80211_queue_pending(local, control->queue)); | 1136 | WARN_ON(__ieee80211_queue_pending(local, control->queue)); |
@@ -1099,59 +1140,52 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb, | |||
1099 | return 0; | 1140 | return 0; |
1100 | } | 1141 | } |
1101 | 1142 | ||
1143 | rcu_read_lock(); | ||
1144 | |||
1102 | /* initialises tx */ | 1145 | /* initialises tx */ |
1103 | res_prepare = __ieee80211_tx_prepare(&tx, skb, dev, control); | 1146 | res_prepare = __ieee80211_tx_prepare(&tx, skb, dev, control); |
1104 | 1147 | ||
1105 | if (res_prepare == TXRX_DROP) { | 1148 | if (res_prepare == TX_DROP) { |
1106 | dev_kfree_skb(skb); | 1149 | dev_kfree_skb(skb); |
1150 | rcu_read_unlock(); | ||
1107 | return 0; | 1151 | return 0; |
1108 | } | 1152 | } |
1109 | 1153 | ||
1110 | /* | ||
1111 | * key references are protected using RCU and this requires that | ||
1112 | * we are in a read-site RCU section during receive processing | ||
1113 | */ | ||
1114 | rcu_read_lock(); | ||
1115 | |||
1116 | sta = tx.sta; | 1154 | sta = tx.sta; |
1117 | tx.u.tx.mode = local->hw.conf.mode; | 1155 | tx.channel = local->hw.conf.channel; |
1118 | 1156 | ||
1119 | for (handler = local->tx_handlers; *handler != NULL; | 1157 | for (handler = ieee80211_tx_handlers; *handler != NULL; |
1120 | handler++) { | 1158 | handler++) { |
1121 | res = (*handler)(&tx); | 1159 | res = (*handler)(&tx); |
1122 | if (res != TXRX_CONTINUE) | 1160 | if (res != TX_CONTINUE) |
1123 | break; | 1161 | break; |
1124 | } | 1162 | } |
1125 | 1163 | ||
1126 | skb = tx.skb; /* handlers are allowed to change skb */ | 1164 | skb = tx.skb; /* handlers are allowed to change skb */ |
1127 | 1165 | ||
1128 | if (sta) | 1166 | if (unlikely(res == TX_DROP)) { |
1129 | sta_info_put(sta); | ||
1130 | |||
1131 | if (unlikely(res == TXRX_DROP)) { | ||
1132 | I802_DEBUG_INC(local->tx_handlers_drop); | 1167 | I802_DEBUG_INC(local->tx_handlers_drop); |
1133 | goto drop; | 1168 | goto drop; |
1134 | } | 1169 | } |
1135 | 1170 | ||
1136 | if (unlikely(res == TXRX_QUEUED)) { | 1171 | if (unlikely(res == TX_QUEUED)) { |
1137 | I802_DEBUG_INC(local->tx_handlers_queued); | 1172 | I802_DEBUG_INC(local->tx_handlers_queued); |
1138 | rcu_read_unlock(); | 1173 | rcu_read_unlock(); |
1139 | return 0; | 1174 | return 0; |
1140 | } | 1175 | } |
1141 | 1176 | ||
1142 | if (tx.u.tx.extra_frag) { | 1177 | if (tx.extra_frag) { |
1143 | for (i = 0; i < tx.u.tx.num_extra_frag; i++) { | 1178 | for (i = 0; i < tx.num_extra_frag; i++) { |
1144 | int next_len, dur; | 1179 | int next_len, dur; |
1145 | struct ieee80211_hdr *hdr = | 1180 | struct ieee80211_hdr *hdr = |
1146 | (struct ieee80211_hdr *) | 1181 | (struct ieee80211_hdr *) |
1147 | tx.u.tx.extra_frag[i]->data; | 1182 | tx.extra_frag[i]->data; |
1148 | 1183 | ||
1149 | if (i + 1 < tx.u.tx.num_extra_frag) { | 1184 | if (i + 1 < tx.num_extra_frag) { |
1150 | next_len = tx.u.tx.extra_frag[i + 1]->len; | 1185 | next_len = tx.extra_frag[i + 1]->len; |
1151 | } else { | 1186 | } else { |
1152 | next_len = 0; | 1187 | next_len = 0; |
1153 | tx.u.tx.rate = tx.u.tx.last_frag_rate; | 1188 | tx.rate = tx.last_frag_rate; |
1154 | tx.u.tx.last_frag_hwrate = tx.u.tx.rate->val; | ||
1155 | } | 1189 | } |
1156 | dur = ieee80211_duration(&tx, 0, next_len); | 1190 | dur = ieee80211_duration(&tx, 0, next_len); |
1157 | hdr->duration_id = cpu_to_le16(dur); | 1191 | hdr->duration_id = cpu_to_le16(dur); |
@@ -1186,12 +1220,11 @@ retry: | |||
1186 | memcpy(&store->control, control, | 1220 | memcpy(&store->control, control, |
1187 | sizeof(struct ieee80211_tx_control)); | 1221 | sizeof(struct ieee80211_tx_control)); |
1188 | store->skb = skb; | 1222 | store->skb = skb; |
1189 | store->extra_frag = tx.u.tx.extra_frag; | 1223 | store->extra_frag = tx.extra_frag; |
1190 | store->num_extra_frag = tx.u.tx.num_extra_frag; | 1224 | store->num_extra_frag = tx.num_extra_frag; |
1191 | store->last_frag_hwrate = tx.u.tx.last_frag_hwrate; | 1225 | store->last_frag_rate = tx.last_frag_rate; |
1192 | store->last_frag_rate = tx.u.tx.last_frag_rate; | ||
1193 | store->last_frag_rate_ctrl_probe = | 1226 | store->last_frag_rate_ctrl_probe = |
1194 | !!(tx.flags & IEEE80211_TXRXD_TXPROBE_LAST_FRAG); | 1227 | !!(tx.flags & IEEE80211_TX_PROBE_LAST_FRAG); |
1195 | } | 1228 | } |
1196 | rcu_read_unlock(); | 1229 | rcu_read_unlock(); |
1197 | return 0; | 1230 | return 0; |
@@ -1199,10 +1232,10 @@ retry: | |||
1199 | drop: | 1232 | drop: |
1200 | if (skb) | 1233 | if (skb) |
1201 | dev_kfree_skb(skb); | 1234 | dev_kfree_skb(skb); |
1202 | for (i = 0; i < tx.u.tx.num_extra_frag; i++) | 1235 | for (i = 0; i < tx.num_extra_frag; i++) |
1203 | if (tx.u.tx.extra_frag[i]) | 1236 | if (tx.extra_frag[i]) |
1204 | dev_kfree_skb(tx.u.tx.extra_frag[i]); | 1237 | dev_kfree_skb(tx.extra_frag[i]); |
1205 | kfree(tx.u.tx.extra_frag); | 1238 | kfree(tx.extra_frag); |
1206 | rcu_read_unlock(); | 1239 | rcu_read_unlock(); |
1207 | return 0; | 1240 | return 0; |
1208 | } | 1241 | } |
@@ -1260,6 +1293,8 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, | |||
1260 | control.flags |= IEEE80211_TXCTL_REQUEUE; | 1293 | control.flags |= IEEE80211_TXCTL_REQUEUE; |
1261 | if (pkt_data->flags & IEEE80211_TXPD_EAPOL_FRAME) | 1294 | if (pkt_data->flags & IEEE80211_TXPD_EAPOL_FRAME) |
1262 | control.flags |= IEEE80211_TXCTL_EAPOL_FRAME; | 1295 | control.flags |= IEEE80211_TXCTL_EAPOL_FRAME; |
1296 | if (pkt_data->flags & IEEE80211_TXPD_AMPDU) | ||
1297 | control.flags |= IEEE80211_TXCTL_AMPDU; | ||
1263 | control.queue = pkt_data->queue; | 1298 | control.queue = pkt_data->queue; |
1264 | 1299 | ||
1265 | ret = ieee80211_tx(odev, skb, &control); | 1300 | ret = ieee80211_tx(odev, skb, &control); |
@@ -1346,8 +1381,9 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1346 | struct ieee80211_tx_packet_data *pkt_data; | 1381 | struct ieee80211_tx_packet_data *pkt_data; |
1347 | struct ieee80211_sub_if_data *sdata; | 1382 | struct ieee80211_sub_if_data *sdata; |
1348 | int ret = 1, head_need; | 1383 | int ret = 1, head_need; |
1349 | u16 ethertype, hdrlen, fc; | 1384 | u16 ethertype, hdrlen, meshhdrlen = 0, fc; |
1350 | struct ieee80211_hdr hdr; | 1385 | struct ieee80211_hdr hdr; |
1386 | struct ieee80211s_hdr mesh_hdr; | ||
1351 | const u8 *encaps_data; | 1387 | const u8 *encaps_data; |
1352 | int encaps_len, skip_header_bytes; | 1388 | int encaps_len, skip_header_bytes; |
1353 | int nh_pos, h_pos; | 1389 | int nh_pos, h_pos; |
@@ -1389,6 +1425,37 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1389 | memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN); | 1425 | memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN); |
1390 | hdrlen = 30; | 1426 | hdrlen = 30; |
1391 | break; | 1427 | break; |
1428 | #ifdef CONFIG_MAC80211_MESH | ||
1429 | case IEEE80211_IF_TYPE_MESH_POINT: | ||
1430 | fc |= IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS; | ||
1431 | /* RA TA DA SA */ | ||
1432 | if (is_multicast_ether_addr(skb->data)) | ||
1433 | memcpy(hdr.addr1, skb->data, ETH_ALEN); | ||
1434 | else if (mesh_nexthop_lookup(hdr.addr1, skb, dev)) | ||
1435 | return 0; | ||
1436 | memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN); | ||
1437 | memcpy(hdr.addr3, skb->data, ETH_ALEN); | ||
1438 | memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN); | ||
1439 | if (skb->pkt_type == PACKET_OTHERHOST) { | ||
1440 | /* Forwarded frame, keep mesh ttl and seqnum */ | ||
1441 | struct ieee80211s_hdr *prev_meshhdr; | ||
1442 | prev_meshhdr = ((struct ieee80211s_hdr *)skb->cb); | ||
1443 | meshhdrlen = ieee80211_get_mesh_hdrlen(prev_meshhdr); | ||
1444 | memcpy(&mesh_hdr, prev_meshhdr, meshhdrlen); | ||
1445 | sdata->u.sta.mshstats.fwded_frames++; | ||
1446 | } else { | ||
1447 | if (!sdata->u.sta.mshcfg.dot11MeshTTL) { | ||
1448 | /* Do not send frames with mesh_ttl == 0 */ | ||
1449 | sdata->u.sta.mshstats.dropped_frames_ttl++; | ||
1450 | ret = 0; | ||
1451 | goto fail; | ||
1452 | } | ||
1453 | meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr, | ||
1454 | sdata); | ||
1455 | } | ||
1456 | hdrlen = 30; | ||
1457 | break; | ||
1458 | #endif | ||
1392 | case IEEE80211_IF_TYPE_STA: | 1459 | case IEEE80211_IF_TYPE_STA: |
1393 | fc |= IEEE80211_FCTL_TODS; | 1460 | fc |= IEEE80211_FCTL_TODS; |
1394 | /* BSSID SA DA */ | 1461 | /* BSSID SA DA */ |
@@ -1409,10 +1476,17 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1409 | goto fail; | 1476 | goto fail; |
1410 | } | 1477 | } |
1411 | 1478 | ||
1412 | sta = sta_info_get(local, hdr.addr1); | 1479 | /* |
1413 | if (sta) { | 1480 | * There's no need to try to look up the destination |
1414 | sta_flags = sta->flags; | 1481 | * if it is a multicast address (which can only happen |
1415 | sta_info_put(sta); | 1482 | * in AP mode) |
1483 | */ | ||
1484 | if (!is_multicast_ether_addr(hdr.addr1)) { | ||
1485 | rcu_read_lock(); | ||
1486 | sta = sta_info_get(local, hdr.addr1); | ||
1487 | if (sta) | ||
1488 | sta_flags = sta->flags; | ||
1489 | rcu_read_unlock(); | ||
1416 | } | 1490 | } |
1417 | 1491 | ||
1418 | /* receiver is QoS enabled, use a QoS type frame */ | 1492 | /* receiver is QoS enabled, use a QoS type frame */ |
@@ -1422,12 +1496,12 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1422 | } | 1496 | } |
1423 | 1497 | ||
1424 | /* | 1498 | /* |
1425 | * If port access control is enabled, drop frames to unauthorised | 1499 | * Drop unicast frames to unauthorised stations unless they are |
1426 | * stations unless they are EAPOL frames from the local station. | 1500 | * EAPOL frames from the local station. |
1427 | */ | 1501 | */ |
1428 | if (unlikely(sdata->ieee802_1x_pac && | 1502 | if (unlikely(!is_multicast_ether_addr(hdr.addr1) && |
1429 | !(sta_flags & WLAN_STA_AUTHORIZED) && | 1503 | !(sta_flags & WLAN_STA_AUTHORIZED) && |
1430 | !(ethertype == ETH_P_PAE && | 1504 | !(ethertype == ETH_P_PAE && |
1431 | compare_ether_addr(dev->dev_addr, | 1505 | compare_ether_addr(dev->dev_addr, |
1432 | skb->data + ETH_ALEN) == 0))) { | 1506 | skb->data + ETH_ALEN) == 0))) { |
1433 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | 1507 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG |
@@ -1480,7 +1554,7 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1480 | * build in headroom in __dev_alloc_skb() (linux/skbuff.h) and | 1554 | * build in headroom in __dev_alloc_skb() (linux/skbuff.h) and |
1481 | * alloc_skb() (net/core/skbuff.c) | 1555 | * alloc_skb() (net/core/skbuff.c) |
1482 | */ | 1556 | */ |
1483 | head_need = hdrlen + encaps_len + local->tx_headroom; | 1557 | head_need = hdrlen + encaps_len + meshhdrlen + local->tx_headroom; |
1484 | head_need -= skb_headroom(skb); | 1558 | head_need -= skb_headroom(skb); |
1485 | 1559 | ||
1486 | /* We are going to modify skb data, so make a copy of it if happens to | 1560 | /* We are going to modify skb data, so make a copy of it if happens to |
@@ -1514,6 +1588,12 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1514 | h_pos += encaps_len; | 1588 | h_pos += encaps_len; |
1515 | } | 1589 | } |
1516 | 1590 | ||
1591 | if (meshhdrlen > 0) { | ||
1592 | memcpy(skb_push(skb, meshhdrlen), &mesh_hdr, meshhdrlen); | ||
1593 | nh_pos += meshhdrlen; | ||
1594 | h_pos += meshhdrlen; | ||
1595 | } | ||
1596 | |||
1517 | if (fc & IEEE80211_STYPE_QOS_DATA) { | 1597 | if (fc & IEEE80211_STYPE_QOS_DATA) { |
1518 | __le16 *qos_control; | 1598 | __le16 *qos_control; |
1519 | 1599 | ||
@@ -1583,7 +1663,7 @@ void ieee80211_tx_pending(unsigned long data) | |||
1583 | struct ieee80211_local *local = (struct ieee80211_local *)data; | 1663 | struct ieee80211_local *local = (struct ieee80211_local *)data; |
1584 | struct net_device *dev = local->mdev; | 1664 | struct net_device *dev = local->mdev; |
1585 | struct ieee80211_tx_stored_packet *store; | 1665 | struct ieee80211_tx_stored_packet *store; |
1586 | struct ieee80211_txrx_data tx; | 1666 | struct ieee80211_tx_data tx; |
1587 | int i, ret, reschedule = 0; | 1667 | int i, ret, reschedule = 0; |
1588 | 1668 | ||
1589 | netif_tx_lock_bh(dev); | 1669 | netif_tx_lock_bh(dev); |
@@ -1595,14 +1675,13 @@ void ieee80211_tx_pending(unsigned long data) | |||
1595 | continue; | 1675 | continue; |
1596 | } | 1676 | } |
1597 | store = &local->pending_packet[i]; | 1677 | store = &local->pending_packet[i]; |
1598 | tx.u.tx.control = &store->control; | 1678 | tx.control = &store->control; |
1599 | tx.u.tx.extra_frag = store->extra_frag; | 1679 | tx.extra_frag = store->extra_frag; |
1600 | tx.u.tx.num_extra_frag = store->num_extra_frag; | 1680 | tx.num_extra_frag = store->num_extra_frag; |
1601 | tx.u.tx.last_frag_hwrate = store->last_frag_hwrate; | 1681 | tx.last_frag_rate = store->last_frag_rate; |
1602 | tx.u.tx.last_frag_rate = store->last_frag_rate; | ||
1603 | tx.flags = 0; | 1682 | tx.flags = 0; |
1604 | if (store->last_frag_rate_ctrl_probe) | 1683 | if (store->last_frag_rate_ctrl_probe) |
1605 | tx.flags |= IEEE80211_TXRXD_TXPROBE_LAST_FRAG; | 1684 | tx.flags |= IEEE80211_TX_PROBE_LAST_FRAG; |
1606 | ret = __ieee80211_tx(local, store->skb, &tx); | 1685 | ret = __ieee80211_tx(local, store->skb, &tx); |
1607 | if (ret) { | 1686 | if (ret) { |
1608 | if (ret == IEEE80211_TX_FRAG_AGAIN) | 1687 | if (ret == IEEE80211_TX_FRAG_AGAIN) |
@@ -1636,7 +1715,6 @@ static void ieee80211_beacon_add_tim(struct ieee80211_local *local, | |||
1636 | 1715 | ||
1637 | /* Generate bitmap for TIM only if there are any STAs in power save | 1716 | /* Generate bitmap for TIM only if there are any STAs in power save |
1638 | * mode. */ | 1717 | * mode. */ |
1639 | read_lock_bh(&local->sta_lock); | ||
1640 | if (atomic_read(&bss->num_sta_ps) > 0) | 1718 | if (atomic_read(&bss->num_sta_ps) > 0) |
1641 | /* in the hope that this is faster than | 1719 | /* in the hope that this is faster than |
1642 | * checking byte-for-byte */ | 1720 | * checking byte-for-byte */ |
@@ -1687,7 +1765,6 @@ static void ieee80211_beacon_add_tim(struct ieee80211_local *local, | |||
1687 | *pos++ = aid0; /* Bitmap control */ | 1765 | *pos++ = aid0; /* Bitmap control */ |
1688 | *pos++ = 0; /* Part Virt Bitmap */ | 1766 | *pos++ = 0; /* Part Virt Bitmap */ |
1689 | } | 1767 | } |
1690 | read_unlock_bh(&local->sta_lock); | ||
1691 | } | 1768 | } |
1692 | 1769 | ||
1693 | struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw, | 1770 | struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw, |
@@ -1701,16 +1778,96 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw, | |||
1701 | struct ieee80211_if_ap *ap = NULL; | 1778 | struct ieee80211_if_ap *ap = NULL; |
1702 | struct rate_selection rsel; | 1779 | struct rate_selection rsel; |
1703 | struct beacon_data *beacon; | 1780 | struct beacon_data *beacon; |
1781 | struct ieee80211_supported_band *sband; | ||
1782 | struct ieee80211_mgmt *mgmt; | ||
1783 | int *num_beacons; | ||
1784 | bool err = true; | ||
1785 | u8 *pos; | ||
1786 | |||
1787 | sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; | ||
1704 | 1788 | ||
1705 | rcu_read_lock(); | 1789 | rcu_read_lock(); |
1706 | 1790 | ||
1707 | sdata = vif_to_sdata(vif); | 1791 | sdata = vif_to_sdata(vif); |
1708 | bdev = sdata->dev; | 1792 | bdev = sdata->dev; |
1709 | ap = &sdata->u.ap; | ||
1710 | 1793 | ||
1711 | beacon = rcu_dereference(ap->beacon); | 1794 | if (sdata->vif.type == IEEE80211_IF_TYPE_AP) { |
1795 | ap = &sdata->u.ap; | ||
1796 | beacon = rcu_dereference(ap->beacon); | ||
1797 | if (ap && beacon) { | ||
1798 | /* | ||
1799 | * headroom, head length, | ||
1800 | * tail length and maximum TIM length | ||
1801 | */ | ||
1802 | skb = dev_alloc_skb(local->tx_headroom + | ||
1803 | beacon->head_len + | ||
1804 | beacon->tail_len + 256); | ||
1805 | if (!skb) | ||
1806 | goto out; | ||
1807 | |||
1808 | skb_reserve(skb, local->tx_headroom); | ||
1809 | memcpy(skb_put(skb, beacon->head_len), beacon->head, | ||
1810 | beacon->head_len); | ||
1811 | |||
1812 | ieee80211_include_sequence(sdata, | ||
1813 | (struct ieee80211_hdr *)skb->data); | ||
1814 | |||
1815 | /* | ||
1816 | * Not very nice, but we want to allow the driver to call | ||
1817 | * ieee80211_beacon_get() as a response to the set_tim() | ||
1818 | * callback. That, however, is already invoked under the | ||
1819 | * sta_lock to guarantee consistent and race-free update | ||
1820 | * of the tim bitmap in mac80211 and the driver. | ||
1821 | */ | ||
1822 | if (local->tim_in_locked_section) { | ||
1823 | ieee80211_beacon_add_tim(local, ap, skb, beacon); | ||
1824 | } else { | ||
1825 | unsigned long flags; | ||
1826 | |||
1827 | spin_lock_irqsave(&local->sta_lock, flags); | ||
1828 | ieee80211_beacon_add_tim(local, ap, skb, beacon); | ||
1829 | spin_unlock_irqrestore(&local->sta_lock, flags); | ||
1830 | } | ||
1831 | |||
1832 | if (beacon->tail) | ||
1833 | memcpy(skb_put(skb, beacon->tail_len), | ||
1834 | beacon->tail, beacon->tail_len); | ||
1712 | 1835 | ||
1713 | if (!ap || sdata->vif.type != IEEE80211_IF_TYPE_AP || !beacon) { | 1836 | num_beacons = &ap->num_beacons; |
1837 | |||
1838 | err = false; | ||
1839 | } | ||
1840 | } else if (ieee80211_vif_is_mesh(&sdata->vif)) { | ||
1841 | /* headroom, head length, tail length and maximum TIM length */ | ||
1842 | skb = dev_alloc_skb(local->tx_headroom + 400); | ||
1843 | if (!skb) | ||
1844 | goto out; | ||
1845 | |||
1846 | skb_reserve(skb, local->hw.extra_tx_headroom); | ||
1847 | mgmt = (struct ieee80211_mgmt *) | ||
1848 | skb_put(skb, 24 + sizeof(mgmt->u.beacon)); | ||
1849 | memset(mgmt, 0, 24 + sizeof(mgmt->u.beacon)); | ||
1850 | mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, | ||
1851 | IEEE80211_STYPE_BEACON); | ||
1852 | memset(mgmt->da, 0xff, ETH_ALEN); | ||
1853 | memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); | ||
1854 | /* BSSID is left zeroed, wildcard value */ | ||
1855 | mgmt->u.beacon.beacon_int = | ||
1856 | cpu_to_le16(local->hw.conf.beacon_int); | ||
1857 | mgmt->u.beacon.capab_info = 0x0; /* 0x0 for MPs */ | ||
1858 | |||
1859 | pos = skb_put(skb, 2); | ||
1860 | *pos++ = WLAN_EID_SSID; | ||
1861 | *pos++ = 0x0; | ||
1862 | |||
1863 | mesh_mgmt_ies_add(skb, sdata->dev); | ||
1864 | |||
1865 | num_beacons = &sdata->u.sta.num_beacons; | ||
1866 | |||
1867 | err = false; | ||
1868 | } | ||
1869 | |||
1870 | if (err) { | ||
1714 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | 1871 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG |
1715 | if (net_ratelimit()) | 1872 | if (net_ratelimit()) |
1716 | printk(KERN_DEBUG "no beacon data avail for %s\n", | 1873 | printk(KERN_DEBUG "no beacon data avail for %s\n", |
@@ -1720,27 +1877,8 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw, | |||
1720 | goto out; | 1877 | goto out; |
1721 | } | 1878 | } |
1722 | 1879 | ||
1723 | /* headroom, head length, tail length and maximum TIM length */ | ||
1724 | skb = dev_alloc_skb(local->tx_headroom + beacon->head_len + | ||
1725 | beacon->tail_len + 256); | ||
1726 | if (!skb) | ||
1727 | goto out; | ||
1728 | |||
1729 | skb_reserve(skb, local->tx_headroom); | ||
1730 | memcpy(skb_put(skb, beacon->head_len), beacon->head, | ||
1731 | beacon->head_len); | ||
1732 | |||
1733 | ieee80211_include_sequence(sdata, (struct ieee80211_hdr *)skb->data); | ||
1734 | |||
1735 | ieee80211_beacon_add_tim(local, ap, skb, beacon); | ||
1736 | |||
1737 | if (beacon->tail) | ||
1738 | memcpy(skb_put(skb, beacon->tail_len), beacon->tail, | ||
1739 | beacon->tail_len); | ||
1740 | |||
1741 | if (control) { | 1880 | if (control) { |
1742 | rate_control_get_rate(local->mdev, local->oper_hw_mode, skb, | 1881 | rate_control_get_rate(local->mdev, sband, skb, &rsel); |
1743 | &rsel); | ||
1744 | if (!rsel.rate) { | 1882 | if (!rsel.rate) { |
1745 | if (net_ratelimit()) { | 1883 | if (net_ratelimit()) { |
1746 | printk(KERN_DEBUG "%s: ieee80211_beacon_get: " | 1884 | printk(KERN_DEBUG "%s: ieee80211_beacon_get: " |
@@ -1753,20 +1891,17 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw, | |||
1753 | } | 1891 | } |
1754 | 1892 | ||
1755 | control->vif = vif; | 1893 | control->vif = vif; |
1756 | control->tx_rate = | 1894 | control->tx_rate = rsel.rate; |
1757 | (sdata->bss_conf.use_short_preamble && | 1895 | if (sdata->bss_conf.use_short_preamble && |
1758 | (rsel.rate->flags & IEEE80211_RATE_PREAMBLE2)) ? | 1896 | rsel.rate->flags & IEEE80211_RATE_SHORT_PREAMBLE) |
1759 | rsel.rate->val2 : rsel.rate->val; | 1897 | control->flags |= IEEE80211_TXCTL_SHORT_PREAMBLE; |
1760 | control->antenna_sel_tx = local->hw.conf.antenna_sel_tx; | 1898 | control->antenna_sel_tx = local->hw.conf.antenna_sel_tx; |
1761 | control->power_level = local->hw.conf.power_level; | ||
1762 | control->flags |= IEEE80211_TXCTL_NO_ACK; | 1899 | control->flags |= IEEE80211_TXCTL_NO_ACK; |
1763 | control->retry_limit = 1; | 1900 | control->retry_limit = 1; |
1764 | control->flags |= IEEE80211_TXCTL_CLEAR_DST_MASK; | 1901 | control->flags |= IEEE80211_TXCTL_CLEAR_PS_FILT; |
1765 | } | 1902 | } |
1766 | 1903 | (*num_beacons)++; | |
1767 | ap->num_beacons++; | 1904 | out: |
1768 | |||
1769 | out: | ||
1770 | rcu_read_unlock(); | 1905 | rcu_read_unlock(); |
1771 | return skb; | 1906 | return skb; |
1772 | } | 1907 | } |
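
The AP branch of ieee80211_beacon_get() has to cope with drivers that call it from inside the set_tim() callback, which already runs under sta_lock; the tim_in_locked_section flag decides whether the lock is taken again. Here is a minimal pthread sketch of that "lock unless this call chain already holds it" pattern, with invented function names; it is a single-threaded illustration of the control flow, not a general re-entrant locking scheme.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t sta_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool tim_in_locked_section;      /* only written under sta_lock */

    static void add_tim_locked(void)
    {
        /* touch the TIM bitmap; sta_lock must already be held here */
        printf("updating TIM under lock\n");
    }

    /* Safe to call both with and without sta_lock held by this call chain. */
    static void add_tim(void)
    {
        if (tim_in_locked_section) {
            add_tim_locked();               /* caller already holds sta_lock */
        } else {
            pthread_mutex_lock(&sta_lock);
            add_tim_locked();
            pthread_mutex_unlock(&sta_lock);
        }
    }

    /* Analogue of the set_tim() callback path: the flag marks the section. */
    static void set_tim_callback(void)
    {
        pthread_mutex_lock(&sta_lock);
        tim_in_locked_section = true;
        add_tim();                          /* no deadlock: lock not re-taken */
        tim_in_locked_section = false;
        pthread_mutex_unlock(&sta_lock);
    }

    int main(void)
    {
        add_tim();              /* normal beacon path, takes the lock itself */
        set_tim_callback();     /* driver callback path, lock already held */
        return 0;
    }
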
@@ -1814,8 +1949,8 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw, | |||
1814 | struct sk_buff *skb; | 1949 | struct sk_buff *skb; |
1815 | struct sta_info *sta; | 1950 | struct sta_info *sta; |
1816 | ieee80211_tx_handler *handler; | 1951 | ieee80211_tx_handler *handler; |
1817 | struct ieee80211_txrx_data tx; | 1952 | struct ieee80211_tx_data tx; |
1818 | ieee80211_txrx_result res = TXRX_DROP; | 1953 | ieee80211_tx_result res = TX_DROP; |
1819 | struct net_device *bdev; | 1954 | struct net_device *bdev; |
1820 | struct ieee80211_sub_if_data *sdata; | 1955 | struct ieee80211_sub_if_data *sdata; |
1821 | struct ieee80211_if_ap *bss = NULL; | 1956 | struct ieee80211_if_ap *bss = NULL; |
@@ -1836,7 +1971,6 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw, | |||
1836 | rcu_read_unlock(); | 1971 | rcu_read_unlock(); |
1837 | return NULL; | 1972 | return NULL; |
1838 | } | 1973 | } |
1839 | rcu_read_unlock(); | ||
1840 | 1974 | ||
1841 | if (bss->dtim_count != 0) | 1975 | if (bss->dtim_count != 0) |
1842 | return NULL; /* send buffered bc/mc only after DTIM beacon */ | 1976 | return NULL; /* send buffered bc/mc only after DTIM beacon */ |
@@ -1862,27 +1996,26 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw, | |||
1862 | dev_kfree_skb_any(skb); | 1996 | dev_kfree_skb_any(skb); |
1863 | } | 1997 | } |
1864 | sta = tx.sta; | 1998 | sta = tx.sta; |
1865 | tx.flags |= IEEE80211_TXRXD_TXPS_BUFFERED; | 1999 | tx.flags |= IEEE80211_TX_PS_BUFFERED; |
1866 | tx.u.tx.mode = local->hw.conf.mode; | 2000 | tx.channel = local->hw.conf.channel; |
1867 | 2001 | ||
1868 | for (handler = local->tx_handlers; *handler != NULL; handler++) { | 2002 | for (handler = ieee80211_tx_handlers; *handler != NULL; handler++) { |
1869 | res = (*handler)(&tx); | 2003 | res = (*handler)(&tx); |
1870 | if (res == TXRX_DROP || res == TXRX_QUEUED) | 2004 | if (res == TX_DROP || res == TX_QUEUED) |
1871 | break; | 2005 | break; |
1872 | } | 2006 | } |
1873 | skb = tx.skb; /* handlers are allowed to change skb */ | 2007 | skb = tx.skb; /* handlers are allowed to change skb */ |
1874 | 2008 | ||
1875 | if (res == TXRX_DROP) { | 2009 | if (res == TX_DROP) { |
1876 | I802_DEBUG_INC(local->tx_handlers_drop); | 2010 | I802_DEBUG_INC(local->tx_handlers_drop); |
1877 | dev_kfree_skb(skb); | 2011 | dev_kfree_skb(skb); |
1878 | skb = NULL; | 2012 | skb = NULL; |
1879 | } else if (res == TXRX_QUEUED) { | 2013 | } else if (res == TX_QUEUED) { |
1880 | I802_DEBUG_INC(local->tx_handlers_queued); | 2014 | I802_DEBUG_INC(local->tx_handlers_queued); |
1881 | skb = NULL; | 2015 | skb = NULL; |
1882 | } | 2016 | } |
1883 | 2017 | ||
1884 | if (sta) | 2018 | rcu_read_unlock(); |
1885 | sta_info_put(sta); | ||
1886 | 2019 | ||
1887 | return skb; | 2020 | return skb; |
1888 | } | 2021 | } |
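
ieee80211_get_buffered_bc() above runs the frame through the ieee80211_tx_handlers array until one handler queues or drops it. The stand-alone sketch below shows that chain-of-handlers pattern with made-up handler names; only the loop shape mirrors the code above.

    #include <stdio.h>

    typedef enum { TX_CONTINUE, TX_DROP, TX_QUEUED } tx_result;

    struct tx_data { int len; int encrypted; };

    typedef tx_result (*tx_handler)(struct tx_data *tx);

    static tx_result h_check_len(struct tx_data *tx)
    {
        return tx->len > 0 ? TX_CONTINUE : TX_DROP;
    }

    static tx_result h_encrypt(struct tx_data *tx)
    {
        tx->encrypted = 1;
        return TX_CONTINUE;
    }

    /* NULL-terminated table, walked in order like ieee80211_tx_handlers. */
    static const tx_handler handlers[] = { h_check_len, h_encrypt, NULL };

    static tx_result run_handlers(struct tx_data *tx)
    {
        tx_result res = TX_CONTINUE;
        const tx_handler *h;

        for (h = handlers; *h != NULL; h++) {
            res = (*h)(tx);
            if (res == TX_DROP || res == TX_QUEUED)
                break;              /* stop at the first handler that consumes */
        }
        return res;
    }

    int main(void)
    {
        struct tx_data tx = { .len = 100, .encrypted = 0 };
        printf("result=%d encrypted=%d\n", run_handlers(&tx), tx.encrypted);
        return 0;
    }
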
diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 5e631ce98d7e..57c404f3f6d0 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c | |||
@@ -26,6 +26,7 @@ | |||
26 | 26 | ||
27 | #include "ieee80211_i.h" | 27 | #include "ieee80211_i.h" |
28 | #include "ieee80211_rate.h" | 28 | #include "ieee80211_rate.h" |
29 | #include "mesh.h" | ||
29 | #include "wme.h" | 30 | #include "wme.h" |
30 | 31 | ||
31 | /* privid for wiphys to determine whether they belong to us or not */ | 32 | /* privid for wiphys to determine whether they belong to us or not */ |
@@ -41,92 +42,6 @@ const unsigned char bridge_tunnel_header[] = | |||
41 | { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 }; | 42 | { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 }; |
42 | 43 | ||
43 | 44 | ||
44 | static int rate_list_match(const int *rate_list, int rate) | ||
45 | { | ||
46 | int i; | ||
47 | |||
48 | if (!rate_list) | ||
49 | return 0; | ||
50 | |||
51 | for (i = 0; rate_list[i] >= 0; i++) | ||
52 | if (rate_list[i] == rate) | ||
53 | return 1; | ||
54 | |||
55 | return 0; | ||
56 | } | ||
57 | |||
58 | void ieee80211_prepare_rates(struct ieee80211_local *local, | ||
59 | struct ieee80211_hw_mode *mode) | ||
60 | { | ||
61 | int i; | ||
62 | |||
63 | for (i = 0; i < mode->num_rates; i++) { | ||
64 | struct ieee80211_rate *rate = &mode->rates[i]; | ||
65 | |||
66 | rate->flags &= ~(IEEE80211_RATE_SUPPORTED | | ||
67 | IEEE80211_RATE_BASIC); | ||
68 | |||
69 | if (local->supp_rates[mode->mode]) { | ||
70 | if (!rate_list_match(local->supp_rates[mode->mode], | ||
71 | rate->rate)) | ||
72 | continue; | ||
73 | } | ||
74 | |||
75 | rate->flags |= IEEE80211_RATE_SUPPORTED; | ||
76 | |||
77 | /* Use configured basic rate set if it is available. If not, | ||
78 | * use defaults that are sane for most cases. */ | ||
79 | if (local->basic_rates[mode->mode]) { | ||
80 | if (rate_list_match(local->basic_rates[mode->mode], | ||
81 | rate->rate)) | ||
82 | rate->flags |= IEEE80211_RATE_BASIC; | ||
83 | } else switch (mode->mode) { | ||
84 | case MODE_IEEE80211A: | ||
85 | if (rate->rate == 60 || rate->rate == 120 || | ||
86 | rate->rate == 240) | ||
87 | rate->flags |= IEEE80211_RATE_BASIC; | ||
88 | break; | ||
89 | case MODE_IEEE80211B: | ||
90 | if (rate->rate == 10 || rate->rate == 20) | ||
91 | rate->flags |= IEEE80211_RATE_BASIC; | ||
92 | break; | ||
93 | case MODE_IEEE80211G: | ||
94 | if (rate->rate == 10 || rate->rate == 20 || | ||
95 | rate->rate == 55 || rate->rate == 110) | ||
96 | rate->flags |= IEEE80211_RATE_BASIC; | ||
97 | break; | ||
98 | case NUM_IEEE80211_MODES: | ||
99 | /* not useful */ | ||
100 | break; | ||
101 | } | ||
102 | |||
103 | /* Set ERP and MANDATORY flags based on phymode */ | ||
104 | switch (mode->mode) { | ||
105 | case MODE_IEEE80211A: | ||
106 | if (rate->rate == 60 || rate->rate == 120 || | ||
107 | rate->rate == 240) | ||
108 | rate->flags |= IEEE80211_RATE_MANDATORY; | ||
109 | break; | ||
110 | case MODE_IEEE80211B: | ||
111 | if (rate->rate == 10) | ||
112 | rate->flags |= IEEE80211_RATE_MANDATORY; | ||
113 | break; | ||
114 | case MODE_IEEE80211G: | ||
115 | if (rate->rate == 10 || rate->rate == 20 || | ||
116 | rate->rate == 55 || rate->rate == 110 || | ||
117 | rate->rate == 60 || rate->rate == 120 || | ||
118 | rate->rate == 240) | ||
119 | rate->flags |= IEEE80211_RATE_MANDATORY; | ||
120 | break; | ||
121 | case NUM_IEEE80211_MODES: | ||
122 | /* not useful */ | ||
123 | break; | ||
124 | } | ||
125 | if (ieee80211_is_erp_rate(mode->mode, rate->rate)) | ||
126 | rate->flags |= IEEE80211_RATE_ERP; | ||
127 | } | ||
128 | } | ||
129 | |||
130 | u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len, | 45 | u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len, |
131 | enum ieee80211_if_types type) | 46 | enum ieee80211_if_types type) |
132 | { | 47 | { |
@@ -232,17 +147,35 @@ int ieee80211_get_hdrlen_from_skb(const struct sk_buff *skb) | |||
232 | } | 147 | } |
233 | EXPORT_SYMBOL(ieee80211_get_hdrlen_from_skb); | 148 | EXPORT_SYMBOL(ieee80211_get_hdrlen_from_skb); |
234 | 149 | ||
235 | void ieee80211_tx_set_iswep(struct ieee80211_txrx_data *tx) | 150 | int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr) |
151 | { | ||
152 | int ae = meshhdr->flags & IEEE80211S_FLAGS_AE; | ||
153 | /* 7.1.3.5a.2 */ | ||
154 | switch (ae) { | ||
155 | case 0: | ||
156 | return 5; | ||
157 | case 1: | ||
158 | return 11; | ||
159 | case 2: | ||
160 | return 17; | ||
161 | case 3: | ||
162 | return 23; | ||
163 | default: | ||
164 | return 5; | ||
165 | } | ||
166 | } | ||
167 | |||
168 | void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx) | ||
236 | { | 169 | { |
237 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx->skb->data; | 170 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx->skb->data; |
238 | 171 | ||
239 | hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); | 172 | hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); |
240 | if (tx->u.tx.extra_frag) { | 173 | if (tx->extra_frag) { |
241 | struct ieee80211_hdr *fhdr; | 174 | struct ieee80211_hdr *fhdr; |
242 | int i; | 175 | int i; |
243 | for (i = 0; i < tx->u.tx.num_extra_frag; i++) { | 176 | for (i = 0; i < tx->num_extra_frag; i++) { |
244 | fhdr = (struct ieee80211_hdr *) | 177 | fhdr = (struct ieee80211_hdr *) |
245 | tx->u.tx.extra_frag[i]->data; | 178 | tx->extra_frag[i]->data; |
246 | fhdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); | 179 | fhdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); |
247 | } | 180 | } |
248 | } | 181 | } |
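
ieee80211_get_mesh_hdrlen() above maps the Address Extension bits to a header length of 5, 11, 17 or 23 bytes, i.e. a 5-byte base plus 6 bytes per extension address. The same lookup can be table driven; the MESH_FLAGS_AE mask below is an assumption for the sketch, not the kernel's IEEE80211S_FLAGS_AE definition.

    #include <stdio.h>

    #define MESH_FLAGS_AE 0x03   /* assumed 2-bit Address Extension field */

    /* 5-byte base header plus 6 bytes per extension address (7.1.3.5a.2). */
    static int mesh_hdrlen(unsigned char mesh_flags)
    {
        static const int lens[4] = { 5, 11, 17, 23 };
        return lens[mesh_flags & MESH_FLAGS_AE];
    }

    int main(void)
    {
        unsigned int f;
        for (f = 0; f < 4; f++)
            printf("AE=%u -> %d bytes\n", f, mesh_hdrlen((unsigned char)f));
        return 0;
    }
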
@@ -262,7 +195,7 @@ int ieee80211_frame_duration(struct ieee80211_local *local, size_t len, | |||
262 | * DIV_ROUND_UP() operations. | 195 | * DIV_ROUND_UP() operations. |
263 | */ | 196 | */ |
264 | 197 | ||
265 | if (local->hw.conf.phymode == MODE_IEEE80211A || erp) { | 198 | if (local->hw.conf.channel->band == IEEE80211_BAND_5GHZ || erp) { |
266 | /* | 199 | /* |
267 | * OFDM: | 200 | * OFDM: |
268 | * | 201 | * |
@@ -304,15 +237,19 @@ int ieee80211_frame_duration(struct ieee80211_local *local, size_t len, | |||
304 | /* Exported duration function for driver use */ | 237 | /* Exported duration function for driver use */ |
305 | __le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw, | 238 | __le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw, |
306 | struct ieee80211_vif *vif, | 239 | struct ieee80211_vif *vif, |
307 | size_t frame_len, int rate) | 240 | size_t frame_len, |
241 | struct ieee80211_rate *rate) | ||
308 | { | 242 | { |
309 | struct ieee80211_local *local = hw_to_local(hw); | 243 | struct ieee80211_local *local = hw_to_local(hw); |
310 | struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); | 244 | struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); |
311 | u16 dur; | 245 | u16 dur; |
312 | int erp; | 246 | int erp; |
313 | 247 | ||
314 | erp = ieee80211_is_erp_rate(hw->conf.phymode, rate); | 248 | erp = 0; |
315 | dur = ieee80211_frame_duration(local, frame_len, rate, erp, | 249 | if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) |
250 | erp = rate->flags & IEEE80211_RATE_ERP_G; | ||
251 | |||
252 | dur = ieee80211_frame_duration(local, frame_len, rate->bitrate, erp, | ||
316 | sdata->bss_conf.use_short_preamble); | 253 | sdata->bss_conf.use_short_preamble); |
317 | 254 | ||
318 | return cpu_to_le16(dur); | 255 | return cpu_to_le16(dur); |
@@ -332,17 +269,20 @@ __le16 ieee80211_rts_duration(struct ieee80211_hw *hw, | |||
332 | 269 | ||
333 | short_preamble = sdata->bss_conf.use_short_preamble; | 270 | short_preamble = sdata->bss_conf.use_short_preamble; |
334 | 271 | ||
335 | rate = frame_txctl->rts_rate; | 272 | rate = frame_txctl->rts_cts_rate; |
336 | erp = !!(rate->flags & IEEE80211_RATE_ERP); | 273 | |
274 | erp = 0; | ||
275 | if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) | ||
276 | erp = rate->flags & IEEE80211_RATE_ERP_G; | ||
337 | 277 | ||
338 | /* CTS duration */ | 278 | /* CTS duration */ |
339 | dur = ieee80211_frame_duration(local, 10, rate->rate, | 279 | dur = ieee80211_frame_duration(local, 10, rate->bitrate, |
340 | erp, short_preamble); | 280 | erp, short_preamble); |
341 | /* Data frame duration */ | 281 | /* Data frame duration */ |
342 | dur += ieee80211_frame_duration(local, frame_len, rate->rate, | 282 | dur += ieee80211_frame_duration(local, frame_len, rate->bitrate, |
343 | erp, short_preamble); | 283 | erp, short_preamble); |
344 | /* ACK duration */ | 284 | /* ACK duration */ |
345 | dur += ieee80211_frame_duration(local, 10, rate->rate, | 285 | dur += ieee80211_frame_duration(local, 10, rate->bitrate, |
346 | erp, short_preamble); | 286 | erp, short_preamble); |
347 | 287 | ||
348 | return cpu_to_le16(dur); | 288 | return cpu_to_le16(dur); |
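
The RTS duration above is composed from three per-frame durations (CTS, the protected data frame, and the ACK), each of which includes its preceding SIFS. The sketch below shows only that composition; frame_airtime_us() is a deliberately crude placeholder, not the 802.11 PLCP timing math used by ieee80211_frame_duration(), and the SIFS value is an assumption.

    #include <stdio.h>

    /* Placeholder airtime estimate so the composition below is runnable.
     * bitrate is in units of 100 kbit/s. */
    static unsigned int frame_airtime_us(unsigned int len_bytes,
                                         unsigned int bitrate,
                                         unsigned int sifs_us)
    {
        return sifs_us + (len_bytes * 8u * 10u + bitrate - 1) / bitrate;
    }

    /* RTS sets its Duration/ID (NAV) to cover CTS + data frame + ACK. */
    static unsigned int rts_nav_us(unsigned int data_len, unsigned int bitrate)
    {
        const unsigned int sifs_us = 10;            /* assumed 2.4 GHz SIFS */
        unsigned int dur = 0;

        dur += frame_airtime_us(10, bitrate, sifs_us);        /* CTS (10 bytes) */
        dur += frame_airtime_us(data_len, bitrate, sifs_us);  /* data frame    */
        dur += frame_airtime_us(10, bitrate, sifs_us);        /* ACK (10 bytes)*/
        return dur;
    }

    int main(void)
    {
        printf("NAV for a 1500-byte frame at 24 Mbit/s: ~%u us\n",
               rts_nav_us(1500, 240));
        return 0;
    }
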
@@ -363,15 +303,17 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw, | |||
363 | 303 | ||
364 | short_preamble = sdata->bss_conf.use_short_preamble; | 304 | short_preamble = sdata->bss_conf.use_short_preamble; |
365 | 305 | ||
366 | rate = frame_txctl->rts_rate; | 306 | rate = frame_txctl->rts_cts_rate; |
367 | erp = !!(rate->flags & IEEE80211_RATE_ERP); | 307 | erp = 0; |
308 | if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) | ||
309 | erp = rate->flags & IEEE80211_RATE_ERP_G; | ||
368 | 310 | ||
369 | /* Data frame duration */ | 311 | /* Data frame duration */ |
370 | dur = ieee80211_frame_duration(local, frame_len, rate->rate, | 312 | dur = ieee80211_frame_duration(local, frame_len, rate->bitrate, |
371 | erp, short_preamble); | 313 | erp, short_preamble); |
372 | if (!(frame_txctl->flags & IEEE80211_TXCTL_NO_ACK)) { | 314 | if (!(frame_txctl->flags & IEEE80211_TXCTL_NO_ACK)) { |
373 | /* ACK duration */ | 315 | /* ACK duration */ |
374 | dur += ieee80211_frame_duration(local, 10, rate->rate, | 316 | dur += ieee80211_frame_duration(local, 10, rate->bitrate, |
375 | erp, short_preamble); | 317 | erp, short_preamble); |
376 | } | 318 | } |
377 | 319 | ||
@@ -379,27 +321,6 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw, | |||
379 | } | 321 | } |
380 | EXPORT_SYMBOL(ieee80211_ctstoself_duration); | 322 | EXPORT_SYMBOL(ieee80211_ctstoself_duration); |
381 | 323 | ||
382 | struct ieee80211_rate * | ||
383 | ieee80211_get_rate(struct ieee80211_local *local, int phymode, int hw_rate) | ||
384 | { | ||
385 | struct ieee80211_hw_mode *mode; | ||
386 | int r; | ||
387 | |||
388 | list_for_each_entry(mode, &local->modes_list, list) { | ||
389 | if (mode->mode != phymode) | ||
390 | continue; | ||
391 | for (r = 0; r < mode->num_rates; r++) { | ||
392 | struct ieee80211_rate *rate = &mode->rates[r]; | ||
393 | if (rate->val == hw_rate || | ||
394 | (rate->flags & IEEE80211_RATE_PREAMBLE2 && | ||
395 | rate->val2 == hw_rate)) | ||
396 | return rate; | ||
397 | } | ||
398 | } | ||
399 | |||
400 | return NULL; | ||
401 | } | ||
402 | |||
403 | void ieee80211_wake_queue(struct ieee80211_hw *hw, int queue) | 324 | void ieee80211_wake_queue(struct ieee80211_hw *hw, int queue) |
404 | { | 325 | { |
405 | struct ieee80211_local *local = hw_to_local(hw); | 326 | struct ieee80211_local *local = hw_to_local(hw); |
@@ -480,6 +401,7 @@ void ieee80211_iterate_active_interfaces( | |||
480 | case IEEE80211_IF_TYPE_STA: | 401 | case IEEE80211_IF_TYPE_STA: |
481 | case IEEE80211_IF_TYPE_IBSS: | 402 | case IEEE80211_IF_TYPE_IBSS: |
482 | case IEEE80211_IF_TYPE_WDS: | 403 | case IEEE80211_IF_TYPE_WDS: |
404 | case IEEE80211_IF_TYPE_MESH_POINT: | ||
483 | break; | 405 | break; |
484 | } | 406 | } |
485 | if (sdata->dev == local->mdev) | 407 | if (sdata->dev == local->mdev) |
diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c index a0cff72a580b..affcecd78c10 100644 --- a/net/mac80211/wep.c +++ b/net/mac80211/wep.c | |||
@@ -305,39 +305,39 @@ u8 * ieee80211_wep_is_weak_iv(struct sk_buff *skb, struct ieee80211_key *key) | |||
305 | return NULL; | 305 | return NULL; |
306 | } | 306 | } |
307 | 307 | ||
308 | ieee80211_txrx_result | 308 | ieee80211_rx_result |
309 | ieee80211_crypto_wep_decrypt(struct ieee80211_txrx_data *rx) | 309 | ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx) |
310 | { | 310 | { |
311 | if ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA && | 311 | if ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA && |
312 | ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_MGMT || | 312 | ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_MGMT || |
313 | (rx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_AUTH)) | 313 | (rx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_AUTH)) |
314 | return TXRX_CONTINUE; | 314 | return RX_CONTINUE; |
315 | 315 | ||
316 | if (!(rx->u.rx.status->flag & RX_FLAG_DECRYPTED)) { | 316 | if (!(rx->status->flag & RX_FLAG_DECRYPTED)) { |
317 | if (ieee80211_wep_decrypt(rx->local, rx->skb, rx->key)) { | 317 | if (ieee80211_wep_decrypt(rx->local, rx->skb, rx->key)) { |
318 | #ifdef CONFIG_MAC80211_DEBUG | 318 | #ifdef CONFIG_MAC80211_DEBUG |
319 | if (net_ratelimit()) | 319 | if (net_ratelimit()) |
320 | printk(KERN_DEBUG "%s: RX WEP frame, decrypt " | 320 | printk(KERN_DEBUG "%s: RX WEP frame, decrypt " |
321 | "failed\n", rx->dev->name); | 321 | "failed\n", rx->dev->name); |
322 | #endif /* CONFIG_MAC80211_DEBUG */ | 322 | #endif /* CONFIG_MAC80211_DEBUG */ |
323 | return TXRX_DROP; | 323 | return RX_DROP_UNUSABLE; |
324 | } | 324 | } |
325 | } else if (!(rx->u.rx.status->flag & RX_FLAG_IV_STRIPPED)) { | 325 | } else if (!(rx->status->flag & RX_FLAG_IV_STRIPPED)) { |
326 | ieee80211_wep_remove_iv(rx->local, rx->skb, rx->key); | 326 | ieee80211_wep_remove_iv(rx->local, rx->skb, rx->key); |
327 | /* remove ICV */ | 327 | /* remove ICV */ |
328 | skb_trim(rx->skb, rx->skb->len - 4); | 328 | skb_trim(rx->skb, rx->skb->len - 4); |
329 | } | 329 | } |
330 | 330 | ||
331 | return TXRX_CONTINUE; | 331 | return RX_CONTINUE; |
332 | } | 332 | } |
333 | 333 | ||
334 | static int wep_encrypt_skb(struct ieee80211_txrx_data *tx, struct sk_buff *skb) | 334 | static int wep_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb) |
335 | { | 335 | { |
336 | if (!(tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) { | 336 | if (!(tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) { |
337 | if (ieee80211_wep_encrypt(tx->local, skb, tx->key)) | 337 | if (ieee80211_wep_encrypt(tx->local, skb, tx->key)) |
338 | return -1; | 338 | return -1; |
339 | } else { | 339 | } else { |
340 | tx->u.tx.control->key_idx = tx->key->conf.hw_key_idx; | 340 | tx->control->key_idx = tx->key->conf.hw_key_idx; |
341 | if (tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) { | 341 | if (tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) { |
342 | if (!ieee80211_wep_add_iv(tx->local, skb, tx->key)) | 342 | if (!ieee80211_wep_add_iv(tx->local, skb, tx->key)) |
343 | return -1; | 343 | return -1; |
@@ -346,28 +346,28 @@ static int wep_encrypt_skb(struct ieee80211_txrx_data *tx, struct sk_buff *skb) | |||
346 | return 0; | 346 | return 0; |
347 | } | 347 | } |
348 | 348 | ||
349 | ieee80211_txrx_result | 349 | ieee80211_tx_result |
350 | ieee80211_crypto_wep_encrypt(struct ieee80211_txrx_data *tx) | 350 | ieee80211_crypto_wep_encrypt(struct ieee80211_tx_data *tx) |
351 | { | 351 | { |
352 | tx->u.tx.control->iv_len = WEP_IV_LEN; | 352 | tx->control->iv_len = WEP_IV_LEN; |
353 | tx->u.tx.control->icv_len = WEP_ICV_LEN; | 353 | tx->control->icv_len = WEP_ICV_LEN; |
354 | ieee80211_tx_set_iswep(tx); | 354 | ieee80211_tx_set_protected(tx); |
355 | 355 | ||
356 | if (wep_encrypt_skb(tx, tx->skb) < 0) { | 356 | if (wep_encrypt_skb(tx, tx->skb) < 0) { |
357 | I802_DEBUG_INC(tx->local->tx_handlers_drop_wep); | 357 | I802_DEBUG_INC(tx->local->tx_handlers_drop_wep); |
358 | return TXRX_DROP; | 358 | return TX_DROP; |
359 | } | 359 | } |
360 | 360 | ||
361 | if (tx->u.tx.extra_frag) { | 361 | if (tx->extra_frag) { |
362 | int i; | 362 | int i; |
363 | for (i = 0; i < tx->u.tx.num_extra_frag; i++) { | 363 | for (i = 0; i < tx->num_extra_frag; i++) { |
364 | if (wep_encrypt_skb(tx, tx->u.tx.extra_frag[i]) < 0) { | 364 | if (wep_encrypt_skb(tx, tx->extra_frag[i]) < 0) { |
365 | I802_DEBUG_INC(tx->local-> | 365 | I802_DEBUG_INC(tx->local-> |
366 | tx_handlers_drop_wep); | 366 | tx_handlers_drop_wep); |
367 | return TXRX_DROP; | 367 | return TX_DROP; |
368 | } | 368 | } |
369 | } | 369 | } |
370 | } | 370 | } |
371 | 371 | ||
372 | return TXRX_CONTINUE; | 372 | return TX_CONTINUE; |
373 | } | 373 | } |
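
The WEP path above, like the TKIP and CCMP paths later in this diff, encrypts the head fragment and then every entry in extra_frag[], dropping the whole frame if any fragment fails. A stand-alone sketch of that all-or-nothing loop follows; the XOR "cipher" is obviously a stand-in for the real per-fragment encryption.

    #include <stdio.h>
    #include <stddef.h>

    enum tx_result { TX_CONTINUE, TX_DROP };

    struct frag { unsigned char *data; size_t len; };

    /* Stand-in cipher: XOR with a key byte; real code does WEP/TKIP/CCMP. */
    static int encrypt_frag(struct frag *f, unsigned char key)
    {
        size_t i;
        if (!f->data)
            return -1;
        for (i = 0; i < f->len; i++)
            f->data[i] ^= key;
        return 0;
    }

    /* Encrypt the head fragment and every extra fragment; any failure
     * drops the whole frame. */
    static enum tx_result encrypt_all(struct frag *head, struct frag *extra,
                                      int num_extra, unsigned char key)
    {
        int i;

        if (encrypt_frag(head, key) < 0)
            return TX_DROP;
        for (i = 0; i < num_extra; i++)
            if (encrypt_frag(&extra[i], key) < 0)
                return TX_DROP;
        return TX_CONTINUE;
    }

    int main(void)
    {
        unsigned char a[4] = { 'a', 'b', 'c', 'd' }, b[4] = { 'e', 'f', 'g', 'h' };
        struct frag head = { a, sizeof(a) }, extra[1] = { { b, sizeof(b) } };

        printf("result=%d\n", encrypt_all(&head, extra, 1, 0x5a));
        return 0;
    }
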
diff --git a/net/mac80211/wep.h b/net/mac80211/wep.h index 785fbb4e0dd7..9f723938b63f 100644 --- a/net/mac80211/wep.h +++ b/net/mac80211/wep.h | |||
@@ -28,9 +28,9 @@ int ieee80211_wep_decrypt(struct ieee80211_local *local, struct sk_buff *skb, | |||
28 | struct ieee80211_key *key); | 28 | struct ieee80211_key *key); |
29 | u8 * ieee80211_wep_is_weak_iv(struct sk_buff *skb, struct ieee80211_key *key); | 29 | u8 * ieee80211_wep_is_weak_iv(struct sk_buff *skb, struct ieee80211_key *key); |
30 | 30 | ||
31 | ieee80211_txrx_result | 31 | ieee80211_rx_result |
32 | ieee80211_crypto_wep_decrypt(struct ieee80211_txrx_data *rx); | 32 | ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx); |
33 | ieee80211_txrx_result | 33 | ieee80211_tx_result |
34 | ieee80211_crypto_wep_encrypt(struct ieee80211_txrx_data *tx); | 34 | ieee80211_crypto_wep_encrypt(struct ieee80211_tx_data *tx); |
35 | 35 | ||
36 | #endif /* WEP_H */ | 36 | #endif /* WEP_H */ |
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c index 4e236599dd31..4e94e4026e78 100644 --- a/net/mac80211/wme.c +++ b/net/mac80211/wme.c | |||
@@ -19,10 +19,13 @@ | |||
19 | #include "wme.h" | 19 | #include "wme.h" |
20 | 20 | ||
21 | /* maximum number of hardware queues we support. */ | 21 | /* maximum number of hardware queues we support. */ |
22 | #define TC_80211_MAX_QUEUES 8 | 22 | #define TC_80211_MAX_QUEUES 16 |
23 | |||
24 | const int ieee802_1d_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 }; | ||
23 | 25 | ||
24 | struct ieee80211_sched_data | 26 | struct ieee80211_sched_data |
25 | { | 27 | { |
28 | unsigned long qdisc_pool[BITS_TO_LONGS(TC_80211_MAX_QUEUES)]; | ||
26 | struct tcf_proto *filter_list; | 29 | struct tcf_proto *filter_list; |
27 | struct Qdisc *queues[TC_80211_MAX_QUEUES]; | 30 | struct Qdisc *queues[TC_80211_MAX_QUEUES]; |
28 | struct sk_buff_head requeued[TC_80211_MAX_QUEUES]; | 31 | struct sk_buff_head requeued[TC_80211_MAX_QUEUES]; |
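
The ieee802_1d_to_ac[] table added above collapses the eight 802.1D user priorities into four queues, where a lower queue index is served first (so priorities 6/7 land on queue 0 and the background priorities 1/2 on queue 3, judging from the dequeue order later in this file). A trivial stand-alone lookup using the same table:

    #include <stdio.h>

    /* Same table as above: 802.1D user priority (0..7) -> hardware queue,
     * lower queue index = higher service priority. */
    static const int ieee802_1d_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 };

    static int priority_to_queue(unsigned int priority)
    {
        return ieee802_1d_to_ac[priority & 7];  /* mask like QOS_CONTROL_TAG1D_MASK */
    }

    int main(void)
    {
        unsigned int up;
        for (up = 0; up < 8; up++)
            printf("UP %u -> queue %d\n", up, priority_to_queue(up));
        return 0;
    }
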
@@ -98,7 +101,6 @@ static inline int classify80211(struct sk_buff *skb, struct Qdisc *qd) | |||
98 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | 101 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; |
99 | unsigned short fc = le16_to_cpu(hdr->frame_control); | 102 | unsigned short fc = le16_to_cpu(hdr->frame_control); |
100 | int qos; | 103 | int qos; |
101 | const int ieee802_1d_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 }; | ||
102 | 104 | ||
103 | /* see if frame is data or non data frame */ | 105 | /* see if frame is data or non data frame */ |
104 | if (unlikely((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA)) { | 106 | if (unlikely((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA)) { |
@@ -146,9 +148,26 @@ static int wme_qdiscop_enqueue(struct sk_buff *skb, struct Qdisc* qd) | |||
146 | unsigned short fc = le16_to_cpu(hdr->frame_control); | 148 | unsigned short fc = le16_to_cpu(hdr->frame_control); |
147 | struct Qdisc *qdisc; | 149 | struct Qdisc *qdisc; |
148 | int err, queue; | 150 | int err, queue; |
151 | struct sta_info *sta; | ||
152 | u8 tid; | ||
149 | 153 | ||
150 | if (pkt_data->flags & IEEE80211_TXPD_REQUEUE) { | 154 | if (pkt_data->flags & IEEE80211_TXPD_REQUEUE) { |
151 | skb_queue_tail(&q->requeued[pkt_data->queue], skb); | 155 | queue = pkt_data->queue; |
156 | rcu_read_lock(); | ||
157 | sta = sta_info_get(local, hdr->addr1); | ||
158 | tid = skb->priority & QOS_CONTROL_TAG1D_MASK; | ||
159 | if (sta) { | ||
160 | int ampdu_queue = sta->tid_to_tx_q[tid]; | ||
161 | if ((ampdu_queue < local->hw.queues) && | ||
162 | test_bit(ampdu_queue, q->qdisc_pool)) { | ||
163 | queue = ampdu_queue; | ||
164 | pkt_data->flags |= IEEE80211_TXPD_AMPDU; | ||
165 | } else { | ||
166 | pkt_data->flags &= ~IEEE80211_TXPD_AMPDU; | ||
167 | } | ||
168 | } | ||
169 | rcu_read_unlock(); | ||
170 | skb_queue_tail(&q->requeued[queue], skb); | ||
152 | qd->q.qlen++; | 171 | qd->q.qlen++; |
153 | return 0; | 172 | return 0; |
154 | } | 173 | } |
@@ -159,14 +178,31 @@ static int wme_qdiscop_enqueue(struct sk_buff *skb, struct Qdisc* qd) | |||
159 | */ | 178 | */ |
160 | if (WLAN_FC_IS_QOS_DATA(fc)) { | 179 | if (WLAN_FC_IS_QOS_DATA(fc)) { |
161 | u8 *p = skb->data + ieee80211_get_hdrlen(fc) - 2; | 180 | u8 *p = skb->data + ieee80211_get_hdrlen(fc) - 2; |
162 | u8 qos_hdr = skb->priority & QOS_CONTROL_TAG1D_MASK; | 181 | u8 ack_policy = 0; |
182 | tid = skb->priority & QOS_CONTROL_TAG1D_MASK; | ||
163 | if (local->wifi_wme_noack_test) | 183 | if (local->wifi_wme_noack_test) |
164 | qos_hdr |= QOS_CONTROL_ACK_POLICY_NOACK << | 184 | ack_policy |= QOS_CONTROL_ACK_POLICY_NOACK << |
165 | QOS_CONTROL_ACK_POLICY_SHIFT; | 185 | QOS_CONTROL_ACK_POLICY_SHIFT; |
166 | /* qos header is 2 bytes, second reserved */ | 186 | /* qos header is 2 bytes, second reserved */ |
167 | *p = qos_hdr; | 187 | *p = ack_policy | tid; |
168 | p++; | 188 | p++; |
169 | *p = 0; | 189 | *p = 0; |
190 | |||
191 | rcu_read_lock(); | ||
192 | |||
193 | sta = sta_info_get(local, hdr->addr1); | ||
194 | if (sta) { | ||
195 | int ampdu_queue = sta->tid_to_tx_q[tid]; | ||
196 | if ((ampdu_queue < local->hw.queues) && | ||
197 | test_bit(ampdu_queue, q->qdisc_pool)) { | ||
198 | queue = ampdu_queue; | ||
199 | pkt_data->flags |= IEEE80211_TXPD_AMPDU; | ||
200 | } else { | ||
201 | pkt_data->flags &= ~IEEE80211_TXPD_AMPDU; | ||
202 | } | ||
203 | } | ||
204 | |||
205 | rcu_read_unlock(); | ||
170 | } | 206 | } |
171 | 207 | ||
172 | if (unlikely(queue >= local->hw.queues)) { | 208 | if (unlikely(queue >= local->hw.queues)) { |
@@ -184,6 +220,7 @@ static int wme_qdiscop_enqueue(struct sk_buff *skb, struct Qdisc* qd) | |||
184 | kfree_skb(skb); | 220 | kfree_skb(skb); |
185 | err = NET_XMIT_DROP; | 221 | err = NET_XMIT_DROP; |
186 | } else { | 222 | } else { |
223 | tid = skb->priority & QOS_CONTROL_TAG1D_MASK; | ||
187 | pkt_data->queue = (unsigned int) queue; | 224 | pkt_data->queue = (unsigned int) queue; |
188 | qdisc = q->queues[queue]; | 225 | qdisc = q->queues[queue]; |
189 | err = qdisc->enqueue(skb, qdisc); | 226 | err = qdisc->enqueue(skb, qdisc); |
@@ -235,10 +272,11 @@ static struct sk_buff *wme_qdiscop_dequeue(struct Qdisc* qd) | |||
235 | /* check all the h/w queues in numeric/priority order */ | 272 | /* check all the h/w queues in numeric/priority order */ |
236 | for (queue = 0; queue < hw->queues; queue++) { | 273 | for (queue = 0; queue < hw->queues; queue++) { |
237 | /* see if there is room in this hardware queue */ | 274 | /* see if there is room in this hardware queue */ |
238 | if (test_bit(IEEE80211_LINK_STATE_XOFF, | 275 | if ((test_bit(IEEE80211_LINK_STATE_XOFF, |
239 | &local->state[queue]) || | 276 | &local->state[queue])) || |
240 | test_bit(IEEE80211_LINK_STATE_PENDING, | 277 | (test_bit(IEEE80211_LINK_STATE_PENDING, |
241 | &local->state[queue])) | 278 | &local->state[queue])) || |
279 | (!test_bit(queue, q->qdisc_pool))) | ||
242 | continue; | 280 | continue; |
243 | 281 | ||
244 | /* there is space - try and get a frame */ | 282 | /* there is space - try and get a frame */ |
@@ -360,6 +398,10 @@ static int wme_qdiscop_init(struct Qdisc *qd, struct nlattr *opt) | |||
360 | } | 398 | } |
361 | } | 399 | } |
362 | 400 | ||
401 | /* reserve all legacy QoS queues */ | ||
402 | for (i = 0; i < min(IEEE80211_TX_QUEUE_DATA4, queues); i++) | ||
403 | set_bit(i, q->qdisc_pool); | ||
404 | |||
363 | return err; | 405 | return err; |
364 | } | 406 | } |
365 | 407 | ||
@@ -605,3 +647,80 @@ void ieee80211_wme_unregister(void) | |||
605 | { | 647 | { |
606 | unregister_qdisc(&wme_qdisc_ops); | 648 | unregister_qdisc(&wme_qdisc_ops); |
607 | } | 649 | } |
650 | |||
651 | int ieee80211_ht_agg_queue_add(struct ieee80211_local *local, | ||
652 | struct sta_info *sta, u16 tid) | ||
653 | { | ||
654 | int i; | ||
655 | struct ieee80211_sched_data *q = | ||
656 | qdisc_priv(local->mdev->qdisc_sleeping); | ||
657 | DECLARE_MAC_BUF(mac); | ||
658 | |||
659 | /* prepare the filter and save it for the SW queue | ||
660 | * matching the received HW queue */ | ||
661 | |||
662 | /* try to get a Qdisc from the pool */ | ||
663 | for (i = IEEE80211_TX_QUEUE_BEACON; i < local->hw.queues; i++) | ||
664 | if (!test_and_set_bit(i, q->qdisc_pool)) { | ||
665 | ieee80211_stop_queue(local_to_hw(local), i); | ||
666 | sta->tid_to_tx_q[tid] = i; | ||
667 | |||
668 | /* If there are already pending packets | ||
669 | * on this TID, we first need to drain them | ||
670 | * on the previous queue, | ||
671 | * since HT requires strict ordering. */ | ||
672 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
673 | if (net_ratelimit()) | ||
674 | printk(KERN_DEBUG "allocated aggregation queue" | ||
675 | " %d tid %d addr %s pool=0x%lX", | ||
676 | i, tid, print_mac(mac, sta->addr), | ||
677 | q->qdisc_pool[0]); | ||
678 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
679 | return 0; | ||
680 | } | ||
681 | |||
682 | return -EAGAIN; | ||
683 | } | ||
684 | |||
685 | /* | ||
686 | * The caller needs to hold local->mdev->queue_lock. | ||
687 | */ | ||
688 | void ieee80211_ht_agg_queue_remove(struct ieee80211_local *local, | ||
689 | struct sta_info *sta, u16 tid, | ||
690 | u8 requeue) | ||
691 | { | ||
692 | struct ieee80211_sched_data *q = | ||
693 | qdisc_priv(local->mdev->qdisc_sleeping); | ||
694 | int agg_queue = sta->tid_to_tx_q[tid]; | ||
695 | |||
696 | /* return the qdisc to the pool */ | ||
697 | clear_bit(agg_queue, q->qdisc_pool); | ||
698 | sta->tid_to_tx_q[tid] = local->hw.queues; | ||
699 | |||
700 | if (requeue) | ||
701 | ieee80211_requeue(local, agg_queue); | ||
702 | else | ||
703 | q->queues[agg_queue]->ops->reset(q->queues[agg_queue]); | ||
704 | } | ||
705 | |||
706 | void ieee80211_requeue(struct ieee80211_local *local, int queue) | ||
707 | { | ||
708 | struct Qdisc *root_qd = local->mdev->qdisc_sleeping; | ||
709 | struct ieee80211_sched_data *q = qdisc_priv(root_qd); | ||
710 | struct Qdisc *qdisc = q->queues[queue]; | ||
711 | struct sk_buff *skb = NULL; | ||
712 | u32 len; | ||
713 | |||
714 | if (!qdisc || !qdisc->dequeue) | ||
715 | return; | ||
716 | |||
717 | printk(KERN_DEBUG "requeue: qlen = %d\n", qdisc->q.qlen); | ||
718 | for (len = qdisc->q.qlen; len > 0; len--) { | ||
719 | skb = qdisc->dequeue(qdisc); | ||
720 | root_qd->q.qlen--; | ||
722 | /* the packet will be classified again and | ||
723 | * pkt_data->queue will be overridden if needed */ | ||
723 | if (skb) | ||
724 | wme_qdiscop_enqueue(skb, root_qd); | ||
725 | } | ||
726 | } | ||
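
ieee80211_ht_agg_queue_add()/_remove() above treat the hardware queues as a bitmap pool: initialization reserves the legacy QoS queues, a new aggregation session grabs the first free bit, and teardown clears it again. A single-threaded sketch of that pool follows; the kernel uses atomic test_and_set_bit()/clear_bit(), plain bit operations stand in here, and the queue counts are illustrative.

    #include <stdio.h>

    #define NUM_QUEUES        16
    #define NUM_LEGACY_QUEUES  4    /* always reserved for ordinary QoS traffic */

    static unsigned long qdisc_pool;        /* bit set = queue in use */

    static void reserve_legacy_queues(void)
    {
        int i;
        for (i = 0; i < NUM_LEGACY_QUEUES; i++)
            qdisc_pool |= 1UL << i;
    }

    /* Grab the first free queue for an aggregation session, or -1 if none. */
    static int agg_queue_add(void)
    {
        int i;
        for (i = NUM_LEGACY_QUEUES; i < NUM_QUEUES; i++) {
            if (!(qdisc_pool & (1UL << i))) {
                qdisc_pool |= 1UL << i;     /* kernel: test_and_set_bit() */
                return i;
            }
        }
        return -1;                          /* kernel returns -EAGAIN */
    }

    static void agg_queue_remove(int queue)
    {
        qdisc_pool &= ~(1UL << queue);      /* kernel: clear_bit() */
    }

    int main(void)
    {
        int q;
        reserve_legacy_queues();
        q = agg_queue_add();
        printf("allocated queue %d, pool=0x%lx\n", q, qdisc_pool);
        agg_queue_remove(q);
        printf("released, pool=0x%lx\n", qdisc_pool);
        return 0;
    }
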
diff --git a/net/mac80211/wme.h b/net/mac80211/wme.h index 76c713a6450c..fcc6b05508cc 100644 --- a/net/mac80211/wme.h +++ b/net/mac80211/wme.h | |||
@@ -24,6 +24,8 @@ | |||
24 | 24 | ||
25 | #define QOS_CONTROL_TAG1D_MASK 0x07 | 25 | #define QOS_CONTROL_TAG1D_MASK 0x07 |
26 | 26 | ||
27 | extern const int ieee802_1d_to_ac[8]; | ||
28 | |||
27 | static inline int WLAN_FC_IS_QOS_DATA(u16 fc) | 29 | static inline int WLAN_FC_IS_QOS_DATA(u16 fc) |
28 | { | 30 | { |
29 | return (fc & 0x8C) == 0x88; | 31 | return (fc & 0x8C) == 0x88; |
@@ -32,7 +34,12 @@ static inline int WLAN_FC_IS_QOS_DATA(u16 fc) | |||
32 | #ifdef CONFIG_NET_SCHED | 34 | #ifdef CONFIG_NET_SCHED |
33 | void ieee80211_install_qdisc(struct net_device *dev); | 35 | void ieee80211_install_qdisc(struct net_device *dev); |
34 | int ieee80211_qdisc_installed(struct net_device *dev); | 36 | int ieee80211_qdisc_installed(struct net_device *dev); |
35 | 37 | int ieee80211_ht_agg_queue_add(struct ieee80211_local *local, | |
38 | struct sta_info *sta, u16 tid); | ||
39 | void ieee80211_ht_agg_queue_remove(struct ieee80211_local *local, | ||
40 | struct sta_info *sta, u16 tid, | ||
41 | u8 requeue); | ||
42 | void ieee80211_requeue(struct ieee80211_local *local, int queue); | ||
36 | int ieee80211_wme_register(void); | 43 | int ieee80211_wme_register(void); |
37 | void ieee80211_wme_unregister(void); | 44 | void ieee80211_wme_unregister(void); |
38 | #else | 45 | #else |
@@ -43,7 +50,19 @@ static inline int ieee80211_qdisc_installed(struct net_device *dev) | |||
43 | { | 50 | { |
44 | return 0; | 51 | return 0; |
45 | } | 52 | } |
46 | 53 | static inline int ieee80211_ht_agg_queue_add(struct ieee80211_local *local, | |
54 | struct sta_info *sta, u16 tid) | ||
55 | { | ||
56 | return -EAGAIN; | ||
57 | } | ||
58 | static inline void ieee80211_ht_agg_queue_remove(struct ieee80211_local *local, | ||
59 | struct sta_info *sta, u16 tid, | ||
60 | u8 requeue) | ||
61 | { | ||
62 | } | ||
63 | static inline void ieee80211_requeue(struct ieee80211_local *local, int queue) | ||
64 | { | ||
65 | } | ||
47 | static inline int ieee80211_wme_register(void) | 66 | static inline int ieee80211_wme_register(void) |
48 | { | 67 | { |
49 | return 0; | 68 | return 0; |
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c index 6f04311cf0a0..df0b7341efc8 100644 --- a/net/mac80211/wpa.c +++ b/net/mac80211/wpa.c | |||
@@ -70,8 +70,8 @@ static int ieee80211_get_hdr_info(const struct sk_buff *skb, u8 **sa, u8 **da, | |||
70 | } | 70 | } |
71 | 71 | ||
72 | 72 | ||
73 | ieee80211_txrx_result | 73 | ieee80211_tx_result |
74 | ieee80211_tx_h_michael_mic_add(struct ieee80211_txrx_data *tx) | 74 | ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx) |
75 | { | 75 | { |
76 | u8 *data, *sa, *da, *key, *mic, qos_tid; | 76 | u8 *data, *sa, *da, *key, *mic, qos_tid; |
77 | size_t data_len; | 77 | size_t data_len; |
@@ -84,18 +84,18 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_txrx_data *tx) | |||
84 | 84 | ||
85 | if (!tx->key || tx->key->conf.alg != ALG_TKIP || skb->len < 24 || | 85 | if (!tx->key || tx->key->conf.alg != ALG_TKIP || skb->len < 24 || |
86 | !WLAN_FC_DATA_PRESENT(fc)) | 86 | !WLAN_FC_DATA_PRESENT(fc)) |
87 | return TXRX_CONTINUE; | 87 | return TX_CONTINUE; |
88 | 88 | ||
89 | if (ieee80211_get_hdr_info(skb, &sa, &da, &qos_tid, &data, &data_len)) | 89 | if (ieee80211_get_hdr_info(skb, &sa, &da, &qos_tid, &data, &data_len)) |
90 | return TXRX_DROP; | 90 | return TX_DROP; |
91 | 91 | ||
92 | if ((tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) && | 92 | if ((tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) && |
93 | !(tx->flags & IEEE80211_TXRXD_FRAGMENTED) && | 93 | !(tx->flags & IEEE80211_TX_FRAGMENTED) && |
94 | !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) && | 94 | !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) && |
95 | !wpa_test) { | 95 | !wpa_test) { |
96 | /* hwaccel - with no need for preallocated room for Michael MIC | 96 | /* hwaccel - with no need for preallocated room for Michael MIC |
97 | */ | 97 | */ |
98 | return TXRX_CONTINUE; | 98 | return TX_CONTINUE; |
99 | } | 99 | } |
100 | 100 | ||
101 | if (skb_tailroom(skb) < MICHAEL_MIC_LEN) { | 101 | if (skb_tailroom(skb) < MICHAEL_MIC_LEN) { |
@@ -105,7 +105,7 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_txrx_data *tx) | |||
105 | GFP_ATOMIC))) { | 105 | GFP_ATOMIC))) { |
106 | printk(KERN_DEBUG "%s: failed to allocate more memory " | 106 | printk(KERN_DEBUG "%s: failed to allocate more memory " |
107 | "for Michael MIC\n", tx->dev->name); | 107 | "for Michael MIC\n", tx->dev->name); |
108 | return TXRX_DROP; | 108 | return TX_DROP; |
109 | } | 109 | } |
110 | } | 110 | } |
111 | 111 | ||
@@ -119,12 +119,12 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_txrx_data *tx) | |||
119 | mic = skb_put(skb, MICHAEL_MIC_LEN); | 119 | mic = skb_put(skb, MICHAEL_MIC_LEN); |
120 | michael_mic(key, da, sa, qos_tid & 0x0f, data, data_len, mic); | 120 | michael_mic(key, da, sa, qos_tid & 0x0f, data, data_len, mic); |
121 | 121 | ||
122 | return TXRX_CONTINUE; | 122 | return TX_CONTINUE; |
123 | } | 123 | } |
124 | 124 | ||
125 | 125 | ||
126 | ieee80211_txrx_result | 126 | ieee80211_rx_result |
127 | ieee80211_rx_h_michael_mic_verify(struct ieee80211_txrx_data *rx) | 127 | ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx) |
128 | { | 128 | { |
129 | u8 *data, *sa, *da, *key = NULL, qos_tid; | 129 | u8 *data, *sa, *da, *key = NULL, qos_tid; |
130 | size_t data_len; | 130 | size_t data_len; |
@@ -139,16 +139,16 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_txrx_data *rx) | |||
139 | /* | 139 | /* |
140 | * No way to verify the MIC if the hardware stripped it | 140 | * No way to verify the MIC if the hardware stripped it |
141 | */ | 141 | */ |
142 | if (rx->u.rx.status->flag & RX_FLAG_MMIC_STRIPPED) | 142 | if (rx->status->flag & RX_FLAG_MMIC_STRIPPED) |
143 | return TXRX_CONTINUE; | 143 | return RX_CONTINUE; |
144 | 144 | ||
145 | if (!rx->key || rx->key->conf.alg != ALG_TKIP || | 145 | if (!rx->key || rx->key->conf.alg != ALG_TKIP || |
146 | !(rx->fc & IEEE80211_FCTL_PROTECTED) || !WLAN_FC_DATA_PRESENT(fc)) | 146 | !(rx->fc & IEEE80211_FCTL_PROTECTED) || !WLAN_FC_DATA_PRESENT(fc)) |
147 | return TXRX_CONTINUE; | 147 | return RX_CONTINUE; |
148 | 148 | ||
149 | if (ieee80211_get_hdr_info(skb, &sa, &da, &qos_tid, &data, &data_len) | 149 | if (ieee80211_get_hdr_info(skb, &sa, &da, &qos_tid, &data, &data_len) |
150 | || data_len < MICHAEL_MIC_LEN) | 150 | || data_len < MICHAEL_MIC_LEN) |
151 | return TXRX_DROP; | 151 | return RX_DROP_UNUSABLE; |
152 | 152 | ||
153 | data_len -= MICHAEL_MIC_LEN; | 153 | data_len -= MICHAEL_MIC_LEN; |
154 | 154 | ||
@@ -161,29 +161,29 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_txrx_data *rx) | |||
161 | ALG_TKIP_TEMP_AUTH_TX_MIC_KEY]; | 161 | ALG_TKIP_TEMP_AUTH_TX_MIC_KEY]; |
162 | michael_mic(key, da, sa, qos_tid & 0x0f, data, data_len, mic); | 162 | michael_mic(key, da, sa, qos_tid & 0x0f, data, data_len, mic); |
163 | if (memcmp(mic, data + data_len, MICHAEL_MIC_LEN) != 0 || wpa_test) { | 163 | if (memcmp(mic, data + data_len, MICHAEL_MIC_LEN) != 0 || wpa_test) { |
164 | if (!(rx->flags & IEEE80211_TXRXD_RXRA_MATCH)) | 164 | if (!(rx->flags & IEEE80211_RX_RA_MATCH)) |
165 | return TXRX_DROP; | 165 | return RX_DROP_UNUSABLE; |
166 | 166 | ||
167 | printk(KERN_DEBUG "%s: invalid Michael MIC in data frame from " | 167 | printk(KERN_DEBUG "%s: invalid Michael MIC in data frame from " |
168 | "%s\n", rx->dev->name, print_mac(mac, sa)); | 168 | "%s\n", rx->dev->name, print_mac(mac, sa)); |
169 | 169 | ||
170 | mac80211_ev_michael_mic_failure(rx->dev, rx->key->conf.keyidx, | 170 | mac80211_ev_michael_mic_failure(rx->dev, rx->key->conf.keyidx, |
171 | (void *) skb->data); | 171 | (void *) skb->data); |
172 | return TXRX_DROP; | 172 | return RX_DROP_UNUSABLE; |
173 | } | 173 | } |
174 | 174 | ||
175 | /* remove Michael MIC from payload */ | 175 | /* remove Michael MIC from payload */ |
176 | skb_trim(skb, skb->len - MICHAEL_MIC_LEN); | 176 | skb_trim(skb, skb->len - MICHAEL_MIC_LEN); |
177 | 177 | ||
178 | /* update IV in key information to be able to detect replays */ | 178 | /* update IV in key information to be able to detect replays */ |
179 | rx->key->u.tkip.iv32_rx[rx->u.rx.queue] = rx->u.rx.tkip_iv32; | 179 | rx->key->u.tkip.iv32_rx[rx->queue] = rx->tkip_iv32; |
180 | rx->key->u.tkip.iv16_rx[rx->u.rx.queue] = rx->u.rx.tkip_iv16; | 180 | rx->key->u.tkip.iv16_rx[rx->queue] = rx->tkip_iv16; |
181 | 181 | ||
182 | return TXRX_CONTINUE; | 182 | return RX_CONTINUE; |
183 | } | 183 | } |
184 | 184 | ||
185 | 185 | ||
186 | static int tkip_encrypt_skb(struct ieee80211_txrx_data *tx, | 186 | static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, |
187 | struct sk_buff *skb, int test) | 187 | struct sk_buff *skb, int test) |
188 | { | 188 | { |
189 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | 189 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; |
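
The Michael MIC verification above recomputes the MIC over (DA, SA, priority, payload), compares it against the trailing eight bytes, and only then commits the TKIP IV as the new replay state. The sketch below shows that verify-then-commit ordering with a stub MIC; it does not reimplement michael_mic(), and all names are illustrative.

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    #define MIC_LEN 8

    /* Stub "MIC": folds the inputs into 8 bytes.  Stands in for michael_mic(). */
    static void stub_mic(const uint8_t *key, const uint8_t *data, size_t len,
                         uint8_t mic[MIC_LEN])
    {
        size_t i;
        memset(mic, 0, MIC_LEN);
        for (i = 0; i < len; i++)
            mic[i % MIC_LEN] ^= data[i] ^ key[i % 8];
    }

    struct replay_state { uint32_t iv32; uint16_t iv16; };

    /* Verify the MIC appended to the payload; only update the stored replay
     * counters once the frame is known to be genuine. */
    static int verify_and_commit(const uint8_t *key, const uint8_t *frame,
                                 size_t len, uint32_t rx_iv32, uint16_t rx_iv16,
                                 struct replay_state *state)
    {
        uint8_t mic[MIC_LEN];

        if (len < MIC_LEN)
            return -1;
        stub_mic(key, frame, len - MIC_LEN, mic);
        if (memcmp(mic, frame + len - MIC_LEN, MIC_LEN) != 0)
            return -1;                      /* drop; do not touch replay state */

        state->iv32 = rx_iv32;              /* commit IV only after success */
        state->iv16 = rx_iv16;
        return 0;
    }

    int main(void)
    {
        uint8_t key[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
        uint8_t frame[20] = { 0 };
        struct replay_state st = { 0, 0 };

        stub_mic(key, frame, sizeof(frame) - MIC_LEN, frame + sizeof(frame) - MIC_LEN);
        printf("verify: %d, iv32=%u\n",
               verify_and_commit(key, frame, sizeof(frame), 42, 7, &st),
               (unsigned)st.iv32);
        return 0;
    }
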
@@ -228,7 +228,7 @@ static int tkip_encrypt_skb(struct ieee80211_txrx_data *tx, | |||
228 | 0x7f), | 228 | 0x7f), |
229 | (u8) key->u.tkip.iv16); | 229 | (u8) key->u.tkip.iv16); |
230 | 230 | ||
231 | tx->u.tx.control->key_idx = tx->key->conf.hw_key_idx; | 231 | tx->control->key_idx = tx->key->conf.hw_key_idx; |
232 | return 0; | 232 | return 0; |
233 | } | 233 | } |
234 | 234 | ||
@@ -242,42 +242,42 @@ static int tkip_encrypt_skb(struct ieee80211_txrx_data *tx, | |||
242 | } | 242 | } |
243 | 243 | ||
244 | 244 | ||
245 | ieee80211_txrx_result | 245 | ieee80211_tx_result |
246 | ieee80211_crypto_tkip_encrypt(struct ieee80211_txrx_data *tx) | 246 | ieee80211_crypto_tkip_encrypt(struct ieee80211_tx_data *tx) |
247 | { | 247 | { |
248 | struct sk_buff *skb = tx->skb; | 248 | struct sk_buff *skb = tx->skb; |
249 | int wpa_test = 0, test = 0; | 249 | int wpa_test = 0, test = 0; |
250 | 250 | ||
251 | tx->u.tx.control->icv_len = TKIP_ICV_LEN; | 251 | tx->control->icv_len = TKIP_ICV_LEN; |
252 | tx->u.tx.control->iv_len = TKIP_IV_LEN; | 252 | tx->control->iv_len = TKIP_IV_LEN; |
253 | ieee80211_tx_set_iswep(tx); | 253 | ieee80211_tx_set_protected(tx); |
254 | 254 | ||
255 | if ((tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) && | 255 | if ((tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) && |
256 | !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) && | 256 | !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) && |
257 | !wpa_test) { | 257 | !wpa_test) { |
258 | /* hwaccel - with no need for preallocated room for IV/ICV */ | 258 | /* hwaccel - with no need for preallocated room for IV/ICV */ |
259 | tx->u.tx.control->key_idx = tx->key->conf.hw_key_idx; | 259 | tx->control->key_idx = tx->key->conf.hw_key_idx; |
260 | return TXRX_CONTINUE; | 260 | return TX_CONTINUE; |
261 | } | 261 | } |
262 | 262 | ||
263 | if (tkip_encrypt_skb(tx, skb, test) < 0) | 263 | if (tkip_encrypt_skb(tx, skb, test) < 0) |
264 | return TXRX_DROP; | 264 | return TX_DROP; |
265 | 265 | ||
266 | if (tx->u.tx.extra_frag) { | 266 | if (tx->extra_frag) { |
267 | int i; | 267 | int i; |
268 | for (i = 0; i < tx->u.tx.num_extra_frag; i++) { | 268 | for (i = 0; i < tx->num_extra_frag; i++) { |
269 | if (tkip_encrypt_skb(tx, tx->u.tx.extra_frag[i], test) | 269 | if (tkip_encrypt_skb(tx, tx->extra_frag[i], test) |
270 | < 0) | 270 | < 0) |
271 | return TXRX_DROP; | 271 | return TX_DROP; |
272 | } | 272 | } |
273 | } | 273 | } |
274 | 274 | ||
275 | return TXRX_CONTINUE; | 275 | return TX_CONTINUE; |
276 | } | 276 | } |
277 | 277 | ||
278 | 278 | ||
279 | ieee80211_txrx_result | 279 | ieee80211_rx_result |
280 | ieee80211_crypto_tkip_decrypt(struct ieee80211_txrx_data *rx) | 280 | ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx) |
281 | { | 281 | { |
282 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data; | 282 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data; |
283 | u16 fc; | 283 | u16 fc; |
@@ -290,19 +290,19 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_txrx_data *rx) | |||
290 | hdrlen = ieee80211_get_hdrlen(fc); | 290 | hdrlen = ieee80211_get_hdrlen(fc); |
291 | 291 | ||
292 | if ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) | 292 | if ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) |
293 | return TXRX_CONTINUE; | 293 | return RX_CONTINUE; |
294 | 294 | ||
295 | if (!rx->sta || skb->len - hdrlen < 12) | 295 | if (!rx->sta || skb->len - hdrlen < 12) |
296 | return TXRX_DROP; | 296 | return RX_DROP_UNUSABLE; |
297 | 297 | ||
298 | if (rx->u.rx.status->flag & RX_FLAG_DECRYPTED) { | 298 | if (rx->status->flag & RX_FLAG_DECRYPTED) { |
299 | if (rx->u.rx.status->flag & RX_FLAG_IV_STRIPPED) { | 299 | if (rx->status->flag & RX_FLAG_IV_STRIPPED) { |
300 | /* | 300 | /* |
301 | * Hardware took care of all processing, including | 301 | * Hardware took care of all processing, including |
302 | * replay protection, and stripped the ICV/IV so | 302 | * replay protection, and stripped the ICV/IV so |
303 | * we cannot do any checks here. | 303 | * we cannot do any checks here. |
304 | */ | 304 | */ |
305 | return TXRX_CONTINUE; | 305 | return RX_CONTINUE; |
306 | } | 306 | } |
307 | 307 | ||
308 | /* let TKIP code verify IV, but skip decryption */ | 308 | /* let TKIP code verify IV, but skip decryption */ |
@@ -312,9 +312,9 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_txrx_data *rx) | |||
312 | res = ieee80211_tkip_decrypt_data(rx->local->wep_rx_tfm, | 312 | res = ieee80211_tkip_decrypt_data(rx->local->wep_rx_tfm, |
313 | key, skb->data + hdrlen, | 313 | key, skb->data + hdrlen, |
314 | skb->len - hdrlen, rx->sta->addr, | 314 | skb->len - hdrlen, rx->sta->addr, |
315 | hwaccel, rx->u.rx.queue, | 315 | hwaccel, rx->queue, |
316 | &rx->u.rx.tkip_iv32, | 316 | &rx->tkip_iv32, |
317 | &rx->u.rx.tkip_iv16); | 317 | &rx->tkip_iv16); |
318 | if (res != TKIP_DECRYPT_OK || wpa_test) { | 318 | if (res != TKIP_DECRYPT_OK || wpa_test) { |
319 | #ifdef CONFIG_MAC80211_DEBUG | 319 | #ifdef CONFIG_MAC80211_DEBUG |
320 | if (net_ratelimit()) | 320 | if (net_ratelimit()) |
@@ -322,7 +322,7 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_txrx_data *rx) | |||
322 | "frame from %s (res=%d)\n", rx->dev->name, | 322 | "frame from %s (res=%d)\n", rx->dev->name, |
323 | print_mac(mac, rx->sta->addr), res); | 323 | print_mac(mac, rx->sta->addr), res); |
324 | #endif /* CONFIG_MAC80211_DEBUG */ | 324 | #endif /* CONFIG_MAC80211_DEBUG */ |
325 | return TXRX_DROP; | 325 | return RX_DROP_UNUSABLE; |
326 | } | 326 | } |
327 | 327 | ||
328 | /* Trim ICV */ | 328 | /* Trim ICV */ |
@@ -332,7 +332,7 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_txrx_data *rx) | |||
332 | memmove(skb->data + TKIP_IV_LEN, skb->data, hdrlen); | 332 | memmove(skb->data + TKIP_IV_LEN, skb->data, hdrlen); |
333 | skb_pull(skb, TKIP_IV_LEN); | 333 | skb_pull(skb, TKIP_IV_LEN); |
334 | 334 | ||
335 | return TXRX_CONTINUE; | 335 | return RX_CONTINUE; |
336 | } | 336 | } |
337 | 337 | ||
338 | 338 | ||
@@ -429,7 +429,7 @@ static inline int ccmp_hdr2pn(u8 *pn, u8 *hdr) | |||
429 | } | 429 | } |
430 | 430 | ||
431 | 431 | ||
432 | static int ccmp_encrypt_skb(struct ieee80211_txrx_data *tx, | 432 | static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, |
433 | struct sk_buff *skb, int test) | 433 | struct sk_buff *skb, int test) |
434 | { | 434 | { |
435 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | 435 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; |
@@ -478,7 +478,7 @@ static int ccmp_encrypt_skb(struct ieee80211_txrx_data *tx, | |||
478 | 478 | ||
479 | if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) { | 479 | if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) { |
480 | /* hwaccel - with preallocated room for CCMP header */ | 480 | /* hwaccel - with preallocated room for CCMP header */ |
481 | tx->u.tx.control->key_idx = key->conf.hw_key_idx; | 481 | tx->control->key_idx = key->conf.hw_key_idx; |
482 | return 0; | 482 | return 0; |
483 | } | 483 | } |
484 | 484 | ||
@@ -491,42 +491,42 @@ static int ccmp_encrypt_skb(struct ieee80211_txrx_data *tx, | |||
491 | } | 491 | } |
492 | 492 | ||
493 | 493 | ||
494 | ieee80211_txrx_result | 494 | ieee80211_tx_result |
495 | ieee80211_crypto_ccmp_encrypt(struct ieee80211_txrx_data *tx) | 495 | ieee80211_crypto_ccmp_encrypt(struct ieee80211_tx_data *tx) |
496 | { | 496 | { |
497 | struct sk_buff *skb = tx->skb; | 497 | struct sk_buff *skb = tx->skb; |
498 | int test = 0; | 498 | int test = 0; |
499 | 499 | ||
500 | tx->u.tx.control->icv_len = CCMP_MIC_LEN; | 500 | tx->control->icv_len = CCMP_MIC_LEN; |
501 | tx->u.tx.control->iv_len = CCMP_HDR_LEN; | 501 | tx->control->iv_len = CCMP_HDR_LEN; |
502 | ieee80211_tx_set_iswep(tx); | 502 | ieee80211_tx_set_protected(tx); |
503 | 503 | ||
504 | if ((tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) && | 504 | if ((tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) && |
505 | !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)) { | 505 | !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)) { |
506 | /* hwaccel - with no need for preallocated room for CCMP | 506 | /* hwaccel - with no need for preallocated room for CCMP |
507 | * header or MIC fields */ | 507 | * header or MIC fields */ |
508 | tx->u.tx.control->key_idx = tx->key->conf.hw_key_idx; | 508 | tx->control->key_idx = tx->key->conf.hw_key_idx; |
509 | return TXRX_CONTINUE; | 509 | return TX_CONTINUE; |
510 | } | 510 | } |
511 | 511 | ||
512 | if (ccmp_encrypt_skb(tx, skb, test) < 0) | 512 | if (ccmp_encrypt_skb(tx, skb, test) < 0) |
513 | return TXRX_DROP; | 513 | return TX_DROP; |
514 | 514 | ||
515 | if (tx->u.tx.extra_frag) { | 515 | if (tx->extra_frag) { |
516 | int i; | 516 | int i; |
517 | for (i = 0; i < tx->u.tx.num_extra_frag; i++) { | 517 | for (i = 0; i < tx->num_extra_frag; i++) { |
518 | if (ccmp_encrypt_skb(tx, tx->u.tx.extra_frag[i], test) | 518 | if (ccmp_encrypt_skb(tx, tx->extra_frag[i], test) |
519 | < 0) | 519 | < 0) |
520 | return TXRX_DROP; | 520 | return TX_DROP; |
521 | } | 521 | } |
522 | } | 522 | } |
523 | 523 | ||
524 | return TXRX_CONTINUE; | 524 | return TX_CONTINUE; |
525 | } | 525 | } |
526 | 526 | ||
527 | 527 | ||
528 | ieee80211_txrx_result | 528 | ieee80211_rx_result |
529 | ieee80211_crypto_ccmp_decrypt(struct ieee80211_txrx_data *rx) | 529 | ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx) |
530 | { | 530 | { |
531 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data; | 531 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data; |
532 | u16 fc; | 532 | u16 fc; |
@@ -541,21 +541,21 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_txrx_data *rx) | |||
541 | hdrlen = ieee80211_get_hdrlen(fc); | 541 | hdrlen = ieee80211_get_hdrlen(fc); |
542 | 542 | ||
543 | if ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) | 543 | if ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) |
544 | return TXRX_CONTINUE; | 544 | return RX_CONTINUE; |
545 | 545 | ||
546 | data_len = skb->len - hdrlen - CCMP_HDR_LEN - CCMP_MIC_LEN; | 546 | data_len = skb->len - hdrlen - CCMP_HDR_LEN - CCMP_MIC_LEN; |
547 | if (!rx->sta || data_len < 0) | 547 | if (!rx->sta || data_len < 0) |
548 | return TXRX_DROP; | 548 | return RX_DROP_UNUSABLE; |
549 | 549 | ||
550 | if ((rx->u.rx.status->flag & RX_FLAG_DECRYPTED) && | 550 | if ((rx->status->flag & RX_FLAG_DECRYPTED) && |
551 | (rx->u.rx.status->flag & RX_FLAG_IV_STRIPPED)) | 551 | (rx->status->flag & RX_FLAG_IV_STRIPPED)) |
552 | return TXRX_CONTINUE; | 552 | return RX_CONTINUE; |
553 | 553 | ||
554 | (void) ccmp_hdr2pn(pn, skb->data + hdrlen); | 554 | (void) ccmp_hdr2pn(pn, skb->data + hdrlen); |
555 | 555 | ||
556 | if (memcmp(pn, key->u.ccmp.rx_pn[rx->u.rx.queue], CCMP_PN_LEN) <= 0) { | 556 | if (memcmp(pn, key->u.ccmp.rx_pn[rx->queue], CCMP_PN_LEN) <= 0) { |
557 | #ifdef CONFIG_MAC80211_DEBUG | 557 | #ifdef CONFIG_MAC80211_DEBUG |
558 | u8 *ppn = key->u.ccmp.rx_pn[rx->u.rx.queue]; | 558 | u8 *ppn = key->u.ccmp.rx_pn[rx->queue]; |
559 | 559 | ||
560 | printk(KERN_DEBUG "%s: CCMP replay detected for RX frame from " | 560 | printk(KERN_DEBUG "%s: CCMP replay detected for RX frame from " |
561 | "%s (RX PN %02x%02x%02x%02x%02x%02x <= prev. PN " | 561 | "%s (RX PN %02x%02x%02x%02x%02x%02x <= prev. PN " |
@@ -565,10 +565,10 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_txrx_data *rx) | |||
565 | ppn[0], ppn[1], ppn[2], ppn[3], ppn[4], ppn[5]); | 565 | ppn[0], ppn[1], ppn[2], ppn[3], ppn[4], ppn[5]); |
566 | #endif /* CONFIG_MAC80211_DEBUG */ | 566 | #endif /* CONFIG_MAC80211_DEBUG */ |
567 | key->u.ccmp.replays++; | 567 | key->u.ccmp.replays++; |
568 | return TXRX_DROP; | 568 | return RX_DROP_UNUSABLE; |
569 | } | 569 | } |
570 | 570 | ||
571 | if (!(rx->u.rx.status->flag & RX_FLAG_DECRYPTED)) { | 571 | if (!(rx->status->flag & RX_FLAG_DECRYPTED)) { |
572 | /* hardware didn't decrypt/verify MIC */ | 572 | /* hardware didn't decrypt/verify MIC */ |
573 | u8 *scratch, *b_0, *aad; | 573 | u8 *scratch, *b_0, *aad; |
574 | 574 | ||
@@ -589,16 +589,16 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_txrx_data *rx) | |||
589 | "for RX frame from %s\n", rx->dev->name, | 589 | "for RX frame from %s\n", rx->dev->name, |
590 | print_mac(mac, rx->sta->addr)); | 590 | print_mac(mac, rx->sta->addr)); |
591 | #endif /* CONFIG_MAC80211_DEBUG */ | 591 | #endif /* CONFIG_MAC80211_DEBUG */ |
592 | return TXRX_DROP; | 592 | return RX_DROP_UNUSABLE; |
593 | } | 593 | } |
594 | } | 594 | } |
595 | 595 | ||
596 | memcpy(key->u.ccmp.rx_pn[rx->u.rx.queue], pn, CCMP_PN_LEN); | 596 | memcpy(key->u.ccmp.rx_pn[rx->queue], pn, CCMP_PN_LEN); |
597 | 597 | ||
598 | /* Remove CCMP header and MIC */ | 598 | /* Remove CCMP header and MIC */ |
599 | skb_trim(skb, skb->len - CCMP_MIC_LEN); | 599 | skb_trim(skb, skb->len - CCMP_MIC_LEN); |
600 | memmove(skb->data + CCMP_HDR_LEN, skb->data, hdrlen); | 600 | memmove(skb->data + CCMP_HDR_LEN, skb->data, hdrlen); |
601 | skb_pull(skb, CCMP_HDR_LEN); | 601 | skb_pull(skb, CCMP_HDR_LEN); |
602 | 602 | ||
603 | return TXRX_CONTINUE; | 603 | return RX_CONTINUE; |
604 | } | 604 | } |
diff --git a/net/mac80211/wpa.h b/net/mac80211/wpa.h index 49d80cf0cd75..d42d221d8a1d 100644 --- a/net/mac80211/wpa.h +++ b/net/mac80211/wpa.h | |||
@@ -13,19 +13,19 @@ | |||
13 | #include <linux/types.h> | 13 | #include <linux/types.h> |
14 | #include "ieee80211_i.h" | 14 | #include "ieee80211_i.h" |
15 | 15 | ||
16 | ieee80211_txrx_result | 16 | ieee80211_tx_result |
17 | ieee80211_tx_h_michael_mic_add(struct ieee80211_txrx_data *tx); | 17 | ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx); |
18 | ieee80211_txrx_result | 18 | ieee80211_rx_result |
19 | ieee80211_rx_h_michael_mic_verify(struct ieee80211_txrx_data *rx); | 19 | ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx); |
20 | 20 | ||
21 | ieee80211_txrx_result | 21 | ieee80211_tx_result |
22 | ieee80211_crypto_tkip_encrypt(struct ieee80211_txrx_data *tx); | 22 | ieee80211_crypto_tkip_encrypt(struct ieee80211_tx_data *tx); |
23 | ieee80211_txrx_result | 23 | ieee80211_rx_result |
24 | ieee80211_crypto_tkip_decrypt(struct ieee80211_txrx_data *rx); | 24 | ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx); |
25 | 25 | ||
26 | ieee80211_txrx_result | 26 | ieee80211_tx_result |
27 | ieee80211_crypto_ccmp_encrypt(struct ieee80211_txrx_data *tx); | 27 | ieee80211_crypto_ccmp_encrypt(struct ieee80211_tx_data *tx); |
28 | ieee80211_txrx_result | 28 | ieee80211_rx_result |
29 | ieee80211_crypto_ccmp_decrypt(struct ieee80211_txrx_data *rx); | 29 | ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx); |
30 | 30 | ||
31 | #endif /* WPA_H */ | 31 | #endif /* WPA_H */ |
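The wpa.h hunk above completes the split of the old combined ieee80211_txrx_result handler type into separate ieee80211_tx_result and ieee80211_rx_result types, matching the RX_CONTINUE / RX_DROP_UNUSABLE return codes now used in ieee80211_crypto_ccmp_decrypt(). A minimal sketch of the resulting RX-handler shape, using simplified stand-in types rather than the real mac80211 headers:

    /* Stand-in definitions for illustration only; not the mac80211 headers. */
    struct sta_info;
    typedef enum { RX_CONTINUE, RX_DROP_UNUSABLE } ieee80211_rx_result;

    struct ieee80211_rx_data {
            struct sta_info *sta;   /* NULL when no station entry exists */
            int queue;              /* formerly reached via rx->u.rx.queue */
    };

    static ieee80211_rx_result example_rx_handler(struct ieee80211_rx_data *rx)
    {
            if (!rx->sta)
                    return RX_DROP_UNUSABLE;   /* frame cannot be processed; drop it */
            return RX_CONTINUE;                /* hand the frame to the next RX handler */
    }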
diff --git a/net/netfilter/nf_conntrack_netbios_ns.c b/net/netfilter/nf_conntrack_netbios_ns.c index 9810d81e2a06..60dedaded84e 100644 --- a/net/netfilter/nf_conntrack_netbios_ns.c +++ b/net/netfilter/nf_conntrack_netbios_ns.c | |||
@@ -47,7 +47,7 @@ static int help(struct sk_buff *skb, unsigned int protoff, | |||
47 | { | 47 | { |
48 | struct nf_conntrack_expect *exp; | 48 | struct nf_conntrack_expect *exp; |
49 | struct iphdr *iph = ip_hdr(skb); | 49 | struct iphdr *iph = ip_hdr(skb); |
50 | struct rtable *rt = (struct rtable *)skb->dst; | 50 | struct rtable *rt = skb->rtable; |
51 | struct in_device *in_dev; | 51 | struct in_device *in_dev; |
52 | __be32 mask = 0; | 52 | __be32 mask = 0; |
53 | 53 | ||
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c index e88e96af613d..a9bf6e4fd0cc 100644 --- a/net/netfilter/nf_conntrack_standalone.c +++ b/net/netfilter/nf_conntrack_standalone.c | |||
@@ -293,7 +293,7 @@ static const struct file_operations ct_cpu_seq_fops = { | |||
293 | .open = ct_cpu_seq_open, | 293 | .open = ct_cpu_seq_open, |
294 | .read = seq_read, | 294 | .read = seq_read, |
295 | .llseek = seq_lseek, | 295 | .llseek = seq_lseek, |
296 | .release = seq_release_private, | 296 | .release = seq_release, |
297 | }; | 297 | }; |
298 | #endif /* CONFIG_PROC_FS */ | 298 | #endif /* CONFIG_PROC_FS */ |
299 | 299 | ||
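The .release change above follows the seq_file pairing rule: seq_release_private() is only correct when the file was opened through one of the *_open_private() helpers that allocate per-open iterator state, while an open path built on plain seq_open() pairs with seq_release(). A hedged sketch of that pairing, with a hypothetical open function name standing in for the real one:

    /* example_ct_open is a placeholder; only the open/release pairing is the point. */
    static const struct file_operations example_seq_fops = {
            .open    = example_ct_open,   /* uses seq_open(), no private iterator state */
            .read    = seq_read,
            .llseek  = seq_lseek,
            .release = seq_release,       /* seq_release_private only with *_open_private */
    };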
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 1ab0da2632e1..524e826bb976 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
@@ -1344,22 +1344,6 @@ static void netlink_data_ready(struct sock *sk, int len) | |||
1344 | * queueing. | 1344 | * queueing. |
1345 | */ | 1345 | */ |
1346 | 1346 | ||
1347 | static void __netlink_release(struct sock *sk) | ||
1348 | { | ||
1349 | /* | ||
1350 | * Last sock_put should drop referrence to sk->sk_net. It has already | ||
1351 | * been dropped in netlink_kernel_create. Taking referrence to stopping | ||
1352 | * namespace is not an option. | ||
1353 | * Take referrence to a socket to remove it from netlink lookup table | ||
1354 | * _alive_ and after that destroy it in the context of init_net. | ||
1355 | */ | ||
1356 | |||
1357 | sock_hold(sk); | ||
1358 | sock_release(sk->sk_socket); | ||
1359 | sk->sk_net = get_net(&init_net); | ||
1360 | sock_put(sk); | ||
1361 | } | ||
1362 | |||
1363 | struct sock * | 1347 | struct sock * |
1364 | netlink_kernel_create(struct net *net, int unit, unsigned int groups, | 1348 | netlink_kernel_create(struct net *net, int unit, unsigned int groups, |
1365 | void (*input)(struct sk_buff *skb), | 1349 | void (*input)(struct sk_buff *skb), |
@@ -1388,8 +1372,7 @@ netlink_kernel_create(struct net *net, int unit, unsigned int groups, | |||
1388 | goto out_sock_release_nosk; | 1372 | goto out_sock_release_nosk; |
1389 | 1373 | ||
1390 | sk = sock->sk; | 1374 | sk = sock->sk; |
1391 | put_net(sk->sk_net); | 1375 | sk_change_net(sk, net); |
1392 | sk->sk_net = net; | ||
1393 | 1376 | ||
1394 | if (groups < 32) | 1377 | if (groups < 32) |
1395 | groups = 32; | 1378 | groups = 32; |
@@ -1424,7 +1407,7 @@ netlink_kernel_create(struct net *net, int unit, unsigned int groups, | |||
1424 | 1407 | ||
1425 | out_sock_release: | 1408 | out_sock_release: |
1426 | kfree(listeners); | 1409 | kfree(listeners); |
1427 | __netlink_release(sk); | 1410 | netlink_kernel_release(sk); |
1428 | return NULL; | 1411 | return NULL; |
1429 | 1412 | ||
1430 | out_sock_release_nosk: | 1413 | out_sock_release_nosk: |
@@ -1437,10 +1420,7 @@ EXPORT_SYMBOL(netlink_kernel_create); | |||
1437 | void | 1420 | void |
1438 | netlink_kernel_release(struct sock *sk) | 1421 | netlink_kernel_release(struct sock *sk) |
1439 | { | 1422 | { |
1440 | if (sk == NULL || sk->sk_socket == NULL) | 1423 | sk_release_kernel(sk); |
1441 | return; | ||
1442 | |||
1443 | __netlink_release(sk); | ||
1444 | } | 1424 | } |
1445 | EXPORT_SYMBOL(netlink_kernel_release); | 1425 | EXPORT_SYMBOL(netlink_kernel_release); |
1446 | 1426 | ||
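With the hunks above, the open-coded __netlink_release() helper is gone: netlink_kernel_create() now moves the freshly created kernel socket into the target namespace with sk_change_net(), and both its error path and netlink_kernel_release() funnel through sk_release_kernel(). A sketch of the caller-side lifecycle that results; the protocol number, callback, and trailing netlink_kernel_create() arguments are assumptions for this kernel era, not taken from the patch:

    #define EXAMPLE_NL_UNIT 31                 /* placeholder netlink protocol number */

    static struct sock *example_nlsk;

    static void example_rcv(struct sk_buff *skb)
    {
            /* handle requests queued to the kernel socket */
    }

    static int __init example_init(void)
    {
            example_nlsk = netlink_kernel_create(&init_net, EXAMPLE_NL_UNIT, 0,
                                                 example_rcv, NULL, THIS_MODULE);
            return example_nlsk ? 0 : -ENOMEM;
    }

    static void __exit example_exit(void)
    {
            netlink_kernel_release(example_nlsk);   /* pairs with netlink_kernel_create() */
    }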
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index 1aaa2e804b0d..e58cc65728b5 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h | |||
@@ -619,8 +619,8 @@ void _dbprintk(const char *fmt, ...) | |||
619 | { | 619 | { |
620 | } | 620 | } |
621 | 621 | ||
622 | #define kenter(FMT,...) dbgprintk("==> %s("FMT")",__FUNCTION__ ,##__VA_ARGS__) | 622 | #define kenter(FMT,...) dbgprintk("==> %s("FMT")",__func__ ,##__VA_ARGS__) |
623 | #define kleave(FMT,...) dbgprintk("<== %s()"FMT"",__FUNCTION__ ,##__VA_ARGS__) | 623 | #define kleave(FMT,...) dbgprintk("<== %s()"FMT"",__func__ ,##__VA_ARGS__) |
624 | #define kdebug(FMT,...) dbgprintk(" "FMT ,##__VA_ARGS__) | 624 | #define kdebug(FMT,...) dbgprintk(" "FMT ,##__VA_ARGS__) |
625 | #define kproto(FMT,...) dbgprintk("### "FMT ,##__VA_ARGS__) | 625 | #define kproto(FMT,...) dbgprintk("### "FMT ,##__VA_ARGS__) |
626 | #define knet(FMT,...) dbgprintk("@@@ "FMT ,##__VA_ARGS__) | 626 | #define knet(FMT,...) dbgprintk("@@@ "FMT ,##__VA_ARGS__) |
@@ -671,8 +671,8 @@ do { \ | |||
671 | } while (0) | 671 | } while (0) |
672 | 672 | ||
673 | #else | 673 | #else |
674 | #define _enter(FMT,...) _dbprintk("==> %s("FMT")",__FUNCTION__ ,##__VA_ARGS__) | 674 | #define _enter(FMT,...) _dbprintk("==> %s("FMT")",__func__ ,##__VA_ARGS__) |
675 | #define _leave(FMT,...) _dbprintk("<== %s()"FMT"",__FUNCTION__ ,##__VA_ARGS__) | 675 | #define _leave(FMT,...) _dbprintk("<== %s()"FMT"",__func__ ,##__VA_ARGS__) |
676 | #define _debug(FMT,...) _dbprintk(" "FMT ,##__VA_ARGS__) | 676 | #define _debug(FMT,...) _dbprintk(" "FMT ,##__VA_ARGS__) |
677 | #define _proto(FMT,...) _dbprintk("### "FMT ,##__VA_ARGS__) | 677 | #define _proto(FMT,...) _dbprintk("### "FMT ,##__VA_ARGS__) |
678 | #define _net(FMT,...) _dbprintk("@@@ "FMT ,##__VA_ARGS__) | 678 | #define _net(FMT,...) _dbprintk("@@@ "FMT ,##__VA_ARGS__) |
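The two rxrpc hunks above are part of the tree-wide __FUNCTION__ to __func__ conversion that also covers the SCTP and SUNRPC files below: __func__ is the predefined identifier standardized by C99, while __FUNCTION__ is the older GCC-specific spelling. A small userspace illustration of the identifier itself (plain C, not kernel code):

    #include <stdio.h>

    static void report(void)
    {
            /* __func__ expands to the enclosing function's name, here "report" */
            printf("in %s()\n", __func__);
    }

    int main(void)
    {
            report();
            return 0;
    }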
diff --git a/net/rxrpc/ar-proc.c b/net/rxrpc/ar-proc.c index 83eda247fe48..017322e2786d 100644 --- a/net/rxrpc/ar-proc.c +++ b/net/rxrpc/ar-proc.c | |||
@@ -103,7 +103,7 @@ const struct file_operations rxrpc_call_seq_fops = { | |||
103 | .open = rxrpc_call_seq_open, | 103 | .open = rxrpc_call_seq_open, |
104 | .read = seq_read, | 104 | .read = seq_read, |
105 | .llseek = seq_lseek, | 105 | .llseek = seq_lseek, |
106 | .release = seq_release_private, | 106 | .release = seq_release, |
107 | }; | 107 | }; |
108 | 108 | ||
109 | /* | 109 | /* |
@@ -188,5 +188,5 @@ const struct file_operations rxrpc_connection_seq_fops = { | |||
188 | .open = rxrpc_connection_seq_open, | 188 | .open = rxrpc_connection_seq_open, |
189 | .read = seq_read, | 189 | .read = seq_read, |
190 | .llseek = seq_lseek, | 190 | .llseek = seq_lseek, |
191 | .release = seq_release_private, | 191 | .release = seq_release, |
192 | }; | 192 | }; |
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c index 3da4129b89d1..72cf86e3c090 100644 --- a/net/sched/em_meta.c +++ b/net/sched/em_meta.c | |||
@@ -256,10 +256,10 @@ META_COLLECTOR(int_rtclassid) | |||
256 | 256 | ||
257 | META_COLLECTOR(int_rtiif) | 257 | META_COLLECTOR(int_rtiif) |
258 | { | 258 | { |
259 | if (unlikely(skb->dst == NULL)) | 259 | if (unlikely(skb->rtable == NULL)) |
260 | *err = -1; | 260 | *err = -1; |
261 | else | 261 | else |
262 | dst->value = ((struct rtable*) skb->dst)->fl.iif; | 262 | dst->value = skb->rtable->fl.iif; |
263 | } | 263 | } |
264 | 264 | ||
265 | /************************************************************************** | 265 | /************************************************************************** |
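This collector, like the netfilter and SCTP hunks elsewhere in the diff, stops casting skb->dst to struct rtable and reads the new dedicated skb->rtable pointer instead. A minimal sketch of the accessor change using stand-in structures (field names follow the patch; the layouts are not the real kernel ones):

    /* Stand-in definitions for illustration only. */
    struct flowi_stub { int iif; };
    struct rtable     { struct flowi_stub fl; int rt_iif; };
    struct sk_buff    { struct rtable *rtable; };

    static int route_input_if(const struct sk_buff *skb)
    {
            if (skb->rtable == NULL)
                    return -1;                  /* no cached route attached to the skb */
            return skb->rtable->fl.iif;         /* was ((struct rtable *)skb->dst)->fl.iif */
    }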
diff --git a/net/sctp/associola.c b/net/sctp/associola.c index d29f792e0529..422c98aa9d5c 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c | |||
@@ -1330,7 +1330,7 @@ void sctp_assoc_sync_pmtu(struct sctp_association *asoc) | |||
1330 | } | 1330 | } |
1331 | 1331 | ||
1332 | SCTP_DEBUG_PRINTK("%s: asoc:%p, pmtu:%d, frag_point:%d\n", | 1332 | SCTP_DEBUG_PRINTK("%s: asoc:%p, pmtu:%d, frag_point:%d\n", |
1333 | __FUNCTION__, asoc, asoc->pathmtu, asoc->frag_point); | 1333 | __func__, asoc, asoc->pathmtu, asoc->frag_point); |
1334 | } | 1334 | } |
1335 | 1335 | ||
1336 | /* Should we send a SACK to update our peer? */ | 1336 | /* Should we send a SACK to update our peer? */ |
@@ -1370,7 +1370,7 @@ void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned len) | |||
1370 | } | 1370 | } |
1371 | 1371 | ||
1372 | SCTP_DEBUG_PRINTK("%s: asoc %p rwnd increased by %d to (%u, %u) " | 1372 | SCTP_DEBUG_PRINTK("%s: asoc %p rwnd increased by %d to (%u, %u) " |
1373 | "- %u\n", __FUNCTION__, asoc, len, asoc->rwnd, | 1373 | "- %u\n", __func__, asoc, len, asoc->rwnd, |
1374 | asoc->rwnd_over, asoc->a_rwnd); | 1374 | asoc->rwnd_over, asoc->a_rwnd); |
1375 | 1375 | ||
1376 | /* Send a window update SACK if the rwnd has increased by at least the | 1376 | /* Send a window update SACK if the rwnd has increased by at least the |
@@ -1381,7 +1381,7 @@ void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned len) | |||
1381 | if (sctp_peer_needs_update(asoc)) { | 1381 | if (sctp_peer_needs_update(asoc)) { |
1382 | asoc->a_rwnd = asoc->rwnd; | 1382 | asoc->a_rwnd = asoc->rwnd; |
1383 | SCTP_DEBUG_PRINTK("%s: Sending window update SACK- asoc: %p " | 1383 | SCTP_DEBUG_PRINTK("%s: Sending window update SACK- asoc: %p " |
1384 | "rwnd: %u a_rwnd: %u\n", __FUNCTION__, | 1384 | "rwnd: %u a_rwnd: %u\n", __func__, |
1385 | asoc, asoc->rwnd, asoc->a_rwnd); | 1385 | asoc, asoc->rwnd, asoc->a_rwnd); |
1386 | sack = sctp_make_sack(asoc); | 1386 | sack = sctp_make_sack(asoc); |
1387 | if (!sack) | 1387 | if (!sack) |
@@ -1410,7 +1410,7 @@ void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned len) | |||
1410 | asoc->rwnd = 0; | 1410 | asoc->rwnd = 0; |
1411 | } | 1411 | } |
1412 | SCTP_DEBUG_PRINTK("%s: asoc %p rwnd decreased by %d to (%u, %u)\n", | 1412 | SCTP_DEBUG_PRINTK("%s: asoc %p rwnd decreased by %d to (%u, %u)\n", |
1413 | __FUNCTION__, asoc, len, asoc->rwnd, | 1413 | __func__, asoc, len, asoc->rwnd, |
1414 | asoc->rwnd_over); | 1414 | asoc->rwnd_over); |
1415 | } | 1415 | } |
1416 | 1416 | ||
diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c index 4d3128f5ccc3..e1f355080026 100644 --- a/net/sctp/chunk.c +++ b/net/sctp/chunk.c | |||
@@ -189,7 +189,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc, | |||
189 | msecs_to_jiffies(sinfo->sinfo_timetolive); | 189 | msecs_to_jiffies(sinfo->sinfo_timetolive); |
190 | msg->can_abandon = 1; | 190 | msg->can_abandon = 1; |
191 | SCTP_DEBUG_PRINTK("%s: msg:%p expires_at: %ld jiffies:%ld\n", | 191 | SCTP_DEBUG_PRINTK("%s: msg:%p expires_at: %ld jiffies:%ld\n", |
192 | __FUNCTION__, msg, msg->expires_at, jiffies); | 192 | __func__, msg, msg->expires_at, jiffies); |
193 | } | 193 | } |
194 | 194 | ||
195 | max = asoc->frag_point; | 195 | max = asoc->frag_point; |
diff --git a/net/sctp/input.c b/net/sctp/input.c index 812ff1756c3e..c1d7e3b5c4b4 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c | |||
@@ -409,7 +409,7 @@ void sctp_icmp_proto_unreachable(struct sock *sk, | |||
409 | struct sctp_association *asoc, | 409 | struct sctp_association *asoc, |
410 | struct sctp_transport *t) | 410 | struct sctp_transport *t) |
411 | { | 411 | { |
412 | SCTP_DEBUG_PRINTK("%s\n", __FUNCTION__); | 412 | SCTP_DEBUG_PRINTK("%s\n", __func__); |
413 | 413 | ||
414 | sctp_do_sm(SCTP_EVENT_T_OTHER, | 414 | sctp_do_sm(SCTP_EVENT_T_OTHER, |
415 | SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH), | 415 | SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH), |
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index 9aa0733aee87..1937be583cd7 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c | |||
@@ -225,7 +225,7 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport, | |||
225 | 225 | ||
226 | SCTP_DEBUG_PRINTK("%s: skb:%p, len:%d, " | 226 | SCTP_DEBUG_PRINTK("%s: skb:%p, len:%d, " |
227 | "src:" NIP6_FMT " dst:" NIP6_FMT "\n", | 227 | "src:" NIP6_FMT " dst:" NIP6_FMT "\n", |
228 | __FUNCTION__, skb, skb->len, | 228 | __func__, skb, skb->len, |
229 | NIP6(fl.fl6_src), NIP6(fl.fl6_dst)); | 229 | NIP6(fl.fl6_src), NIP6(fl.fl6_dst)); |
230 | 230 | ||
231 | SCTP_INC_STATS(SCTP_MIB_OUTSCTPPACKS); | 231 | SCTP_INC_STATS(SCTP_MIB_OUTSCTPPACKS); |
@@ -250,7 +250,7 @@ static struct dst_entry *sctp_v6_get_dst(struct sctp_association *asoc, | |||
250 | 250 | ||
251 | 251 | ||
252 | SCTP_DEBUG_PRINTK("%s: DST=" NIP6_FMT " ", | 252 | SCTP_DEBUG_PRINTK("%s: DST=" NIP6_FMT " ", |
253 | __FUNCTION__, NIP6(fl.fl6_dst)); | 253 | __func__, NIP6(fl.fl6_dst)); |
254 | 254 | ||
255 | if (saddr) { | 255 | if (saddr) { |
256 | ipv6_addr_copy(&fl.fl6_src, &saddr->v6.sin6_addr); | 256 | ipv6_addr_copy(&fl.fl6_src, &saddr->v6.sin6_addr); |
@@ -259,7 +259,7 @@ static struct dst_entry *sctp_v6_get_dst(struct sctp_association *asoc, | |||
259 | NIP6(fl.fl6_src)); | 259 | NIP6(fl.fl6_src)); |
260 | } | 260 | } |
261 | 261 | ||
262 | dst = ip6_route_output(NULL, &fl); | 262 | dst = ip6_route_output(&init_net, NULL, &fl); |
263 | if (!dst->error) { | 263 | if (!dst->error) { |
264 | struct rt6_info *rt; | 264 | struct rt6_info *rt; |
265 | rt = (struct rt6_info *)dst; | 265 | rt = (struct rt6_info *)dst; |
@@ -312,10 +312,11 @@ static void sctp_v6_get_saddr(struct sctp_association *asoc, | |||
312 | 312 | ||
313 | SCTP_DEBUG_PRINTK("%s: asoc:%p dst:%p " | 313 | SCTP_DEBUG_PRINTK("%s: asoc:%p dst:%p " |
314 | "daddr:" NIP6_FMT " ", | 314 | "daddr:" NIP6_FMT " ", |
315 | __FUNCTION__, asoc, dst, NIP6(daddr->v6.sin6_addr)); | 315 | __func__, asoc, dst, NIP6(daddr->v6.sin6_addr)); |
316 | 316 | ||
317 | if (!asoc) { | 317 | if (!asoc) { |
318 | ipv6_get_saddr(dst, &daddr->v6.sin6_addr,&saddr->v6.sin6_addr); | 318 | ipv6_dev_get_saddr(dst ? ip6_dst_idev(dst)->dev : NULL, |
319 | &daddr->v6.sin6_addr, &saddr->v6.sin6_addr); | ||
319 | SCTP_DEBUG_PRINTK("saddr from ipv6_get_saddr: " NIP6_FMT "\n", | 320 | SCTP_DEBUG_PRINTK("saddr from ipv6_get_saddr: " NIP6_FMT "\n", |
320 | NIP6(saddr->v6.sin6_addr)); | 321 | NIP6(saddr->v6.sin6_addr)); |
321 | return; | 322 | return; |
@@ -350,7 +351,7 @@ static void sctp_v6_get_saddr(struct sctp_association *asoc, | |||
350 | } else { | 351 | } else { |
351 | printk(KERN_ERR "%s: asoc:%p Could not find a valid source " | 352 | printk(KERN_ERR "%s: asoc:%p Could not find a valid source " |
352 | "address for the dest:" NIP6_FMT "\n", | 353 | "address for the dest:" NIP6_FMT "\n", |
353 | __FUNCTION__, asoc, NIP6(daddr->v6.sin6_addr)); | 354 | __func__, asoc, NIP6(daddr->v6.sin6_addr)); |
354 | } | 355 | } |
355 | 356 | ||
356 | rcu_read_unlock(); | 357 | rcu_read_unlock(); |
diff --git a/net/sctp/output.c b/net/sctp/output.c index aa700feea76c..cf4f9fb6819d 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c | |||
@@ -74,7 +74,7 @@ struct sctp_packet *sctp_packet_config(struct sctp_packet *packet, | |||
74 | { | 74 | { |
75 | struct sctp_chunk *chunk = NULL; | 75 | struct sctp_chunk *chunk = NULL; |
76 | 76 | ||
77 | SCTP_DEBUG_PRINTK("%s: packet:%p vtag:0x%x\n", __FUNCTION__, | 77 | SCTP_DEBUG_PRINTK("%s: packet:%p vtag:0x%x\n", __func__, |
78 | packet, vtag); | 78 | packet, vtag); |
79 | 79 | ||
80 | packet->vtag = vtag; | 80 | packet->vtag = vtag; |
@@ -106,7 +106,7 @@ struct sctp_packet *sctp_packet_init(struct sctp_packet *packet, | |||
106 | struct sctp_association *asoc = transport->asoc; | 106 | struct sctp_association *asoc = transport->asoc; |
107 | size_t overhead; | 107 | size_t overhead; |
108 | 108 | ||
109 | SCTP_DEBUG_PRINTK("%s: packet:%p transport:%p\n", __FUNCTION__, | 109 | SCTP_DEBUG_PRINTK("%s: packet:%p transport:%p\n", __func__, |
110 | packet, transport); | 110 | packet, transport); |
111 | 111 | ||
112 | packet->transport = transport; | 112 | packet->transport = transport; |
@@ -138,7 +138,7 @@ void sctp_packet_free(struct sctp_packet *packet) | |||
138 | { | 138 | { |
139 | struct sctp_chunk *chunk, *tmp; | 139 | struct sctp_chunk *chunk, *tmp; |
140 | 140 | ||
141 | SCTP_DEBUG_PRINTK("%s: packet:%p\n", __FUNCTION__, packet); | 141 | SCTP_DEBUG_PRINTK("%s: packet:%p\n", __func__, packet); |
142 | 142 | ||
143 | list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) { | 143 | list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) { |
144 | list_del_init(&chunk->list); | 144 | list_del_init(&chunk->list); |
@@ -162,7 +162,7 @@ sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *packet, | |||
162 | sctp_xmit_t retval; | 162 | sctp_xmit_t retval; |
163 | int error = 0; | 163 | int error = 0; |
164 | 164 | ||
165 | SCTP_DEBUG_PRINTK("%s: packet:%p chunk:%p\n", __FUNCTION__, | 165 | SCTP_DEBUG_PRINTK("%s: packet:%p chunk:%p\n", __func__, |
166 | packet, chunk); | 166 | packet, chunk); |
167 | 167 | ||
168 | switch ((retval = (sctp_packet_append_chunk(packet, chunk)))) { | 168 | switch ((retval = (sctp_packet_append_chunk(packet, chunk)))) { |
@@ -264,7 +264,7 @@ sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet, | |||
264 | size_t pmtu; | 264 | size_t pmtu; |
265 | int too_big; | 265 | int too_big; |
266 | 266 | ||
267 | SCTP_DEBUG_PRINTK("%s: packet:%p chunk:%p\n", __FUNCTION__, packet, | 267 | SCTP_DEBUG_PRINTK("%s: packet:%p chunk:%p\n", __func__, packet, |
268 | chunk); | 268 | chunk); |
269 | 269 | ||
270 | /* Try to bundle AUTH chunk */ | 270 | /* Try to bundle AUTH chunk */ |
@@ -372,7 +372,7 @@ int sctp_packet_transmit(struct sctp_packet *packet) | |||
372 | unsigned char *auth = NULL; /* pointer to auth in skb data */ | 372 | unsigned char *auth = NULL; /* pointer to auth in skb data */ |
373 | __u32 cksum_buf_len = sizeof(struct sctphdr); | 373 | __u32 cksum_buf_len = sizeof(struct sctphdr); |
374 | 374 | ||
375 | SCTP_DEBUG_PRINTK("%s: packet:%p\n", __FUNCTION__, packet); | 375 | SCTP_DEBUG_PRINTK("%s: packet:%p\n", __func__, packet); |
376 | 376 | ||
377 | /* Do NOT generate a chunkless packet. */ | 377 | /* Do NOT generate a chunkless packet. */ |
378 | if (list_empty(&packet->chunk_list)) | 378 | if (list_empty(&packet->chunk_list)) |
@@ -677,7 +677,7 @@ static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet, | |||
677 | "transport: %p, cwnd: %d, " | 677 | "transport: %p, cwnd: %d, " |
678 | "ssthresh: %d, flight_size: %d, " | 678 | "ssthresh: %d, flight_size: %d, " |
679 | "pba: %d\n", | 679 | "pba: %d\n", |
680 | __FUNCTION__, transport, | 680 | __func__, transport, |
681 | transport->cwnd, | 681 | transport->cwnd, |
682 | transport->ssthresh, | 682 | transport->ssthresh, |
683 | transport->flight_size, | 683 | transport->flight_size, |
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index 1bb3c5c35d2a..392012f5ab83 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c | |||
@@ -469,7 +469,7 @@ void sctp_retransmit_mark(struct sctp_outq *q, | |||
469 | 469 | ||
470 | SCTP_DEBUG_PRINTK("%s: transport: %p, reason: %d, " | 470 | SCTP_DEBUG_PRINTK("%s: transport: %p, reason: %d, " |
471 | "cwnd: %d, ssthresh: %d, flight_size: %d, " | 471 | "cwnd: %d, ssthresh: %d, flight_size: %d, " |
472 | "pba: %d\n", __FUNCTION__, | 472 | "pba: %d\n", __func__, |
473 | transport, reason, | 473 | transport, reason, |
474 | transport->cwnd, transport->ssthresh, | 474 | transport->cwnd, transport->ssthresh, |
475 | transport->flight_size, | 475 | transport->flight_size, |
@@ -494,6 +494,8 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport, | |||
494 | */ | 494 | */ |
495 | if (transport == transport->asoc->peer.retran_path) | 495 | if (transport == transport->asoc->peer.retran_path) |
496 | sctp_assoc_update_retran_path(transport->asoc); | 496 | sctp_assoc_update_retran_path(transport->asoc); |
497 | transport->asoc->rtx_data_chunks += | ||
498 | transport->asoc->unack_data; | ||
497 | break; | 499 | break; |
498 | case SCTP_RTXR_FAST_RTX: | 500 | case SCTP_RTXR_FAST_RTX: |
499 | SCTP_INC_STATS(SCTP_MIB_FAST_RETRANSMITS); | 501 | SCTP_INC_STATS(SCTP_MIB_FAST_RETRANSMITS); |
@@ -504,6 +506,7 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport, | |||
504 | break; | 506 | break; |
505 | case SCTP_RTXR_T1_RTX: | 507 | case SCTP_RTXR_T1_RTX: |
506 | SCTP_INC_STATS(SCTP_MIB_T1_RETRANSMITS); | 508 | SCTP_INC_STATS(SCTP_MIB_T1_RETRANSMITS); |
509 | transport->asoc->init_retries++; | ||
507 | break; | 510 | break; |
508 | default: | 511 | default: |
509 | BUG(); | 512 | BUG(); |
@@ -1203,10 +1206,10 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack) | |||
1203 | sctp_generate_fwdtsn(q, sack_ctsn); | 1206 | sctp_generate_fwdtsn(q, sack_ctsn); |
1204 | 1207 | ||
1205 | SCTP_DEBUG_PRINTK("%s: sack Cumulative TSN Ack is 0x%x.\n", | 1208 | SCTP_DEBUG_PRINTK("%s: sack Cumulative TSN Ack is 0x%x.\n", |
1206 | __FUNCTION__, sack_ctsn); | 1209 | __func__, sack_ctsn); |
1207 | SCTP_DEBUG_PRINTK("%s: Cumulative TSN Ack of association, " | 1210 | SCTP_DEBUG_PRINTK("%s: Cumulative TSN Ack of association, " |
1208 | "%p is 0x%x. Adv peer ack point: 0x%x\n", | 1211 | "%p is 0x%x. Adv peer ack point: 0x%x\n", |
1209 | __FUNCTION__, asoc, ctsn, asoc->adv_peer_ack_point); | 1212 | __func__, asoc, ctsn, asoc->adv_peer_ack_point); |
1210 | 1213 | ||
1211 | /* See if all chunks are acked. | 1214 | /* See if all chunks are acked. |
1212 | * Make sure the empty queue handler will get run later. | 1215 | * Make sure the empty queue handler will get run later. |
@@ -1441,7 +1444,7 @@ static void sctp_check_transmitted(struct sctp_outq *q, | |||
1441 | if (tchunk->tsn_gap_acked) { | 1444 | if (tchunk->tsn_gap_acked) { |
1442 | SCTP_DEBUG_PRINTK("%s: Receiver reneged on " | 1445 | SCTP_DEBUG_PRINTK("%s: Receiver reneged on " |
1443 | "data TSN: 0x%x\n", | 1446 | "data TSN: 0x%x\n", |
1444 | __FUNCTION__, | 1447 | __func__, |
1445 | tsn); | 1448 | tsn); |
1446 | tchunk->tsn_gap_acked = 0; | 1449 | tchunk->tsn_gap_acked = 0; |
1447 | 1450 | ||
@@ -1558,7 +1561,7 @@ static void sctp_check_transmitted(struct sctp_outq *q, | |||
1558 | (sack_ctsn+2 == q->asoc->next_tsn)) { | 1561 | (sack_ctsn+2 == q->asoc->next_tsn)) { |
1559 | SCTP_DEBUG_PRINTK("%s: SACK received for zero " | 1562 | SCTP_DEBUG_PRINTK("%s: SACK received for zero " |
1560 | "window probe: %u\n", | 1563 | "window probe: %u\n", |
1561 | __FUNCTION__, sack_ctsn); | 1564 | __func__, sack_ctsn); |
1562 | q->asoc->overall_error_count = 0; | 1565 | q->asoc->overall_error_count = 0; |
1563 | transport->error_count = 0; | 1566 | transport->error_count = 0; |
1564 | } | 1567 | } |
@@ -1623,7 +1626,7 @@ static void sctp_mark_missing(struct sctp_outq *q, | |||
1623 | 1626 | ||
1624 | SCTP_DEBUG_PRINTK( | 1627 | SCTP_DEBUG_PRINTK( |
1625 | "%s: TSN 0x%x missing counter: %d\n", | 1628 | "%s: TSN 0x%x missing counter: %d\n", |
1626 | __FUNCTION__, tsn, | 1629 | __func__, tsn, |
1627 | chunk->tsn_missing_report); | 1630 | chunk->tsn_missing_report); |
1628 | } | 1631 | } |
1629 | } | 1632 | } |
@@ -1646,7 +1649,7 @@ static void sctp_mark_missing(struct sctp_outq *q, | |||
1646 | 1649 | ||
1647 | SCTP_DEBUG_PRINTK("%s: transport: %p, cwnd: %d, " | 1650 | SCTP_DEBUG_PRINTK("%s: transport: %p, cwnd: %d, " |
1648 | "ssthresh: %d, flight_size: %d, pba: %d\n", | 1651 | "ssthresh: %d, flight_size: %d, pba: %d\n", |
1649 | __FUNCTION__, transport, transport->cwnd, | 1652 | __func__, transport, transport->cwnd, |
1650 | transport->ssthresh, transport->flight_size, | 1653 | transport->ssthresh, transport->flight_size, |
1651 | transport->partial_bytes_acked); | 1654 | transport->partial_bytes_acked); |
1652 | } | 1655 | } |
diff --git a/net/sctp/proc.c b/net/sctp/proc.c index 973f1dbc2ec3..ddca90e5e3a5 100644 --- a/net/sctp/proc.c +++ b/net/sctp/proc.c | |||
@@ -279,8 +279,10 @@ static void * sctp_assocs_seq_start(struct seq_file *seq, loff_t *pos) | |||
279 | *pos = 0; | 279 | *pos = 0; |
280 | 280 | ||
281 | if (*pos == 0) | 281 | if (*pos == 0) |
282 | seq_printf(seq, " ASSOC SOCK STY SST ST HBKT ASSOC-ID TX_QUEUE RX_QUEUE UID INODE LPORT " | 282 | seq_printf(seq, " ASSOC SOCK STY SST ST HBKT " |
283 | "RPORT LADDRS <-> RADDRS\n"); | 283 | "ASSOC-ID TX_QUEUE RX_QUEUE UID INODE LPORT " |
284 | "RPORT LADDRS <-> RADDRS " | ||
285 | "HBINT INS OUTS MAXRT T1X T2X RTXC\n"); | ||
284 | 286 | ||
285 | return (void *)pos; | 287 | return (void *)pos; |
286 | } | 288 | } |
@@ -319,15 +321,21 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v) | |||
319 | assoc = sctp_assoc(epb); | 321 | assoc = sctp_assoc(epb); |
320 | sk = epb->sk; | 322 | sk = epb->sk; |
321 | seq_printf(seq, | 323 | seq_printf(seq, |
322 | "%8p %8p %-3d %-3d %-2d %-4d %4d %8d %8d %7d %5lu %-5d %5d ", | 324 | "%8p %8p %-3d %-3d %-2d %-4d " |
325 | "%4d %8d %8d %7d %5lu %-5d %5d " | ||
326 | "%8lu %5d %5d %4d %4d %4d %8d ", | ||
323 | assoc, sk, sctp_sk(sk)->type, sk->sk_state, | 327 | assoc, sk, sctp_sk(sk)->type, sk->sk_state, |
324 | assoc->state, hash, assoc->assoc_id, | 328 | assoc->state, hash, |
329 | assoc->assoc_id, | ||
325 | assoc->sndbuf_used, | 330 | assoc->sndbuf_used, |
326 | atomic_read(&assoc->rmem_alloc), | 331 | atomic_read(&assoc->rmem_alloc), |
327 | sock_i_uid(sk), sock_i_ino(sk), | 332 | sock_i_uid(sk), sock_i_ino(sk), |
328 | epb->bind_addr.port, | 333 | epb->bind_addr.port, |
329 | assoc->peer.port); | 334 | assoc->peer.port, |
330 | 335 | assoc->hbinterval, assoc->c.sinit_max_instreams, | |
336 | assoc->c.sinit_num_ostreams, assoc->max_retrans, | ||
337 | assoc->init_retries, assoc->shutdown_retries, | ||
338 | assoc->rtx_data_chunks); | ||
331 | seq_printf(seq, " "); | 339 | seq_printf(seq, " "); |
332 | sctp_seq_dump_local_addrs(seq, epb); | 340 | sctp_seq_dump_local_addrs(seq, epb); |
333 | seq_printf(seq, "<-> "); | 341 | seq_printf(seq, "<-> "); |
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 7a7646a9565c..25be8f04de6e 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c | |||
@@ -363,7 +363,7 @@ static int sctp_v4_addr_valid(union sctp_addr *addr, | |||
363 | return 0; | 363 | return 0; |
364 | 364 | ||
365 | /* Is this a broadcast address? */ | 365 | /* Is this a broadcast address? */ |
366 | if (skb && ((struct rtable *)skb->dst)->rt_flags & RTCF_BROADCAST) | 366 | if (skb && skb->rtable->rt_flags & RTCF_BROADCAST) |
367 | return 0; | 367 | return 0; |
368 | 368 | ||
369 | return 1; | 369 | return 1; |
@@ -451,7 +451,7 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc, | |||
451 | fl.fl4_src = saddr->v4.sin_addr.s_addr; | 451 | fl.fl4_src = saddr->v4.sin_addr.s_addr; |
452 | 452 | ||
453 | SCTP_DEBUG_PRINTK("%s: DST:%u.%u.%u.%u, SRC:%u.%u.%u.%u - ", | 453 | SCTP_DEBUG_PRINTK("%s: DST:%u.%u.%u.%u, SRC:%u.%u.%u.%u - ", |
454 | __FUNCTION__, NIPQUAD(fl.fl4_dst), | 454 | __func__, NIPQUAD(fl.fl4_dst), |
455 | NIPQUAD(fl.fl4_src)); | 455 | NIPQUAD(fl.fl4_src)); |
456 | 456 | ||
457 | if (!ip_route_output_key(&init_net, &rt, &fl)) { | 457 | if (!ip_route_output_key(&init_net, &rt, &fl)) { |
@@ -539,7 +539,7 @@ static void sctp_v4_get_saddr(struct sctp_association *asoc, | |||
539 | /* What interface did this skb arrive on? */ | 539 | /* What interface did this skb arrive on? */ |
540 | static int sctp_v4_skb_iif(const struct sk_buff *skb) | 540 | static int sctp_v4_skb_iif(const struct sk_buff *skb) |
541 | { | 541 | { |
542 | return ((struct rtable *)skb->dst)->rt_iif; | 542 | return skb->rtable->rt_iif; |
543 | } | 543 | } |
544 | 544 | ||
545 | /* Was this packet marked by Explicit Congestion Notification? */ | 545 | /* Was this packet marked by Explicit Congestion Notification? */ |
@@ -630,6 +630,9 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev, | |||
630 | struct sctp_sockaddr_entry *temp; | 630 | struct sctp_sockaddr_entry *temp; |
631 | int found = 0; | 631 | int found = 0; |
632 | 632 | ||
633 | if (ifa->ifa_dev->dev->nd_net != &init_net) | ||
634 | return NOTIFY_DONE; | ||
635 | |||
633 | switch (ev) { | 636 | switch (ev) { |
634 | case NETDEV_UP: | 637 | case NETDEV_UP: |
635 | addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC); | 638 | addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC); |
@@ -826,9 +829,9 @@ static inline int sctp_v4_xmit(struct sk_buff *skb, | |||
826 | { | 829 | { |
827 | SCTP_DEBUG_PRINTK("%s: skb:%p, len:%d, " | 830 | SCTP_DEBUG_PRINTK("%s: skb:%p, len:%d, " |
828 | "src:%u.%u.%u.%u, dst:%u.%u.%u.%u\n", | 831 | "src:%u.%u.%u.%u, dst:%u.%u.%u.%u\n", |
829 | __FUNCTION__, skb, skb->len, | 832 | __func__, skb, skb->len, |
830 | NIPQUAD(((struct rtable *)skb->dst)->rt_src), | 833 | NIPQUAD(skb->rtable->rt_src), |
831 | NIPQUAD(((struct rtable *)skb->dst)->rt_dst)); | 834 | NIPQUAD(skb->rtable->rt_dst)); |
832 | 835 | ||
833 | SCTP_INC_STATS(SCTP_MIB_OUTSCTPPACKS); | 836 | SCTP_INC_STATS(SCTP_MIB_OUTSCTPPACKS); |
834 | return ip_queue_xmit(skb, ipfragok); | 837 | return ip_queue_xmit(skb, ipfragok); |
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index 28eb38eb6083..02bf32c30263 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c | |||
@@ -243,7 +243,7 @@ void sctp_generate_t3_rtx_event(unsigned long peer) | |||
243 | 243 | ||
244 | sctp_bh_lock_sock(asoc->base.sk); | 244 | sctp_bh_lock_sock(asoc->base.sk); |
245 | if (sock_owned_by_user(asoc->base.sk)) { | 245 | if (sock_owned_by_user(asoc->base.sk)) { |
246 | SCTP_DEBUG_PRINTK("%s:Sock is busy.\n", __FUNCTION__); | 246 | SCTP_DEBUG_PRINTK("%s:Sock is busy.\n", __func__); |
247 | 247 | ||
248 | /* Try again later. */ | 248 | /* Try again later. */ |
249 | if (!mod_timer(&transport->T3_rtx_timer, jiffies + (HZ/20))) | 249 | if (!mod_timer(&transport->T3_rtx_timer, jiffies + (HZ/20))) |
@@ -283,7 +283,7 @@ static void sctp_generate_timeout_event(struct sctp_association *asoc, | |||
283 | sctp_bh_lock_sock(asoc->base.sk); | 283 | sctp_bh_lock_sock(asoc->base.sk); |
284 | if (sock_owned_by_user(asoc->base.sk)) { | 284 | if (sock_owned_by_user(asoc->base.sk)) { |
285 | SCTP_DEBUG_PRINTK("%s:Sock is busy: timer %d\n", | 285 | SCTP_DEBUG_PRINTK("%s:Sock is busy: timer %d\n", |
286 | __FUNCTION__, | 286 | __func__, |
287 | timeout_type); | 287 | timeout_type); |
288 | 288 | ||
289 | /* Try again later. */ | 289 | /* Try again later. */ |
@@ -361,7 +361,7 @@ void sctp_generate_heartbeat_event(unsigned long data) | |||
361 | 361 | ||
362 | sctp_bh_lock_sock(asoc->base.sk); | 362 | sctp_bh_lock_sock(asoc->base.sk); |
363 | if (sock_owned_by_user(asoc->base.sk)) { | 363 | if (sock_owned_by_user(asoc->base.sk)) { |
364 | SCTP_DEBUG_PRINTK("%s:Sock is busy.\n", __FUNCTION__); | 364 | SCTP_DEBUG_PRINTK("%s:Sock is busy.\n", __func__); |
365 | 365 | ||
366 | /* Try again later. */ | 366 | /* Try again later. */ |
367 | if (!mod_timer(&transport->hb_timer, jiffies + (HZ/20))) | 367 | if (!mod_timer(&transport->hb_timer, jiffies + (HZ/20))) |
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index f2ed6473feef..c0c6bee77cf5 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c | |||
@@ -1124,7 +1124,7 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep, | |||
1124 | printk(KERN_WARNING | 1124 | printk(KERN_WARNING |
1125 | "%s association %p could not find address " | 1125 | "%s association %p could not find address " |
1126 | NIP6_FMT "\n", | 1126 | NIP6_FMT "\n", |
1127 | __FUNCTION__, | 1127 | __func__, |
1128 | asoc, | 1128 | asoc, |
1129 | NIP6(from_addr.v6.sin6_addr)); | 1129 | NIP6(from_addr.v6.sin6_addr)); |
1130 | } else { | 1130 | } else { |
@@ -1132,7 +1132,7 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep, | |||
1132 | printk(KERN_WARNING | 1132 | printk(KERN_WARNING |
1133 | "%s association %p could not find address " | 1133 | "%s association %p could not find address " |
1134 | NIPQUAD_FMT "\n", | 1134 | NIPQUAD_FMT "\n", |
1135 | __FUNCTION__, | 1135 | __func__, |
1136 | asoc, | 1136 | asoc, |
1137 | NIPQUAD(from_addr.v4.sin_addr.s_addr)); | 1137 | NIPQUAD(from_addr.v4.sin_addr.s_addr)); |
1138 | } | 1138 | } |
@@ -1150,7 +1150,7 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep, | |||
1150 | time_after(jiffies, hbinfo->sent_at + max_interval)) { | 1150 | time_after(jiffies, hbinfo->sent_at + max_interval)) { |
1151 | SCTP_DEBUG_PRINTK("%s: HEARTBEAT ACK with invalid timestamp " | 1151 | SCTP_DEBUG_PRINTK("%s: HEARTBEAT ACK with invalid timestamp " |
1152 | "received for transport: %p\n", | 1152 | "received for transport: %p\n", |
1153 | __FUNCTION__, link); | 1153 | __func__, link); |
1154 | return SCTP_DISPOSITION_DISCARD; | 1154 | return SCTP_DISPOSITION_DISCARD; |
1155 | } | 1155 | } |
1156 | 1156 | ||
@@ -3668,7 +3668,7 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn(const struct sctp_endpoint *ep, | |||
3668 | skb_pull(chunk->skb, len); | 3668 | skb_pull(chunk->skb, len); |
3669 | 3669 | ||
3670 | tsn = ntohl(fwdtsn_hdr->new_cum_tsn); | 3670 | tsn = ntohl(fwdtsn_hdr->new_cum_tsn); |
3671 | SCTP_DEBUG_PRINTK("%s: TSN 0x%x.\n", __FUNCTION__, tsn); | 3671 | SCTP_DEBUG_PRINTK("%s: TSN 0x%x.\n", __func__, tsn); |
3672 | 3672 | ||
3673 | /* The TSN is too high--silently discard the chunk and count on it | 3673 | /* The TSN is too high--silently discard the chunk and count on it |
3674 | * getting retransmitted later. | 3674 | * getting retransmitted later. |
@@ -3728,7 +3728,7 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn_fast( | |||
3728 | skb_pull(chunk->skb, len); | 3728 | skb_pull(chunk->skb, len); |
3729 | 3729 | ||
3730 | tsn = ntohl(fwdtsn_hdr->new_cum_tsn); | 3730 | tsn = ntohl(fwdtsn_hdr->new_cum_tsn); |
3731 | SCTP_DEBUG_PRINTK("%s: TSN 0x%x.\n", __FUNCTION__, tsn); | 3731 | SCTP_DEBUG_PRINTK("%s: TSN 0x%x.\n", __func__, tsn); |
3732 | 3732 | ||
3733 | /* The TSN is too high--silently discard the chunk and count on it | 3733 | /* The TSN is too high--silently discard the chunk and count on it |
3734 | * getting retransmitted later. | 3734 | * getting retransmitted later. |
@@ -5312,6 +5312,8 @@ sctp_disposition_t sctp_sf_t2_timer_expire(const struct sctp_endpoint *ep, | |||
5312 | SCTP_DEBUG_PRINTK("Timer T2 expired.\n"); | 5312 | SCTP_DEBUG_PRINTK("Timer T2 expired.\n"); |
5313 | SCTP_INC_STATS(SCTP_MIB_T2_SHUTDOWN_EXPIREDS); | 5313 | SCTP_INC_STATS(SCTP_MIB_T2_SHUTDOWN_EXPIREDS); |
5314 | 5314 | ||
5315 | ((struct sctp_association *)asoc)->shutdown_retries++; | ||
5316 | |||
5315 | if (asoc->overall_error_count >= asoc->max_retrans) { | 5317 | if (asoc->overall_error_count >= asoc->max_retrans) { |
5316 | sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, | 5318 | sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, |
5317 | SCTP_ERROR(ETIMEDOUT)); | 5319 | SCTP_ERROR(ETIMEDOUT)); |
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index d994d822900d..a3138a0fe2c5 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
@@ -525,7 +525,7 @@ static int sctp_send_asconf_add_ip(struct sock *sk, | |||
525 | ep = sp->ep; | 525 | ep = sp->ep; |
526 | 526 | ||
527 | SCTP_DEBUG_PRINTK("%s: (sk: %p, addrs: %p, addrcnt: %d)\n", | 527 | SCTP_DEBUG_PRINTK("%s: (sk: %p, addrs: %p, addrcnt: %d)\n", |
528 | __FUNCTION__, sk, addrs, addrcnt); | 528 | __func__, sk, addrs, addrcnt); |
529 | 529 | ||
530 | list_for_each(pos, &ep->asocs) { | 530 | list_for_each(pos, &ep->asocs) { |
531 | asoc = list_entry(pos, struct sctp_association, asocs); | 531 | asoc = list_entry(pos, struct sctp_association, asocs); |
@@ -711,7 +711,7 @@ static int sctp_send_asconf_del_ip(struct sock *sk, | |||
711 | ep = sp->ep; | 711 | ep = sp->ep; |
712 | 712 | ||
713 | SCTP_DEBUG_PRINTK("%s: (sk: %p, addrs: %p, addrcnt: %d)\n", | 713 | SCTP_DEBUG_PRINTK("%s: (sk: %p, addrs: %p, addrcnt: %d)\n", |
714 | __FUNCTION__, sk, addrs, addrcnt); | 714 | __func__, sk, addrs, addrcnt); |
715 | 715 | ||
716 | list_for_each(pos, &ep->asocs) { | 716 | list_for_each(pos, &ep->asocs) { |
717 | asoc = list_entry(pos, struct sctp_association, asocs); | 717 | asoc = list_entry(pos, struct sctp_association, asocs); |
@@ -1197,7 +1197,7 @@ SCTP_STATIC int sctp_setsockopt_connectx(struct sock* sk, | |||
1197 | struct sockaddr *kaddrs; | 1197 | struct sockaddr *kaddrs; |
1198 | 1198 | ||
1199 | SCTP_DEBUG_PRINTK("%s - sk %p addrs %p addrs_size %d\n", | 1199 | SCTP_DEBUG_PRINTK("%s - sk %p addrs %p addrs_size %d\n", |
1200 | __FUNCTION__, sk, addrs, addrs_size); | 1200 | __func__, sk, addrs, addrs_size); |
1201 | 1201 | ||
1202 | if (unlikely(addrs_size <= 0)) | 1202 | if (unlikely(addrs_size <= 0)) |
1203 | return -EINVAL; | 1203 | return -EINVAL; |
@@ -3302,7 +3302,7 @@ SCTP_STATIC int sctp_connect(struct sock *sk, struct sockaddr *addr, | |||
3302 | sctp_lock_sock(sk); | 3302 | sctp_lock_sock(sk); |
3303 | 3303 | ||
3304 | SCTP_DEBUG_PRINTK("%s - sk: %p, sockaddr: %p, addr_len: %d\n", | 3304 | SCTP_DEBUG_PRINTK("%s - sk: %p, sockaddr: %p, addr_len: %d\n", |
3305 | __FUNCTION__, sk, addr, addr_len); | 3305 | __func__, sk, addr, addr_len); |
3306 | 3306 | ||
3307 | /* Validate addr_len before calling common connect/connectx routine. */ | 3307 | /* Validate addr_len before calling common connect/connectx routine. */ |
3308 | af = sctp_get_af_specific(addr->sa_family); | 3308 | af = sctp_get_af_specific(addr->sa_family); |
@@ -3823,7 +3823,7 @@ static int sctp_getsockopt_peeloff(struct sock *sk, int len, char __user *optval | |||
3823 | goto out; | 3823 | goto out; |
3824 | } | 3824 | } |
3825 | 3825 | ||
3826 | SCTP_DEBUG_PRINTK("%s: sk: %p asoc: %p\n", __FUNCTION__, sk, asoc); | 3826 | SCTP_DEBUG_PRINTK("%s: sk: %p asoc: %p\n", __func__, sk, asoc); |
3827 | 3827 | ||
3828 | retval = sctp_do_peeloff(asoc, &newsock); | 3828 | retval = sctp_do_peeloff(asoc, &newsock); |
3829 | if (retval < 0) | 3829 | if (retval < 0) |
@@ -3837,7 +3837,7 @@ static int sctp_getsockopt_peeloff(struct sock *sk, int len, char __user *optval | |||
3837 | } | 3837 | } |
3838 | 3838 | ||
3839 | SCTP_DEBUG_PRINTK("%s: sk: %p asoc: %p newsk: %p sd: %d\n", | 3839 | SCTP_DEBUG_PRINTK("%s: sk: %p asoc: %p newsk: %p sd: %d\n", |
3840 | __FUNCTION__, sk, asoc, newsock->sk, retval); | 3840 | __func__, sk, asoc, newsock->sk, retval); |
3841 | 3841 | ||
3842 | /* Return the fd mapped to the new socket. */ | 3842 | /* Return the fd mapped to the new socket. */ |
3843 | peeloff.sd = retval; | 3843 | peeloff.sd = retval; |
@@ -6233,7 +6233,7 @@ static int sctp_wait_for_connect(struct sctp_association *asoc, long *timeo_p) | |||
6233 | long current_timeo = *timeo_p; | 6233 | long current_timeo = *timeo_p; |
6234 | DEFINE_WAIT(wait); | 6234 | DEFINE_WAIT(wait); |
6235 | 6235 | ||
6236 | SCTP_DEBUG_PRINTK("%s: asoc=%p, timeo=%ld\n", __FUNCTION__, asoc, | 6236 | SCTP_DEBUG_PRINTK("%s: asoc=%p, timeo=%ld\n", __func__, asoc, |
6237 | (long)(*timeo_p)); | 6237 | (long)(*timeo_p)); |
6238 | 6238 | ||
6239 | /* Increment the association's refcnt. */ | 6239 | /* Increment the association's refcnt. */ |
diff --git a/net/sctp/transport.c b/net/sctp/transport.c index d9f8af852b56..f4938f6c5abe 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c | |||
@@ -260,7 +260,7 @@ void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu) | |||
260 | if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) { | 260 | if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) { |
261 | printk(KERN_WARNING "%s: Reported pmtu %d too low, " | 261 | printk(KERN_WARNING "%s: Reported pmtu %d too low, " |
262 | "using default minimum of %d\n", | 262 | "using default minimum of %d\n", |
263 | __FUNCTION__, pmtu, | 263 | __func__, pmtu, |
264 | SCTP_DEFAULT_MINSEGMENT); | 264 | SCTP_DEFAULT_MINSEGMENT); |
265 | /* Use default minimum segment size and disable | 265 | /* Use default minimum segment size and disable |
266 | * pmtu discovery on this transport. | 266 | * pmtu discovery on this transport. |
@@ -388,7 +388,7 @@ void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt) | |||
388 | tp->rto_pending = 0; | 388 | tp->rto_pending = 0; |
389 | 389 | ||
390 | SCTP_DEBUG_PRINTK("%s: transport: %p, rtt: %d, srtt: %d " | 390 | SCTP_DEBUG_PRINTK("%s: transport: %p, rtt: %d, srtt: %d " |
391 | "rttvar: %d, rto: %ld\n", __FUNCTION__, | 391 | "rttvar: %d, rto: %ld\n", __func__, |
392 | tp, rtt, tp->srtt, tp->rttvar, tp->rto); | 392 | tp, rtt, tp->srtt, tp->rttvar, tp->rto); |
393 | } | 393 | } |
394 | 394 | ||
@@ -434,7 +434,7 @@ void sctp_transport_raise_cwnd(struct sctp_transport *transport, | |||
434 | SCTP_DEBUG_PRINTK("%s: SLOW START: transport: %p, " | 434 | SCTP_DEBUG_PRINTK("%s: SLOW START: transport: %p, " |
435 | "bytes_acked: %d, cwnd: %d, ssthresh: %d, " | 435 | "bytes_acked: %d, cwnd: %d, ssthresh: %d, " |
436 | "flight_size: %d, pba: %d\n", | 436 | "flight_size: %d, pba: %d\n", |
437 | __FUNCTION__, | 437 | __func__, |
438 | transport, bytes_acked, cwnd, | 438 | transport, bytes_acked, cwnd, |
439 | ssthresh, flight_size, pba); | 439 | ssthresh, flight_size, pba); |
440 | } else { | 440 | } else { |
@@ -460,7 +460,7 @@ void sctp_transport_raise_cwnd(struct sctp_transport *transport, | |||
460 | SCTP_DEBUG_PRINTK("%s: CONGESTION AVOIDANCE: " | 460 | SCTP_DEBUG_PRINTK("%s: CONGESTION AVOIDANCE: " |
461 | "transport: %p, bytes_acked: %d, cwnd: %d, " | 461 | "transport: %p, bytes_acked: %d, cwnd: %d, " |
462 | "ssthresh: %d, flight_size: %d, pba: %d\n", | 462 | "ssthresh: %d, flight_size: %d, pba: %d\n", |
463 | __FUNCTION__, | 463 | __func__, |
464 | transport, bytes_acked, cwnd, | 464 | transport, bytes_acked, cwnd, |
465 | ssthresh, flight_size, pba); | 465 | ssthresh, flight_size, pba); |
466 | } | 466 | } |
@@ -546,7 +546,7 @@ void sctp_transport_lower_cwnd(struct sctp_transport *transport, | |||
546 | 546 | ||
547 | transport->partial_bytes_acked = 0; | 547 | transport->partial_bytes_acked = 0; |
548 | SCTP_DEBUG_PRINTK("%s: transport: %p reason: %d cwnd: " | 548 | SCTP_DEBUG_PRINTK("%s: transport: %p reason: %d cwnd: " |
549 | "%d ssthresh: %d\n", __FUNCTION__, | 549 | "%d ssthresh: %d\n", __func__, |
550 | transport, reason, | 550 | transport, reason, |
551 | transport->cwnd, transport->ssthresh); | 551 | transport->cwnd, transport->ssthresh); |
552 | } | 552 | } |
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index 6dac38792288..5828e5c060ca 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c | |||
@@ -625,7 +625,7 @@ gss_create(struct rpc_clnt *clnt, rpc_authflavor_t flavor) | |||
625 | gss_auth->mech = gss_mech_get_by_pseudoflavor(flavor); | 625 | gss_auth->mech = gss_mech_get_by_pseudoflavor(flavor); |
626 | if (!gss_auth->mech) { | 626 | if (!gss_auth->mech) { |
627 | printk(KERN_WARNING "%s: Pseudoflavor %d not found!\n", | 627 | printk(KERN_WARNING "%s: Pseudoflavor %d not found!\n", |
628 | __FUNCTION__, flavor); | 628 | __func__, flavor); |
629 | goto err_free; | 629 | goto err_free; |
630 | } | 630 | } |
631 | gss_auth->service = gss_pseudoflavor_to_service(gss_auth->mech, flavor); | 631 | gss_auth->service = gss_pseudoflavor_to_service(gss_auth->mech, flavor); |
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 8c6a7f1a25e9..13a3718e7cc9 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c | |||
@@ -43,7 +43,7 @@ | |||
43 | 43 | ||
44 | #define dprint_status(t) \ | 44 | #define dprint_status(t) \ |
45 | dprintk("RPC: %5u %s (status %d)\n", t->tk_pid, \ | 45 | dprintk("RPC: %5u %s (status %d)\n", t->tk_pid, \ |
46 | __FUNCTION__, t->tk_status) | 46 | __func__, t->tk_status) |
47 | 47 | ||
48 | /* | 48 | /* |
49 | * All RPC clients are linked into this list | 49 | * All RPC clients are linked into this list |
@@ -372,7 +372,7 @@ out_no_path: | |||
372 | out_no_stats: | 372 | out_no_stats: |
373 | kfree(new); | 373 | kfree(new); |
374 | out_no_clnt: | 374 | out_no_clnt: |
375 | dprintk("RPC: %s: returned error %d\n", __FUNCTION__, err); | 375 | dprintk("RPC: %s: returned error %d\n", __func__, err); |
376 | return ERR_PTR(err); | 376 | return ERR_PTR(err); |
377 | } | 377 | } |
378 | EXPORT_SYMBOL_GPL(rpc_clone_client); | 378 | EXPORT_SYMBOL_GPL(rpc_clone_client); |
@@ -756,7 +756,7 @@ call_reserveresult(struct rpc_task *task) | |||
756 | } | 756 | } |
757 | 757 | ||
758 | printk(KERN_ERR "%s: status=%d, but no request slot, exiting\n", | 758 | printk(KERN_ERR "%s: status=%d, but no request slot, exiting\n", |
759 | __FUNCTION__, status); | 759 | __func__, status); |
760 | rpc_exit(task, -EIO); | 760 | rpc_exit(task, -EIO); |
761 | return; | 761 | return; |
762 | } | 762 | } |
@@ -767,7 +767,7 @@ call_reserveresult(struct rpc_task *task) | |||
767 | */ | 767 | */ |
768 | if (task->tk_rqstp) { | 768 | if (task->tk_rqstp) { |
769 | printk(KERN_ERR "%s: status=%d, request allocated anyway\n", | 769 | printk(KERN_ERR "%s: status=%d, request allocated anyway\n", |
770 | __FUNCTION__, status); | 770 | __func__, status); |
771 | xprt_release(task); | 771 | xprt_release(task); |
772 | } | 772 | } |
773 | 773 | ||
@@ -779,7 +779,7 @@ call_reserveresult(struct rpc_task *task) | |||
779 | break; | 779 | break; |
780 | default: | 780 | default: |
781 | printk(KERN_ERR "%s: unrecognized error %d, exiting\n", | 781 | printk(KERN_ERR "%s: unrecognized error %d, exiting\n", |
782 | __FUNCTION__, status); | 782 | __func__, status); |
783 | break; | 783 | break; |
784 | } | 784 | } |
785 | rpc_exit(task, status); | 785 | rpc_exit(task, status); |
@@ -1327,7 +1327,7 @@ call_verify(struct rpc_task *task) | |||
1327 | * undefined results | 1327 | * undefined results |
1328 | */ | 1328 | */ |
1329 | dprintk("RPC: %5u %s: XDR representation not a multiple of" | 1329 | dprintk("RPC: %5u %s: XDR representation not a multiple of" |
1330 | " 4 bytes: 0x%x\n", task->tk_pid, __FUNCTION__, | 1330 | " 4 bytes: 0x%x\n", task->tk_pid, __func__, |
1331 | task->tk_rqstp->rq_rcv_buf.len); | 1331 | task->tk_rqstp->rq_rcv_buf.len); |
1332 | goto out_eio; | 1332 | goto out_eio; |
1333 | } | 1333 | } |
@@ -1337,7 +1337,7 @@ call_verify(struct rpc_task *task) | |||
1337 | 1337 | ||
1338 | if ((n = ntohl(*p++)) != RPC_REPLY) { | 1338 | if ((n = ntohl(*p++)) != RPC_REPLY) { |
1339 | dprintk("RPC: %5u %s: not an RPC reply: %x\n", | 1339 | dprintk("RPC: %5u %s: not an RPC reply: %x\n", |
1340 | task->tk_pid, __FUNCTION__, n); | 1340 | task->tk_pid, __func__, n); |
1341 | goto out_garbage; | 1341 | goto out_garbage; |
1342 | } | 1342 | } |
1343 | if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) { | 1343 | if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) { |
@@ -1349,13 +1349,13 @@ call_verify(struct rpc_task *task) | |||
1349 | case RPC_MISMATCH: | 1349 | case RPC_MISMATCH: |
1350 | dprintk("RPC: %5u %s: RPC call version " | 1350 | dprintk("RPC: %5u %s: RPC call version " |
1351 | "mismatch!\n", | 1351 | "mismatch!\n", |
1352 | task->tk_pid, __FUNCTION__); | 1352 | task->tk_pid, __func__); |
1353 | error = -EPROTONOSUPPORT; | 1353 | error = -EPROTONOSUPPORT; |
1354 | goto out_err; | 1354 | goto out_err; |
1355 | default: | 1355 | default: |
1356 | dprintk("RPC: %5u %s: RPC call rejected, " | 1356 | dprintk("RPC: %5u %s: RPC call rejected, " |
1357 | "unknown error: %x\n", | 1357 | "unknown error: %x\n", |
1358 | task->tk_pid, __FUNCTION__, n); | 1358 | task->tk_pid, __func__, n); |
1359 | goto out_eio; | 1359 | goto out_eio; |
1360 | } | 1360 | } |
1361 | if (--len < 0) | 1361 | if (--len < 0) |
@@ -1369,7 +1369,7 @@ call_verify(struct rpc_task *task) | |||
1369 | break; | 1369 | break; |
1370 | task->tk_cred_retry--; | 1370 | task->tk_cred_retry--; |
1371 | dprintk("RPC: %5u %s: retry stale creds\n", | 1371 | dprintk("RPC: %5u %s: retry stale creds\n", |
1372 | task->tk_pid, __FUNCTION__); | 1372 | task->tk_pid, __func__); |
1373 | rpcauth_invalcred(task); | 1373 | rpcauth_invalcred(task); |
1374 | /* Ensure we obtain a new XID! */ | 1374 | /* Ensure we obtain a new XID! */ |
1375 | xprt_release(task); | 1375 | xprt_release(task); |
@@ -1382,7 +1382,7 @@ call_verify(struct rpc_task *task) | |||
1382 | break; | 1382 | break; |
1383 | task->tk_garb_retry--; | 1383 | task->tk_garb_retry--; |
1384 | dprintk("RPC: %5u %s: retry garbled creds\n", | 1384 | dprintk("RPC: %5u %s: retry garbled creds\n", |
1385 | task->tk_pid, __FUNCTION__); | 1385 | task->tk_pid, __func__); |
1386 | task->tk_action = call_bind; | 1386 | task->tk_action = call_bind; |
1387 | goto out_retry; | 1387 | goto out_retry; |
1388 | case RPC_AUTH_TOOWEAK: | 1388 | case RPC_AUTH_TOOWEAK: |
@@ -1391,16 +1391,16 @@ call_verify(struct rpc_task *task) | |||
1391 | break; | 1391 | break; |
1392 | default: | 1392 | default: |
1393 | dprintk("RPC: %5u %s: unknown auth error: %x\n", | 1393 | dprintk("RPC: %5u %s: unknown auth error: %x\n", |
1394 | task->tk_pid, __FUNCTION__, n); | 1394 | task->tk_pid, __func__, n); |
1395 | error = -EIO; | 1395 | error = -EIO; |
1396 | } | 1396 | } |
1397 | dprintk("RPC: %5u %s: call rejected %d\n", | 1397 | dprintk("RPC: %5u %s: call rejected %d\n", |
1398 | task->tk_pid, __FUNCTION__, n); | 1398 | task->tk_pid, __func__, n); |
1399 | goto out_err; | 1399 | goto out_err; |
1400 | } | 1400 | } |
1401 | if (!(p = rpcauth_checkverf(task, p))) { | 1401 | if (!(p = rpcauth_checkverf(task, p))) { |
1402 | dprintk("RPC: %5u %s: auth check failed\n", | 1402 | dprintk("RPC: %5u %s: auth check failed\n", |
1403 | task->tk_pid, __FUNCTION__); | 1403 | task->tk_pid, __func__); |
1404 | goto out_garbage; /* bad verifier, retry */ | 1404 | goto out_garbage; /* bad verifier, retry */ |
1405 | } | 1405 | } |
1406 | len = p - (__be32 *)iov->iov_base - 1; | 1406 | len = p - (__be32 *)iov->iov_base - 1; |
@@ -1411,14 +1411,14 @@ call_verify(struct rpc_task *task) | |||
1411 | return p; | 1411 | return p; |
1412 | case RPC_PROG_UNAVAIL: | 1412 | case RPC_PROG_UNAVAIL: |
1413 | dprintk("RPC: %5u %s: program %u is unsupported by server %s\n", | 1413 | dprintk("RPC: %5u %s: program %u is unsupported by server %s\n", |
1414 | task->tk_pid, __FUNCTION__, | 1414 | task->tk_pid, __func__, |
1415 | (unsigned int)task->tk_client->cl_prog, | 1415 | (unsigned int)task->tk_client->cl_prog, |
1416 | task->tk_client->cl_server); | 1416 | task->tk_client->cl_server); |
1417 | error = -EPFNOSUPPORT; | 1417 | error = -EPFNOSUPPORT; |
1418 | goto out_err; | 1418 | goto out_err; |
1419 | case RPC_PROG_MISMATCH: | 1419 | case RPC_PROG_MISMATCH: |
1420 | dprintk("RPC: %5u %s: program %u, version %u unsupported by " | 1420 | dprintk("RPC: %5u %s: program %u, version %u unsupported by " |
1421 | "server %s\n", task->tk_pid, __FUNCTION__, | 1421 | "server %s\n", task->tk_pid, __func__, |
1422 | (unsigned int)task->tk_client->cl_prog, | 1422 | (unsigned int)task->tk_client->cl_prog, |
1423 | (unsigned int)task->tk_client->cl_vers, | 1423 | (unsigned int)task->tk_client->cl_vers, |
1424 | task->tk_client->cl_server); | 1424 | task->tk_client->cl_server); |
@@ -1427,7 +1427,7 @@ call_verify(struct rpc_task *task) | |||
1427 | case RPC_PROC_UNAVAIL: | 1427 | case RPC_PROC_UNAVAIL: |
1428 | dprintk("RPC: %5u %s: proc %p unsupported by program %u, " | 1428 | dprintk("RPC: %5u %s: proc %p unsupported by program %u, " |
1429 | "version %u on server %s\n", | 1429 | "version %u on server %s\n", |
1430 | task->tk_pid, __FUNCTION__, | 1430 | task->tk_pid, __func__, |
1431 | task->tk_msg.rpc_proc, | 1431 | task->tk_msg.rpc_proc, |
1432 | task->tk_client->cl_prog, | 1432 | task->tk_client->cl_prog, |
1433 | task->tk_client->cl_vers, | 1433 | task->tk_client->cl_vers, |
@@ -1436,11 +1436,11 @@ call_verify(struct rpc_task *task) | |||
1436 | goto out_err; | 1436 | goto out_err; |
1437 | case RPC_GARBAGE_ARGS: | 1437 | case RPC_GARBAGE_ARGS: |
1438 | dprintk("RPC: %5u %s: server saw garbage\n", | 1438 | dprintk("RPC: %5u %s: server saw garbage\n", |
1439 | task->tk_pid, __FUNCTION__); | 1439 | task->tk_pid, __func__); |
1440 | break; /* retry */ | 1440 | break; /* retry */ |
1441 | default: | 1441 | default: |
1442 | dprintk("RPC: %5u %s: server accept status: %x\n", | 1442 | dprintk("RPC: %5u %s: server accept status: %x\n", |
1443 | task->tk_pid, __FUNCTION__, n); | 1443 | task->tk_pid, __func__, n); |
1444 | /* Also retry */ | 1444 | /* Also retry */ |
1445 | } | 1445 | } |
1446 | 1446 | ||
@@ -1449,7 +1449,7 @@ out_garbage: | |||
1449 | if (task->tk_garb_retry) { | 1449 | if (task->tk_garb_retry) { |
1450 | task->tk_garb_retry--; | 1450 | task->tk_garb_retry--; |
1451 | dprintk("RPC: %5u %s: retrying\n", | 1451 | dprintk("RPC: %5u %s: retrying\n", |
1452 | task->tk_pid, __FUNCTION__); | 1452 | task->tk_pid, __func__); |
1453 | task->tk_action = call_bind; | 1453 | task->tk_action = call_bind; |
1454 | out_retry: | 1454 | out_retry: |
1455 | return ERR_PTR(-EAGAIN); | 1455 | return ERR_PTR(-EAGAIN); |
@@ -1459,11 +1459,11 @@ out_eio: | |||
1459 | out_err: | 1459 | out_err: |
1460 | rpc_exit(task, error); | 1460 | rpc_exit(task, error); |
1461 | dprintk("RPC: %5u %s: call failed with error %d\n", task->tk_pid, | 1461 | dprintk("RPC: %5u %s: call failed with error %d\n", task->tk_pid, |
1462 | __FUNCTION__, error); | 1462 | __func__, error); |
1463 | return ERR_PTR(error); | 1463 | return ERR_PTR(error); |
1464 | out_overflow: | 1464 | out_overflow: |
1465 | dprintk("RPC: %5u %s: server reply was truncated.\n", task->tk_pid, | 1465 | dprintk("RPC: %5u %s: server reply was truncated.\n", task->tk_pid, |
1466 | __FUNCTION__); | 1466 | __func__); |
1467 | goto out_garbage; | 1467 | goto out_garbage; |
1468 | } | 1468 | } |
1469 | 1469 | ||
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 1b395a41a8b2..5a9b0e7828cd 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c | |||
@@ -479,13 +479,13 @@ rpc_lookup_parent(char *path, struct nameidata *nd) | |||
479 | mnt = rpc_get_mount(); | 479 | mnt = rpc_get_mount(); |
480 | if (IS_ERR(mnt)) { | 480 | if (IS_ERR(mnt)) { |
481 | printk(KERN_WARNING "%s: %s failed to mount " | 481 | printk(KERN_WARNING "%s: %s failed to mount " |
482 | "pseudofilesystem \n", __FILE__, __FUNCTION__); | 482 | "pseudofilesystem \n", __FILE__, __func__); |
483 | return PTR_ERR(mnt); | 483 | return PTR_ERR(mnt); |
484 | } | 484 | } |
485 | 485 | ||
486 | if (vfs_path_lookup(mnt->mnt_root, mnt, path, LOOKUP_PARENT, nd)) { | 486 | if (vfs_path_lookup(mnt->mnt_root, mnt, path, LOOKUP_PARENT, nd)) { |
487 | printk(KERN_WARNING "%s: %s failed to find path %s\n", | 487 | printk(KERN_WARNING "%s: %s failed to find path %s\n", |
488 | __FILE__, __FUNCTION__, path); | 488 | __FILE__, __func__, path); |
489 | rpc_put_mount(); | 489 | rpc_put_mount(); |
490 | return -ENOENT; | 490 | return -ENOENT; |
491 | } | 491 | } |
@@ -604,7 +604,7 @@ rpc_populate(struct dentry *parent, | |||
604 | out_bad: | 604 | out_bad: |
605 | mutex_unlock(&dir->i_mutex); | 605 | mutex_unlock(&dir->i_mutex); |
606 | printk(KERN_WARNING "%s: %s failed to populate directory %s\n", | 606 | printk(KERN_WARNING "%s: %s failed to populate directory %s\n", |
607 | __FILE__, __FUNCTION__, parent->d_name.name); | 607 | __FILE__, __func__, parent->d_name.name); |
608 | return -ENOMEM; | 608 | return -ENOMEM; |
609 | } | 609 | } |
610 | 610 | ||
@@ -623,7 +623,7 @@ __rpc_mkdir(struct inode *dir, struct dentry *dentry) | |||
623 | return 0; | 623 | return 0; |
624 | out_err: | 624 | out_err: |
625 | printk(KERN_WARNING "%s: %s failed to allocate inode for dentry %s\n", | 625 | printk(KERN_WARNING "%s: %s failed to allocate inode for dentry %s\n", |
626 | __FILE__, __FUNCTION__, dentry->d_name.name); | 626 | __FILE__, __func__, dentry->d_name.name); |
627 | return -ENOMEM; | 627 | return -ENOMEM; |
628 | } | 628 | } |
629 | 629 | ||
@@ -715,7 +715,7 @@ err_depopulate: | |||
715 | err_dput: | 715 | err_dput: |
716 | dput(dentry); | 716 | dput(dentry); |
717 | printk(KERN_WARNING "%s: %s() failed to create directory %s (errno = %d)\n", | 717 | printk(KERN_WARNING "%s: %s() failed to create directory %s (errno = %d)\n", |
718 | __FILE__, __FUNCTION__, path, error); | 718 | __FILE__, __func__, path, error); |
719 | dentry = ERR_PTR(error); | 719 | dentry = ERR_PTR(error); |
720 | goto out; | 720 | goto out; |
721 | } | 721 | } |
@@ -804,7 +804,7 @@ err_dput: | |||
804 | dput(dentry); | 804 | dput(dentry); |
805 | dentry = ERR_PTR(-ENOMEM); | 805 | dentry = ERR_PTR(-ENOMEM); |
806 | printk(KERN_WARNING "%s: %s() failed to create pipe %s/%s (errno = %d)\n", | 806 | printk(KERN_WARNING "%s: %s() failed to create pipe %s/%s (errno = %d)\n", |
807 | __FILE__, __FUNCTION__, parent->d_name.name, name, | 807 | __FILE__, __func__, parent->d_name.name, name, |
808 | -ENOMEM); | 808 | -ENOMEM); |
809 | goto out; | 809 | goto out; |
810 | } | 810 | } |
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c index 3164a0871cf0..56aa018dce3a 100644 --- a/net/sunrpc/rpcb_clnt.c +++ b/net/sunrpc/rpcb_clnt.c | |||
@@ -224,7 +224,7 @@ int rpcb_getport_sync(struct sockaddr_in *sin, u32 prog, u32 vers, int prot) | |||
224 | int status; | 224 | int status; |
225 | 225 | ||
226 | dprintk("RPC: %s(" NIPQUAD_FMT ", %u, %u, %d)\n", | 226 | dprintk("RPC: %s(" NIPQUAD_FMT ", %u, %u, %d)\n", |
227 | __FUNCTION__, NIPQUAD(sin->sin_addr.s_addr), prog, vers, prot); | 227 | __func__, NIPQUAD(sin->sin_addr.s_addr), prog, vers, prot); |
228 | 228 | ||
229 | rpcb_clnt = rpcb_create(NULL, (struct sockaddr *)sin, | 229 | rpcb_clnt = rpcb_create(NULL, (struct sockaddr *)sin, |
230 | sizeof(*sin), prot, 2, 0); | 230 | sizeof(*sin), prot, 2, 0); |
@@ -283,7 +283,7 @@ void rpcb_getport_async(struct rpc_task *task) | |||
283 | struct rpcb_info *info; | 283 | struct rpcb_info *info; |
284 | 284 | ||
285 | dprintk("RPC: %5u %s(%s, %u, %u, %d)\n", | 285 | dprintk("RPC: %5u %s(%s, %u, %u, %d)\n", |
286 | task->tk_pid, __FUNCTION__, | 286 | task->tk_pid, __func__, |
287 | clnt->cl_server, clnt->cl_prog, clnt->cl_vers, xprt->prot); | 287 | clnt->cl_server, clnt->cl_prog, clnt->cl_vers, xprt->prot); |
288 | 288 | ||
289 | /* Autobind on cloned rpc clients is discouraged */ | 289 | /* Autobind on cloned rpc clients is discouraged */ |
@@ -292,7 +292,7 @@ void rpcb_getport_async(struct rpc_task *task) | |||
292 | if (xprt_test_and_set_binding(xprt)) { | 292 | if (xprt_test_and_set_binding(xprt)) { |
293 | status = -EAGAIN; /* tell caller to check again */ | 293 | status = -EAGAIN; /* tell caller to check again */ |
294 | dprintk("RPC: %5u %s: waiting for another binder\n", | 294 | dprintk("RPC: %5u %s: waiting for another binder\n", |
295 | task->tk_pid, __FUNCTION__); | 295 | task->tk_pid, __func__); |
296 | goto bailout_nowake; | 296 | goto bailout_nowake; |
297 | } | 297 | } |
298 | 298 | ||
@@ -304,7 +304,7 @@ void rpcb_getport_async(struct rpc_task *task) | |||
304 | if (xprt_bound(xprt)) { | 304 | if (xprt_bound(xprt)) { |
305 | status = 0; | 305 | status = 0; |
306 | dprintk("RPC: %5u %s: already bound\n", | 306 | dprintk("RPC: %5u %s: already bound\n", |
307 | task->tk_pid, __FUNCTION__); | 307 | task->tk_pid, __func__); |
308 | goto bailout_nofree; | 308 | goto bailout_nofree; |
309 | } | 309 | } |
310 | 310 | ||
@@ -321,27 +321,27 @@ void rpcb_getport_async(struct rpc_task *task) | |||
321 | default: | 321 | default: |
322 | status = -EAFNOSUPPORT; | 322 | status = -EAFNOSUPPORT; |
323 | dprintk("RPC: %5u %s: bad address family\n", | 323 | dprintk("RPC: %5u %s: bad address family\n", |
324 | task->tk_pid, __FUNCTION__); | 324 | task->tk_pid, __func__); |
325 | goto bailout_nofree; | 325 | goto bailout_nofree; |
326 | } | 326 | } |
327 | if (info[xprt->bind_index].rpc_proc == NULL) { | 327 | if (info[xprt->bind_index].rpc_proc == NULL) { |
328 | xprt->bind_index = 0; | 328 | xprt->bind_index = 0; |
329 | status = -EPFNOSUPPORT; | 329 | status = -EPFNOSUPPORT; |
330 | dprintk("RPC: %5u %s: no more getport versions available\n", | 330 | dprintk("RPC: %5u %s: no more getport versions available\n", |
331 | task->tk_pid, __FUNCTION__); | 331 | task->tk_pid, __func__); |
332 | goto bailout_nofree; | 332 | goto bailout_nofree; |
333 | } | 333 | } |
334 | bind_version = info[xprt->bind_index].rpc_vers; | 334 | bind_version = info[xprt->bind_index].rpc_vers; |
335 | 335 | ||
336 | dprintk("RPC: %5u %s: trying rpcbind version %u\n", | 336 | dprintk("RPC: %5u %s: trying rpcbind version %u\n", |
337 | task->tk_pid, __FUNCTION__, bind_version); | 337 | task->tk_pid, __func__, bind_version); |
338 | 338 | ||
339 | rpcb_clnt = rpcb_create(clnt->cl_server, sap, salen, xprt->prot, | 339 | rpcb_clnt = rpcb_create(clnt->cl_server, sap, salen, xprt->prot, |
340 | bind_version, 0); | 340 | bind_version, 0); |
341 | if (IS_ERR(rpcb_clnt)) { | 341 | if (IS_ERR(rpcb_clnt)) { |
342 | status = PTR_ERR(rpcb_clnt); | 342 | status = PTR_ERR(rpcb_clnt); |
343 | dprintk("RPC: %5u %s: rpcb_create failed, error %ld\n", | 343 | dprintk("RPC: %5u %s: rpcb_create failed, error %ld\n", |
344 | task->tk_pid, __FUNCTION__, PTR_ERR(rpcb_clnt)); | 344 | task->tk_pid, __func__, PTR_ERR(rpcb_clnt)); |
345 | goto bailout_nofree; | 345 | goto bailout_nofree; |
346 | } | 346 | } |
347 | 347 | ||
@@ -349,7 +349,7 @@ void rpcb_getport_async(struct rpc_task *task) | |||
349 | if (!map) { | 349 | if (!map) { |
350 | status = -ENOMEM; | 350 | status = -ENOMEM; |
351 | dprintk("RPC: %5u %s: no memory available\n", | 351 | dprintk("RPC: %5u %s: no memory available\n", |
352 | task->tk_pid, __FUNCTION__); | 352 | task->tk_pid, __func__); |
353 | goto bailout_nofree; | 353 | goto bailout_nofree; |
354 | } | 354 | } |
355 | map->r_prog = clnt->cl_prog; | 355 | map->r_prog = clnt->cl_prog; |
@@ -366,7 +366,7 @@ void rpcb_getport_async(struct rpc_task *task) | |||
366 | if (IS_ERR(child)) { | 366 | if (IS_ERR(child)) { |
367 | status = -EIO; | 367 | status = -EIO; |
368 | dprintk("RPC: %5u %s: rpc_run_task failed\n", | 368 | dprintk("RPC: %5u %s: rpc_run_task failed\n", |
369 | task->tk_pid, __FUNCTION__); | 369 | task->tk_pid, __func__); |
370 | goto bailout; | 370 | goto bailout; |
371 | } | 371 | } |
372 | rpc_put_task(child); | 372 | rpc_put_task(child); |
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 30e7ac243a90..613daf8c1ff7 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
@@ -1359,7 +1359,7 @@ static int xs_bind4(struct sock_xprt *transport, struct socket *sock) | |||
1359 | nloop++; | 1359 | nloop++; |
1360 | } while (err == -EADDRINUSE && nloop != 2); | 1360 | } while (err == -EADDRINUSE && nloop != 2); |
1361 | dprintk("RPC: %s "NIPQUAD_FMT":%u: %s (%d)\n", | 1361 | dprintk("RPC: %s "NIPQUAD_FMT":%u: %s (%d)\n", |
1362 | __FUNCTION__, NIPQUAD(myaddr.sin_addr), | 1362 | __func__, NIPQUAD(myaddr.sin_addr), |
1363 | port, err ? "failed" : "ok", err); | 1363 | port, err ? "failed" : "ok", err); |
1364 | return err; | 1364 | return err; |
1365 | } | 1365 | } |
diff --git a/net/tipc/core.c b/net/tipc/core.c index d2d7d32c02c7..740aac5cdfb6 100644 --- a/net/tipc/core.c +++ b/net/tipc/core.c | |||
@@ -48,16 +48,8 @@ | |||
48 | #include "subscr.h" | 48 | #include "subscr.h" |
49 | #include "config.h" | 49 | #include "config.h" |
50 | 50 | ||
51 | int tipc_eth_media_start(void); | ||
52 | void tipc_eth_media_stop(void); | ||
53 | int tipc_handler_start(void); | ||
54 | void tipc_handler_stop(void); | ||
55 | int tipc_socket_init(void); | ||
56 | void tipc_socket_stop(void); | ||
57 | int tipc_netlink_start(void); | ||
58 | void tipc_netlink_stop(void); | ||
59 | 51 | ||
60 | #define TIPC_MOD_VER "1.6.2" | 52 | #define TIPC_MOD_VER "1.6.3" |
61 | 53 | ||
62 | #ifndef CONFIG_TIPC_ZONES | 54 | #ifndef CONFIG_TIPC_ZONES |
63 | #define CONFIG_TIPC_ZONES 3 | 55 | #define CONFIG_TIPC_ZONES 3 |
@@ -277,7 +269,6 @@ EXPORT_SYMBOL(tipc_register_media); | |||
277 | /* TIPC API for external APIs (see tipc_port.h) */ | 269 | /* TIPC API for external APIs (see tipc_port.h) */ |
278 | 270 | ||
279 | EXPORT_SYMBOL(tipc_createport_raw); | 271 | EXPORT_SYMBOL(tipc_createport_raw); |
280 | EXPORT_SYMBOL(tipc_set_msg_option); | ||
281 | EXPORT_SYMBOL(tipc_reject_msg); | 272 | EXPORT_SYMBOL(tipc_reject_msg); |
282 | EXPORT_SYMBOL(tipc_send_buf_fast); | 273 | EXPORT_SYMBOL(tipc_send_buf_fast); |
283 | EXPORT_SYMBOL(tipc_acknowledge); | 274 | EXPORT_SYMBOL(tipc_acknowledge); |
diff --git a/net/tipc/core.h b/net/tipc/core.h index feabca580820..3fe9b70331d9 100644 --- a/net/tipc/core.h +++ b/net/tipc/core.h | |||
@@ -180,6 +180,12 @@ extern int tipc_core_start(void); | |||
180 | extern void tipc_core_stop(void); | 180 | extern void tipc_core_stop(void); |
181 | extern int tipc_core_start_net(void); | 181 | extern int tipc_core_start_net(void); |
182 | extern void tipc_core_stop_net(void); | 182 | extern void tipc_core_stop_net(void); |
183 | extern int tipc_handler_start(void); | ||
184 | extern void tipc_handler_stop(void); | ||
185 | extern int tipc_netlink_start(void); | ||
186 | extern void tipc_netlink_stop(void); | ||
187 | extern int tipc_socket_init(void); | ||
188 | extern void tipc_socket_stop(void); | ||
183 | 189 | ||
184 | static inline int delimit(int val, int min, int max) | 190 | static inline int delimit(int val, int min, int max) |
185 | { | 191 | { |
diff --git a/net/tipc/link.c b/net/tipc/link.c index cefa99824c58..a42f43430101 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c | |||
@@ -2832,15 +2832,15 @@ static void link_set_supervision_props(struct link *l_ptr, u32 tolerance) | |||
2832 | void tipc_link_set_queue_limits(struct link *l_ptr, u32 window) | 2832 | void tipc_link_set_queue_limits(struct link *l_ptr, u32 window) |
2833 | { | 2833 | { |
2834 | /* Data messages from this node, inclusive FIRST_FRAGM */ | 2834 | /* Data messages from this node, inclusive FIRST_FRAGM */ |
2835 | l_ptr->queue_limit[DATA_LOW] = window; | 2835 | l_ptr->queue_limit[TIPC_LOW_IMPORTANCE] = window; |
2836 | l_ptr->queue_limit[DATA_MEDIUM] = (window / 3) * 4; | 2836 | l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE] = (window / 3) * 4; |
2837 | l_ptr->queue_limit[DATA_HIGH] = (window / 3) * 5; | 2837 | l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE] = (window / 3) * 5; |
2838 | l_ptr->queue_limit[DATA_CRITICAL] = (window / 3) * 6; | 2838 | l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE] = (window / 3) * 6; |
2839 | /* Transiting data messages,inclusive FIRST_FRAGM */ | 2839 | /* Transiting data messages,inclusive FIRST_FRAGM */ |
2840 | l_ptr->queue_limit[DATA_LOW + 4] = 300; | 2840 | l_ptr->queue_limit[TIPC_LOW_IMPORTANCE + 4] = 300; |
2841 | l_ptr->queue_limit[DATA_MEDIUM + 4] = 600; | 2841 | l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE + 4] = 600; |
2842 | l_ptr->queue_limit[DATA_HIGH + 4] = 900; | 2842 | l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE + 4] = 900; |
2843 | l_ptr->queue_limit[DATA_CRITICAL + 4] = 1200; | 2843 | l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE + 4] = 1200; |
2844 | l_ptr->queue_limit[CONN_MANAGER] = 1200; | 2844 | l_ptr->queue_limit[CONN_MANAGER] = 1200; |
2845 | l_ptr->queue_limit[ROUTE_DISTRIBUTOR] = 1200; | 2845 | l_ptr->queue_limit[ROUTE_DISTRIBUTOR] = 1200; |
2846 | l_ptr->queue_limit[CHANGEOVER_PROTOCOL] = 2500; | 2846 | l_ptr->queue_limit[CHANGEOVER_PROTOCOL] = 2500; |
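The queue-limit hunk above only renames the DATA_* constants to the TIPC_*_IMPORTANCE ones; the scaling itself is unchanged: the low-importance limit is the congestion window, and the higher classes get roughly 4/3, 5/3 and 2 times that, computed with integer division. A quick standalone check of that arithmetic (illustrative only):

#include <stdio.h>

int main(void)
{
	unsigned int window = 50;	/* example send window */

	/* Same integer arithmetic as tipc_link_set_queue_limits(). */
	printf("low:      %u\n", window);		/* 50 */
	printf("medium:   %u\n", (window / 3) * 4);	/* 64, not 66: 50/3 truncates to 16 */
	printf("high:     %u\n", (window / 3) * 5);	/* 80 */
	printf("critical: %u\n", (window / 3) * 6);	/* 96 */
	return 0;
}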
diff --git a/net/tipc/msg.c b/net/tipc/msg.c index 782485468fb2..696a8633df75 100644 --- a/net/tipc/msg.c +++ b/net/tipc/msg.c | |||
@@ -73,10 +73,10 @@ void tipc_msg_print(struct print_buf *buf, struct tipc_msg *msg, const char *str | |||
73 | tipc_printf(buf, "NO(%u/%u):",msg_long_msgno(msg), | 73 | tipc_printf(buf, "NO(%u/%u):",msg_long_msgno(msg), |
74 | msg_fragm_no(msg)); | 74 | msg_fragm_no(msg)); |
75 | break; | 75 | break; |
76 | case DATA_LOW: | 76 | case TIPC_LOW_IMPORTANCE: |
77 | case DATA_MEDIUM: | 77 | case TIPC_MEDIUM_IMPORTANCE: |
78 | case DATA_HIGH: | 78 | case TIPC_HIGH_IMPORTANCE: |
79 | case DATA_CRITICAL: | 79 | case TIPC_CRITICAL_IMPORTANCE: |
80 | tipc_printf(buf, "DAT%u:", msg_user(msg)); | 80 | tipc_printf(buf, "DAT%u:", msg_user(msg)); |
81 | if (msg_short(msg)) { | 81 | if (msg_short(msg)) { |
82 | tipc_printf(buf, "CON:"); | 82 | tipc_printf(buf, "CON:"); |
@@ -229,10 +229,10 @@ void tipc_msg_print(struct print_buf *buf, struct tipc_msg *msg, const char *str | |||
229 | switch (usr) { | 229 | switch (usr) { |
230 | case CONN_MANAGER: | 230 | case CONN_MANAGER: |
231 | case NAME_DISTRIBUTOR: | 231 | case NAME_DISTRIBUTOR: |
232 | case DATA_LOW: | 232 | case TIPC_LOW_IMPORTANCE: |
233 | case DATA_MEDIUM: | 233 | case TIPC_MEDIUM_IMPORTANCE: |
234 | case DATA_HIGH: | 234 | case TIPC_HIGH_IMPORTANCE: |
235 | case DATA_CRITICAL: | 235 | case TIPC_CRITICAL_IMPORTANCE: |
236 | if (msg_short(msg)) | 236 | if (msg_short(msg)) |
237 | break; /* No error */ | 237 | break; /* No error */ |
238 | switch (msg_errcode(msg)) { | 238 | switch (msg_errcode(msg)) { |
diff --git a/net/tipc/msg.h b/net/tipc/msg.h index e9ef6df26562..6ad070d87702 100644 --- a/net/tipc/msg.h +++ b/net/tipc/msg.h | |||
@@ -40,18 +40,16 @@ | |||
40 | #include "core.h" | 40 | #include "core.h" |
41 | 41 | ||
42 | #define TIPC_VERSION 2 | 42 | #define TIPC_VERSION 2 |
43 | #define DATA_LOW TIPC_LOW_IMPORTANCE | 43 | |
44 | #define DATA_MEDIUM TIPC_MEDIUM_IMPORTANCE | 44 | #define SHORT_H_SIZE 24 /* Connected, in-cluster messages */ |
45 | #define DATA_HIGH TIPC_HIGH_IMPORTANCE | ||
46 | #define DATA_CRITICAL TIPC_CRITICAL_IMPORTANCE | ||
47 | #define SHORT_H_SIZE 24 /* Connected,in cluster */ | ||
48 | #define DIR_MSG_H_SIZE 32 /* Directly addressed messages */ | 45 | #define DIR_MSG_H_SIZE 32 /* Directly addressed messages */ |
49 | #define CONN_MSG_H_SIZE 36 /* Routed connected msgs*/ | 46 | #define LONG_H_SIZE 40 /* Named messages */ |
50 | #define LONG_H_SIZE 40 /* Named Messages */ | ||
51 | #define MCAST_H_SIZE 44 /* Multicast messages */ | 47 | #define MCAST_H_SIZE 44 /* Multicast messages */ |
52 | #define MAX_H_SIZE 60 /* Inclusive full options */ | 48 | #define INT_H_SIZE 40 /* Internal messages */ |
49 | #define MIN_H_SIZE 24 /* Smallest legal TIPC header size */ | ||
50 | #define MAX_H_SIZE 60 /* Largest possible TIPC header size */ | ||
51 | |||
53 | #define MAX_MSG_SIZE (MAX_H_SIZE + TIPC_MAX_USER_MSG_SIZE) | 52 | #define MAX_MSG_SIZE (MAX_H_SIZE + TIPC_MAX_USER_MSG_SIZE) |
54 | #define LINK_CONFIG 13 | ||
55 | 53 | ||
56 | 54 | ||
57 | /* | 55 | /* |
@@ -72,8 +70,10 @@ static inline void msg_set_bits(struct tipc_msg *m, u32 w, | |||
72 | u32 pos, u32 mask, u32 val) | 70 | u32 pos, u32 mask, u32 val) |
73 | { | 71 | { |
74 | val = (val & mask) << pos; | 72 | val = (val & mask) << pos; |
75 | m->hdr[w] &= ~htonl(mask << pos); | 73 | val = htonl(val); |
76 | m->hdr[w] |= htonl(val); | 74 | mask = htonl(mask << pos); |
75 | m->hdr[w] &= ~mask; | ||
76 | m->hdr[w] |= val; | ||
77 | } | 77 | } |
78 | 78 | ||
79 | /* | 79 | /* |
@@ -87,7 +87,7 @@ static inline u32 msg_version(struct tipc_msg *m) | |||
87 | 87 | ||
88 | static inline void msg_set_version(struct tipc_msg *m) | 88 | static inline void msg_set_version(struct tipc_msg *m) |
89 | { | 89 | { |
90 | msg_set_bits(m, 0, 29, 0xf, TIPC_VERSION); | 90 | msg_set_bits(m, 0, 29, 7, TIPC_VERSION); |
91 | } | 91 | } |
92 | 92 | ||
93 | static inline u32 msg_user(struct tipc_msg *m) | 93 | static inline u32 msg_user(struct tipc_msg *m) |
@@ -97,7 +97,7 @@ static inline u32 msg_user(struct tipc_msg *m) | |||
97 | 97 | ||
98 | static inline u32 msg_isdata(struct tipc_msg *m) | 98 | static inline u32 msg_isdata(struct tipc_msg *m) |
99 | { | 99 | { |
100 | return (msg_user(m) <= DATA_CRITICAL); | 100 | return (msg_user(m) <= TIPC_CRITICAL_IMPORTANCE); |
101 | } | 101 | } |
102 | 102 | ||
103 | static inline void msg_set_user(struct tipc_msg *m, u32 n) | 103 | static inline void msg_set_user(struct tipc_msg *m, u32 n) |
@@ -190,18 +190,6 @@ static inline void msg_set_lookup_scope(struct tipc_msg *m, u32 n) | |||
190 | msg_set_bits(m, 1, 19, 0x3, n); | 190 | msg_set_bits(m, 1, 19, 0x3, n); |
191 | } | 191 | } |
192 | 192 | ||
193 | static inline void msg_set_options(struct tipc_msg *m, const char *opt, u32 sz) | ||
194 | { | ||
195 | u32 hsz = msg_hdr_sz(m); | ||
196 | char *to = (char *)&m->hdr[hsz/4]; | ||
197 | |||
198 | if ((hsz < DIR_MSG_H_SIZE) || ((hsz + sz) > MAX_H_SIZE)) | ||
199 | return; | ||
200 | msg_set_bits(m, 1, 16, 0x7, (hsz - 28)/4); | ||
201 | msg_set_hdr_sz(m, hsz + sz); | ||
202 | memcpy(to, opt, sz); | ||
203 | } | ||
204 | |||
205 | static inline u32 msg_bcast_ack(struct tipc_msg *m) | 193 | static inline u32 msg_bcast_ack(struct tipc_msg *m) |
206 | { | 194 | { |
207 | return msg_bits(m, 1, 0, 0xffff); | 195 | return msg_bits(m, 1, 0, 0xffff); |
@@ -330,17 +318,6 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m) | |||
330 | return (struct tipc_msg *)msg_data(m); | 318 | return (struct tipc_msg *)msg_data(m); |
331 | } | 319 | } |
332 | 320 | ||
333 | static inline void msg_expand(struct tipc_msg *m, u32 destnode) | ||
334 | { | ||
335 | if (!msg_short(m)) | ||
336 | return; | ||
337 | msg_set_hdr_sz(m, LONG_H_SIZE); | ||
338 | msg_set_orignode(m, msg_prevnode(m)); | ||
339 | msg_set_destnode(m, destnode); | ||
340 | memset(&m->hdr[8], 0, 12); | ||
341 | } | ||
342 | |||
343 | |||
344 | 321 | ||
345 | /* | 322 | /* |
346 | TIPC internal message header format, version 2 | 323 | TIPC internal message header format, version 2 |
@@ -388,7 +365,6 @@ static inline void msg_expand(struct tipc_msg *m, u32 destnode) | |||
388 | #define NAME_DISTRIBUTOR 11 | 365 | #define NAME_DISTRIBUTOR 11 |
389 | #define MSG_FRAGMENTER 12 | 366 | #define MSG_FRAGMENTER 12 |
390 | #define LINK_CONFIG 13 | 367 | #define LINK_CONFIG 13 |
391 | #define INT_H_SIZE 40 | ||
392 | #define DSC_H_SIZE 40 | 368 | #define DSC_H_SIZE 40 |
393 | 369 | ||
394 | /* | 370 | /* |
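The msg_set_bits() rework above converts both the mask and the value to network byte order before touching the stored header word, so the read-modify-write happens entirely on big-endian data; the msg_set_version() change also tightens the mask from 0xf to 0x7, since a field starting at bit position 29 of a 32-bit word can only be 3 bits wide. A standalone sketch of the reworked helper (plain C, with a bare uint32_t header word standing in for struct tipc_msg; names here are illustrative, not the kernel API):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>	/* htonl()/ntohl() */

/* Set a (mask << pos) field inside a header word that is kept in
 * network byte order, mirroring the reworked msg_set_bits(). */
static void set_bits(uint32_t *hdr_word, uint32_t pos, uint32_t mask,
		     uint32_t val)
{
	val = htonl((val & mask) << pos);
	mask = htonl(mask << pos);
	*hdr_word &= ~mask;	/* clear the field */
	*hdr_word |= val;	/* insert the new value */
}

int main(void)
{
	uint32_t w = htonl(0xffffffff);	/* header word in network byte order */

	set_bits(&w, 29, 0x7, 2);	/* 3-bit version field at bit 29 */
	printf("host view: 0x%08x\n", ntohl(w));	/* prints 0x5fffffff */
	return 0;
}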
diff --git a/net/tipc/port.c b/net/tipc/port.c index f508614ca59b..e2646a96935d 100644 --- a/net/tipc/port.c +++ b/net/tipc/port.c | |||
@@ -242,7 +242,8 @@ u32 tipc_createport_raw(void *usr_handle, | |||
242 | p_ptr->publ.max_pkt = MAX_PKT_DEFAULT; | 242 | p_ptr->publ.max_pkt = MAX_PKT_DEFAULT; |
243 | p_ptr->publ.ref = ref; | 243 | p_ptr->publ.ref = ref; |
244 | msg = &p_ptr->publ.phdr; | 244 | msg = &p_ptr->publ.phdr; |
245 | msg_init(msg, DATA_LOW, TIPC_NAMED_MSG, TIPC_OK, LONG_H_SIZE, 0); | 245 | msg_init(msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG, TIPC_OK, LONG_H_SIZE, |
246 | 0); | ||
246 | msg_set_orignode(msg, tipc_own_addr); | 247 | msg_set_orignode(msg, tipc_own_addr); |
247 | msg_set_prevnode(msg, tipc_own_addr); | 248 | msg_set_prevnode(msg, tipc_own_addr); |
248 | msg_set_origport(msg, ref); | 249 | msg_set_origport(msg, ref); |
@@ -413,13 +414,6 @@ static struct sk_buff *port_build_proto_msg(u32 destport, u32 destnode, | |||
413 | return buf; | 414 | return buf; |
414 | } | 415 | } |
415 | 416 | ||
416 | int tipc_set_msg_option(struct tipc_port *tp_ptr, const char *opt, const u32 sz) | ||
417 | { | ||
418 | msg_expand(&tp_ptr->phdr, msg_destnode(&tp_ptr->phdr)); | ||
419 | msg_set_options(&tp_ptr->phdr, opt, sz); | ||
420 | return TIPC_OK; | ||
421 | } | ||
422 | |||
423 | int tipc_reject_msg(struct sk_buff *buf, u32 err) | 417 | int tipc_reject_msg(struct sk_buff *buf, u32 err) |
424 | { | 418 | { |
425 | struct tipc_msg *msg = buf_msg(buf); | 419 | struct tipc_msg *msg = buf_msg(buf); |
@@ -632,7 +626,7 @@ void tipc_port_recv_proto_msg(struct sk_buff *buf) | |||
632 | msg_orignode(msg), | 626 | msg_orignode(msg), |
633 | msg_destport(msg), | 627 | msg_destport(msg), |
634 | tipc_own_addr, | 628 | tipc_own_addr, |
635 | DATA_HIGH, | 629 | TIPC_HIGH_IMPORTANCE, |
636 | TIPC_CONN_MSG, | 630 | TIPC_CONN_MSG, |
637 | err, | 631 | err, |
638 | 0, | 632 | 0, |
diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 22909036b9bc..3220d5cb5b5d 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c | |||
@@ -43,7 +43,7 @@ | |||
43 | #include <linux/slab.h> | 43 | #include <linux/slab.h> |
44 | #include <linux/poll.h> | 44 | #include <linux/poll.h> |
45 | #include <linux/fcntl.h> | 45 | #include <linux/fcntl.h> |
46 | #include <asm/semaphore.h> | 46 | #include <linux/mutex.h> |
47 | #include <asm/string.h> | 47 | #include <asm/string.h> |
48 | #include <asm/atomic.h> | 48 | #include <asm/atomic.h> |
49 | #include <net/sock.h> | 49 | #include <net/sock.h> |
@@ -63,7 +63,7 @@ | |||
63 | struct tipc_sock { | 63 | struct tipc_sock { |
64 | struct sock sk; | 64 | struct sock sk; |
65 | struct tipc_port *p; | 65 | struct tipc_port *p; |
66 | struct semaphore sem; | 66 | struct mutex lock; |
67 | }; | 67 | }; |
68 | 68 | ||
69 | #define tipc_sk(sk) ((struct tipc_sock*)sk) | 69 | #define tipc_sk(sk) ((struct tipc_sock*)sk) |
@@ -217,7 +217,7 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol) | |||
217 | tsock->p = port; | 217 | tsock->p = port; |
218 | port->usr_handle = tsock; | 218 | port->usr_handle = tsock; |
219 | 219 | ||
220 | init_MUTEX(&tsock->sem); | 220 | mutex_init(&tsock->lock); |
221 | 221 | ||
222 | dbg("sock_create: %x\n",tsock); | 222 | dbg("sock_create: %x\n",tsock); |
223 | 223 | ||
@@ -253,9 +253,9 @@ static int release(struct socket *sock) | |||
253 | dbg("sock_delete: %x\n",tsock); | 253 | dbg("sock_delete: %x\n",tsock); |
254 | if (!tsock) | 254 | if (!tsock) |
255 | return 0; | 255 | return 0; |
256 | down(&tsock->sem); | 256 | mutex_lock(&tsock->lock); |
257 | if (!sock->sk) { | 257 | if (!sock->sk) { |
258 | up(&tsock->sem); | 258 | mutex_unlock(&tsock->lock); |
259 | return 0; | 259 | return 0; |
260 | } | 260 | } |
261 | 261 | ||
@@ -288,7 +288,7 @@ static int release(struct socket *sock) | |||
288 | atomic_dec(&tipc_queue_size); | 288 | atomic_dec(&tipc_queue_size); |
289 | } | 289 | } |
290 | 290 | ||
291 | up(&tsock->sem); | 291 | mutex_unlock(&tsock->lock); |
292 | 292 | ||
293 | sock_put(sk); | 293 | sock_put(sk); |
294 | 294 | ||
@@ -315,7 +315,7 @@ static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len) | |||
315 | struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr; | 315 | struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr; |
316 | int res; | 316 | int res; |
317 | 317 | ||
318 | if (down_interruptible(&tsock->sem)) | 318 | if (mutex_lock_interruptible(&tsock->lock)) |
319 | return -ERESTARTSYS; | 319 | return -ERESTARTSYS; |
320 | 320 | ||
321 | if (unlikely(!uaddr_len)) { | 321 | if (unlikely(!uaddr_len)) { |
@@ -346,7 +346,7 @@ static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len) | |||
346 | res = tipc_withdraw(tsock->p->ref, -addr->scope, | 346 | res = tipc_withdraw(tsock->p->ref, -addr->scope, |
347 | &addr->addr.nameseq); | 347 | &addr->addr.nameseq); |
348 | exit: | 348 | exit: |
349 | up(&tsock->sem); | 349 | mutex_unlock(&tsock->lock); |
350 | return res; | 350 | return res; |
351 | } | 351 | } |
352 | 352 | ||
@@ -367,7 +367,7 @@ static int get_name(struct socket *sock, struct sockaddr *uaddr, | |||
367 | struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr; | 367 | struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr; |
368 | u32 res; | 368 | u32 res; |
369 | 369 | ||
370 | if (down_interruptible(&tsock->sem)) | 370 | if (mutex_lock_interruptible(&tsock->lock)) |
371 | return -ERESTARTSYS; | 371 | return -ERESTARTSYS; |
372 | 372 | ||
373 | *uaddr_len = sizeof(*addr); | 373 | *uaddr_len = sizeof(*addr); |
@@ -380,7 +380,7 @@ static int get_name(struct socket *sock, struct sockaddr *uaddr, | |||
380 | res = tipc_ownidentity(tsock->p->ref, &addr->addr.id); | 380 | res = tipc_ownidentity(tsock->p->ref, &addr->addr.id); |
381 | addr->addr.name.domain = 0; | 381 | addr->addr.name.domain = 0; |
382 | 382 | ||
383 | up(&tsock->sem); | 383 | mutex_unlock(&tsock->lock); |
384 | return res; | 384 | return res; |
385 | } | 385 | } |
386 | 386 | ||
@@ -477,7 +477,7 @@ static int send_msg(struct kiocb *iocb, struct socket *sock, | |||
477 | } | 477 | } |
478 | } | 478 | } |
479 | 479 | ||
480 | if (down_interruptible(&tsock->sem)) | 480 | if (mutex_lock_interruptible(&tsock->lock)) |
481 | return -ERESTARTSYS; | 481 | return -ERESTARTSYS; |
482 | 482 | ||
483 | if (needs_conn) { | 483 | if (needs_conn) { |
@@ -523,7 +523,7 @@ static int send_msg(struct kiocb *iocb, struct socket *sock, | |||
523 | } | 523 | } |
524 | if (likely(res != -ELINKCONG)) { | 524 | if (likely(res != -ELINKCONG)) { |
525 | exit: | 525 | exit: |
526 | up(&tsock->sem); | 526 | mutex_unlock(&tsock->lock); |
527 | return res; | 527 | return res; |
528 | } | 528 | } |
529 | if (m->msg_flags & MSG_DONTWAIT) { | 529 | if (m->msg_flags & MSG_DONTWAIT) { |
@@ -562,7 +562,7 @@ static int send_packet(struct kiocb *iocb, struct socket *sock, | |||
562 | if (unlikely(dest)) | 562 | if (unlikely(dest)) |
563 | return send_msg(iocb, sock, m, total_len); | 563 | return send_msg(iocb, sock, m, total_len); |
564 | 564 | ||
565 | if (down_interruptible(&tsock->sem)) { | 565 | if (mutex_lock_interruptible(&tsock->lock)) { |
566 | return -ERESTARTSYS; | 566 | return -ERESTARTSYS; |
567 | } | 567 | } |
568 | 568 | ||
@@ -578,7 +578,7 @@ static int send_packet(struct kiocb *iocb, struct socket *sock, | |||
578 | res = tipc_send(tsock->p->ref, m->msg_iovlen, m->msg_iov); | 578 | res = tipc_send(tsock->p->ref, m->msg_iovlen, m->msg_iov); |
579 | if (likely(res != -ELINKCONG)) { | 579 | if (likely(res != -ELINKCONG)) { |
580 | exit: | 580 | exit: |
581 | up(&tsock->sem); | 581 | mutex_unlock(&tsock->lock); |
582 | return res; | 582 | return res; |
583 | } | 583 | } |
584 | if (m->msg_flags & MSG_DONTWAIT) { | 584 | if (m->msg_flags & MSG_DONTWAIT) { |
@@ -846,7 +846,7 @@ static int recv_msg(struct kiocb *iocb, struct socket *sock, | |||
846 | 846 | ||
847 | /* Look for a message in receive queue; wait if necessary */ | 847 | /* Look for a message in receive queue; wait if necessary */ |
848 | 848 | ||
849 | if (unlikely(down_interruptible(&tsock->sem))) | 849 | if (unlikely(mutex_lock_interruptible(&tsock->lock))) |
850 | return -ERESTARTSYS; | 850 | return -ERESTARTSYS; |
851 | 851 | ||
852 | restart: | 852 | restart: |
@@ -930,7 +930,7 @@ restart: | |||
930 | advance_queue(tsock); | 930 | advance_queue(tsock); |
931 | } | 931 | } |
932 | exit: | 932 | exit: |
933 | up(&tsock->sem); | 933 | mutex_unlock(&tsock->lock); |
934 | return res; | 934 | return res; |
935 | } | 935 | } |
936 | 936 | ||
@@ -981,7 +981,7 @@ static int recv_stream(struct kiocb *iocb, struct socket *sock, | |||
981 | 981 | ||
982 | /* Look for a message in receive queue; wait if necessary */ | 982 | /* Look for a message in receive queue; wait if necessary */ |
983 | 983 | ||
984 | if (unlikely(down_interruptible(&tsock->sem))) | 984 | if (unlikely(mutex_lock_interruptible(&tsock->lock))) |
985 | return -ERESTARTSYS; | 985 | return -ERESTARTSYS; |
986 | 986 | ||
987 | restart: | 987 | restart: |
@@ -1077,7 +1077,7 @@ restart: | |||
1077 | goto restart; | 1077 | goto restart; |
1078 | 1078 | ||
1079 | exit: | 1079 | exit: |
1080 | up(&tsock->sem); | 1080 | mutex_unlock(&tsock->lock); |
1081 | return sz_copied ? sz_copied : res; | 1081 | return sz_copied ? sz_copied : res; |
1082 | } | 1082 | } |
1083 | 1083 | ||
@@ -1293,7 +1293,7 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen, | |||
1293 | return res; | 1293 | return res; |
1294 | } | 1294 | } |
1295 | 1295 | ||
1296 | if (down_interruptible(&tsock->sem)) | 1296 | if (mutex_lock_interruptible(&tsock->lock)) |
1297 | return -ERESTARTSYS; | 1297 | return -ERESTARTSYS; |
1298 | 1298 | ||
1299 | /* Wait for destination's 'ACK' response */ | 1299 | /* Wait for destination's 'ACK' response */ |
@@ -1317,7 +1317,7 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen, | |||
1317 | sock->state = SS_DISCONNECTING; | 1317 | sock->state = SS_DISCONNECTING; |
1318 | } | 1318 | } |
1319 | 1319 | ||
1320 | up(&tsock->sem); | 1320 | mutex_unlock(&tsock->lock); |
1321 | return res; | 1321 | return res; |
1322 | } | 1322 | } |
1323 | 1323 | ||
@@ -1365,7 +1365,7 @@ static int accept(struct socket *sock, struct socket *newsock, int flags) | |||
1365 | (flags & O_NONBLOCK))) | 1365 | (flags & O_NONBLOCK))) |
1366 | return -EWOULDBLOCK; | 1366 | return -EWOULDBLOCK; |
1367 | 1367 | ||
1368 | if (down_interruptible(&tsock->sem)) | 1368 | if (mutex_lock_interruptible(&tsock->lock)) |
1369 | return -ERESTARTSYS; | 1369 | return -ERESTARTSYS; |
1370 | 1370 | ||
1371 | if (wait_event_interruptible(*sock->sk->sk_sleep, | 1371 | if (wait_event_interruptible(*sock->sk->sk_sleep, |
@@ -1412,14 +1412,14 @@ static int accept(struct socket *sock, struct socket *newsock, int flags) | |||
1412 | } | 1412 | } |
1413 | } | 1413 | } |
1414 | exit: | 1414 | exit: |
1415 | up(&tsock->sem); | 1415 | mutex_unlock(&tsock->lock); |
1416 | return res; | 1416 | return res; |
1417 | } | 1417 | } |
1418 | 1418 | ||
1419 | /** | 1419 | /** |
1420 | * shutdown - shutdown socket connection | 1420 | * shutdown - shutdown socket connection |
1421 | * @sock: socket structure | 1421 | * @sock: socket structure |
1422 | * @how: direction to close (unused; always treated as read + write) | 1422 | * @how: direction to close (must be SHUT_RDWR) |
1423 | * | 1423 | * |
1424 | * Terminates connection (if necessary), then purges socket's receive queue. | 1424 | * Terminates connection (if necessary), then purges socket's receive queue. |
1425 | * | 1425 | * |
@@ -1432,9 +1432,10 @@ static int shutdown(struct socket *sock, int how) | |||
1432 | struct sk_buff *buf; | 1432 | struct sk_buff *buf; |
1433 | int res; | 1433 | int res; |
1434 | 1434 | ||
1435 | /* Could return -EINVAL for an invalid "how", but why bother? */ | 1435 | if (how != SHUT_RDWR) |
1436 | return -EINVAL; | ||
1436 | 1437 | ||
1437 | if (down_interruptible(&tsock->sem)) | 1438 | if (mutex_lock_interruptible(&tsock->lock)) |
1438 | return -ERESTARTSYS; | 1439 | return -ERESTARTSYS; |
1439 | 1440 | ||
1440 | sock_lock(tsock); | 1441 | sock_lock(tsock); |
@@ -1484,7 +1485,7 @@ restart: | |||
1484 | 1485 | ||
1485 | sock_unlock(tsock); | 1486 | sock_unlock(tsock); |
1486 | 1487 | ||
1487 | up(&tsock->sem); | 1488 | mutex_unlock(&tsock->lock); |
1488 | return res; | 1489 | return res; |
1489 | } | 1490 | } |
1490 | 1491 | ||
@@ -1518,7 +1519,7 @@ static int setsockopt(struct socket *sock, | |||
1518 | if ((res = get_user(value, (u32 __user *)ov))) | 1519 | if ((res = get_user(value, (u32 __user *)ov))) |
1519 | return res; | 1520 | return res; |
1520 | 1521 | ||
1521 | if (down_interruptible(&tsock->sem)) | 1522 | if (mutex_lock_interruptible(&tsock->lock)) |
1522 | return -ERESTARTSYS; | 1523 | return -ERESTARTSYS; |
1523 | 1524 | ||
1524 | switch (opt) { | 1525 | switch (opt) { |
@@ -1541,7 +1542,7 @@ static int setsockopt(struct socket *sock, | |||
1541 | res = -EINVAL; | 1542 | res = -EINVAL; |
1542 | } | 1543 | } |
1543 | 1544 | ||
1544 | up(&tsock->sem); | 1545 | mutex_unlock(&tsock->lock); |
1545 | return res; | 1546 | return res; |
1546 | } | 1547 | } |
1547 | 1548 | ||
@@ -1574,7 +1575,7 @@ static int getsockopt(struct socket *sock, | |||
1574 | if ((res = get_user(len, ol))) | 1575 | if ((res = get_user(len, ol))) |
1575 | return res; | 1576 | return res; |
1576 | 1577 | ||
1577 | if (down_interruptible(&tsock->sem)) | 1578 | if (mutex_lock_interruptible(&tsock->lock)) |
1578 | return -ERESTARTSYS; | 1579 | return -ERESTARTSYS; |
1579 | 1580 | ||
1580 | switch (opt) { | 1581 | switch (opt) { |
@@ -1607,7 +1608,7 @@ static int getsockopt(struct socket *sock, | |||
1607 | res = put_user(sizeof(value), ol); | 1608 | res = put_user(sizeof(value), ol); |
1608 | } | 1609 | } |
1609 | 1610 | ||
1610 | up(&tsock->sem); | 1611 | mutex_unlock(&tsock->lock); |
1611 | return res; | 1612 | return res; |
1612 | } | 1613 | } |
1613 | 1614 | ||
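The tipc/socket.c hunks above convert the per-socket semaphore used as a sleeping lock into a struct mutex: init_MUTEX()/down_interruptible()/up() become mutex_init()/mutex_lock_interruptible()/mutex_unlock(), with -ERESTARTSYS still returned when the caller is interrupted while waiting. The locking discipline itself is unchanged: take the lock on entry, release it on every exit path. A userspace analogue of that discipline using POSIX mutexes (pthread_mutex has no interruptible variant, so that part is only hinted at in a comment):

#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t sock_lock = PTHREAD_MUTEX_INITIALIZER;
static int queue_len;

/* Mirrors the pattern in the converted TIPC socket ops: lock, do the
 * work, unlock on every return path.  In the kernel the lock call is
 * mutex_lock_interruptible() and a pending signal yields -ERESTARTSYS. */
static int sock_op(int add)
{
	int res = 0;

	pthread_mutex_lock(&sock_lock);
	if (add < 0) {
		res = -1;	/* error path still unlocks below */
		goto out;
	}
	queue_len += add;
out:
	pthread_mutex_unlock(&sock_lock);
	return res;
}

int main(void)
{
	sock_op(3);
	sock_op(-1);
	printf("queue_len = %d\n", queue_len);	/* 3 */
	return 0;
}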
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index b8788fd5e3c6..ae584356852c 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c | |||
@@ -2176,7 +2176,7 @@ static int __init af_unix_init(void) | |||
2176 | rc = proto_register(&unix_proto, 1); | 2176 | rc = proto_register(&unix_proto, 1); |
2177 | if (rc != 0) { | 2177 | if (rc != 0) { |
2178 | printk(KERN_CRIT "%s: Cannot create unix_sock SLAB cache!\n", | 2178 | printk(KERN_CRIT "%s: Cannot create unix_sock SLAB cache!\n", |
2179 | __FUNCTION__); | 2179 | __func__); |
2180 | goto out; | 2180 | goto out; |
2181 | } | 2181 | } |
2182 | 2182 | ||
diff --git a/net/wireless/Makefile b/net/wireless/Makefile index 65710a42e5a7..b9f943c45f3b 100644 --- a/net/wireless/Makefile +++ b/net/wireless/Makefile | |||
@@ -1,5 +1,5 @@ | |||
1 | obj-$(CONFIG_WIRELESS_EXT) += wext.o | 1 | obj-$(CONFIG_WIRELESS_EXT) += wext.o |
2 | obj-$(CONFIG_CFG80211) += cfg80211.o | 2 | obj-$(CONFIG_CFG80211) += cfg80211.o |
3 | 3 | ||
4 | cfg80211-y += core.o sysfs.o radiotap.o | 4 | cfg80211-y += core.o sysfs.o radiotap.o util.o reg.o |
5 | cfg80211-$(CONFIG_NL80211) += nl80211.o | 5 | cfg80211-$(CONFIG_NL80211) += nl80211.o |
diff --git a/net/wireless/core.c b/net/wireless/core.c index cfc5fc5f9e75..80afacdae46c 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c | |||
@@ -232,6 +232,47 @@ int wiphy_register(struct wiphy *wiphy) | |||
232 | { | 232 | { |
233 | struct cfg80211_registered_device *drv = wiphy_to_dev(wiphy); | 233 | struct cfg80211_registered_device *drv = wiphy_to_dev(wiphy); |
234 | int res; | 234 | int res; |
235 | enum ieee80211_band band; | ||
236 | struct ieee80211_supported_band *sband; | ||
237 | bool have_band = false; | ||
238 | int i; | ||
239 | |||
240 | /* sanity check supported bands/channels */ | ||
241 | for (band = 0; band < IEEE80211_NUM_BANDS; band++) { | ||
242 | sband = wiphy->bands[band]; | ||
243 | if (!sband) | ||
244 | continue; | ||
245 | |||
246 | sband->band = band; | ||
247 | |||
248 | if (!sband->n_channels || !sband->n_bitrates) { | ||
249 | WARN_ON(1); | ||
250 | return -EINVAL; | ||
251 | } | ||
252 | |||
253 | for (i = 0; i < sband->n_channels; i++) { | ||
254 | sband->channels[i].orig_flags = | ||
255 | sband->channels[i].flags; | ||
256 | sband->channels[i].orig_mag = | ||
257 | sband->channels[i].max_antenna_gain; | ||
258 | sband->channels[i].orig_mpwr = | ||
259 | sband->channels[i].max_power; | ||
260 | sband->channels[i].band = band; | ||
261 | } | ||
262 | |||
263 | have_band = true; | ||
264 | } | ||
265 | |||
266 | if (!have_band) { | ||
267 | WARN_ON(1); | ||
268 | return -EINVAL; | ||
269 | } | ||
270 | |||
271 | /* check and set up bitrates */ | ||
272 | ieee80211_set_bitrate_flags(wiphy); | ||
273 | |||
274 | /* set up regulatory info */ | ||
275 | wiphy_update_regulatory(wiphy); | ||
235 | 276 | ||
236 | mutex_lock(&cfg80211_drv_mutex); | 277 | mutex_lock(&cfg80211_drv_mutex); |
237 | 278 | ||
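wiphy_register() now refuses to register a wiphy whose band table is inconsistent: every present band must advertise at least one channel and one bitrate, the original channel flags/gain/power are saved for later regulatory updates, and at least one band must exist at all. A standalone sketch of that validation shape over an array of optional descriptors (generic C; the struct and field names here are illustrative, not the cfg80211 ones):

#include <stdio.h>
#include <stddef.h>

#define NUM_BANDS 2

struct band {
	int n_channels;
	int n_bitrates;
};

/* Validate an array of optional band pointers: every non-NULL entry
 * must be populated, and at least one entry must be present. */
static int check_bands(struct band *bands[NUM_BANDS])
{
	int have_band = 0;

	for (int b = 0; b < NUM_BANDS; b++) {
		if (!bands[b])
			continue;
		if (!bands[b]->n_channels || !bands[b]->n_bitrates)
			return -1;	/* present but empty: reject */
		have_band = 1;
	}
	return have_band ? 0 : -1;	/* no bands at all: reject */
}

int main(void)
{
	struct band g = { .n_channels = 14, .n_bitrates = 12 };
	struct band *bands[NUM_BANDS] = { &g, NULL };

	printf("register: %s\n", check_bands(bands) ? "rejected" : "ok");
	return 0;
}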
diff --git a/net/wireless/core.h b/net/wireless/core.h index eb0f846b40df..7a02c356d63d 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h | |||
@@ -78,4 +78,7 @@ extern void cfg80211_dev_free(struct cfg80211_registered_device *drv); | |||
78 | extern int cfg80211_dev_rename(struct cfg80211_registered_device *drv, | 78 | extern int cfg80211_dev_rename(struct cfg80211_registered_device *drv, |
79 | char *newname); | 79 | char *newname); |
80 | 80 | ||
81 | void ieee80211_set_bitrate_flags(struct wiphy *wiphy); | ||
82 | void wiphy_update_regulatory(struct wiphy *wiphy); | ||
83 | |||
81 | #endif /* __NET_WIRELESS_CORE_H */ | 84 | #endif /* __NET_WIRELESS_CORE_H */ |
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index e3a214f63f91..64a7460af734 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -81,7 +81,12 @@ static struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] __read_mostly = { | |||
81 | [NL80211_ATTR_STA_LISTEN_INTERVAL] = { .type = NLA_U16 }, | 81 | [NL80211_ATTR_STA_LISTEN_INTERVAL] = { .type = NLA_U16 }, |
82 | [NL80211_ATTR_STA_SUPPORTED_RATES] = { .type = NLA_BINARY, | 82 | [NL80211_ATTR_STA_SUPPORTED_RATES] = { .type = NLA_BINARY, |
83 | .len = NL80211_MAX_SUPP_RATES }, | 83 | .len = NL80211_MAX_SUPP_RATES }, |
84 | [NL80211_ATTR_STA_PLINK_ACTION] = { .type = NLA_U8 }, | ||
84 | [NL80211_ATTR_STA_VLAN] = { .type = NLA_U32 }, | 85 | [NL80211_ATTR_STA_VLAN] = { .type = NLA_U32 }, |
86 | [NL80211_ATTR_MNTR_FLAGS] = { .type = NLA_NESTED }, | ||
87 | [NL80211_ATTR_MESH_ID] = { .type = NLA_BINARY, | ||
88 | .len = IEEE80211_MAX_MESH_ID_LEN }, | ||
89 | [NL80211_ATTR_MPATH_NEXT_HOP] = { .type = NLA_U32 }, | ||
85 | }; | 90 | }; |
86 | 91 | ||
87 | /* message building helper */ | 92 | /* message building helper */ |
@@ -98,6 +103,13 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags, | |||
98 | struct cfg80211_registered_device *dev) | 103 | struct cfg80211_registered_device *dev) |
99 | { | 104 | { |
100 | void *hdr; | 105 | void *hdr; |
106 | struct nlattr *nl_bands, *nl_band; | ||
107 | struct nlattr *nl_freqs, *nl_freq; | ||
108 | struct nlattr *nl_rates, *nl_rate; | ||
109 | enum ieee80211_band band; | ||
110 | struct ieee80211_channel *chan; | ||
111 | struct ieee80211_rate *rate; | ||
112 | int i; | ||
101 | 113 | ||
102 | hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_WIPHY); | 114 | hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_WIPHY); |
103 | if (!hdr) | 115 | if (!hdr) |
@@ -105,6 +117,73 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags, | |||
105 | 117 | ||
106 | NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, dev->idx); | 118 | NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, dev->idx); |
107 | NLA_PUT_STRING(msg, NL80211_ATTR_WIPHY_NAME, wiphy_name(&dev->wiphy)); | 119 | NLA_PUT_STRING(msg, NL80211_ATTR_WIPHY_NAME, wiphy_name(&dev->wiphy)); |
120 | |||
121 | nl_bands = nla_nest_start(msg, NL80211_ATTR_WIPHY_BANDS); | ||
122 | if (!nl_bands) | ||
123 | goto nla_put_failure; | ||
124 | |||
125 | for (band = 0; band < IEEE80211_NUM_BANDS; band++) { | ||
126 | if (!dev->wiphy.bands[band]) | ||
127 | continue; | ||
128 | |||
129 | nl_band = nla_nest_start(msg, band); | ||
130 | if (!nl_band) | ||
131 | goto nla_put_failure; | ||
132 | |||
133 | /* add frequencies */ | ||
134 | nl_freqs = nla_nest_start(msg, NL80211_BAND_ATTR_FREQS); | ||
135 | if (!nl_freqs) | ||
136 | goto nla_put_failure; | ||
137 | |||
138 | for (i = 0; i < dev->wiphy.bands[band]->n_channels; i++) { | ||
139 | nl_freq = nla_nest_start(msg, i); | ||
140 | if (!nl_freq) | ||
141 | goto nla_put_failure; | ||
142 | |||
143 | chan = &dev->wiphy.bands[band]->channels[i]; | ||
144 | NLA_PUT_U32(msg, NL80211_FREQUENCY_ATTR_FREQ, | ||
145 | chan->center_freq); | ||
146 | |||
147 | if (chan->flags & IEEE80211_CHAN_DISABLED) | ||
148 | NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_DISABLED); | ||
149 | if (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN) | ||
150 | NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_PASSIVE_SCAN); | ||
151 | if (chan->flags & IEEE80211_CHAN_NO_IBSS) | ||
152 | NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_NO_IBSS); | ||
153 | if (chan->flags & IEEE80211_CHAN_RADAR) | ||
154 | NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_RADAR); | ||
155 | |||
156 | nla_nest_end(msg, nl_freq); | ||
157 | } | ||
158 | |||
159 | nla_nest_end(msg, nl_freqs); | ||
160 | |||
161 | /* add bitrates */ | ||
162 | nl_rates = nla_nest_start(msg, NL80211_BAND_ATTR_RATES); | ||
163 | if (!nl_rates) | ||
164 | goto nla_put_failure; | ||
165 | |||
166 | for (i = 0; i < dev->wiphy.bands[band]->n_bitrates; i++) { | ||
167 | nl_rate = nla_nest_start(msg, i); | ||
168 | if (!nl_rate) | ||
169 | goto nla_put_failure; | ||
170 | |||
171 | rate = &dev->wiphy.bands[band]->bitrates[i]; | ||
172 | NLA_PUT_U32(msg, NL80211_BITRATE_ATTR_RATE, | ||
173 | rate->bitrate); | ||
174 | if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE) | ||
175 | NLA_PUT_FLAG(msg, | ||
176 | NL80211_BITRATE_ATTR_2GHZ_SHORTPREAMBLE); | ||
177 | |||
178 | nla_nest_end(msg, nl_rate); | ||
179 | } | ||
180 | |||
181 | nla_nest_end(msg, nl_rates); | ||
182 | |||
183 | nla_nest_end(msg, nl_band); | ||
184 | } | ||
185 | nla_nest_end(msg, nl_bands); | ||
186 | |||
108 | return genlmsg_end(msg, hdr); | 187 | return genlmsg_end(msg, hdr); |
109 | 188 | ||
110 | nla_put_failure: | 189 | nla_put_failure: |
@@ -262,12 +341,45 @@ static int nl80211_get_interface(struct sk_buff *skb, struct genl_info *info) | |||
262 | return -ENOBUFS; | 341 | return -ENOBUFS; |
263 | } | 342 | } |
264 | 343 | ||
344 | static const struct nla_policy mntr_flags_policy[NL80211_MNTR_FLAG_MAX + 1] = { | ||
345 | [NL80211_MNTR_FLAG_FCSFAIL] = { .type = NLA_FLAG }, | ||
346 | [NL80211_MNTR_FLAG_PLCPFAIL] = { .type = NLA_FLAG }, | ||
347 | [NL80211_MNTR_FLAG_CONTROL] = { .type = NLA_FLAG }, | ||
348 | [NL80211_MNTR_FLAG_OTHER_BSS] = { .type = NLA_FLAG }, | ||
349 | [NL80211_MNTR_FLAG_COOK_FRAMES] = { .type = NLA_FLAG }, | ||
350 | }; | ||
351 | |||
352 | static int parse_monitor_flags(struct nlattr *nla, u32 *mntrflags) | ||
353 | { | ||
354 | struct nlattr *flags[NL80211_MNTR_FLAG_MAX + 1]; | ||
355 | int flag; | ||
356 | |||
357 | *mntrflags = 0; | ||
358 | |||
359 | if (!nla) | ||
360 | return -EINVAL; | ||
361 | |||
362 | if (nla_parse_nested(flags, NL80211_MNTR_FLAG_MAX, | ||
363 | nla, mntr_flags_policy)) | ||
364 | return -EINVAL; | ||
365 | |||
366 | for (flag = 1; flag <= NL80211_MNTR_FLAG_MAX; flag++) | ||
367 | if (flags[flag]) | ||
368 | *mntrflags |= (1<<flag); | ||
369 | |||
370 | return 0; | ||
371 | } | ||
372 | |||
265 | static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info) | 373 | static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info) |
266 | { | 374 | { |
267 | struct cfg80211_registered_device *drv; | 375 | struct cfg80211_registered_device *drv; |
376 | struct vif_params params; | ||
268 | int err, ifindex; | 377 | int err, ifindex; |
269 | enum nl80211_iftype type; | 378 | enum nl80211_iftype type; |
270 | struct net_device *dev; | 379 | struct net_device *dev; |
380 | u32 flags; | ||
381 | |||
382 | memset(¶ms, 0, sizeof(params)); | ||
271 | 383 | ||
272 | if (info->attrs[NL80211_ATTR_IFTYPE]) { | 384 | if (info->attrs[NL80211_ATTR_IFTYPE]) { |
273 | type = nla_get_u32(info->attrs[NL80211_ATTR_IFTYPE]); | 385 | type = nla_get_u32(info->attrs[NL80211_ATTR_IFTYPE]); |
@@ -287,8 +399,18 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info) | |||
287 | goto unlock; | 399 | goto unlock; |
288 | } | 400 | } |
289 | 401 | ||
402 | if (type == NL80211_IFTYPE_MESH_POINT && | ||
403 | info->attrs[NL80211_ATTR_MESH_ID]) { | ||
404 | params.mesh_id = nla_data(info->attrs[NL80211_ATTR_MESH_ID]); | ||
405 | params.mesh_id_len = nla_len(info->attrs[NL80211_ATTR_MESH_ID]); | ||
406 | } | ||
407 | |||
290 | rtnl_lock(); | 408 | rtnl_lock(); |
291 | err = drv->ops->change_virtual_intf(&drv->wiphy, ifindex, type); | 409 | err = parse_monitor_flags(type == NL80211_IFTYPE_MONITOR ? |
410 | info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL, | ||
411 | &flags); | ||
412 | err = drv->ops->change_virtual_intf(&drv->wiphy, ifindex, | ||
413 | type, err ? NULL : &flags, ¶ms); | ||
292 | rtnl_unlock(); | 414 | rtnl_unlock(); |
293 | 415 | ||
294 | unlock: | 416 | unlock: |
@@ -299,8 +421,12 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info) | |||
299 | static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info) | 421 | static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info) |
300 | { | 422 | { |
301 | struct cfg80211_registered_device *drv; | 423 | struct cfg80211_registered_device *drv; |
424 | struct vif_params params; | ||
302 | int err; | 425 | int err; |
303 | enum nl80211_iftype type = NL80211_IFTYPE_UNSPECIFIED; | 426 | enum nl80211_iftype type = NL80211_IFTYPE_UNSPECIFIED; |
427 | u32 flags; | ||
428 | |||
429 | memset(¶ms, 0, sizeof(params)); | ||
304 | 430 | ||
305 | if (!info->attrs[NL80211_ATTR_IFNAME]) | 431 | if (!info->attrs[NL80211_ATTR_IFNAME]) |
306 | return -EINVAL; | 432 | return -EINVAL; |
@@ -320,11 +446,22 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info) | |||
320 | goto unlock; | 446 | goto unlock; |
321 | } | 447 | } |
322 | 448 | ||
449 | if (type == NL80211_IFTYPE_MESH_POINT && | ||
450 | info->attrs[NL80211_ATTR_MESH_ID]) { | ||
451 | params.mesh_id = nla_data(info->attrs[NL80211_ATTR_MESH_ID]); | ||
452 | params.mesh_id_len = nla_len(info->attrs[NL80211_ATTR_MESH_ID]); | ||
453 | } | ||
454 | |||
323 | rtnl_lock(); | 455 | rtnl_lock(); |
456 | err = parse_monitor_flags(type == NL80211_IFTYPE_MONITOR ? | ||
457 | info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL, | ||
458 | &flags); | ||
324 | err = drv->ops->add_virtual_intf(&drv->wiphy, | 459 | err = drv->ops->add_virtual_intf(&drv->wiphy, |
325 | nla_data(info->attrs[NL80211_ATTR_IFNAME]), type); | 460 | nla_data(info->attrs[NL80211_ATTR_IFNAME]), |
461 | type, err ? NULL : &flags, ¶ms); | ||
326 | rtnl_unlock(); | 462 | rtnl_unlock(); |
327 | 463 | ||
464 | |||
328 | unlock: | 465 | unlock: |
329 | cfg80211_put_dev(drv); | 466 | cfg80211_put_dev(drv); |
330 | return err; | 467 | return err; |
@@ -752,10 +889,10 @@ static int parse_station_flags(struct nlattr *nla, u32 *staflags) | |||
752 | 889 | ||
753 | static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq, | 890 | static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq, |
754 | int flags, struct net_device *dev, | 891 | int flags, struct net_device *dev, |
755 | u8 *mac_addr, struct station_stats *stats) | 892 | u8 *mac_addr, struct station_info *sinfo) |
756 | { | 893 | { |
757 | void *hdr; | 894 | void *hdr; |
758 | struct nlattr *statsattr; | 895 | struct nlattr *sinfoattr; |
759 | 896 | ||
760 | hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_STATION); | 897 | hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_STATION); |
761 | if (!hdr) | 898 | if (!hdr) |
@@ -764,20 +901,29 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq, | |||
764 | NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); | 901 | NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); |
765 | NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr); | 902 | NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr); |
766 | 903 | ||
767 | statsattr = nla_nest_start(msg, NL80211_ATTR_STA_STATS); | 904 | sinfoattr = nla_nest_start(msg, NL80211_ATTR_STA_INFO); |
768 | if (!statsattr) | 905 | if (!sinfoattr) |
769 | goto nla_put_failure; | 906 | goto nla_put_failure; |
770 | if (stats->filled & STATION_STAT_INACTIVE_TIME) | 907 | if (sinfo->filled & STATION_INFO_INACTIVE_TIME) |
771 | NLA_PUT_U32(msg, NL80211_STA_STAT_INACTIVE_TIME, | 908 | NLA_PUT_U32(msg, NL80211_STA_INFO_INACTIVE_TIME, |
772 | stats->inactive_time); | 909 | sinfo->inactive_time); |
773 | if (stats->filled & STATION_STAT_RX_BYTES) | 910 | if (sinfo->filled & STATION_INFO_RX_BYTES) |
774 | NLA_PUT_U32(msg, NL80211_STA_STAT_RX_BYTES, | 911 | NLA_PUT_U32(msg, NL80211_STA_INFO_RX_BYTES, |
775 | stats->rx_bytes); | 912 | sinfo->rx_bytes); |
776 | if (stats->filled & STATION_STAT_TX_BYTES) | 913 | if (sinfo->filled & STATION_INFO_TX_BYTES) |
777 | NLA_PUT_U32(msg, NL80211_STA_STAT_TX_BYTES, | 914 | NLA_PUT_U32(msg, NL80211_STA_INFO_TX_BYTES, |
778 | stats->tx_bytes); | 915 | sinfo->tx_bytes); |
779 | 916 | if (sinfo->filled & STATION_INFO_LLID) | |
780 | nla_nest_end(msg, statsattr); | 917 | NLA_PUT_U16(msg, NL80211_STA_INFO_LLID, |
918 | sinfo->llid); | ||
919 | if (sinfo->filled & STATION_INFO_PLID) | ||
920 | NLA_PUT_U16(msg, NL80211_STA_INFO_PLID, | ||
921 | sinfo->plid); | ||
922 | if (sinfo->filled & STATION_INFO_PLINK_STATE) | ||
923 | NLA_PUT_U8(msg, NL80211_STA_INFO_PLINK_STATE, | ||
924 | sinfo->plink_state); | ||
925 | |||
926 | nla_nest_end(msg, sinfoattr); | ||
781 | 927 | ||
782 | return genlmsg_end(msg, hdr); | 928 | return genlmsg_end(msg, hdr); |
783 | 929 | ||
@@ -785,17 +931,80 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq, | |||
785 | return genlmsg_cancel(msg, hdr); | 931 | return genlmsg_cancel(msg, hdr); |
786 | } | 932 | } |
787 | 933 | ||
934 | static int nl80211_dump_station(struct sk_buff *skb, | ||
935 | struct netlink_callback *cb) | ||
936 | { | ||
937 | int wp_idx = 0; | ||
938 | int if_idx = 0; | ||
939 | int sta_idx = cb->args[2]; | ||
940 | int wp_start = cb->args[0]; | ||
941 | int if_start = cb->args[1]; | ||
942 | struct station_info sinfo; | ||
943 | struct cfg80211_registered_device *dev; | ||
944 | struct wireless_dev *wdev; | ||
945 | u8 mac_addr[ETH_ALEN]; | ||
946 | int err; | ||
947 | int exit = 0; | ||
948 | |||
949 | /* TODO: filter by device */ | ||
950 | mutex_lock(&cfg80211_drv_mutex); | ||
951 | list_for_each_entry(dev, &cfg80211_drv_list, list) { | ||
952 | if (exit) | ||
953 | break; | ||
954 | if (++wp_idx < wp_start) | ||
955 | continue; | ||
956 | if_idx = 0; | ||
957 | |||
958 | mutex_lock(&dev->devlist_mtx); | ||
959 | list_for_each_entry(wdev, &dev->netdev_list, list) { | ||
960 | if (exit) | ||
961 | break; | ||
962 | if (++if_idx < if_start) | ||
963 | continue; | ||
964 | if (!dev->ops->dump_station) | ||
965 | continue; | ||
966 | |||
967 | for (;; ++sta_idx) { | ||
968 | rtnl_lock(); | ||
969 | err = dev->ops->dump_station(&dev->wiphy, | ||
970 | wdev->netdev, sta_idx, mac_addr, | ||
971 | &sinfo); | ||
972 | rtnl_unlock(); | ||
973 | if (err) { | ||
974 | sta_idx = 0; | ||
975 | break; | ||
976 | } | ||
977 | if (nl80211_send_station(skb, | ||
978 | NETLINK_CB(cb->skb).pid, | ||
979 | cb->nlh->nlmsg_seq, NLM_F_MULTI, | ||
980 | wdev->netdev, mac_addr, | ||
981 | &sinfo) < 0) { | ||
982 | exit = 1; | ||
983 | break; | ||
984 | } | ||
985 | } | ||
986 | } | ||
987 | mutex_unlock(&dev->devlist_mtx); | ||
988 | } | ||
989 | mutex_unlock(&cfg80211_drv_mutex); | ||
990 | |||
991 | cb->args[0] = wp_idx; | ||
992 | cb->args[1] = if_idx; | ||
993 | cb->args[2] = sta_idx; | ||
994 | |||
995 | return skb->len; | ||
996 | } | ||
788 | 997 | ||
789 | static int nl80211_get_station(struct sk_buff *skb, struct genl_info *info) | 998 | static int nl80211_get_station(struct sk_buff *skb, struct genl_info *info) |
790 | { | 999 | { |
791 | struct cfg80211_registered_device *drv; | 1000 | struct cfg80211_registered_device *drv; |
792 | int err; | 1001 | int err; |
793 | struct net_device *dev; | 1002 | struct net_device *dev; |
794 | struct station_stats stats; | 1003 | struct station_info sinfo; |
795 | struct sk_buff *msg; | 1004 | struct sk_buff *msg; |
796 | u8 *mac_addr = NULL; | 1005 | u8 *mac_addr = NULL; |
797 | 1006 | ||
798 | memset(&stats, 0, sizeof(stats)); | 1007 | memset(&sinfo, 0, sizeof(sinfo)); |
799 | 1008 | ||
800 | if (!info->attrs[NL80211_ATTR_MAC]) | 1009 | if (!info->attrs[NL80211_ATTR_MAC]) |
801 | return -EINVAL; | 1010 | return -EINVAL; |
@@ -812,15 +1021,18 @@ static int nl80211_get_station(struct sk_buff *skb, struct genl_info *info) | |||
812 | } | 1021 | } |
813 | 1022 | ||
814 | rtnl_lock(); | 1023 | rtnl_lock(); |
815 | err = drv->ops->get_station(&drv->wiphy, dev, mac_addr, &stats); | 1024 | err = drv->ops->get_station(&drv->wiphy, dev, mac_addr, &sinfo); |
816 | rtnl_unlock(); | 1025 | rtnl_unlock(); |
817 | 1026 | ||
1027 | if (err) | ||
1028 | goto out; | ||
1029 | |||
818 | msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); | 1030 | msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); |
819 | if (!msg) | 1031 | if (!msg) |
820 | goto out; | 1032 | goto out; |
821 | 1033 | ||
822 | if (nl80211_send_station(msg, info->snd_pid, info->snd_seq, 0, | 1034 | if (nl80211_send_station(msg, info->snd_pid, info->snd_seq, 0, |
823 | dev, mac_addr, &stats) < 0) | 1035 | dev, mac_addr, &sinfo) < 0) |
824 | goto out_free; | 1036 | goto out_free; |
825 | 1037 | ||
826 | err = genlmsg_unicast(msg, info->snd_pid); | 1038 | err = genlmsg_unicast(msg, info->snd_pid); |
@@ -891,6 +1103,10 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info) | |||
891 | ¶ms.station_flags)) | 1103 | ¶ms.station_flags)) |
892 | return -EINVAL; | 1104 | return -EINVAL; |
893 | 1105 | ||
1106 | if (info->attrs[NL80211_ATTR_STA_PLINK_ACTION]) | ||
1107 | params.plink_action = | ||
1108 | nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_ACTION]); | ||
1109 | |||
894 | err = get_drv_dev_by_info_ifindex(info, &drv, &dev); | 1110 | err = get_drv_dev_by_info_ifindex(info, &drv, &dev); |
895 | if (err) | 1111 | if (err) |
896 | return err; | 1112 | return err; |
@@ -1005,6 +1221,273 @@ static int nl80211_del_station(struct sk_buff *skb, struct genl_info *info) | |||
1005 | return err; | 1221 | return err; |
1006 | } | 1222 | } |
1007 | 1223 | ||
1224 | static int nl80211_send_mpath(struct sk_buff *msg, u32 pid, u32 seq, | ||
1225 | int flags, struct net_device *dev, | ||
1226 | u8 *dst, u8 *next_hop, | ||
1227 | struct mpath_info *pinfo) | ||
1228 | { | ||
1229 | void *hdr; | ||
1230 | struct nlattr *pinfoattr; | ||
1231 | |||
1232 | hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_STATION); | ||
1233 | if (!hdr) | ||
1234 | return -1; | ||
1235 | |||
1236 | NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); | ||
1237 | NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, dst); | ||
1238 | NLA_PUT(msg, NL80211_ATTR_MPATH_NEXT_HOP, ETH_ALEN, next_hop); | ||
1239 | |||
1240 | pinfoattr = nla_nest_start(msg, NL80211_ATTR_MPATH_INFO); | ||
1241 | if (!pinfoattr) | ||
1242 | goto nla_put_failure; | ||
1243 | if (pinfo->filled & MPATH_INFO_FRAME_QLEN) | ||
1244 | NLA_PUT_U32(msg, NL80211_MPATH_INFO_FRAME_QLEN, | ||
1245 | pinfo->frame_qlen); | ||
1246 | if (pinfo->filled & MPATH_INFO_DSN) | ||
1247 | NLA_PUT_U32(msg, NL80211_MPATH_INFO_DSN, | ||
1248 | pinfo->dsn); | ||
1249 | if (pinfo->filled & MPATH_INFO_METRIC) | ||
1250 | NLA_PUT_U32(msg, NL80211_MPATH_INFO_METRIC, | ||
1251 | pinfo->metric); | ||
1252 | if (pinfo->filled & MPATH_INFO_EXPTIME) | ||
1253 | NLA_PUT_U32(msg, NL80211_MPATH_INFO_EXPTIME, | ||
1254 | pinfo->exptime); | ||
1255 | if (pinfo->filled & MPATH_INFO_FLAGS) | ||
1256 | NLA_PUT_U8(msg, NL80211_MPATH_INFO_FLAGS, | ||
1257 | pinfo->flags); | ||
1258 | if (pinfo->filled & MPATH_INFO_DISCOVERY_TIMEOUT) | ||
1259 | NLA_PUT_U32(msg, NL80211_MPATH_INFO_DISCOVERY_TIMEOUT, | ||
1260 | pinfo->discovery_timeout); | ||
1261 | if (pinfo->filled & MPATH_INFO_DISCOVERY_RETRIES) | ||
1262 | NLA_PUT_U8(msg, NL80211_MPATH_INFO_DISCOVERY_RETRIES, | ||
1263 | pinfo->discovery_retries); | ||
1264 | |||
1265 | nla_nest_end(msg, pinfoattr); | ||
1266 | |||
1267 | return genlmsg_end(msg, hdr); | ||
1268 | |||
1269 | nla_put_failure: | ||
1270 | return genlmsg_cancel(msg, hdr); | ||
1271 | } | ||
1272 | |||
1273 | static int nl80211_dump_mpath(struct sk_buff *skb, | ||
1274 | struct netlink_callback *cb) | ||
1275 | { | ||
1276 | int wp_idx = 0; | ||
1277 | int if_idx = 0; | ||
1278 | int sta_idx = cb->args[2]; | ||
1279 | int wp_start = cb->args[0]; | ||
1280 | int if_start = cb->args[1]; | ||
1281 | struct mpath_info pinfo; | ||
1282 | struct cfg80211_registered_device *dev; | ||
1283 | struct wireless_dev *wdev; | ||
1284 | u8 dst[ETH_ALEN]; | ||
1285 | u8 next_hop[ETH_ALEN]; | ||
1286 | int err; | ||
1287 | int exit = 0; | ||
1288 | |||
1289 | /* TODO: filter by device */ | ||
1290 | mutex_lock(&cfg80211_drv_mutex); | ||
1291 | list_for_each_entry(dev, &cfg80211_drv_list, list) { | ||
1292 | if (exit) | ||
1293 | break; | ||
1294 | if (++wp_idx < wp_start) | ||
1295 | continue; | ||
1296 | if_idx = 0; | ||
1297 | |||
1298 | mutex_lock(&dev->devlist_mtx); | ||
1299 | list_for_each_entry(wdev, &dev->netdev_list, list) { | ||
1300 | if (exit) | ||
1301 | break; | ||
1302 | if (++if_idx < if_start) | ||
1303 | continue; | ||
1304 | if (!dev->ops->dump_mpath) | ||
1305 | continue; | ||
1306 | |||
1307 | for (;; ++sta_idx) { | ||
1308 | rtnl_lock(); | ||
1309 | err = dev->ops->dump_mpath(&dev->wiphy, | ||
1310 | wdev->netdev, sta_idx, dst, | ||
1311 | next_hop, &pinfo); | ||
1312 | rtnl_unlock(); | ||
1313 | if (err) { | ||
1314 | sta_idx = 0; | ||
1315 | break; | ||
1316 | } | ||
1317 | if (nl80211_send_mpath(skb, | ||
1318 | NETLINK_CB(cb->skb).pid, | ||
1319 | cb->nlh->nlmsg_seq, NLM_F_MULTI, | ||
1320 | wdev->netdev, dst, next_hop, | ||
1321 | &pinfo) < 0) { | ||
1322 | exit = 1; | ||
1323 | break; | ||
1324 | } | ||
1325 | } | ||
1326 | } | ||
1327 | mutex_unlock(&dev->devlist_mtx); | ||
1328 | } | ||
1329 | mutex_unlock(&cfg80211_drv_mutex); | ||
1330 | |||
1331 | cb->args[0] = wp_idx; | ||
1332 | cb->args[1] = if_idx; | ||
1333 | cb->args[2] = sta_idx; | ||
1334 | |||
1335 | return skb->len; | ||
1336 | } | ||
1337 | |||
1338 | static int nl80211_get_mpath(struct sk_buff *skb, struct genl_info *info) | ||
1339 | { | ||
1340 | struct cfg80211_registered_device *drv; | ||
1341 | int err; | ||
1342 | struct net_device *dev; | ||
1343 | struct mpath_info pinfo; | ||
1344 | struct sk_buff *msg; | ||
1345 | u8 *dst = NULL; | ||
1346 | u8 next_hop[ETH_ALEN]; | ||
1347 | |||
1348 | memset(&pinfo, 0, sizeof(pinfo)); | ||
1349 | |||
1350 | if (!info->attrs[NL80211_ATTR_MAC]) | ||
1351 | return -EINVAL; | ||
1352 | |||
1353 | dst = nla_data(info->attrs[NL80211_ATTR_MAC]); | ||
1354 | |||
1355 | err = get_drv_dev_by_info_ifindex(info, &drv, &dev); | ||
1356 | if (err) | ||
1357 | return err; | ||
1358 | |||
1359 | if (!drv->ops->get_mpath) { | ||
1360 | err = -EOPNOTSUPP; | ||
1361 | goto out; | ||
1362 | } | ||
1363 | |||
1364 | rtnl_lock(); | ||
1365 | err = drv->ops->get_mpath(&drv->wiphy, dev, dst, next_hop, &pinfo); | ||
1366 | rtnl_unlock(); | ||
1367 | |||
1368 | if (err) | ||
1369 | goto out; | ||
1370 | |||
1371 | msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); | ||
1372 | if (!msg) | ||
1373 | goto out; | ||
1374 | |||
1375 | if (nl80211_send_mpath(msg, info->snd_pid, info->snd_seq, 0, | ||
1376 | dev, dst, next_hop, &pinfo) < 0) | ||
1377 | goto out_free; | ||
1378 | |||
1379 | err = genlmsg_unicast(msg, info->snd_pid); | ||
1380 | goto out; | ||
1381 | |||
1382 | out_free: | ||
1383 | nlmsg_free(msg); | ||
1384 | |||
1385 | out: | ||
1386 | cfg80211_put_dev(drv); | ||
1387 | dev_put(dev); | ||
1388 | return err; | ||
1389 | } | ||
1390 | |||
1391 | static int nl80211_set_mpath(struct sk_buff *skb, struct genl_info *info) | ||
1392 | { | ||
1393 | struct cfg80211_registered_device *drv; | ||
1394 | int err; | ||
1395 | struct net_device *dev; | ||
1396 | u8 *dst = NULL; | ||
1397 | u8 *next_hop = NULL; | ||
1398 | |||
1399 | if (!info->attrs[NL80211_ATTR_MAC]) | ||
1400 | return -EINVAL; | ||
1401 | |||
1402 | if (!info->attrs[NL80211_ATTR_MPATH_NEXT_HOP]) | ||
1403 | return -EINVAL; | ||
1404 | |||
1405 | dst = nla_data(info->attrs[NL80211_ATTR_MAC]); | ||
1406 | next_hop = nla_data(info->attrs[NL80211_ATTR_MPATH_NEXT_HOP]); | ||
1407 | |||
1408 | err = get_drv_dev_by_info_ifindex(info, &drv, &dev); | ||
1409 | if (err) | ||
1410 | return err; | ||
1411 | |||
1412 | if (!drv->ops->change_mpath) { | ||
1413 | err = -EOPNOTSUPP; | ||
1414 | goto out; | ||
1415 | } | ||
1416 | |||
1417 | rtnl_lock(); | ||
1418 | err = drv->ops->change_mpath(&drv->wiphy, dev, dst, next_hop); | ||
1419 | rtnl_unlock(); | ||
1420 | |||
1421 | out: | ||
1422 | cfg80211_put_dev(drv); | ||
1423 | dev_put(dev); | ||
1424 | return err; | ||
1425 | } | ||
1426 | static int nl80211_new_mpath(struct sk_buff *skb, struct genl_info *info) | ||
1427 | { | ||
1428 | struct cfg80211_registered_device *drv; | ||
1429 | int err; | ||
1430 | struct net_device *dev; | ||
1431 | u8 *dst = NULL; | ||
1432 | u8 *next_hop = NULL; | ||
1433 | |||
1434 | if (!info->attrs[NL80211_ATTR_MAC]) | ||
1435 | return -EINVAL; | ||
1436 | |||
1437 | if (!info->attrs[NL80211_ATTR_MPATH_NEXT_HOP]) | ||
1438 | return -EINVAL; | ||
1439 | |||
1440 | dst = nla_data(info->attrs[NL80211_ATTR_MAC]); | ||
1441 | next_hop = nla_data(info->attrs[NL80211_ATTR_MPATH_NEXT_HOP]); | ||
1442 | |||
1443 | err = get_drv_dev_by_info_ifindex(info, &drv, &dev); | ||
1444 | if (err) | ||
1445 | return err; | ||
1446 | |||
1447 | if (!drv->ops->add_mpath) { | ||
1448 | err = -EOPNOTSUPP; | ||
1449 | goto out; | ||
1450 | } | ||
1451 | |||
1452 | rtnl_lock(); | ||
1453 | err = drv->ops->add_mpath(&drv->wiphy, dev, dst, next_hop); | ||
1454 | rtnl_unlock(); | ||
1455 | |||
1456 | out: | ||
1457 | cfg80211_put_dev(drv); | ||
1458 | dev_put(dev); | ||
1459 | return err; | ||
1460 | } | ||
1461 | |||
1462 | static int nl80211_del_mpath(struct sk_buff *skb, struct genl_info *info) | ||
1463 | { | ||
1464 | struct cfg80211_registered_device *drv; | ||
1465 | int err; | ||
1466 | struct net_device *dev; | ||
1467 | u8 *dst = NULL; | ||
1468 | |||
1469 | if (info->attrs[NL80211_ATTR_MAC]) | ||
1470 | dst = nla_data(info->attrs[NL80211_ATTR_MAC]); | ||
1471 | |||
1472 | err = get_drv_dev_by_info_ifindex(info, &drv, &dev); | ||
1473 | if (err) | ||
1474 | return err; | ||
1475 | |||
1476 | if (!drv->ops->del_mpath) { | ||
1477 | err = -EOPNOTSUPP; | ||
1478 | goto out; | ||
1479 | } | ||
1480 | |||
1481 | rtnl_lock(); | ||
1482 | err = drv->ops->del_mpath(&drv->wiphy, dev, dst); | ||
1483 | rtnl_unlock(); | ||
1484 | |||
1485 | out: | ||
1486 | cfg80211_put_dev(drv); | ||
1487 | dev_put(dev); | ||
1488 | return err; | ||
1489 | } | ||
1490 | |||
1008 | static struct genl_ops nl80211_ops[] = { | 1491 | static struct genl_ops nl80211_ops[] = { |
1009 | { | 1492 | { |
1010 | .cmd = NL80211_CMD_GET_WIPHY, | 1493 | .cmd = NL80211_CMD_GET_WIPHY, |
@@ -1089,7 +1572,7 @@ static struct genl_ops nl80211_ops[] = { | |||
1089 | { | 1572 | { |
1090 | .cmd = NL80211_CMD_GET_STATION, | 1573 | .cmd = NL80211_CMD_GET_STATION, |
1091 | .doit = nl80211_get_station, | 1574 | .doit = nl80211_get_station, |
1092 | /* TODO: implement dumpit */ | 1575 | .dumpit = nl80211_dump_station, |
1093 | .policy = nl80211_policy, | 1576 | .policy = nl80211_policy, |
1094 | .flags = GENL_ADMIN_PERM, | 1577 | .flags = GENL_ADMIN_PERM, |
1095 | }, | 1578 | }, |
@@ -1111,6 +1594,31 @@ static struct genl_ops nl80211_ops[] = { | |||
1111 | .policy = nl80211_policy, | 1594 | .policy = nl80211_policy, |
1112 | .flags = GENL_ADMIN_PERM, | 1595 | .flags = GENL_ADMIN_PERM, |
1113 | }, | 1596 | }, |
1597 | { | ||
1598 | .cmd = NL80211_CMD_GET_MPATH, | ||
1599 | .doit = nl80211_get_mpath, | ||
1600 | .dumpit = nl80211_dump_mpath, | ||
1601 | .policy = nl80211_policy, | ||
1602 | .flags = GENL_ADMIN_PERM, | ||
1603 | }, | ||
1604 | { | ||
1605 | .cmd = NL80211_CMD_SET_MPATH, | ||
1606 | .doit = nl80211_set_mpath, | ||
1607 | .policy = nl80211_policy, | ||
1608 | .flags = GENL_ADMIN_PERM, | ||
1609 | }, | ||
1610 | { | ||
1611 | .cmd = NL80211_CMD_NEW_MPATH, | ||
1612 | .doit = nl80211_new_mpath, | ||
1613 | .policy = nl80211_policy, | ||
1614 | .flags = GENL_ADMIN_PERM, | ||
1615 | }, | ||
1616 | { | ||
1617 | .cmd = NL80211_CMD_DEL_MPATH, | ||
1618 | .doit = nl80211_del_mpath, | ||
1619 | .policy = nl80211_policy, | ||
1620 | .flags = GENL_ADMIN_PERM, | ||
1621 | }, | ||
1114 | }; | 1622 | }; |
1115 | 1623 | ||
1116 | /* multicast groups */ | 1624 | /* multicast groups */ |
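
[Editor's note] The new NL80211_CMD_*_MPATH commands above are driven from user space over generic netlink. The sketch below is a hypothetical example of issuing NL80211_CMD_NEW_MPATH with the attributes the kernel handler expects (NL80211_ATTR_IFINDEX, NL80211_ATTR_MAC, NL80211_ATTR_MPATH_NEXT_HOP). It assumes the libnl-3 genetlink API, which post-dates this patch; the interface name "mesh0" and both MAC addresses are placeholders, and the command needs CAP_NET_ADMIN because the ops are registered with GENL_ADMIN_PERM.

/*
 * Hypothetical user-space sketch: create a mesh path via
 * NL80211_CMD_NEW_MPATH.  libnl-3 is assumed for illustration only.
 */
#include <net/if.h>
#include <linux/if_ether.h>
#include <linux/nl80211.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>

int main(void)
{
	/* Made-up example addresses. */
	unsigned char dst[ETH_ALEN]      = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
	unsigned char next_hop[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x02 };
	struct nl_sock *sock;
	struct nl_msg *msg;
	int family, err;

	sock = nl_socket_alloc();
	if (!sock || genl_connect(sock))
		return 1;

	family = genl_ctrl_resolve(sock, "nl80211");
	if (family < 0)
		return 1;

	msg = nlmsg_alloc();
	if (!msg)
		return 1;

	/* Build the NEW_MPATH request; "mesh0" is a placeholder ifname. */
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    NL80211_CMD_NEW_MPATH, 0);
	nla_put_u32(msg, NL80211_ATTR_IFINDEX, if_nametoindex("mesh0"));
	nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, dst);
	nla_put(msg, NL80211_ATTR_MPATH_NEXT_HOP, ETH_ALEN, next_hop);

	err = nl_send_auto(sock, msg);
	nlmsg_free(msg);
	nl_socket_free(sock);
	return err < 0;
}

Building would typically use something like pkg-config --cflags --libs libnl-genl-3.0; GET_MPATH with NLM_F_DUMP is handled by nl80211_dump_mpath() above, which resumes across dump calls via cb->args[0..2].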
diff --git a/net/wireless/reg.c b/net/wireless/reg.c new file mode 100644 index 000000000000..8cc6037eb2ae --- /dev/null +++ b/net/wireless/reg.c | |||
@@ -0,0 +1,159 @@ | |||
1 | /* | ||
2 | * Copyright 2002-2005, Instant802 Networks, Inc. | ||
3 | * Copyright 2005-2006, Devicescape Software, Inc. | ||
4 | * Copyright 2007 Johannes Berg <johannes@sipsolutions.net> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | |||
11 | /* | ||
12 | * This regulatory domain control implementation is highly incomplete, it | ||
13 | * only exists for the purpose of not regressing mac80211. | ||
14 | * | ||
15 | * For now, drivers can restrict the set of allowed channels by either | ||
16 | * not registering those channels or setting the IEEE80211_CHAN_DISABLED | ||
17 | * flag; that flag will only be *set* by this code, never *cleared*. | ||
18 | * | ||
19 | * The usual implementation is for a driver to read a device EEPROM to | ||
20 | * determine which regulatory domain it should be operating under, then | ||
21 | * looking up the allowable channels in a driver-local table and finally | ||
22 | * registering those channels in the wiphy structure. | ||
23 | * | ||
24 | * Alternatively, drivers that trust the regulatory domain control here | ||
25 | * will register a complete set of capabilities and the control code | ||
26 | * will restrict the set by setting the IEEE80211_CHAN_* flags. | ||
27 | */ | ||
28 | #include <linux/kernel.h> | ||
29 | #include <net/wireless.h> | ||
30 | #include "core.h" | ||
31 | |||
32 | static char *ieee80211_regdom = "US"; | ||
33 | module_param(ieee80211_regdom, charp, 0444); | ||
34 | MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code"); | ||
35 | |||
36 | struct ieee80211_channel_range { | ||
37 | short start_freq; | ||
38 | short end_freq; | ||
39 | int max_power; | ||
40 | int max_antenna_gain; | ||
41 | u32 flags; | ||
42 | }; | ||
43 | |||
44 | struct ieee80211_regdomain { | ||
45 | const char *code; | ||
46 | const struct ieee80211_channel_range *ranges; | ||
47 | int n_ranges; | ||
48 | }; | ||
49 | |||
50 | #define RANGE_PWR(_start, _end, _pwr, _ag, _flags) \ | ||
51 | { _start, _end, _pwr, _ag, _flags } | ||
52 | |||
53 | |||
54 | /* | ||
55 | * Ideally, in the future, these definitions will be loaded from a | ||
56 | * userspace table via some daemon. | ||
57 | */ | ||
58 | static const struct ieee80211_channel_range ieee80211_US_channels[] = { | ||
59 | /* IEEE 802.11b/g, channels 1..11 */ | ||
60 | RANGE_PWR(2412, 2462, 27, 6, 0), | ||
61 | /* IEEE 802.11a, channel 36*/ | ||
62 | RANGE_PWR(5180, 5180, 23, 6, 0), | ||
63 | /* IEEE 802.11a, channel 40*/ | ||
64 | RANGE_PWR(5200, 5200, 23, 6, 0), | ||
65 | /* IEEE 802.11a, channel 44*/ | ||
66 | RANGE_PWR(5220, 5220, 23, 6, 0), | ||
67 | /* IEEE 802.11a, channels 48..64 */ | ||
68 | RANGE_PWR(5240, 5320, 23, 6, 0), | ||
69 | /* IEEE 802.11a, channels 149..165, outdoor */ | ||
70 | RANGE_PWR(5745, 5825, 30, 6, 0), | ||
71 | }; | ||
72 | |||
73 | static const struct ieee80211_channel_range ieee80211_JP_channels[] = { | ||
74 | /* IEEE 802.11b/g, channels 1..14 */ | ||
75 | RANGE_PWR(2412, 2484, 20, 6, 0), | ||
76 | /* IEEE 802.11a, channels 34..48 */ | ||
77 | RANGE_PWR(5170, 5240, 20, 6, IEEE80211_CHAN_PASSIVE_SCAN), | ||
78 | /* IEEE 802.11a, channels 52..64 */ | ||
79 | RANGE_PWR(5260, 5320, 20, 6, IEEE80211_CHAN_NO_IBSS | | ||
80 | IEEE80211_CHAN_RADAR), | ||
81 | }; | ||
82 | |||
83 | #define REGDOM(_code) \ | ||
84 | { \ | ||
85 | .code = __stringify(_code), \ | ||
86 | .ranges = ieee80211_ ##_code## _channels, \ | ||
87 | .n_ranges = ARRAY_SIZE(ieee80211_ ##_code## _channels), \ | ||
88 | } | ||
89 | |||
90 | static const struct ieee80211_regdomain ieee80211_regdoms[] = { | ||
91 | REGDOM(US), | ||
92 | REGDOM(JP), | ||
93 | }; | ||
94 | |||
95 | |||
96 | static const struct ieee80211_regdomain *get_regdom(void) | ||
97 | { | ||
98 | static const struct ieee80211_channel_range | ||
99 | ieee80211_world_channels[] = { | ||
100 | /* IEEE 802.11b/g, channels 1..11 */ | ||
101 | RANGE_PWR(2412, 2462, 27, 6, 0), | ||
102 | }; | ||
103 | static const struct ieee80211_regdomain regdom_world = REGDOM(world); | ||
104 | int i; | ||
105 | |||
106 | for (i = 0; i < ARRAY_SIZE(ieee80211_regdoms); i++) | ||
107 | if (strcmp(ieee80211_regdom, ieee80211_regdoms[i].code) == 0) | ||
108 | return &ieee80211_regdoms[i]; | ||
109 | |||
110 | return ®dom_world; | ||
111 | } | ||
112 | |||
113 | |||
114 | static void handle_channel(struct ieee80211_channel *chan, | ||
115 | const struct ieee80211_regdomain *rd) | ||
116 | { | ||
117 | int i; | ||
118 | u32 flags = chan->orig_flags; | ||
119 | const struct ieee80211_channel_range *rg = NULL; | ||
120 | |||
121 | for (i = 0; i < rd->n_ranges; i++) { | ||
122 | if (rd->ranges[i].start_freq <= chan->center_freq && | ||
123 | chan->center_freq <= rd->ranges[i].end_freq) { | ||
124 | rg = &rd->ranges[i]; | ||
125 | break; | ||
126 | } | ||
127 | } | ||
128 | |||
129 | if (!rg) { | ||
130 | /* not found */ | ||
131 | flags |= IEEE80211_CHAN_DISABLED; | ||
132 | chan->flags = flags; | ||
133 | return; | ||
134 | } | ||
135 | |||
136 | chan->flags = flags; | ||
137 | chan->max_antenna_gain = min(chan->orig_mag, | ||
138 | rg->max_antenna_gain); | ||
139 | chan->max_power = min(chan->orig_mpwr, rg->max_power); | ||
140 | } | ||
141 | |||
142 | static void handle_band(struct ieee80211_supported_band *sband, | ||
143 | const struct ieee80211_regdomain *rd) | ||
144 | { | ||
145 | int i; | ||
146 | |||
147 | for (i = 0; i < sband->n_channels; i++) | ||
148 | handle_channel(&sband->channels[i], rd); | ||
149 | } | ||
150 | |||
151 | void wiphy_update_regulatory(struct wiphy *wiphy) | ||
152 | { | ||
153 | enum ieee80211_band band; | ||
154 | const struct ieee80211_regdomain *rd = get_regdom(); | ||
155 | |||
156 | for (band = 0; band < IEEE80211_NUM_BANDS; band++) | ||
157 | if (wiphy->bands[band]) | ||
158 | handle_band(wiphy->bands[band], rd); | ||
159 | } | ||
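
[Editor's note] A minimal stand-alone sketch of the lookup handle_channel() performs: find the regulatory range covering a channel's center frequency and clamp the channel's maximum power and antenna gain to that range. The table values are copied from ieee80211_US_channels above; the channel capabilities (orig_mpwr, orig_mag) are made-up inputs, and the dBm/dBi units follow the usual cfg80211 convention.

/*
 * User-space illustration of handle_channel()'s range match + clamp.
 * Not kernel code; ranges copied from the US table in reg.c above.
 */
#include <stdio.h>

struct range { int start_freq, end_freq, max_power, max_antenna_gain; };

static const struct range us_ranges[] = {
	{ 2412, 2462, 27, 6 },	/* 2.4 GHz, channels 1..11 */
	{ 5180, 5180, 23, 6 },	/* channel 36 */
	{ 5200, 5200, 23, 6 },	/* channel 40 */
	{ 5220, 5220, 23, 6 },	/* channel 44 */
	{ 5240, 5320, 23, 6 },	/* channels 48..64 */
	{ 5745, 5825, 30, 6 },	/* channels 149..165 */
};

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
	/* Hypothetical hardware capabilities for one channel. */
	int center_freq = 5280, orig_mpwr = 30, orig_mag = 10;
	unsigned int i;

	for (i = 0; i < sizeof(us_ranges) / sizeof(us_ranges[0]); i++) {
		const struct range *rg = &us_ranges[i];

		if (rg->start_freq <= center_freq &&
		    center_freq <= rg->end_freq) {
			printf("%d MHz: max power %d dBm, max antenna gain %d dBi\n",
			       center_freq,
			       min_int(orig_mpwr, rg->max_power),
			       min_int(orig_mag, rg->max_antenna_gain));
			return 0;
		}
	}
	/* No range found: the kernel code sets IEEE80211_CHAN_DISABLED. */
	printf("%d MHz: disabled (no matching range)\n", center_freq);
	return 0;
}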
diff --git a/net/wireless/util.c b/net/wireless/util.c new file mode 100644 index 000000000000..77336c22fcf2 --- /dev/null +++ b/net/wireless/util.c | |||
@@ -0,0 +1,98 @@ | |||
1 | /* | ||
2 | * Wireless utility functions | ||
3 | * | ||
4 | * Copyright 2007 Johannes Berg <johannes@sipsolutions.net> | ||
5 | */ | ||
6 | #include <net/wireless.h> | ||
7 | #include <asm/bitops.h> | ||
8 | #include "core.h" | ||
9 | |||
10 | int ieee80211_channel_to_frequency(int chan) | ||
11 | { | ||
12 | if (chan < 14) | ||
13 | return 2407 + chan * 5; | ||
14 | |||
15 | if (chan == 14) | ||
16 | return 2484; | ||
17 | |||
18 | /* FIXME: 802.11j 17.3.8.3.2 */ | ||
19 | return (chan + 1000) * 5; | ||
20 | } | ||
21 | EXPORT_SYMBOL(ieee80211_channel_to_frequency); | ||
22 | |||
23 | int ieee80211_frequency_to_channel(int freq) | ||
24 | { | ||
25 | if (freq == 2484) | ||
26 | return 14; | ||
27 | |||
28 | if (freq < 2484) | ||
29 | return (freq - 2407) / 5; | ||
30 | |||
31 | /* FIXME: 802.11j 17.3.8.3.2 */ | ||
32 | return freq/5 - 1000; | ||
33 | } | ||
34 | EXPORT_SYMBOL(ieee80211_frequency_to_channel); | ||
35 | |||
36 | static void set_mandatory_flags_band(struct ieee80211_supported_band *sband, | ||
37 | enum ieee80211_band band) | ||
38 | { | ||
39 | int i, want; | ||
40 | |||
41 | switch (band) { | ||
42 | case IEEE80211_BAND_5GHZ: | ||
43 | want = 3; | ||
44 | for (i = 0; i < sband->n_bitrates; i++) { | ||
45 | if (sband->bitrates[i].bitrate == 60 || | ||
46 | sband->bitrates[i].bitrate == 120 || | ||
47 | sband->bitrates[i].bitrate == 240) { | ||
48 | sband->bitrates[i].flags |= | ||
49 | IEEE80211_RATE_MANDATORY_A; | ||
50 | want--; | ||
51 | } | ||
52 | } | ||
53 | WARN_ON(want); | ||
54 | break; | ||
55 | case IEEE80211_BAND_2GHZ: | ||
56 | want = 7; | ||
57 | for (i = 0; i < sband->n_bitrates; i++) { | ||
58 | if (sband->bitrates[i].bitrate == 10) { | ||
59 | sband->bitrates[i].flags |= | ||
60 | IEEE80211_RATE_MANDATORY_B | | ||
61 | IEEE80211_RATE_MANDATORY_G; | ||
62 | want--; | ||
63 | } | ||
64 | |||
65 | if (sband->bitrates[i].bitrate == 20 || | ||
66 | sband->bitrates[i].bitrate == 55 || | ||
67 | sband->bitrates[i].bitrate == 110 || | ||
68 | sband->bitrates[i].bitrate == 60 || | ||
69 | sband->bitrates[i].bitrate == 120 || | ||
70 | sband->bitrates[i].bitrate == 240) { | ||
71 | sband->bitrates[i].flags |= | ||
72 | IEEE80211_RATE_MANDATORY_G; | ||
73 | want--; | ||
74 | } | ||
75 | |||
76 | if (sband->bitrates[i].bitrate != 10 && | ||
77 | sband->bitrates[i].bitrate != 20 && | ||
78 | sband->bitrates[i].bitrate != 55 && | ||
79 | sband->bitrates[i].bitrate != 110) | ||
80 | sband->bitrates[i].flags |= | ||
81 | IEEE80211_RATE_ERP_G; | ||
82 | } | ||
83 | WARN_ON(want != 0 && want != 3 && want != 6); | ||
84 | break; | ||
85 | case IEEE80211_NUM_BANDS: | ||
86 | WARN_ON(1); | ||
87 | break; | ||
88 | } | ||
89 | } | ||
90 | |||
91 | void ieee80211_set_bitrate_flags(struct wiphy *wiphy) | ||
92 | { | ||
93 | enum ieee80211_band band; | ||
94 | |||
95 | for (band = 0; band < IEEE80211_NUM_BANDS; band++) | ||
96 | if (wiphy->bands[band]) | ||
97 | set_mandatory_flags_band(wiphy->bands[band], band); | ||
98 | } | ||
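
[Editor's note] The conversion arithmetic below is copied directly from ieee80211_channel_to_frequency() and ieee80211_frequency_to_channel() above; only the surrounding user-space program and the sanity checks are new. It confirms, for example, that channel 6 maps to 2437 MHz, channel 36 to 5180 MHz, and that the two helpers round-trip. The 802.11j cases flagged as FIXME in the kernel code are likewise not handled here.

/* Stand-alone check of the channel/frequency conversion helpers. */
#include <assert.h>
#include <stdio.h>

static int channel_to_frequency(int chan)
{
	if (chan < 14)
		return 2407 + chan * 5;
	if (chan == 14)
		return 2484;
	/* 5 GHz channels; 802.11j not handled, as in the kernel helper. */
	return (chan + 1000) * 5;
}

static int frequency_to_channel(int freq)
{
	if (freq == 2484)
		return 14;
	if (freq < 2484)
		return (freq - 2407) / 5;
	return freq / 5 - 1000;
}

int main(void)
{
	assert(channel_to_frequency(1) == 2412);
	assert(channel_to_frequency(6) == 2437);
	assert(channel_to_frequency(14) == 2484);
	assert(channel_to_frequency(36) == 5180);

	assert(frequency_to_channel(2412) == 1);
	assert(frequency_to_channel(2484) == 14);
	assert(frequency_to_channel(5825) == 165);

	printf("channel/frequency conversions check out\n");
	return 0;
}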
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 9fc4c315f6cd..bae94a8031a2 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
@@ -46,6 +46,7 @@ EXPORT_SYMBOL(xfrm_cfg_mutex); | |||
46 | 46 | ||
47 | static DEFINE_RWLOCK(xfrm_policy_lock); | 47 | static DEFINE_RWLOCK(xfrm_policy_lock); |
48 | 48 | ||
49 | static struct list_head xfrm_policy_bytype[XFRM_POLICY_TYPE_MAX]; | ||
49 | unsigned int xfrm_policy_count[XFRM_POLICY_MAX*2]; | 50 | unsigned int xfrm_policy_count[XFRM_POLICY_MAX*2]; |
50 | EXPORT_SYMBOL(xfrm_policy_count); | 51 | EXPORT_SYMBOL(xfrm_policy_count); |
51 | 52 | ||
@@ -208,6 +209,7 @@ struct xfrm_policy *xfrm_policy_alloc(gfp_t gfp) | |||
208 | policy = kzalloc(sizeof(struct xfrm_policy), gfp); | 209 | policy = kzalloc(sizeof(struct xfrm_policy), gfp); |
209 | 210 | ||
210 | if (policy) { | 211 | if (policy) { |
212 | INIT_LIST_HEAD(&policy->bytype); | ||
211 | INIT_HLIST_NODE(&policy->bydst); | 213 | INIT_HLIST_NODE(&policy->bydst); |
212 | INIT_HLIST_NODE(&policy->byidx); | 214 | INIT_HLIST_NODE(&policy->byidx); |
213 | rwlock_init(&policy->lock); | 215 | rwlock_init(&policy->lock); |
@@ -230,6 +232,10 @@ void xfrm_policy_destroy(struct xfrm_policy *policy) | |||
230 | if (del_timer(&policy->timer)) | 232 | if (del_timer(&policy->timer)) |
231 | BUG(); | 233 | BUG(); |
232 | 234 | ||
235 | write_lock_bh(&xfrm_policy_lock); | ||
236 | list_del(&policy->bytype); | ||
237 | write_unlock_bh(&xfrm_policy_lock); | ||
238 | |||
233 | security_xfrm_policy_free(policy); | 239 | security_xfrm_policy_free(policy); |
234 | kfree(policy); | 240 | kfree(policy); |
235 | } | 241 | } |
@@ -584,6 +590,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl) | |||
584 | policy->curlft.use_time = 0; | 590 | policy->curlft.use_time = 0; |
585 | if (!mod_timer(&policy->timer, jiffies + HZ)) | 591 | if (!mod_timer(&policy->timer, jiffies + HZ)) |
586 | xfrm_pol_hold(policy); | 592 | xfrm_pol_hold(policy); |
593 | list_add_tail(&policy->bytype, &xfrm_policy_bytype[policy->type]); | ||
587 | write_unlock_bh(&xfrm_policy_lock); | 594 | write_unlock_bh(&xfrm_policy_lock); |
588 | 595 | ||
589 | if (delpol) | 596 | if (delpol) |
@@ -822,57 +829,60 @@ out: | |||
822 | } | 829 | } |
823 | EXPORT_SYMBOL(xfrm_policy_flush); | 830 | EXPORT_SYMBOL(xfrm_policy_flush); |
824 | 831 | ||
825 | int xfrm_policy_walk(u8 type, int (*func)(struct xfrm_policy *, int, int, void*), | 832 | int xfrm_policy_walk(struct xfrm_policy_walk *walk, |
833 | int (*func)(struct xfrm_policy *, int, int, void*), | ||
826 | void *data) | 834 | void *data) |
827 | { | 835 | { |
828 | struct xfrm_policy *pol, *last = NULL; | 836 | struct xfrm_policy *old, *pol, *last = NULL; |
829 | struct hlist_node *entry; | 837 | int error = 0; |
830 | int dir, last_dir = 0, count, error; | 838 | |
839 | if (walk->type >= XFRM_POLICY_TYPE_MAX && | ||
840 | walk->type != XFRM_POLICY_TYPE_ANY) | ||
841 | return -EINVAL; | ||
831 | 842 | ||
843 | if (walk->policy == NULL && walk->count != 0) | ||
844 | return 0; | ||
845 | |||
846 | old = pol = walk->policy; | ||
847 | walk->policy = NULL; | ||
832 | read_lock_bh(&xfrm_policy_lock); | 848 | read_lock_bh(&xfrm_policy_lock); |
833 | count = 0; | ||
834 | 849 | ||
835 | for (dir = 0; dir < 2*XFRM_POLICY_MAX; dir++) { | 850 | for (; walk->cur_type < XFRM_POLICY_TYPE_MAX; walk->cur_type++) { |
836 | struct hlist_head *table = xfrm_policy_bydst[dir].table; | 851 | if (walk->type != walk->cur_type && |
837 | int i; | 852 | walk->type != XFRM_POLICY_TYPE_ANY) |
853 | continue; | ||
838 | 854 | ||
839 | hlist_for_each_entry(pol, entry, | 855 | if (pol == NULL) { |
840 | &xfrm_policy_inexact[dir], bydst) { | 856 | pol = list_first_entry(&xfrm_policy_bytype[walk->cur_type], |
841 | if (pol->type != type) | 857 | struct xfrm_policy, bytype); |
858 | } | ||
859 | list_for_each_entry_from(pol, &xfrm_policy_bytype[walk->cur_type], bytype) { | ||
860 | if (pol->dead) | ||
842 | continue; | 861 | continue; |
843 | if (last) { | 862 | if (last) { |
844 | error = func(last, last_dir % XFRM_POLICY_MAX, | 863 | error = func(last, xfrm_policy_id2dir(last->index), |
845 | count, data); | 864 | walk->count, data); |
846 | if (error) | 865 | if (error) { |
866 | xfrm_pol_hold(last); | ||
867 | walk->policy = last; | ||
847 | goto out; | 868 | goto out; |
848 | } | ||
849 | last = pol; | ||
850 | last_dir = dir; | ||
851 | count++; | ||
852 | } | ||
853 | for (i = xfrm_policy_bydst[dir].hmask; i >= 0; i--) { | ||
854 | hlist_for_each_entry(pol, entry, table + i, bydst) { | ||
855 | if (pol->type != type) | ||
856 | continue; | ||
857 | if (last) { | ||
858 | error = func(last, last_dir % XFRM_POLICY_MAX, | ||
859 | count, data); | ||
860 | if (error) | ||
861 | goto out; | ||
862 | } | 869 | } |
863 | last = pol; | ||
864 | last_dir = dir; | ||
865 | count++; | ||
866 | } | 870 | } |
871 | last = pol; | ||
872 | walk->count++; | ||
867 | } | 873 | } |
874 | pol = NULL; | ||
868 | } | 875 | } |
869 | if (count == 0) { | 876 | if (walk->count == 0) { |
870 | error = -ENOENT; | 877 | error = -ENOENT; |
871 | goto out; | 878 | goto out; |
872 | } | 879 | } |
873 | error = func(last, last_dir % XFRM_POLICY_MAX, 0, data); | 880 | if (last) |
881 | error = func(last, xfrm_policy_id2dir(last->index), 0, data); | ||
874 | out: | 882 | out: |
875 | read_unlock_bh(&xfrm_policy_lock); | 883 | read_unlock_bh(&xfrm_policy_lock); |
884 | if (old != NULL) | ||
885 | xfrm_pol_put(old); | ||
876 | return error; | 886 | return error; |
877 | } | 887 | } |
878 | EXPORT_SYMBOL(xfrm_policy_walk); | 888 | EXPORT_SYMBOL(xfrm_policy_walk); |
@@ -2365,6 +2375,9 @@ static void __init xfrm_policy_init(void) | |||
2365 | panic("XFRM: failed to allocate bydst hash\n"); | 2375 | panic("XFRM: failed to allocate bydst hash\n"); |
2366 | } | 2376 | } |
2367 | 2377 | ||
2378 | for (dir = 0; dir < XFRM_POLICY_TYPE_MAX; dir++) | ||
2379 | INIT_LIST_HEAD(&xfrm_policy_bytype[dir]); | ||
2380 | |||
2368 | INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task); | 2381 | INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task); |
2369 | register_netdevice_notifier(&xfrm_dev_notifier); | 2382 | register_netdevice_notifier(&xfrm_dev_notifier); |
2370 | } | 2383 | } |
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 7ba65e82941c..9880b792e6a5 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c | |||
@@ -50,6 +50,7 @@ static DEFINE_SPINLOCK(xfrm_state_lock); | |||
50 | * Main use is finding SA after policy selected tunnel or transport mode. | 50 | * Main use is finding SA after policy selected tunnel or transport mode. |
51 | * Also, it can be used by ah/esp icmp error handler to find offending SA. | 51 | * Also, it can be used by ah/esp icmp error handler to find offending SA. |
52 | */ | 52 | */ |
53 | static LIST_HEAD(xfrm_state_all); | ||
53 | static struct hlist_head *xfrm_state_bydst __read_mostly; | 54 | static struct hlist_head *xfrm_state_bydst __read_mostly; |
54 | static struct hlist_head *xfrm_state_bysrc __read_mostly; | 55 | static struct hlist_head *xfrm_state_bysrc __read_mostly; |
55 | static struct hlist_head *xfrm_state_byspi __read_mostly; | 56 | static struct hlist_head *xfrm_state_byspi __read_mostly; |
@@ -510,6 +511,7 @@ struct xfrm_state *xfrm_state_alloc(void) | |||
510 | if (x) { | 511 | if (x) { |
511 | atomic_set(&x->refcnt, 1); | 512 | atomic_set(&x->refcnt, 1); |
512 | atomic_set(&x->tunnel_users, 0); | 513 | atomic_set(&x->tunnel_users, 0); |
514 | INIT_LIST_HEAD(&x->all); | ||
513 | INIT_HLIST_NODE(&x->bydst); | 515 | INIT_HLIST_NODE(&x->bydst); |
514 | INIT_HLIST_NODE(&x->bysrc); | 516 | INIT_HLIST_NODE(&x->bysrc); |
515 | INIT_HLIST_NODE(&x->byspi); | 517 | INIT_HLIST_NODE(&x->byspi); |
@@ -533,6 +535,10 @@ void __xfrm_state_destroy(struct xfrm_state *x) | |||
533 | { | 535 | { |
534 | BUG_TRAP(x->km.state == XFRM_STATE_DEAD); | 536 | BUG_TRAP(x->km.state == XFRM_STATE_DEAD); |
535 | 537 | ||
538 | spin_lock_bh(&xfrm_state_lock); | ||
539 | list_del(&x->all); | ||
540 | spin_unlock_bh(&xfrm_state_lock); | ||
541 | |||
536 | spin_lock_bh(&xfrm_state_gc_lock); | 542 | spin_lock_bh(&xfrm_state_gc_lock); |
537 | hlist_add_head(&x->bydst, &xfrm_state_gc_list); | 543 | hlist_add_head(&x->bydst, &xfrm_state_gc_list); |
538 | spin_unlock_bh(&xfrm_state_gc_lock); | 544 | spin_unlock_bh(&xfrm_state_gc_lock); |
@@ -909,6 +915,8 @@ static void __xfrm_state_insert(struct xfrm_state *x) | |||
909 | 915 | ||
910 | x->genid = ++xfrm_state_genid; | 916 | x->genid = ++xfrm_state_genid; |
911 | 917 | ||
918 | list_add_tail(&x->all, &xfrm_state_all); | ||
919 | |||
912 | h = xfrm_dst_hash(&x->id.daddr, &x->props.saddr, | 920 | h = xfrm_dst_hash(&x->id.daddr, &x->props.saddr, |
913 | x->props.reqid, x->props.family); | 921 | x->props.reqid, x->props.family); |
914 | hlist_add_head(&x->bydst, xfrm_state_bydst+h); | 922 | hlist_add_head(&x->bydst, xfrm_state_bydst+h); |
@@ -1518,36 +1526,47 @@ unlock: | |||
1518 | } | 1526 | } |
1519 | EXPORT_SYMBOL(xfrm_alloc_spi); | 1527 | EXPORT_SYMBOL(xfrm_alloc_spi); |
1520 | 1528 | ||
1521 | int xfrm_state_walk(u8 proto, int (*func)(struct xfrm_state *, int, void*), | 1529 | int xfrm_state_walk(struct xfrm_state_walk *walk, |
1530 | int (*func)(struct xfrm_state *, int, void*), | ||
1522 | void *data) | 1531 | void *data) |
1523 | { | 1532 | { |
1524 | int i; | 1533 | struct xfrm_state *old, *x, *last = NULL; |
1525 | struct xfrm_state *x, *last = NULL; | ||
1526 | struct hlist_node *entry; | ||
1527 | int count = 0; | ||
1528 | int err = 0; | 1534 | int err = 0; |
1529 | 1535 | ||
1536 | if (walk->state == NULL && walk->count != 0) | ||
1537 | return 0; | ||
1538 | |||
1539 | old = x = walk->state; | ||
1540 | walk->state = NULL; | ||
1530 | spin_lock_bh(&xfrm_state_lock); | 1541 | spin_lock_bh(&xfrm_state_lock); |
1531 | for (i = 0; i <= xfrm_state_hmask; i++) { | 1542 | if (x == NULL) |
1532 | hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) { | 1543 | x = list_first_entry(&xfrm_state_all, struct xfrm_state, all); |
1533 | if (!xfrm_id_proto_match(x->id.proto, proto)) | 1544 | list_for_each_entry_from(x, &xfrm_state_all, all) { |
1534 | continue; | 1545 | if (x->km.state == XFRM_STATE_DEAD) |
1535 | if (last) { | 1546 | continue; |
1536 | err = func(last, count, data); | 1547 | if (!xfrm_id_proto_match(x->id.proto, walk->proto)) |
1537 | if (err) | 1548 | continue; |
1538 | goto out; | 1549 | if (last) { |
1550 | err = func(last, walk->count, data); | ||
1551 | if (err) { | ||
1552 | xfrm_state_hold(last); | ||
1553 | walk->state = last; | ||
1554 | goto out; | ||
1539 | } | 1555 | } |
1540 | last = x; | ||
1541 | count++; | ||
1542 | } | 1556 | } |
1557 | last = x; | ||
1558 | walk->count++; | ||
1543 | } | 1559 | } |
1544 | if (count == 0) { | 1560 | if (walk->count == 0) { |
1545 | err = -ENOENT; | 1561 | err = -ENOENT; |
1546 | goto out; | 1562 | goto out; |
1547 | } | 1563 | } |
1548 | err = func(last, 0, data); | 1564 | if (last) |
1565 | err = func(last, 0, data); | ||
1549 | out: | 1566 | out: |
1550 | spin_unlock_bh(&xfrm_state_lock); | 1567 | spin_unlock_bh(&xfrm_state_lock); |
1568 | if (old != NULL) | ||
1569 | xfrm_state_put(old); | ||
1551 | return err; | 1570 | return err; |
1552 | } | 1571 | } |
1553 | EXPORT_SYMBOL(xfrm_state_walk); | 1572 | EXPORT_SYMBOL(xfrm_state_walk); |
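
[Editor's note] The common idea in the reworked xfrm_policy_walk() and xfrm_state_walk() is that dump state is no longer an index of entries to skip on every pass, but a held reference to the last entry delivered, so the walk resumes with list_for_each_entry_from() on the new bytype/all lists. The sketch below is a stand-alone user-space analogue of that resume-by-reference control flow only; it is not the kernel API, and it omits the locking, reference counting, and dead-entry checks the kernel versions need.

/*
 * Analogue of the resumable walk: remember where the last pass stopped
 * and continue from that entry instead of re-skipping from the start.
 */
#include <stdio.h>

struct entry {
	int id;
	struct entry *next;
};

struct walk {
	struct entry *pos;	/* resume point; NULL before the first pass */
	int count;		/* entries delivered so far */
};

/* Deliver at most "budget" entries per pass, like one netlink dump skb. */
static int walk_some(struct entry *head, struct walk *w, int budget)
{
	struct entry *e = w->pos ? w->pos : head;

	for (; e; e = e->next) {
		if (budget == 0) {
			w->pos = e;	/* resume here on the next pass */
			return 1;	/* more entries remain */
		}
		printf("entry %d (delivered so far: %d)\n", e->id, ++w->count);
		budget--;
	}
	w->pos = NULL;
	return 0;			/* walk complete */
}

int main(void)
{
	struct entry c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct walk w = { NULL, 0 };

	/* Two passes with a budget of two entries, like two dump calls. */
	while (walk_some(&a, &w, 2))
		printf("-- pass ended, will resume --\n");
	return 0;
}

In the kernel versions the resume point must stay valid between dump calls, which is why the walk takes a reference (xfrm_pol_hold()/xfrm_state_hold()) before returning, drops the old one on re-entry, skips entries that became ->dead or XFRM_STATE_DEAD, and guards against restarting a finished walk by checking walk->count; the matching list_del() calls added to xfrm_policy_destroy() and __xfrm_state_destroy() keep the bytype/all lists consistent with that scheme.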
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index f971ca5645f8..f5fd5b3147cc 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c | |||
@@ -532,8 +532,6 @@ struct xfrm_dump_info { | |||
532 | struct sk_buff *out_skb; | 532 | struct sk_buff *out_skb; |
533 | u32 nlmsg_seq; | 533 | u32 nlmsg_seq; |
534 | u16 nlmsg_flags; | 534 | u16 nlmsg_flags; |
535 | int start_idx; | ||
536 | int this_idx; | ||
537 | }; | 535 | }; |
538 | 536 | ||
539 | static int copy_sec_ctx(struct xfrm_sec_ctx *s, struct sk_buff *skb) | 537 | static int copy_sec_ctx(struct xfrm_sec_ctx *s, struct sk_buff *skb) |
@@ -600,9 +598,6 @@ static int dump_one_state(struct xfrm_state *x, int count, void *ptr) | |||
600 | struct nlmsghdr *nlh; | 598 | struct nlmsghdr *nlh; |
601 | int err; | 599 | int err; |
602 | 600 | ||
603 | if (sp->this_idx < sp->start_idx) | ||
604 | goto out; | ||
605 | |||
606 | nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, sp->nlmsg_seq, | 601 | nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, sp->nlmsg_seq, |
607 | XFRM_MSG_NEWSA, sizeof(*p), sp->nlmsg_flags); | 602 | XFRM_MSG_NEWSA, sizeof(*p), sp->nlmsg_flags); |
608 | if (nlh == NULL) | 603 | if (nlh == NULL) |
@@ -615,8 +610,6 @@ static int dump_one_state(struct xfrm_state *x, int count, void *ptr) | |||
615 | goto nla_put_failure; | 610 | goto nla_put_failure; |
616 | 611 | ||
617 | nlmsg_end(skb, nlh); | 612 | nlmsg_end(skb, nlh); |
618 | out: | ||
619 | sp->this_idx++; | ||
620 | return 0; | 613 | return 0; |
621 | 614 | ||
622 | nla_put_failure: | 615 | nla_put_failure: |
@@ -624,18 +617,32 @@ nla_put_failure: | |||
624 | return err; | 617 | return err; |
625 | } | 618 | } |
626 | 619 | ||
620 | static int xfrm_dump_sa_done(struct netlink_callback *cb) | ||
621 | { | ||
622 | struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1]; | ||
623 | xfrm_state_walk_done(walk); | ||
624 | return 0; | ||
625 | } | ||
626 | |||
627 | static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb) | 627 | static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb) |
628 | { | 628 | { |
629 | struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1]; | ||
629 | struct xfrm_dump_info info; | 630 | struct xfrm_dump_info info; |
630 | 631 | ||
632 | BUILD_BUG_ON(sizeof(struct xfrm_state_walk) > | ||
633 | sizeof(cb->args) - sizeof(cb->args[0])); | ||
634 | |||
631 | info.in_skb = cb->skb; | 635 | info.in_skb = cb->skb; |
632 | info.out_skb = skb; | 636 | info.out_skb = skb; |
633 | info.nlmsg_seq = cb->nlh->nlmsg_seq; | 637 | info.nlmsg_seq = cb->nlh->nlmsg_seq; |
634 | info.nlmsg_flags = NLM_F_MULTI; | 638 | info.nlmsg_flags = NLM_F_MULTI; |
635 | info.this_idx = 0; | 639 | |
636 | info.start_idx = cb->args[0]; | 640 | if (!cb->args[0]) { |
637 | (void) xfrm_state_walk(0, dump_one_state, &info); | 641 | cb->args[0] = 1; |
638 | cb->args[0] = info.this_idx; | 642 | xfrm_state_walk_init(walk, 0); |
643 | } | ||
644 | |||
645 | (void) xfrm_state_walk(walk, dump_one_state, &info); | ||
639 | 646 | ||
640 | return skb->len; | 647 | return skb->len; |
641 | } | 648 | } |
@@ -654,7 +661,6 @@ static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb, | |||
654 | info.out_skb = skb; | 661 | info.out_skb = skb; |
655 | info.nlmsg_seq = seq; | 662 | info.nlmsg_seq = seq; |
656 | info.nlmsg_flags = 0; | 663 | info.nlmsg_flags = 0; |
657 | info.this_idx = info.start_idx = 0; | ||
658 | 664 | ||
659 | if (dump_one_state(x, 0, &info)) { | 665 | if (dump_one_state(x, 0, &info)) { |
660 | kfree_skb(skb); | 666 | kfree_skb(skb); |
@@ -1232,9 +1238,6 @@ static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr | |||
1232 | struct sk_buff *skb = sp->out_skb; | 1238 | struct sk_buff *skb = sp->out_skb; |
1233 | struct nlmsghdr *nlh; | 1239 | struct nlmsghdr *nlh; |
1234 | 1240 | ||
1235 | if (sp->this_idx < sp->start_idx) | ||
1236 | goto out; | ||
1237 | |||
1238 | nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, sp->nlmsg_seq, | 1241 | nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, sp->nlmsg_seq, |
1239 | XFRM_MSG_NEWPOLICY, sizeof(*p), sp->nlmsg_flags); | 1242 | XFRM_MSG_NEWPOLICY, sizeof(*p), sp->nlmsg_flags); |
1240 | if (nlh == NULL) | 1243 | if (nlh == NULL) |
@@ -1250,8 +1253,6 @@ static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr | |||
1250 | goto nlmsg_failure; | 1253 | goto nlmsg_failure; |
1251 | 1254 | ||
1252 | nlmsg_end(skb, nlh); | 1255 | nlmsg_end(skb, nlh); |
1253 | out: | ||
1254 | sp->this_idx++; | ||
1255 | return 0; | 1256 | return 0; |
1256 | 1257 | ||
1257 | nlmsg_failure: | 1258 | nlmsg_failure: |
@@ -1259,21 +1260,33 @@ nlmsg_failure: | |||
1259 | return -EMSGSIZE; | 1260 | return -EMSGSIZE; |
1260 | } | 1261 | } |
1261 | 1262 | ||
1263 | static int xfrm_dump_policy_done(struct netlink_callback *cb) | ||
1264 | { | ||
1265 | struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1]; | ||
1266 | |||
1267 | xfrm_policy_walk_done(walk); | ||
1268 | return 0; | ||
1269 | } | ||
1270 | |||
1262 | static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb) | 1271 | static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb) |
1263 | { | 1272 | { |
1273 | struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1]; | ||
1264 | struct xfrm_dump_info info; | 1274 | struct xfrm_dump_info info; |
1265 | 1275 | ||
1276 | BUILD_BUG_ON(sizeof(struct xfrm_policy_walk) > | ||
1277 | sizeof(cb->args) - sizeof(cb->args[0])); | ||
1278 | |||
1266 | info.in_skb = cb->skb; | 1279 | info.in_skb = cb->skb; |
1267 | info.out_skb = skb; | 1280 | info.out_skb = skb; |
1268 | info.nlmsg_seq = cb->nlh->nlmsg_seq; | 1281 | info.nlmsg_seq = cb->nlh->nlmsg_seq; |
1269 | info.nlmsg_flags = NLM_F_MULTI; | 1282 | info.nlmsg_flags = NLM_F_MULTI; |
1270 | info.this_idx = 0; | 1283 | |
1271 | info.start_idx = cb->args[0]; | 1284 | if (!cb->args[0]) { |
1272 | (void) xfrm_policy_walk(XFRM_POLICY_TYPE_MAIN, dump_one_policy, &info); | 1285 | cb->args[0] = 1; |
1273 | #ifdef CONFIG_XFRM_SUB_POLICY | 1286 | xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY); |
1274 | (void) xfrm_policy_walk(XFRM_POLICY_TYPE_SUB, dump_one_policy, &info); | 1287 | } |
1275 | #endif | 1288 | |
1276 | cb->args[0] = info.this_idx; | 1289 | (void) xfrm_policy_walk(walk, dump_one_policy, &info); |
1277 | 1290 | ||
1278 | return skb->len; | 1291 | return skb->len; |
1279 | } | 1292 | } |
@@ -1293,7 +1306,6 @@ static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb, | |||
1293 | info.out_skb = skb; | 1306 | info.out_skb = skb; |
1294 | info.nlmsg_seq = seq; | 1307 | info.nlmsg_seq = seq; |
1295 | info.nlmsg_flags = 0; | 1308 | info.nlmsg_flags = 0; |
1296 | info.this_idx = info.start_idx = 0; | ||
1297 | 1309 | ||
1298 | if (dump_one_policy(xp, dir, 0, &info) < 0) { | 1310 | if (dump_one_policy(xp, dir, 0, &info) < 0) { |
1299 | kfree_skb(skb); | 1311 | kfree_skb(skb); |
@@ -1891,15 +1903,18 @@ static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = { | |||
1891 | static struct xfrm_link { | 1903 | static struct xfrm_link { |
1892 | int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **); | 1904 | int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **); |
1893 | int (*dump)(struct sk_buff *, struct netlink_callback *); | 1905 | int (*dump)(struct sk_buff *, struct netlink_callback *); |
1906 | int (*done)(struct netlink_callback *); | ||
1894 | } xfrm_dispatch[XFRM_NR_MSGTYPES] = { | 1907 | } xfrm_dispatch[XFRM_NR_MSGTYPES] = { |
1895 | [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa }, | 1908 | [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa }, |
1896 | [XFRM_MSG_DELSA - XFRM_MSG_BASE] = { .doit = xfrm_del_sa }, | 1909 | [XFRM_MSG_DELSA - XFRM_MSG_BASE] = { .doit = xfrm_del_sa }, |
1897 | [XFRM_MSG_GETSA - XFRM_MSG_BASE] = { .doit = xfrm_get_sa, | 1910 | [XFRM_MSG_GETSA - XFRM_MSG_BASE] = { .doit = xfrm_get_sa, |
1898 | .dump = xfrm_dump_sa }, | 1911 | .dump = xfrm_dump_sa, |
1912 | .done = xfrm_dump_sa_done }, | ||
1899 | [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy }, | 1913 | [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy }, |
1900 | [XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy }, | 1914 | [XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy }, |
1901 | [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy, | 1915 | [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy, |
1902 | .dump = xfrm_dump_policy }, | 1916 | .dump = xfrm_dump_policy, |
1917 | .done = xfrm_dump_policy_done }, | ||
1903 | [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi }, | 1918 | [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi }, |
1904 | [XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_acquire }, | 1919 | [XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_acquire }, |
1905 | [XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_sa_expire }, | 1920 | [XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_sa_expire }, |
@@ -1938,7 +1953,7 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
1938 | if (link->dump == NULL) | 1953 | if (link->dump == NULL) |
1939 | return -EINVAL; | 1954 | return -EINVAL; |
1940 | 1955 | ||
1941 | return netlink_dump_start(xfrm_nl, skb, nlh, link->dump, NULL); | 1956 | return netlink_dump_start(xfrm_nl, skb, nlh, link->dump, link->done); |
1942 | } | 1957 | } |
1943 | 1958 | ||
1944 | err = nlmsg_parse(nlh, xfrm_msg_min[type], attrs, XFRMA_MAX, | 1959 | err = nlmsg_parse(nlh, xfrm_msg_min[type], attrs, XFRMA_MAX, |