Diffstat (limited to 'net')
 -rw-r--r--  net/appletalk/aarp.c               |   7
 -rw-r--r--  net/bridge/netfilter/ebtables.c    |  21
 -rw-r--r--  net/core/netpoll.c                 |  80
 -rw-r--r--  net/ipv4/netfilter/ipt_CLUSTERIP.c |   2
 -rw-r--r--  net/ipv4/route.c                   |   8
 -rw-r--r--  net/socket.c                       |   3
 -rw-r--r--  net/x25/af_x25.c                   | 110
 -rw-r--r--  net/x25/x25_facilities.c           |  34
 -rw-r--r--  net/x25/x25_subr.c                 |  41
 9 files changed, 203 insertions(+), 103 deletions(-)
diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c
index 10d040461021..c34614ea5fce 100644
--- a/net/appletalk/aarp.c
+++ b/net/appletalk/aarp.c
@@ -35,6 +35,7 @@
 #include <net/datalink.h>
 #include <net/psnap.h>
 #include <linux/atalk.h>
+#include <linux/delay.h>
 #include <linux/init.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
@@ -462,8 +463,7 @@ void aarp_probe_network(struct atalk_iface *atif)
 		aarp_send_probe(atif->dev, &atif->address);
 
 		/* Defer 1/10th */
-		current->state = TASK_INTERRUPTIBLE;
-		schedule_timeout(HZ / 10);
+		msleep(100);
 
 		if (atif->status & ATIF_PROBE_FAIL)
 			break;
@@ -510,9 +510,8 @@ int aarp_proxy_probe_network(struct atalk_iface *atif, struct atalk_addr *sa)
 		aarp_send_probe(atif->dev, sa);
 
 		/* Defer 1/10th */
-		current->state = TASK_INTERRUPTIBLE;
 		write_unlock_bh(&aarp_lock);
-		schedule_timeout(HZ / 10);
+		msleep(100);
 		write_lock_bh(&aarp_lock);
 
 		if (entry->status & ATIF_PROBE_FAIL)
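Note: the aarp.c hunks replace an open-coded interruptible delay with msleep(). msleep() is the stock kernel helper for millisecond sleeps and is not interruptible, so a pending signal can no longer cut the 100 ms probe pause short. A rough sketch of the two forms (illustrative only; the helper names are real kernel APIs, the wrapper functions are made up, and the sketch uses set_current_state() where the old code assigned current->state directly):

    #include <linux/delay.h>	/* msleep() */
    #include <linux/sched.h>	/* schedule_timeout(), TASK_INTERRUPTIBLE */

    /* What the removed lines did: mark the task sleeping, then yield. */
    static void probe_delay_old(void)
    {
    	set_current_state(TASK_INTERRUPTIBLE);
    	schedule_timeout(HZ / 10);	/* ~100 ms, may end early on a signal */
    }

    /* What the patch uses instead: one call, uninterruptible, at least 100 ms. */
    static void probe_delay_new(void)
    {
    	msleep(100);
    }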
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 18ebc664769b..c4540144f0f4 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -859,8 +859,7 @@ static int translate_table(struct ebt_replace *repl,
 		if (repl->valid_hooks & (1 << i))
 			if (check_chainloops(newinfo->hook_entry[i],
 			   cl_s, udc_cnt, i, newinfo->entries)) {
-				if (cl_s)
-					vfree(cl_s);
+				vfree(cl_s);
 				return -EINVAL;
 			}
 
@@ -883,8 +882,7 @@ static int translate_table(struct ebt_replace *repl,
 		EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
 		   ebt_cleanup_entry, &i);
 	}
-	if (cl_s)
-		vfree(cl_s);
+	vfree(cl_s);
 	return ret;
 }
 
@@ -1030,8 +1028,7 @@ static int do_replace(void __user *user, unsigned int len)
 	}
 	vfree(table);
 
-	if (counterstmp)
-		vfree(counterstmp);
+	vfree(counterstmp);
 	return ret;
 
 free_unlock:
@@ -1040,8 +1037,7 @@ free_iterate:
 	EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
 	   ebt_cleanup_entry, NULL);
 free_counterstmp:
-	if (counterstmp)
-		vfree(counterstmp);
+	vfree(counterstmp);
 	/* can be initialized in translate_table() */
 	if (newinfo->chainstack) {
 		for (i = 0; i < num_possible_cpus(); i++)
@@ -1049,11 +1045,9 @@ free_counterstmp:
 		vfree(newinfo->chainstack);
 	}
 free_entries:
-	if (newinfo->entries)
-		vfree(newinfo->entries);
+	vfree(newinfo->entries);
 free_newinfo:
-	if (newinfo)
-		vfree(newinfo);
+	vfree(newinfo);
 	return ret;
 }
 
@@ -1213,8 +1207,7 @@ void ebt_unregister_table(struct ebt_table *table)
 	down(&ebt_mutex);
 	LIST_DELETE(&ebt_tables, table);
 	up(&ebt_mutex);
-	if (table->private->entries)
-		vfree(table->private->entries);
+	vfree(table->private->entries);
 	if (table->private->chainstack) {
 		for (i = 0; i < num_possible_cpus(); i++)
 			vfree(table->private->chainstack[i]);
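Note: every ebtables hunk is the same cleanup. vfree(), like kfree(), is defined to do nothing when handed a NULL pointer, so the `if (ptr)` guards in front of each call are dead weight. A user-space analogue with free(), which gives the same guarantee (the struct and function names are made up for illustration):

    #include <stdlib.h>

    struct table_info {
    	char *entries;		/* may legitimately still be NULL */
    };

    static void release_table(struct table_info *t)
    {
    	/* free(NULL) is a no-op per the C standard, exactly like
    	 * kfree(NULL)/vfree(NULL) in the kernel, so no guard is needed. */
    	free(t->entries);
    	t->entries = NULL;
    }

    int main(void)
    {
    	struct table_info t = { .entries = NULL };

    	release_table(&t);	/* safe even though entries was never allocated */
    	return 0;
    }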
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index a119696d5521..c327c9edadc5 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -130,19 +130,20 @@ static int checksum_udp(struct sk_buff *skb, struct udphdr *uh,
  */
 static void poll_napi(struct netpoll *np)
 {
+	struct netpoll_info *npinfo = np->dev->npinfo;
 	int budget = 16;
 
 	if (test_bit(__LINK_STATE_RX_SCHED, &np->dev->state) &&
-	    np->poll_owner != smp_processor_id() &&
-	    spin_trylock(&np->poll_lock)) {
-		np->rx_flags |= NETPOLL_RX_DROP;
+	    npinfo->poll_owner != smp_processor_id() &&
+	    spin_trylock(&npinfo->poll_lock)) {
+		npinfo->rx_flags |= NETPOLL_RX_DROP;
 		atomic_inc(&trapped);
 
 		np->dev->poll(np->dev, &budget);
 
 		atomic_dec(&trapped);
-		np->rx_flags &= ~NETPOLL_RX_DROP;
-		spin_unlock(&np->poll_lock);
+		npinfo->rx_flags &= ~NETPOLL_RX_DROP;
+		spin_unlock(&npinfo->poll_lock);
 	}
 }
 
@@ -245,6 +246,7 @@ repeat:
 static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 {
 	int status;
+	struct netpoll_info *npinfo;
 
 repeat:
 	if(!np || !np->dev || !netif_running(np->dev)) {
@@ -253,8 +255,9 @@ repeat:
 	}
 
 	/* avoid recursion */
-	if(np->poll_owner == smp_processor_id() ||
-	   np->dev->xmit_lock_owner == smp_processor_id()) {
+	npinfo = np->dev->npinfo;
+	if (npinfo->poll_owner == smp_processor_id() ||
+	    np->dev->xmit_lock_owner == smp_processor_id()) {
 		if (np->drop)
 			np->drop(skb);
 		else
@@ -341,14 +344,22 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
 
 static void arp_reply(struct sk_buff *skb)
 {
+	struct netpoll_info *npinfo = skb->dev->npinfo;
 	struct arphdr *arp;
 	unsigned char *arp_ptr;
 	int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
 	u32 sip, tip;
+	unsigned long flags;
 	struct sk_buff *send_skb;
-	struct netpoll *np = skb->dev->np;
+	struct netpoll *np = NULL;
+
+	spin_lock_irqsave(&npinfo->rx_lock, flags);
+	if (npinfo->rx_np && npinfo->rx_np->dev == skb->dev)
+		np = npinfo->rx_np;
+	spin_unlock_irqrestore(&npinfo->rx_lock, flags);
 
-	if (!np) return;
+	if (!np)
+		return;
 
 	/* No arp on this interface */
 	if (skb->dev->flags & IFF_NOARP)
@@ -429,9 +440,9 @@ int __netpoll_rx(struct sk_buff *skb)
 	int proto, len, ulen;
 	struct iphdr *iph;
 	struct udphdr *uh;
-	struct netpoll *np = skb->dev->np;
+	struct netpoll *np = skb->dev->npinfo->rx_np;
 
-	if (!np->rx_hook)
+	if (!np)
 		goto out;
 	if (skb->dev->type != ARPHRD_ETHER)
 		goto out;
@@ -611,9 +622,8 @@ int netpoll_setup(struct netpoll *np)
 {
 	struct net_device *ndev = NULL;
 	struct in_device *in_dev;
-
-	np->poll_lock = SPIN_LOCK_UNLOCKED;
-	np->poll_owner = -1;
+	struct netpoll_info *npinfo;
+	unsigned long flags;
 
 	if (np->dev_name)
 		ndev = dev_get_by_name(np->dev_name);
@@ -624,7 +634,17 @@ int netpoll_setup(struct netpoll *np)
 	}
 
 	np->dev = ndev;
-	ndev->np = np;
+	if (!ndev->npinfo) {
+		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
+		if (!npinfo)
+			goto release;
+
+		npinfo->rx_np = NULL;
+		npinfo->poll_lock = SPIN_LOCK_UNLOCKED;
+		npinfo->poll_owner = -1;
+		npinfo->rx_lock = SPIN_LOCK_UNLOCKED;
+	} else
+		npinfo = ndev->npinfo;
 
 	if (!ndev->poll_controller) {
 		printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
@@ -692,13 +712,20 @@ int netpoll_setup(struct netpoll *np)
 		       np->name, HIPQUAD(np->local_ip));
 	}
 
-	if(np->rx_hook)
-		np->rx_flags = NETPOLL_RX_ENABLED;
+	if (np->rx_hook) {
+		spin_lock_irqsave(&npinfo->rx_lock, flags);
+		npinfo->rx_flags |= NETPOLL_RX_ENABLED;
+		npinfo->rx_np = np;
+		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
+	}
+	/* last thing to do is link it to the net device structure */
+	ndev->npinfo = npinfo;
 
 	return 0;
 
 release:
-	ndev->np = NULL;
+	if (!ndev->npinfo)
+		kfree(npinfo);
 	np->dev = NULL;
 	dev_put(ndev);
 	return -1;
@@ -706,9 +733,20 @@ int netpoll_setup(struct netpoll *np)
 
 void netpoll_cleanup(struct netpoll *np)
 {
-	if (np->dev)
-		np->dev->np = NULL;
-	dev_put(np->dev);
+	struct netpoll_info *npinfo;
+	unsigned long flags;
+
+	if (np->dev) {
+		npinfo = np->dev->npinfo;
+		if (npinfo && npinfo->rx_np == np) {
+			spin_lock_irqsave(&npinfo->rx_lock, flags);
+			npinfo->rx_np = NULL;
+			npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
+			spin_unlock_irqrestore(&npinfo->rx_lock, flags);
+		}
+		dev_put(np->dev);
+	}
+
 	np->dev = NULL;
 }
 
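Note: the netpoll change moves the per-device polling state out of struct netpoll and into a separately allocated struct netpoll_info hung off the net_device, so more than one netpoll client can attach to the same interface without each carrying its own copy of the lock; the receive-side pointer is now guarded by rx_lock. An outline of the new per-device state, reconstructed from the field accesses in the hunks above (the authoritative definition lives in the netpoll header, which is outside this net-only diffstat, so the types here are an educated guess):

    /* Sketch only -- see include/linux/netpoll.h for the real layout. */
    struct netpoll_info {
    	spinlock_t	poll_lock;	/* serializes calls into dev->poll()        */
    	int		poll_owner;	/* CPU currently polling, or -1             */
    	int		rx_flags;	/* NETPOLL_RX_ENABLED / NETPOLL_RX_DROP     */
    	spinlock_t	rx_lock;	/* protects rx_np and rx_flags              */
    	struct netpoll	*rx_np;		/* the one client that registered rx_hook  */
    };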
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index dc4362b57cfa..9cde8c61f525 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -339,7 +339,7 @@ target(struct sk_buff **pskb,
 	 * error messages (RELATED) and information requests (see below) */
 	if ((*pskb)->nh.iph->protocol == IPPROTO_ICMP
 	    && (ctinfo == IP_CT_RELATED
-		|| ctinfo == IP_CT_IS_REPLY+IP_CT_IS_REPLY))
+		|| ctinfo == IP_CT_RELATED+IP_CT_IS_REPLY))
 		return IPT_CONTINUE;
 
 	/* ip_conntrack_icmp guarantees us that we only have ICMP_ECHO,
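Note: the CLUSTERIP hunk fixes a typo in the connection-tracking state test. Reply-direction ctinfo values are formed by adding IP_CT_IS_REPLY to the original-direction state, so the reply of a RELATED connection is IP_CT_RELATED + IP_CT_IS_REPLY; IP_CT_IS_REPLY + IP_CT_IS_REPLY is not a value ctinfo can ever hold, so RELATED ICMP errors in the reply direction were never matched. Schematically (numeric values shown for illustration; the authoritative enum is in the conntrack headers):

    /* Sketch of enum ip_conntrack_info as used by the test above. */
    enum {
    	IP_CT_ESTABLISHED,	/* 0: original direction, established  */
    	IP_CT_RELATED,		/* 1: original direction, related      */
    	IP_CT_NEW,		/* 2: original direction, new          */
    	IP_CT_IS_REPLY,		/* 3: offset added for reply direction */
    };
    /* reply to a RELATED connection: IP_CT_RELATED + IP_CT_IS_REPLY == 4  */
    /* the old test compared against IP_CT_IS_REPLY + IP_CT_IS_REPLY == 6, */
    /* which ctinfo never takes, so the branch was dead.                   */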
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index f4d53c919869..80cf633d9f4a 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1767,7 +1767,7 @@ static inline int ip_mkroute_input_def(struct sk_buff *skb,
 					struct in_device *in_dev,
 					u32 daddr, u32 saddr, u32 tos)
 {
-	struct rtable* rth;
+	struct rtable* rth = NULL;
 	int err;
 	unsigned hash;
 
@@ -1794,7 +1794,7 @@ static inline int ip_mkroute_input(struct sk_buff *skb,
 				   u32 daddr, u32 saddr, u32 tos)
 {
 #ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
-	struct rtable* rth;
+	struct rtable* rth = NULL;
 	unsigned char hop, hopcount, lasthop;
 	int err = -EINVAL;
 	unsigned int hash;
@@ -2239,7 +2239,7 @@ static inline int ip_mkroute_output_def(struct rtable **rp,
 					 struct net_device *dev_out,
 					 unsigned flags)
 {
-	struct rtable *rth;
+	struct rtable *rth = NULL;
 	int err = __mkroute_output(&rth, res, fl, oldflp, dev_out, flags);
 	unsigned hash;
 	if (err == 0) {
@@ -2267,7 +2267,7 @@ static inline int ip_mkroute_output(struct rtable** rp,
 	unsigned char hop;
 	unsigned hash;
 	int err = -EINVAL;
-	struct rtable *rth;
+	struct rtable *rth = NULL;
 
 	if (res->fi && res->fi->fib_nhs > 1) {
 		unsigned char hopcount = res->fi->fib_nhs;
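Note: the route.c hunks only add `= NULL` initializers. In each function the rtable pointer is filled in by __mkroute_input()/__mkroute_output() through its address and is only consumed when that call succeeds, so the initializer keeps the error paths from ever touching an uninitialized pointer (and quiets "may be used uninitialized" compiler warnings). The pattern in miniature, as ordinary user-space C with a hypothetical maker function:

    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-in for __mkroute_output(): fills *out only on success. */
    static int make_route(int **out, int fail)
    {
    	if (fail)
    		return -1;		/* *out deliberately untouched */
    	*out = malloc(sizeof(**out));
    	return *out ? 0 : -1;
    }

    int main(void)
    {
    	int *rth = NULL;			/* as in the patch */
    	int err = make_route(&rth, 1);		/* take the failure path */

    	if (err == 0)
    		printf("route built at %p\n", (void *)rth);
    	else
    		printf("no route, rth is still %p\n", (void *)rth);

    	free(rth);				/* free(NULL) is a no-op */
    	return 0;
    }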
diff --git a/net/socket.c b/net/socket.c
index 38729af09461..6f2a17881972 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -383,9 +383,8 @@ int sock_map_fd(struct socket *sock)
 			goto out;
 		}
 
-		sprintf(name, "[%lu]", SOCK_INODE(sock)->i_ino);
+		this.len = sprintf(name, "[%lu]", SOCK_INODE(sock)->i_ino);
 		this.name = name;
-		this.len = strlen(name);
 		this.hash = SOCK_INODE(sock)->i_ino;
 
 		file->f_dentry = d_alloc(sock_mnt->mnt_sb->s_root, &this);
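Note: the socket.c change drops a redundant strlen(). sprintf() returns the number of characters it wrote, excluding the terminating NUL, which is exactly the length that was being recomputed for this.len. A quick user-space check of that equivalence:

    #include <assert.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
    	char name[32];
    	unsigned long ino = 123456UL;	/* stand-in for the socket's inode number */
    	int len = sprintf(name, "[%lu]", ino);

    	assert(len == (int)strlen(name));	/* sprintf() already reported the length */
    	printf("name=%s len=%d\n", name, len);
    	return 0;
    }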
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index 2a24b243b841..04bec047fa9a 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -29,6 +29,10 @@
  *	2000-11-14	Henner Eisen	Closing datalink from NETDEV_GOING_DOWN
  *	2002-10-06	Arnaldo C. Melo	Get rid of cli/sti, move proc stuff to
  *					x25_proc.c, using seq_file
+ *	2005-04-02	Shaun Pereira	Selective sub address matching
+ *					with call user data
+ *	2005-04-15	Shaun Pereira	Fast select with no restriction on
+ *					response
  */
 
 #include <linux/config.h>
@@ -219,7 +223,8 @@ static void x25_insert_socket(struct sock *sk)
  *	Note: if a listening socket has cud set it must only get calls
  *	with matching cud.
  */
-static struct sock *x25_find_listener(struct x25_address *addr, struct x25_calluserdata *calluserdata)
+static struct sock *x25_find_listener(struct x25_address *addr,
+					struct sk_buff *skb)
 {
 	struct sock *s;
 	struct sock *next_best;
@@ -230,22 +235,23 @@ static struct sock *x25_find_listener(struct x25_address *addr, struct x25_calluserdata *calluserdata)
 
 	sk_for_each(s, node, &x25_list)
 		if ((!strcmp(addr->x25_addr,
 			x25_sk(s)->source_addr.x25_addr) ||
 				!strcmp(addr->x25_addr,
 					null_x25_address.x25_addr)) &&
 					s->sk_state == TCP_LISTEN) {
-
 			/*
 			 * Found a listening socket, now check the incoming
 			 * call user data vs this sockets call user data
 			 */
-			if (x25_check_calluserdata(&x25_sk(s)->calluserdata, calluserdata)) {
-				sock_hold(s);
-				goto found;
-			}
-			if (x25_sk(s)->calluserdata.cudlength == 0) {
+			if(skb->len > 0 && x25_sk(s)->cudmatchlength > 0) {
+				if((memcmp(x25_sk(s)->calluserdata.cuddata,
+					skb->data,
+					x25_sk(s)->cudmatchlength)) == 0) {
+					sock_hold(s);
+					goto found;
+				}
+			} else
 				next_best = s;
-			}
 		}
 	if (next_best) {
 		s = next_best;
@@ -497,6 +503,9 @@ static int x25_create(struct socket *sock, int protocol)
 	x25->t23   = sysctl_x25_clear_request_timeout;
 	x25->t2    = sysctl_x25_ack_holdback_timeout;
 	x25->state = X25_STATE_0;
+	x25->cudmatchlength = 0;
+	x25->accptapprv = X25_DENY_ACCPT_APPRV;		/* normally no cud */
+							/* on call accept  */
 
 	x25->facilities.winsize_in  = X25_DEFAULT_WINDOW_SIZE;
 	x25->facilities.winsize_out = X25_DEFAULT_WINDOW_SIZE;
@@ -545,6 +554,8 @@ static struct sock *x25_make_new(struct sock *osk)
 	x25->t2         = ox25->t2;
 	x25->facilities = ox25->facilities;
 	x25->qbitincl   = ox25->qbitincl;
+	x25->cudmatchlength = ox25->cudmatchlength;
+	x25->accptapprv = ox25->accptapprv;
 
 	x25_init_timers(sk);
 out:
@@ -822,7 +833,6 @@ int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb,
 	struct x25_sock *makex25;
 	struct x25_address source_addr, dest_addr;
 	struct x25_facilities facilities;
-	struct x25_calluserdata calluserdata;
 	int len, rc;
 
 	/*
@@ -845,19 +855,10 @@ int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb,
 	skb_pull(skb,len);
 
 	/*
-	 *	Incoming Call User Data.
-	 */
-	if (skb->len >= 0) {
-		memcpy(calluserdata.cuddata, skb->data, skb->len);
-		calluserdata.cudlength = skb->len;
-	}
-
-	skb_push(skb,len);
-
-	/*
 	 *	Find a listener for the particular address/cud pair.
 	 */
-	sk = x25_find_listener(&source_addr,&calluserdata);
+	sk = x25_find_listener(&source_addr,skb);
+	skb_push(skb,len);
 
 	/*
 	 *	We can't accept the Call Request.
@@ -900,11 +901,23 @@ int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb,
 	makex25->neighbour     = nb;
 	makex25->facilities    = facilities;
 	makex25->vc_facil_mask = x25_sk(sk)->vc_facil_mask;
-	makex25->calluserdata  = calluserdata;
-
-	x25_write_internal(make, X25_CALL_ACCEPTED);
+	/* ensure no reverse facil on accept */
+	makex25->vc_facil_mask &= ~X25_MASK_REVERSE;
+	makex25->cudmatchlength = x25_sk(sk)->cudmatchlength;
+
+	/* Normally all calls are accepted immediatly */
+	if(makex25->accptapprv & X25_DENY_ACCPT_APPRV) {
+		x25_write_internal(make, X25_CALL_ACCEPTED);
+		makex25->state = X25_STATE_3;
+	}
 
-	makex25->state = X25_STATE_3;
+	/*
+	 *	Incoming Call User Data.
+	 */
+	if (skb->len >= 0) {
+		memcpy(makex25->calluserdata.cuddata, skb->data, skb->len);
+		makex25->calluserdata.cudlength = skb->len;
+	}
 
 	sk->sk_ack_backlog++;
 
@@ -1288,7 +1301,8 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 		if (facilities.throughput < 0x03 ||
 		    facilities.throughput > 0xDD)
 			break;
-		if (facilities.reverse && facilities.reverse != 1)
+		if (facilities.reverse &&
+			(facilities.reverse | 0x81)!= 0x81)
 			break;
 		x25->facilities = facilities;
 		rc = 0;
@@ -1325,6 +1339,44 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 		break;
 	}
 
+	case SIOCX25SCUDMATCHLEN: {
+		struct x25_subaddr sub_addr;
+		rc = -EINVAL;
+		if(sk->sk_state != TCP_CLOSE)
+			break;
+		rc = -EFAULT;
+		if (copy_from_user(&sub_addr, argp,
+				sizeof(sub_addr)))
+			break;
+		rc = -EINVAL;
+		if(sub_addr.cudmatchlength > X25_MAX_CUD_LEN)
+			break;
+		x25->cudmatchlength = sub_addr.cudmatchlength;
+		rc = 0;
+		break;
+	}
+
+	case SIOCX25CALLACCPTAPPRV: {
+		rc = -EINVAL;
+		if (sk->sk_state != TCP_CLOSE)
+			break;
+		x25->accptapprv = X25_ALLOW_ACCPT_APPRV;
+		rc = 0;
+		break;
+	}
+
+	case SIOCX25SENDCALLACCPT: {
+		rc = -EINVAL;
+		if (sk->sk_state != TCP_ESTABLISHED)
+			break;
+		if (x25->accptapprv)	/* must call accptapprv above */
+			break;
+		x25_write_internal(sk, X25_CALL_ACCEPTED);
+		x25->state = X25_STATE_3;
+		rc = 0;
+		break;
+	}
+
 	default:
 		rc = dev_ioctl(cmd, argp);
 		break;
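Note: taken together the af_x25.c hunks implement selective call-user-data matching and deferred call acceptance. A listener can (1) restrict matching to the first cudmatchlength bytes of incoming call user data, (2) opt out of the automatic Call Accepted, and (3) later send the Call Accepted explicitly from user space. A rough user-space sketch of that flow, with error handling and X.25 address setup omitted; struct x25_subaddr and the SIOCX25* numbers are assumed to be exported by the companion header change, which is outside this net-only diffstat:

    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <linux/x25.h>	/* SIOCX25* ioctls, struct x25_subaddr (assumed) */

    /* Hypothetical listener-side helper for the new accept-approval flow. */
    static int x25_listen_deferred(int lsk, unsigned int cud_match_len)
    {
    	struct x25_subaddr sub = { .cudmatchlength = cud_match_len };
    	int csk;

    	/* 1. Match calls on the first cud_match_len bytes of call user data
    	 *    (only allowed while the socket is still unconnected). */
    	if (ioctl(lsk, SIOCX25SCUDMATCHLEN, &sub) < 0)
    		return -1;

    	/* 2. Ask the stack not to send Call Accepted automatically. */
    	if (ioctl(lsk, SIOCX25CALLACCPTAPPRV, NULL) < 0)
    		return -1;

    	if (listen(lsk, 1) < 0 || (csk = accept(lsk, NULL, NULL)) < 0)
    		return -1;

    	/* 3. Inspect the call user data, then accept the call explicitly. */
    	if (ioctl(csk, SIOCX25SENDCALLACCPT, NULL) < 0)
    		return -1;
    	return csk;
    }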
diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c
index a21bdb95f9a8..54278b962f4c 100644
--- a/net/x25/x25_facilities.c
+++ b/net/x25/x25_facilities.c
@@ -17,6 +17,8 @@
  *	X.25 001	Split from x25_subr.c
  *	mar/20/00	Daniela Squassoni Disabling/enabling of facilities
  *					  negotiation.
+ *	apr/14/05	Shaun Pereira - Allow fast select with no restriction
+ *					on response.
  */
 
 #include <linux/kernel.h>
@@ -43,9 +45,31 @@ int x25_parse_facilities(struct sk_buff *skb,
 		case X25_FAC_CLASS_A:
 			switch (*p) {
 			case X25_FAC_REVERSE:
-				facilities->reverse = p[1] & 0x01;
-				*vc_fac_mask |= X25_MASK_REVERSE;
-				break;
+				if((p[1] & 0x81) == 0x81) {
+					facilities->reverse = p[1] & 0x81;
+					*vc_fac_mask |= X25_MASK_REVERSE;
+					break;
+				}
+
+				if((p[1] & 0x01) == 0x01) {
+					facilities->reverse = p[1] & 0x01;
+					*vc_fac_mask |= X25_MASK_REVERSE;
+					break;
+				}
+
+				if((p[1] & 0x80) == 0x80) {
+					facilities->reverse = p[1] & 0x80;
+					*vc_fac_mask |= X25_MASK_REVERSE;
+					break;
+				}
+
+				if(p[1] == 0x00) {
+					facilities->reverse
+						= X25_DEFAULT_REVERSE;
+					*vc_fac_mask |= X25_MASK_REVERSE;
+					break;
+				}
+
 			case X25_FAC_THROUGHPUT:
 				facilities->throughput = p[1];
 				*vc_fac_mask |= X25_MASK_THROUGHPUT;
@@ -122,7 +146,7 @@ int x25_create_facilities(unsigned char *buffer,
 
 	if (facilities->reverse && (facil_mask & X25_MASK_REVERSE)) {
 		*p++ = X25_FAC_REVERSE;
-		*p++ = !!facilities->reverse;
+		*p++ = facilities->reverse;
 	}
 
 	if (facilities->throughput && (facil_mask & X25_MASK_THROUGHPUT)) {
@@ -171,7 +195,7 @@ int x25_negotiate_facilities(struct sk_buff *skb, struct sock *sk,
 	/*
 	 *	They want reverse charging, we won't accept it.
 	 */
-	if (theirs.reverse && ours->reverse) {
+	if ((theirs.reverse & 0x01 ) && (ours->reverse & 0x01)) {
 		SOCK_DEBUG(sk, "X.25: rejecting reverse charging request");
 		return -1;
 	}
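Note: the facilities changes stop flattening the Class A reverse-charging facility octet to a single bit. Bit 0 (0x01) is reverse charging and bit 7 (0x80) is fast select with no restriction on response; both are now preserved in facilities->reverse so x25_write_internal() and the ioctl validation above can test them, and the raw octet is echoed back when facilities are rebuilt. A small user-space decoder mirroring the tests added in x25_parse_facilities() (mask values as in the hunks; what X25_DEFAULT_REVERSE expands to is defined by the header, not here):

    #include <stdio.h>

    static const char *describe_reverse_octet(unsigned char p1)
    {
    	if ((p1 & 0x81) == 0x81)
    		return "reverse charging + fast select, no restriction on response";
    	if ((p1 & 0x01) == 0x01)
    		return "reverse charging requested";
    	if ((p1 & 0x80) == 0x80)
    		return "fast select with no restriction on response";
    	if (p1 == 0x00)
    		return "facility present but zero: fall back to the default";
    	return "encoding not handled by the new parser";
    }

    int main(void)
    {
    	unsigned char samples[] = { 0x00, 0x01, 0x80, 0x81, 0x40 };
    	unsigned int i;

    	for (i = 0; i < sizeof(samples); i++)
    		printf("0x%02x -> %s\n", samples[i], describe_reverse_octet(samples[i]));
    	return 0;
    }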
diff --git a/net/x25/x25_subr.c b/net/x25/x25_subr.c
index 183fea3bba67..7fd872ad0c20 100644
--- a/net/x25/x25_subr.c
+++ b/net/x25/x25_subr.c
@@ -19,6 +19,8 @@
  *	mar/20/00	Daniela Squassoni Disabling/enabling of facilities
  *					  negotiation.
  *	jun/24/01	Arnaldo C. Melo	  use skb_queue_purge, cleanups
+ *	apr/04/15	Shaun Pereira		Fast select with no
+ *						restriction on response.
  */
 
 #include <linux/kernel.h>
@@ -127,8 +129,12 @@ void x25_write_internal(struct sock *sk, int frametype)
 		len += 1 + X25_ADDR_LEN + X25_MAX_FAC_LEN +
 		       X25_MAX_CUD_LEN;
 		break;
-	case X25_CALL_ACCEPTED:
-		len += 1 + X25_MAX_FAC_LEN + X25_MAX_CUD_LEN;
+	case X25_CALL_ACCEPTED: /* fast sel with no restr on resp */
+		if(x25->facilities.reverse & 0x80) {
+			len += 1 + X25_MAX_FAC_LEN + X25_MAX_CUD_LEN;
+		} else {
+			len += 1 + X25_MAX_FAC_LEN;
+		}
 		break;
 	case X25_CLEAR_REQUEST:
 	case X25_RESET_REQUEST:
@@ -203,9 +209,16 @@ void x25_write_internal(struct sock *sk, int frametype)
 					x25->vc_facil_mask);
 		dptr = skb_put(skb, len);
 		memcpy(dptr, facilities, len);
-		dptr = skb_put(skb, x25->calluserdata.cudlength);
-		memcpy(dptr, x25->calluserdata.cuddata,
-		       x25->calluserdata.cudlength);
+
+		/* fast select with no restriction on response
+			allows call user data. Userland must
+			ensure it is ours and not theirs */
+		if(x25->facilities.reverse & 0x80) {
+			dptr = skb_put(skb,
+				x25->calluserdata.cudlength);
+			memcpy(dptr, x25->calluserdata.cuddata,
+				x25->calluserdata.cudlength);
+		}
 		x25->calluserdata.cudlength = 0;
 		break;
 
@@ -354,21 +367,3 @@ void x25_check_rbuf(struct sock *sk)
 	}
 }
 
-/*
- *	Compare 2 calluserdata structures, used to find correct listening sockets
- *	when call user data is used.
- */
-int x25_check_calluserdata(struct x25_calluserdata *ours, struct x25_calluserdata *theirs)
-{
-	int i;
-	if (ours->cudlength != theirs->cudlength)
-		return 0;
-
-	for (i=0;i<ours->cudlength;i++) {
-		if (ours->cuddata[i] != theirs->cuddata[i]) {
-			return 0;
-		}
-	}
-	return 1;
-}
-
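Note: x25_check_calluserdata() compared two call-user-data blocks for full equality; with the new selective matching the listener lookup in af_x25.c compares only the first cudmatchlength bytes with memcmp(), so the helper has no remaining callers and is removed. The difference in miniature (buffers and lengths are made up):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
    	unsigned char ours[]   = { 0x01, 0x02, 0x03, 0x04 };		/* listener's cud      */
    	unsigned char theirs[] = { 0x01, 0x02, 0x03, 0x04, 0x05 };	/* incoming call's cud */
    	size_t cudmatchlength  = sizeof(ours);	/* as set via SIOCX25SCUDMATCHLEN */

    	/* Old behaviour (removed helper): lengths and every byte must match. */
    	int exact = sizeof(ours) == sizeof(theirs) &&
    		    memcmp(ours, theirs, sizeof(ours)) == 0;

    	/* New behaviour in x25_find_listener(): prefix match only. */
    	int prefix = sizeof(theirs) >= cudmatchlength &&
    		     memcmp(ours, theirs, cudmatchlength) == 0;

    	printf("exact=%d prefix=%d\n", exact, prefix);	/* exact=0 prefix=1 */
    	return 0;
    }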