Diffstat (limited to 'net')
51 files changed, 621 insertions, 621 deletions
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index 6cabf6d8a751..42233df2b099 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c
| @@ -1088,8 +1088,8 @@ out: | |||
| 1088 | /* | 1088 | /* |
| 1089 | * FIXME: nonblock behaviour looks like it may have a bug. | 1089 | * FIXME: nonblock behaviour looks like it may have a bug. |
| 1090 | */ | 1090 | */ |
| 1091 | static int ax25_connect(struct socket *sock, struct sockaddr *uaddr, | 1091 | static int __must_check ax25_connect(struct socket *sock, |
| 1092 | int addr_len, int flags) | 1092 | struct sockaddr *uaddr, int addr_len, int flags) |
| 1093 | { | 1093 | { |
| 1094 | struct sock *sk = sock->sk; | 1094 | struct sock *sk = sock->sk; |
| 1095 | ax25_cb *ax25 = ax25_sk(sk), *ax25t; | 1095 | ax25_cb *ax25 = ax25_sk(sk), *ax25t; |
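The __must_check annotation added to ax25_connect() (and to ax25_rt_add() further down) makes gcc warn whenever a caller drops the return value, provided CONFIG_ENABLE_MUST_CHECK is set. A minimal sketch, with hypothetical names not taken from this patch:

#include <linux/compiler.h>     /* __must_check */
#include <linux/errno.h>

/* Hypothetical functions, for illustration only. */
static int __must_check example_connect(void)
{
        return -EAGAIN;
}

static void example_caller(void)
{
        example_connect();              /* warns: ignoring return value */

        if (example_connect() < 0)      /* fine: the result is checked */
                return;
}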
diff --git a/net/ax25/ax25_iface.c b/net/ax25/ax25_iface.c index 07ac0207eb69..aff3e652c2d1 100644 --- a/net/ax25/ax25_iface.c +++ b/net/ax25/ax25_iface.c
| @@ -29,17 +29,10 @@ | |||
| 29 | #include <linux/mm.h> | 29 | #include <linux/mm.h> |
| 30 | #include <linux/interrupt.h> | 30 | #include <linux/interrupt.h> |
| 31 | 31 | ||
| 32 | static struct protocol_struct { | 32 | static struct ax25_protocol *protocol_list; |
| 33 | struct protocol_struct *next; | ||
| 34 | unsigned int pid; | ||
| 35 | int (*func)(struct sk_buff *, ax25_cb *); | ||
| 36 | } *protocol_list = NULL; | ||
| 37 | static DEFINE_RWLOCK(protocol_list_lock); | 33 | static DEFINE_RWLOCK(protocol_list_lock); |
| 38 | 34 | ||
| 39 | static struct linkfail_struct { | 35 | static HLIST_HEAD(ax25_linkfail_list); |
| 40 | struct linkfail_struct *next; | ||
| 41 | void (*func)(ax25_cb *, int); | ||
| 42 | } *linkfail_list = NULL; | ||
| 43 | static DEFINE_SPINLOCK(linkfail_lock); | 36 | static DEFINE_SPINLOCK(linkfail_lock); |
| 44 | 37 | ||
| 45 | static struct listen_struct { | 38 | static struct listen_struct { |
| @@ -49,36 +42,23 @@ static struct listen_struct { | |||
| 49 | } *listen_list = NULL; | 42 | } *listen_list = NULL; |
| 50 | static DEFINE_SPINLOCK(listen_lock); | 43 | static DEFINE_SPINLOCK(listen_lock); |
| 51 | 44 | ||
| 52 | int ax25_protocol_register(unsigned int pid, | 45 | /* |
| 53 | int (*func)(struct sk_buff *, ax25_cb *)) | 46 | * Do not register the internal protocols AX25_P_TEXT, AX25_P_SEGMENT, |
| 47 | * AX25_P_IP or AX25_P_ARP ... | ||
| 48 | */ | ||
| 49 | void ax25_register_pid(struct ax25_protocol *ap) | ||
| 54 | { | 50 | { |
| 55 | struct protocol_struct *protocol; | ||
| 56 | |||
| 57 | if (pid == AX25_P_TEXT || pid == AX25_P_SEGMENT) | ||
| 58 | return 0; | ||
| 59 | #ifdef CONFIG_INET | ||
| 60 | if (pid == AX25_P_IP || pid == AX25_P_ARP) | ||
| 61 | return 0; | ||
| 62 | #endif | ||
| 63 | if ((protocol = kmalloc(sizeof(*protocol), GFP_ATOMIC)) == NULL) | ||
| 64 | return 0; | ||
| 65 | |||
| 66 | protocol->pid = pid; | ||
| 67 | protocol->func = func; | ||
| 68 | |||
| 69 | write_lock_bh(&protocol_list_lock); | 51 | write_lock_bh(&protocol_list_lock); |
| 70 | protocol->next = protocol_list; | 52 | ap->next = protocol_list; |
| 71 | protocol_list = protocol; | 53 | protocol_list = ap; |
| 72 | write_unlock_bh(&protocol_list_lock); | 54 | write_unlock_bh(&protocol_list_lock); |
| 73 | |||
| 74 | return 1; | ||
| 75 | } | 55 | } |
| 76 | 56 | ||
| 77 | EXPORT_SYMBOL(ax25_protocol_register); | 57 | EXPORT_SYMBOL_GPL(ax25_register_pid); |
| 78 | 58 | ||
| 79 | void ax25_protocol_release(unsigned int pid) | 59 | void ax25_protocol_release(unsigned int pid) |
| 80 | { | 60 | { |
| 81 | struct protocol_struct *s, *protocol; | 61 | struct ax25_protocol *s, *protocol; |
| 82 | 62 | ||
| 83 | write_lock_bh(&protocol_list_lock); | 63 | write_lock_bh(&protocol_list_lock); |
| 84 | protocol = protocol_list; | 64 | protocol = protocol_list; |
| @@ -110,54 +90,19 @@ void ax25_protocol_release(unsigned int pid) | |||
| 110 | 90 | ||
| 111 | EXPORT_SYMBOL(ax25_protocol_release); | 91 | EXPORT_SYMBOL(ax25_protocol_release); |
| 112 | 92 | ||
| 113 | int ax25_linkfail_register(void (*func)(ax25_cb *, int)) | 93 | void ax25_linkfail_register(struct ax25_linkfail *lf) |
| 114 | { | 94 | { |
| 115 | struct linkfail_struct *linkfail; | ||
| 116 | |||
| 117 | if ((linkfail = kmalloc(sizeof(*linkfail), GFP_ATOMIC)) == NULL) | ||
| 118 | return 0; | ||
| 119 | |||
| 120 | linkfail->func = func; | ||
| 121 | |||
| 122 | spin_lock_bh(&linkfail_lock); | 95 | spin_lock_bh(&linkfail_lock); |
| 123 | linkfail->next = linkfail_list; | 96 | hlist_add_head(&lf->lf_node, &ax25_linkfail_list); |
| 124 | linkfail_list = linkfail; | ||
| 125 | spin_unlock_bh(&linkfail_lock); | 97 | spin_unlock_bh(&linkfail_lock); |
| 126 | |||
| 127 | return 1; | ||
| 128 | } | 98 | } |
| 129 | 99 | ||
| 130 | EXPORT_SYMBOL(ax25_linkfail_register); | 100 | EXPORT_SYMBOL(ax25_linkfail_register); |
| 131 | 101 | ||
| 132 | void ax25_linkfail_release(void (*func)(ax25_cb *, int)) | 102 | void ax25_linkfail_release(struct ax25_linkfail *lf) |
| 133 | { | 103 | { |
| 134 | struct linkfail_struct *s, *linkfail; | ||
| 135 | |||
| 136 | spin_lock_bh(&linkfail_lock); | 104 | spin_lock_bh(&linkfail_lock); |
| 137 | linkfail = linkfail_list; | 105 | hlist_del_init(&lf->lf_node); |
| 138 | if (linkfail == NULL) { | ||
| 139 | spin_unlock_bh(&linkfail_lock); | ||
| 140 | return; | ||
| 141 | } | ||
| 142 | |||
| 143 | if (linkfail->func == func) { | ||
| 144 | linkfail_list = linkfail->next; | ||
| 145 | spin_unlock_bh(&linkfail_lock); | ||
| 146 | kfree(linkfail); | ||
| 147 | return; | ||
| 148 | } | ||
| 149 | |||
| 150 | while (linkfail != NULL && linkfail->next != NULL) { | ||
| 151 | if (linkfail->next->func == func) { | ||
| 152 | s = linkfail->next; | ||
| 153 | linkfail->next = linkfail->next->next; | ||
| 154 | spin_unlock_bh(&linkfail_lock); | ||
| 155 | kfree(s); | ||
| 156 | return; | ||
| 157 | } | ||
| 158 | |||
| 159 | linkfail = linkfail->next; | ||
| 160 | } | ||
| 161 | spin_unlock_bh(&linkfail_lock); | 106 | spin_unlock_bh(&linkfail_lock); |
| 162 | } | 107 | } |
| 163 | 108 | ||
| @@ -171,7 +116,7 @@ int ax25_listen_register(ax25_address *callsign, struct net_device *dev) | |||
| 171 | return 0; | 116 | return 0; |
| 172 | 117 | ||
| 173 | if ((listen = kmalloc(sizeof(*listen), GFP_ATOMIC)) == NULL) | 118 | if ((listen = kmalloc(sizeof(*listen), GFP_ATOMIC)) == NULL) |
| 174 | return 0; | 119 | return -ENOMEM; |
| 175 | 120 | ||
| 176 | listen->callsign = *callsign; | 121 | listen->callsign = *callsign; |
| 177 | listen->dev = dev; | 122 | listen->dev = dev; |
| @@ -181,7 +126,7 @@ int ax25_listen_register(ax25_address *callsign, struct net_device *dev) | |||
| 181 | listen_list = listen; | 126 | listen_list = listen; |
| 182 | spin_unlock_bh(&listen_lock); | 127 | spin_unlock_bh(&listen_lock); |
| 183 | 128 | ||
| 184 | return 1; | 129 | return 0; |
| 185 | } | 130 | } |
| 186 | 131 | ||
| 187 | EXPORT_SYMBOL(ax25_listen_register); | 132 | EXPORT_SYMBOL(ax25_listen_register); |
| @@ -223,7 +168,7 @@ EXPORT_SYMBOL(ax25_listen_release); | |||
| 223 | int (*ax25_protocol_function(unsigned int pid))(struct sk_buff *, ax25_cb *) | 168 | int (*ax25_protocol_function(unsigned int pid))(struct sk_buff *, ax25_cb *) |
| 224 | { | 169 | { |
| 225 | int (*res)(struct sk_buff *, ax25_cb *) = NULL; | 170 | int (*res)(struct sk_buff *, ax25_cb *) = NULL; |
| 226 | struct protocol_struct *protocol; | 171 | struct ax25_protocol *protocol; |
| 227 | 172 | ||
| 228 | read_lock(&protocol_list_lock); | 173 | read_lock(&protocol_list_lock); |
| 229 | for (protocol = protocol_list; protocol != NULL; protocol = protocol->next) | 174 | for (protocol = protocol_list; protocol != NULL; protocol = protocol->next) |
| @@ -242,7 +187,8 @@ int ax25_listen_mine(ax25_address *callsign, struct net_device *dev) | |||
| 242 | 187 | ||
| 243 | spin_lock_bh(&listen_lock); | 188 | spin_lock_bh(&listen_lock); |
| 244 | for (listen = listen_list; listen != NULL; listen = listen->next) | 189 | for (listen = listen_list; listen != NULL; listen = listen->next) |
| 245 | if (ax25cmp(&listen->callsign, callsign) == 0 && (listen->dev == dev || listen->dev == NULL)) { | 190 | if (ax25cmp(&listen->callsign, callsign) == 0 && |
| 191 | (listen->dev == dev || listen->dev == NULL)) { | ||
| 246 | spin_unlock_bh(&listen_lock); | 192 | spin_unlock_bh(&listen_lock); |
| 247 | return 1; | 193 | return 1; |
| 248 | } | 194 | } |
| @@ -253,17 +199,18 @@ int ax25_listen_mine(ax25_address *callsign, struct net_device *dev) | |||
| 253 | 199 | ||
| 254 | void ax25_link_failed(ax25_cb *ax25, int reason) | 200 | void ax25_link_failed(ax25_cb *ax25, int reason) |
| 255 | { | 201 | { |
| 256 | struct linkfail_struct *linkfail; | 202 | struct ax25_linkfail *lf; |
| 203 | struct hlist_node *node; | ||
| 257 | 204 | ||
| 258 | spin_lock_bh(&linkfail_lock); | 205 | spin_lock_bh(&linkfail_lock); |
| 259 | for (linkfail = linkfail_list; linkfail != NULL; linkfail = linkfail->next) | 206 | hlist_for_each_entry(lf, node, &ax25_linkfail_list, lf_node) |
| 260 | (linkfail->func)(ax25, reason); | 207 | lf->func(ax25, reason); |
| 261 | spin_unlock_bh(&linkfail_lock); | 208 | spin_unlock_bh(&linkfail_lock); |
| 262 | } | 209 | } |
| 263 | 210 | ||
| 264 | int ax25_protocol_is_registered(unsigned int pid) | 211 | int ax25_protocol_is_registered(unsigned int pid) |
| 265 | { | 212 | { |
| 266 | struct protocol_struct *protocol; | 213 | struct ax25_protocol *protocol; |
| 267 | int res = 0; | 214 | int res = 0; |
| 268 | 215 | ||
| 269 | read_lock_bh(&protocol_list_lock); | 216 | read_lock_bh(&protocol_list_lock); |
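The ax25_iface.c hunks above replace the kmalloc-per-registration lists with caller-owned descriptors (struct ax25_protocol, struct ax25_linkfail, the latter chained on an hlist), so registration can no longer fail with a GFP_ATOMIC allocation error. A rough sketch of how a user such as NET/ROM would adapt; the field names (pid/func, lf_node/func) are inferred from the hunks, so check include/net/ax25.h in the matching tree before relying on them:

#include <linux/init.h>
#include <linux/skbuff.h>
#include <net/ax25.h>

/* Hypothetical receive and link-failure handlers. */
static int example_rcv(struct sk_buff *skb, ax25_cb *ax25)
{
        kfree_skb(skb);         /* a real protocol would deliver the frame */
        return 1;
}

static void example_link_failed(ax25_cb *ax25, int reason)
{
        /* tear down any state keyed on this AX.25 connection */
}

/* Descriptors now live in the caller, so registration cannot fail. */
static struct ax25_protocol example_pid = {
        .pid    = AX25_P_NETROM,
        .func   = example_rcv,
};

static struct ax25_linkfail example_linkfail = {
        .func   = example_link_failed,
};

static int __init example_init(void)
{
        /* was: ax25_protocol_register(AX25_P_NETROM, example_rcv); */
        ax25_register_pid(&example_pid);
        /* was: ax25_linkfail_register(example_link_failed); */
        ax25_linkfail_register(&example_linkfail);
        return 0;
}

static void __exit example_exit(void)
{
        ax25_protocol_release(AX25_P_NETROM);
        ax25_linkfail_release(&example_linkfail);
}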
diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c index 8580356ace5c..0a0381622b1c 100644 --- a/net/ax25/ax25_route.c +++ b/net/ax25/ax25_route.c
| @@ -71,7 +71,7 @@ void ax25_rt_device_down(struct net_device *dev) | |||
| 71 | write_unlock(&ax25_route_lock); | 71 | write_unlock(&ax25_route_lock); |
| 72 | } | 72 | } |
| 73 | 73 | ||
| 74 | static int ax25_rt_add(struct ax25_routes_struct *route) | 74 | static int __must_check ax25_rt_add(struct ax25_routes_struct *route) |
| 75 | { | 75 | { |
| 76 | ax25_route *ax25_rt; | 76 | ax25_route *ax25_rt; |
| 77 | ax25_dev *ax25_dev; | 77 | ax25_dev *ax25_dev; |
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c index 711a085eca5b..dbf98c49dbaa 100644 --- a/net/bluetooth/hci_sock.c +++ b/net/bluetooth/hci_sock.c
| @@ -123,10 +123,10 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb) | |||
| 123 | if (flt->opcode && | 123 | if (flt->opcode && |
| 124 | ((evt == HCI_EV_CMD_COMPLETE && | 124 | ((evt == HCI_EV_CMD_COMPLETE && |
| 125 | flt->opcode != | 125 | flt->opcode != |
| 126 | get_unaligned((__u16 *)(skb->data + 3))) || | 126 | get_unaligned((__le16 *)(skb->data + 3))) || |
| 127 | (evt == HCI_EV_CMD_STATUS && | 127 | (evt == HCI_EV_CMD_STATUS && |
| 128 | flt->opcode != | 128 | flt->opcode != |
| 129 | get_unaligned((__u16 *)(skb->data + 4))))) | 129 | get_unaligned((__le16 *)(skb->data + 4))))) |
| 130 | continue; | 130 | continue; |
| 131 | } | 131 | } |
| 132 | 132 | ||
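The hci_sock.c change is a sparse endianness annotation: HCI opcodes sit little-endian on the wire, so the unaligned load is typed __le16 rather than __u16. A hedged sketch of the usual pattern for such a field, with a hypothetical helper not taken from the driver:

#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>

/* Hypothetical helper: read a little-endian, possibly unaligned 16-bit
 * wire field.  Declaring the pointer as __le16 lets sparse verify that
 * the raw value is only compared with other __le16 values (as
 * flt->opcode is above) or converted with le16_to_cpu() before being
 * used in host byte order. */
static u16 example_read_opcode(const void *data)
{
        __le16 raw = get_unaligned((const __le16 *)data);

        return le16_to_cpu(raw);
}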
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c index bd221ad52eaf..ea3337ad0edc 100644 --- a/net/bridge/br_netfilter.c +++ b/net/bridge/br_netfilter.c
| @@ -61,9 +61,6 @@ static int brnf_filter_vlan_tagged __read_mostly = 1; | |||
| 61 | #define brnf_filter_vlan_tagged 1 | 61 | #define brnf_filter_vlan_tagged 1 |
| 62 | #endif | 62 | #endif |
| 63 | 63 | ||
| 64 | int brnf_deferred_hooks; | ||
| 65 | EXPORT_SYMBOL_GPL(brnf_deferred_hooks); | ||
| 66 | |||
| 67 | static __be16 inline vlan_proto(const struct sk_buff *skb) | 64 | static __be16 inline vlan_proto(const struct sk_buff *skb) |
| 68 | { | 65 | { |
| 69 | return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto; | 66 | return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto; |
| @@ -685,110 +682,50 @@ static unsigned int br_nf_forward_arp(unsigned int hook, struct sk_buff **pskb, | |||
| 685 | return NF_STOLEN; | 682 | return NF_STOLEN; |
| 686 | } | 683 | } |
| 687 | 684 | ||
| 688 | /* PF_BRIDGE/LOCAL_OUT ***********************************************/ | 685 | /* PF_BRIDGE/LOCAL_OUT *********************************************** |
| 689 | static int br_nf_local_out_finish(struct sk_buff *skb) | 686 | * |
| 690 | { | 687 | * This function sees both locally originated IP packets and forwarded |
| 691 | if (skb->protocol == htons(ETH_P_8021Q)) { | ||
| 692 | skb_push(skb, VLAN_HLEN); | ||
| 693 | skb->nh.raw -= VLAN_HLEN; | ||
| 694 | } | ||
| 695 | |||
| 696 | NF_HOOK_THRESH(PF_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev, | ||
| 697 | br_forward_finish, NF_BR_PRI_FIRST + 1); | ||
| 698 | |||
| 699 | return 0; | ||
| 700 | } | ||
| 701 | |||
| 702 | /* This function sees both locally originated IP packets and forwarded | ||
| 703 | * IP packets (in both cases the destination device is a bridge | 688 | * IP packets (in both cases the destination device is a bridge |
| 704 | * device). It also sees bridged-and-DNAT'ed packets. | 689 | * device). It also sees bridged-and-DNAT'ed packets. |
| 705 | * To be able to filter on the physical bridge devices (with the physdev | ||
| 706 | * module), we steal packets destined to a bridge device away from the | ||
| 707 | * PF_INET/FORWARD and PF_INET/OUTPUT hook functions, and give them back later, | ||
| 708 | * when we have determined the real output device. This is done in here. | ||
| 709 | * | 690 | * |
| 710 | * If (nf_bridge->mask & BRNF_BRIDGED_DNAT) then the packet is bridged | 691 | * If (nf_bridge->mask & BRNF_BRIDGED_DNAT) then the packet is bridged |
| 711 | * and we fake the PF_BRIDGE/FORWARD hook. The function br_nf_forward() | 692 | * and we fake the PF_BRIDGE/FORWARD hook. The function br_nf_forward() |
| 712 | * will then fake the PF_INET/FORWARD hook. br_nf_local_out() has priority | 693 | * will then fake the PF_INET/FORWARD hook. br_nf_local_out() has priority |
| 713 | * NF_BR_PRI_FIRST, so no relevant PF_BRIDGE/INPUT functions have been nor | 694 | * NF_BR_PRI_FIRST, so no relevant PF_BRIDGE/INPUT functions have been nor |
| 714 | * will be executed. | 695 | * will be executed. |
| 715 | * Otherwise, if nf_bridge->physindev is NULL, the bridge-nf code never touched | 696 | */ |
| 716 | * this packet before, and so the packet was locally originated. We fake | ||
| 717 | * the PF_INET/LOCAL_OUT hook. | ||
| 718 | * Finally, if nf_bridge->physindev isn't NULL, then the packet was IP routed, | ||
| 719 | * so we fake the PF_INET/FORWARD hook. ip_sabotage_out() makes sure | ||
| 720 | * even routed packets that didn't arrive on a bridge interface have their | ||
| 721 | * nf_bridge->physindev set. */ | ||
| 722 | static unsigned int br_nf_local_out(unsigned int hook, struct sk_buff **pskb, | 697 | static unsigned int br_nf_local_out(unsigned int hook, struct sk_buff **pskb, |
| 723 | const struct net_device *in, | 698 | const struct net_device *in, |
| 724 | const struct net_device *out, | 699 | const struct net_device *out, |
| 725 | int (*okfn)(struct sk_buff *)) | 700 | int (*okfn)(struct sk_buff *)) |
| 726 | { | 701 | { |
| 727 | struct net_device *realindev, *realoutdev; | 702 | struct net_device *realindev; |
| 728 | struct sk_buff *skb = *pskb; | 703 | struct sk_buff *skb = *pskb; |
| 729 | struct nf_bridge_info *nf_bridge; | 704 | struct nf_bridge_info *nf_bridge; |
| 730 | int pf; | ||
| 731 | 705 | ||
| 732 | if (!skb->nf_bridge) | 706 | if (!skb->nf_bridge) |
| 733 | return NF_ACCEPT; | 707 | return NF_ACCEPT; |
| 734 | 708 | ||
| 735 | if (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb)) | ||
| 736 | pf = PF_INET; | ||
| 737 | else | ||
| 738 | pf = PF_INET6; | ||
| 739 | |||
| 740 | nf_bridge = skb->nf_bridge; | 709 | nf_bridge = skb->nf_bridge; |
| 741 | nf_bridge->physoutdev = skb->dev; | 710 | if (!(nf_bridge->mask & BRNF_BRIDGED_DNAT)) |
| 742 | realindev = nf_bridge->physindev; | 711 | return NF_ACCEPT; |
| 743 | 712 | ||
| 744 | /* Bridged, take PF_BRIDGE/FORWARD. | 713 | /* Bridged, take PF_BRIDGE/FORWARD. |
| 745 | * (see big note in front of br_nf_pre_routing_finish) */ | 714 | * (see big note in front of br_nf_pre_routing_finish) */ |
| 746 | if (nf_bridge->mask & BRNF_BRIDGED_DNAT) { | 715 | nf_bridge->physoutdev = skb->dev; |
| 747 | if (nf_bridge->mask & BRNF_PKT_TYPE) { | 716 | realindev = nf_bridge->physindev; |
| 748 | skb->pkt_type = PACKET_OTHERHOST; | ||
| 749 | nf_bridge->mask ^= BRNF_PKT_TYPE; | ||
| 750 | } | ||
| 751 | if (skb->protocol == htons(ETH_P_8021Q)) { | ||
| 752 | skb_push(skb, VLAN_HLEN); | ||
| 753 | skb->nh.raw -= VLAN_HLEN; | ||
| 754 | } | ||
| 755 | 717 | ||
| 756 | NF_HOOK(PF_BRIDGE, NF_BR_FORWARD, skb, realindev, | 718 | if (nf_bridge->mask & BRNF_PKT_TYPE) { |
| 757 | skb->dev, br_forward_finish); | 719 | skb->pkt_type = PACKET_OTHERHOST; |
| 758 | goto out; | 720 | nf_bridge->mask ^= BRNF_PKT_TYPE; |
| 759 | } | 721 | } |
| 760 | realoutdev = bridge_parent(skb->dev); | ||
| 761 | if (!realoutdev) | ||
| 762 | return NF_DROP; | ||
| 763 | |||
| 764 | #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) | ||
| 765 | /* iptables should match -o br0.x */ | ||
| 766 | if (nf_bridge->netoutdev) | ||
| 767 | realoutdev = nf_bridge->netoutdev; | ||
| 768 | #endif | ||
| 769 | if (skb->protocol == htons(ETH_P_8021Q)) { | 722 | if (skb->protocol == htons(ETH_P_8021Q)) { |
| 770 | skb_pull(skb, VLAN_HLEN); | 723 | skb_push(skb, VLAN_HLEN); |
| 771 | (*pskb)->nh.raw += VLAN_HLEN; | 724 | skb->nh.raw -= VLAN_HLEN; |
| 772 | } | ||
| 773 | /* IP forwarded traffic has a physindev, locally | ||
| 774 | * generated traffic hasn't. */ | ||
| 775 | if (realindev != NULL) { | ||
| 776 | if (!(nf_bridge->mask & BRNF_DONT_TAKE_PARENT)) { | ||
| 777 | struct net_device *parent = bridge_parent(realindev); | ||
| 778 | if (parent) | ||
| 779 | realindev = parent; | ||
| 780 | } | ||
| 781 | |||
| 782 | NF_HOOK_THRESH(pf, NF_IP_FORWARD, skb, realindev, | ||
| 783 | realoutdev, br_nf_local_out_finish, | ||
| 784 | NF_IP_PRI_BRIDGE_SABOTAGE_FORWARD + 1); | ||
| 785 | } else { | ||
| 786 | NF_HOOK_THRESH(pf, NF_IP_LOCAL_OUT, skb, realindev, | ||
| 787 | realoutdev, br_nf_local_out_finish, | ||
| 788 | NF_IP_PRI_BRIDGE_SABOTAGE_LOCAL_OUT + 1); | ||
| 789 | } | 725 | } |
| 790 | 726 | ||
| 791 | out: | 727 | NF_HOOK(PF_BRIDGE, NF_BR_FORWARD, skb, realindev, skb->dev, |
| 728 | br_forward_finish); | ||
| 792 | return NF_STOLEN; | 729 | return NF_STOLEN; |
| 793 | } | 730 | } |
| 794 | 731 | ||
| @@ -894,69 +831,6 @@ static unsigned int ip_sabotage_in(unsigned int hook, struct sk_buff **pskb, | |||
| 894 | return NF_ACCEPT; | 831 | return NF_ACCEPT; |
| 895 | } | 832 | } |
| 896 | 833 | ||
| 897 | /* Postpone execution of PF_INET(6)/FORWARD, PF_INET(6)/LOCAL_OUT | ||
| 898 | * and PF_INET(6)/POST_ROUTING until we have done the forwarding | ||
| 899 | * decision in the bridge code and have determined nf_bridge->physoutdev. */ | ||
| 900 | static unsigned int ip_sabotage_out(unsigned int hook, struct sk_buff **pskb, | ||
| 901 | const struct net_device *in, | ||
| 902 | const struct net_device *out, | ||
| 903 | int (*okfn)(struct sk_buff *)) | ||
| 904 | { | ||
| 905 | struct sk_buff *skb = *pskb; | ||
| 906 | |||
| 907 | if ((out->hard_start_xmit == br_dev_xmit && | ||
| 908 | okfn != br_nf_forward_finish && | ||
| 909 | okfn != br_nf_local_out_finish && okfn != br_nf_dev_queue_xmit) | ||
| 910 | #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) | ||
| 911 | || ((out->priv_flags & IFF_802_1Q_VLAN) && | ||
| 912 | VLAN_DEV_INFO(out)->real_dev->hard_start_xmit == br_dev_xmit) | ||
| 913 | #endif | ||
| 914 | ) { | ||
| 915 | struct nf_bridge_info *nf_bridge; | ||
| 916 | |||
| 917 | if (!skb->nf_bridge) { | ||
| 918 | #ifdef CONFIG_SYSCTL | ||
| 919 | /* This code is executed while in the IP(v6) stack, | ||
| 920 | the version should be 4 or 6. We can't use | ||
| 921 | skb->protocol because that isn't set on | ||
| 922 | PF_INET(6)/LOCAL_OUT. */ | ||
| 923 | struct iphdr *ip = skb->nh.iph; | ||
| 924 | |||
| 925 | if (ip->version == 4 && !brnf_call_iptables) | ||
| 926 | return NF_ACCEPT; | ||
| 927 | else if (ip->version == 6 && !brnf_call_ip6tables) | ||
| 928 | return NF_ACCEPT; | ||
| 929 | else if (!brnf_deferred_hooks) | ||
| 930 | return NF_ACCEPT; | ||
| 931 | #endif | ||
| 932 | if (hook == NF_IP_POST_ROUTING) | ||
| 933 | return NF_ACCEPT; | ||
| 934 | if (!nf_bridge_alloc(skb)) | ||
| 935 | return NF_DROP; | ||
| 936 | } | ||
| 937 | |||
| 938 | nf_bridge = skb->nf_bridge; | ||
| 939 | |||
| 940 | /* This frame will arrive on PF_BRIDGE/LOCAL_OUT and we | ||
| 941 | * will need the indev then. For a brouter, the real indev | ||
| 942 | * can be a bridge port, so we make sure br_nf_local_out() | ||
| 943 | * doesn't use the bridge parent of the indev by using | ||
| 944 | * the BRNF_DONT_TAKE_PARENT mask. */ | ||
| 945 | if (hook == NF_IP_FORWARD && nf_bridge->physindev == NULL) { | ||
| 946 | nf_bridge->mask |= BRNF_DONT_TAKE_PARENT; | ||
| 947 | nf_bridge->physindev = (struct net_device *)in; | ||
| 948 | } | ||
| 949 | #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) | ||
| 950 | /* the iptables outdev is br0.x, not br0 */ | ||
| 951 | if (out->priv_flags & IFF_802_1Q_VLAN) | ||
| 952 | nf_bridge->netoutdev = (struct net_device *)out; | ||
| 953 | #endif | ||
| 954 | return NF_STOP; | ||
| 955 | } | ||
| 956 | |||
| 957 | return NF_ACCEPT; | ||
| 958 | } | ||
| 959 | |||
| 960 | /* For br_nf_local_out we need (prio = NF_BR_PRI_FIRST), to insure that innocent | 834 | /* For br_nf_local_out we need (prio = NF_BR_PRI_FIRST), to insure that innocent |
| 961 | * PF_BRIDGE/NF_BR_LOCAL_OUT functions don't get bridged traffic as input. | 835 | * PF_BRIDGE/NF_BR_LOCAL_OUT functions don't get bridged traffic as input. |
| 962 | * For br_nf_post_routing, we need (prio = NF_BR_PRI_LAST), because | 836 | * For br_nf_post_routing, we need (prio = NF_BR_PRI_LAST), because |
| @@ -1002,36 +876,6 @@ static struct nf_hook_ops br_nf_ops[] = { | |||
| 1002 | .pf = PF_INET6, | 876 | .pf = PF_INET6, |
| 1003 | .hooknum = NF_IP6_PRE_ROUTING, | 877 | .hooknum = NF_IP6_PRE_ROUTING, |
| 1004 | .priority = NF_IP6_PRI_FIRST, }, | 878 | .priority = NF_IP6_PRI_FIRST, }, |
| 1005 | { .hook = ip_sabotage_out, | ||
| 1006 | .owner = THIS_MODULE, | ||
| 1007 | .pf = PF_INET, | ||
| 1008 | .hooknum = NF_IP_FORWARD, | ||
| 1009 | .priority = NF_IP_PRI_BRIDGE_SABOTAGE_FORWARD, }, | ||
| 1010 | { .hook = ip_sabotage_out, | ||
| 1011 | .owner = THIS_MODULE, | ||
| 1012 | .pf = PF_INET6, | ||
| 1013 | .hooknum = NF_IP6_FORWARD, | ||
| 1014 | .priority = NF_IP6_PRI_BRIDGE_SABOTAGE_FORWARD, }, | ||
| 1015 | { .hook = ip_sabotage_out, | ||
| 1016 | .owner = THIS_MODULE, | ||
| 1017 | .pf = PF_INET, | ||
| 1018 | .hooknum = NF_IP_LOCAL_OUT, | ||
| 1019 | .priority = NF_IP_PRI_BRIDGE_SABOTAGE_LOCAL_OUT, }, | ||
| 1020 | { .hook = ip_sabotage_out, | ||
| 1021 | .owner = THIS_MODULE, | ||
| 1022 | .pf = PF_INET6, | ||
| 1023 | .hooknum = NF_IP6_LOCAL_OUT, | ||
| 1024 | .priority = NF_IP6_PRI_BRIDGE_SABOTAGE_LOCAL_OUT, }, | ||
| 1025 | { .hook = ip_sabotage_out, | ||
| 1026 | .owner = THIS_MODULE, | ||
| 1027 | .pf = PF_INET, | ||
| 1028 | .hooknum = NF_IP_POST_ROUTING, | ||
| 1029 | .priority = NF_IP_PRI_FIRST, }, | ||
| 1030 | { .hook = ip_sabotage_out, | ||
| 1031 | .owner = THIS_MODULE, | ||
| 1032 | .pf = PF_INET6, | ||
| 1033 | .hooknum = NF_IP6_POST_ROUTING, | ||
| 1034 | .priority = NF_IP6_PRI_FIRST, }, | ||
| 1035 | }; | 879 | }; |
| 1036 | 880 | ||
| 1037 | #ifdef CONFIG_SYSCTL | 881 | #ifdef CONFIG_SYSCTL |
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c index fa6b75372ed7..40402c59506a 100644 --- a/net/dccp/ccids/ccid3.c +++ b/net/dccp/ccids/ccid3.c
| @@ -845,8 +845,8 @@ found: | |||
| 845 | * Find some p such that f(p) = fval; return 1/p [RFC 3448, 6.3.1]. | 845 | * Find some p such that f(p) = fval; return 1/p [RFC 3448, 6.3.1]. |
| 846 | */ | 846 | */ |
| 847 | if (rtt == 0) { /* would result in divide-by-zero */ | 847 | if (rtt == 0) { /* would result in divide-by-zero */ |
| 848 | DCCP_WARN("RTT==0, returning 1/p = 1\n"); | 848 | DCCP_WARN("RTT==0\n"); |
| 849 | return 1000000; | 849 | return ~0; |
| 850 | } | 850 | } |
| 851 | 851 | ||
| 852 | dccp_timestamp(sk, &tstamp); | 852 | dccp_timestamp(sk, &tstamp); |
| @@ -858,7 +858,7 @@ found: | |||
| 858 | DCCP_WARN("X_recv==0\n"); | 858 | DCCP_WARN("X_recv==0\n"); |
| 859 | if ((x_recv = hcrx->ccid3hcrx_x_recv) == 0) { | 859 | if ((x_recv = hcrx->ccid3hcrx_x_recv) == 0) { |
| 860 | DCCP_BUG("stored value of X_recv is zero"); | 860 | DCCP_BUG("stored value of X_recv is zero"); |
| 861 | return 1000000; | 861 | return ~0; |
| 862 | } | 862 | } |
| 863 | } | 863 | } |
| 864 | 864 | ||
diff --git a/net/ieee80211/softmac/ieee80211softmac_assoc.c b/net/ieee80211/softmac/ieee80211softmac_assoc.c index e3f37fdda65f..a824852909e4 100644 --- a/net/ieee80211/softmac/ieee80211softmac_assoc.c +++ b/net/ieee80211/softmac/ieee80211softmac_assoc.c
| @@ -167,7 +167,7 @@ static void | |||
| 167 | ieee80211softmac_assoc_notify_scan(struct net_device *dev, int event_type, void *context) | 167 | ieee80211softmac_assoc_notify_scan(struct net_device *dev, int event_type, void *context) |
| 168 | { | 168 | { |
| 169 | struct ieee80211softmac_device *mac = ieee80211_priv(dev); | 169 | struct ieee80211softmac_device *mac = ieee80211_priv(dev); |
| 170 | ieee80211softmac_assoc_work((void*)mac); | 170 | ieee80211softmac_assoc_work(&mac->associnfo.work.work); |
| 171 | } | 171 | } |
| 172 | 172 | ||
| 173 | static void | 173 | static void |
| @@ -177,7 +177,7 @@ ieee80211softmac_assoc_notify_auth(struct net_device *dev, int event_type, void | |||
| 177 | 177 | ||
| 178 | switch (event_type) { | 178 | switch (event_type) { |
| 179 | case IEEE80211SOFTMAC_EVENT_AUTHENTICATED: | 179 | case IEEE80211SOFTMAC_EVENT_AUTHENTICATED: |
| 180 | ieee80211softmac_assoc_work((void*)mac); | 180 | ieee80211softmac_assoc_work(&mac->associnfo.work.work); |
| 181 | break; | 181 | break; |
| 182 | case IEEE80211SOFTMAC_EVENT_AUTH_FAILED: | 182 | case IEEE80211SOFTMAC_EVENT_AUTH_FAILED: |
| 183 | case IEEE80211SOFTMAC_EVENT_AUTH_TIMEOUT: | 183 | case IEEE80211SOFTMAC_EVENT_AUTH_TIMEOUT: |
diff --git a/net/ieee80211/softmac/ieee80211softmac_wx.c b/net/ieee80211/softmac/ieee80211softmac_wx.c index 480d72c7a42c..fa2f7da606a9 100644 --- a/net/ieee80211/softmac/ieee80211softmac_wx.c +++ b/net/ieee80211/softmac/ieee80211softmac_wx.c
| @@ -463,7 +463,7 @@ ieee80211softmac_wx_get_genie(struct net_device *dev, | |||
| 463 | err = -E2BIG; | 463 | err = -E2BIG; |
| 464 | } | 464 | } |
| 465 | spin_unlock_irqrestore(&mac->lock, flags); | 465 | spin_unlock_irqrestore(&mac->lock, flags); |
| 466 | mutex_lock(&mac->associnfo.mutex); | 466 | mutex_unlock(&mac->associnfo.mutex); |
| 467 | 467 | ||
| 468 | return err; | 468 | return err; |
| 469 | } | 469 | } |
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig index 363df9976c9d..f6026d4ac428 100644 --- a/net/ipv4/netfilter/Kconfig +++ b/net/ipv4/netfilter/Kconfig
| @@ -401,7 +401,7 @@ config IP_NF_NAT | |||
| 401 | # NAT + specific targets: nf_conntrack | 401 | # NAT + specific targets: nf_conntrack |
| 402 | config NF_NAT | 402 | config NF_NAT |
| 403 | tristate "Full NAT" | 403 | tristate "Full NAT" |
| 404 | depends on IP_NF_IPTABLES && NF_CONNTRACK | 404 | depends on IP_NF_IPTABLES && NF_CONNTRACK_IPV4 |
| 405 | help | 405 | help |
| 406 | The Full NAT option allows masquerading, port forwarding and other | 406 | The Full NAT option allows masquerading, port forwarding and other |
| 407 | forms of full Network Address Port Translation. It is controlled by | 407 | forms of full Network Address Port Translation. It is controlled by |
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 71b76ade00e1..9aa22398b3dc 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c
| @@ -358,6 +358,7 @@ static int mark_source_chains(struct xt_table_info *newinfo, | |||
| 358 | for (;;) { | 358 | for (;;) { |
| 359 | struct arpt_standard_target *t | 359 | struct arpt_standard_target *t |
| 360 | = (void *)arpt_get_target(e); | 360 | = (void *)arpt_get_target(e); |
| 361 | int visited = e->comefrom & (1 << hook); | ||
| 361 | 362 | ||
| 362 | if (e->comefrom & (1 << NF_ARP_NUMHOOKS)) { | 363 | if (e->comefrom & (1 << NF_ARP_NUMHOOKS)) { |
| 363 | printk("arptables: loop hook %u pos %u %08X.\n", | 364 | printk("arptables: loop hook %u pos %u %08X.\n", |
| @@ -368,11 +369,11 @@ static int mark_source_chains(struct xt_table_info *newinfo, | |||
| 368 | |= ((1 << hook) | (1 << NF_ARP_NUMHOOKS)); | 369 | |= ((1 << hook) | (1 << NF_ARP_NUMHOOKS)); |
| 369 | 370 | ||
| 370 | /* Unconditional return/END. */ | 371 | /* Unconditional return/END. */ |
| 371 | if (e->target_offset == sizeof(struct arpt_entry) | 372 | if ((e->target_offset == sizeof(struct arpt_entry) |
| 372 | && (strcmp(t->target.u.user.name, | 373 | && (strcmp(t->target.u.user.name, |
| 373 | ARPT_STANDARD_TARGET) == 0) | 374 | ARPT_STANDARD_TARGET) == 0) |
| 374 | && t->verdict < 0 | 375 | && t->verdict < 0 |
| 375 | && unconditional(&e->arp)) { | 376 | && unconditional(&e->arp)) || visited) { |
| 376 | unsigned int oldpos, size; | 377 | unsigned int oldpos, size; |
| 377 | 378 | ||
| 378 | if (t->verdict < -NF_MAX_VERDICT - 1) { | 379 | if (t->verdict < -NF_MAX_VERDICT - 1) { |
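In mark_source_chains(), e->comefrom doubles as a per-hook visited bitmask; the new test treats an entry already visited from this hook like an unconditional return, so crafted jump cycles cannot make the marking pass loop. The same change is applied to ip_tables.c and ip6_tables.c below. A plain-C restatement of the idea, simplified (the real code back-tracks into the calling chain rather than just stopping):

/* Plain-C sketch, not iptables code. */
struct example_entry {
        unsigned int comefrom;  /* bitmask of hooks that reached this entry */
        int next;               /* index of the next entry, or -1 to stop */
};

static void example_mark_from_hook(struct example_entry *entries,
                                   int nentries, int start, unsigned int hook)
{
        int pos = start;

        while (pos >= 0 && pos < nentries) {
                int visited = entries[pos].comefrom & (1u << hook);

                entries[pos].comefrom |= 1u << hook;
                if (visited)            /* seen before: do not follow again */
                        break;
                pos = entries[pos].next;
        }
}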
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 0ff2956d35e5..09696f16aa95 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c
| @@ -384,6 +384,7 @@ mark_source_chains(struct xt_table_info *newinfo, | |||
| 384 | for (;;) { | 384 | for (;;) { |
| 385 | struct ipt_standard_target *t | 385 | struct ipt_standard_target *t |
| 386 | = (void *)ipt_get_target(e); | 386 | = (void *)ipt_get_target(e); |
| 387 | int visited = e->comefrom & (1 << hook); | ||
| 387 | 388 | ||
| 388 | if (e->comefrom & (1 << NF_IP_NUMHOOKS)) { | 389 | if (e->comefrom & (1 << NF_IP_NUMHOOKS)) { |
| 389 | printk("iptables: loop hook %u pos %u %08X.\n", | 390 | printk("iptables: loop hook %u pos %u %08X.\n", |
| @@ -394,11 +395,11 @@ mark_source_chains(struct xt_table_info *newinfo, | |||
| 394 | |= ((1 << hook) | (1 << NF_IP_NUMHOOKS)); | 395 | |= ((1 << hook) | (1 << NF_IP_NUMHOOKS)); |
| 395 | 396 | ||
| 396 | /* Unconditional return/END. */ | 397 | /* Unconditional return/END. */ |
| 397 | if (e->target_offset == sizeof(struct ipt_entry) | 398 | if ((e->target_offset == sizeof(struct ipt_entry) |
| 398 | && (strcmp(t->target.u.user.name, | 399 | && (strcmp(t->target.u.user.name, |
| 399 | IPT_STANDARD_TARGET) == 0) | 400 | IPT_STANDARD_TARGET) == 0) |
| 400 | && t->verdict < 0 | 401 | && t->verdict < 0 |
| 401 | && unconditional(&e->ip)) { | 402 | && unconditional(&e->ip)) || visited) { |
| 402 | unsigned int oldpos, size; | 403 | unsigned int oldpos, size; |
| 403 | 404 | ||
| 404 | if (t->verdict < -NF_MAX_VERDICT - 1) { | 405 | if (t->verdict < -NF_MAX_VERDICT - 1) { |
| @@ -484,7 +485,47 @@ cleanup_match(struct ipt_entry_match *m, unsigned int *i) | |||
| 484 | } | 485 | } |
| 485 | 486 | ||
| 486 | static inline int | 487 | static inline int |
| 487 | check_match(struct ipt_entry_match *m, | 488 | check_entry(struct ipt_entry *e, const char *name) |
| 489 | { | ||
| 490 | struct ipt_entry_target *t; | ||
| 491 | |||
| 492 | if (!ip_checkentry(&e->ip)) { | ||
| 493 | duprintf("ip_tables: ip check failed %p %s.\n", e, name); | ||
| 494 | return -EINVAL; | ||
| 495 | } | ||
| 496 | |||
| 497 | if (e->target_offset + sizeof(struct ipt_entry_target) > e->next_offset) | ||
| 498 | return -EINVAL; | ||
| 499 | |||
| 500 | t = ipt_get_target(e); | ||
| 501 | if (e->target_offset + t->u.target_size > e->next_offset) | ||
| 502 | return -EINVAL; | ||
| 503 | |||
| 504 | return 0; | ||
| 505 | } | ||
| 506 | |||
| 507 | static inline int check_match(struct ipt_entry_match *m, const char *name, | ||
| 508 | const struct ipt_ip *ip, unsigned int hookmask) | ||
| 509 | { | ||
| 510 | struct ipt_match *match; | ||
| 511 | int ret; | ||
| 512 | |||
| 513 | match = m->u.kernel.match; | ||
| 514 | ret = xt_check_match(match, AF_INET, m->u.match_size - sizeof(*m), | ||
| 515 | name, hookmask, ip->proto, | ||
| 516 | ip->invflags & IPT_INV_PROTO); | ||
| 517 | if (!ret && m->u.kernel.match->checkentry | ||
| 518 | && !m->u.kernel.match->checkentry(name, ip, match, m->data, | ||
| 519 | hookmask)) { | ||
| 520 | duprintf("ip_tables: check failed for `%s'.\n", | ||
| 521 | m->u.kernel.match->name); | ||
| 522 | ret = -EINVAL; | ||
| 523 | } | ||
| 524 | return ret; | ||
| 525 | } | ||
| 526 | |||
| 527 | static inline int | ||
| 528 | find_check_match(struct ipt_entry_match *m, | ||
| 488 | const char *name, | 529 | const char *name, |
| 489 | const struct ipt_ip *ip, | 530 | const struct ipt_ip *ip, |
| 490 | unsigned int hookmask, | 531 | unsigned int hookmask, |
| @@ -497,26 +538,15 @@ check_match(struct ipt_entry_match *m, | |||
| 497 | m->u.user.revision), | 538 | m->u.user.revision), |
| 498 | "ipt_%s", m->u.user.name); | 539 | "ipt_%s", m->u.user.name); |
| 499 | if (IS_ERR(match) || !match) { | 540 | if (IS_ERR(match) || !match) { |
| 500 | duprintf("check_match: `%s' not found\n", m->u.user.name); | 541 | duprintf("find_check_match: `%s' not found\n", m->u.user.name); |
| 501 | return match ? PTR_ERR(match) : -ENOENT; | 542 | return match ? PTR_ERR(match) : -ENOENT; |
| 502 | } | 543 | } |
| 503 | m->u.kernel.match = match; | 544 | m->u.kernel.match = match; |
| 504 | 545 | ||
| 505 | ret = xt_check_match(match, AF_INET, m->u.match_size - sizeof(*m), | 546 | ret = check_match(m, name, ip, hookmask); |
| 506 | name, hookmask, ip->proto, | ||
| 507 | ip->invflags & IPT_INV_PROTO); | ||
| 508 | if (ret) | 547 | if (ret) |
| 509 | goto err; | 548 | goto err; |
| 510 | 549 | ||
| 511 | if (m->u.kernel.match->checkentry | ||
| 512 | && !m->u.kernel.match->checkentry(name, ip, match, m->data, | ||
| 513 | hookmask)) { | ||
| 514 | duprintf("ip_tables: check failed for `%s'.\n", | ||
| 515 | m->u.kernel.match->name); | ||
| 516 | ret = -EINVAL; | ||
| 517 | goto err; | ||
| 518 | } | ||
| 519 | |||
| 520 | (*i)++; | 550 | (*i)++; |
| 521 | return 0; | 551 | return 0; |
| 522 | err: | 552 | err: |
| @@ -524,10 +554,29 @@ err: | |||
| 524 | return ret; | 554 | return ret; |
| 525 | } | 555 | } |
| 526 | 556 | ||
| 527 | static struct ipt_target ipt_standard_target; | 557 | static inline int check_target(struct ipt_entry *e, const char *name) |
| 558 | { | ||
| 559 | struct ipt_entry_target *t; | ||
| 560 | struct ipt_target *target; | ||
| 561 | int ret; | ||
| 562 | |||
| 563 | t = ipt_get_target(e); | ||
| 564 | target = t->u.kernel.target; | ||
| 565 | ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t), | ||
| 566 | name, e->comefrom, e->ip.proto, | ||
| 567 | e->ip.invflags & IPT_INV_PROTO); | ||
| 568 | if (!ret && t->u.kernel.target->checkentry | ||
| 569 | && !t->u.kernel.target->checkentry(name, e, target, | ||
| 570 | t->data, e->comefrom)) { | ||
| 571 | duprintf("ip_tables: check failed for `%s'.\n", | ||
| 572 | t->u.kernel.target->name); | ||
| 573 | ret = -EINVAL; | ||
| 574 | } | ||
| 575 | return ret; | ||
| 576 | } | ||
| 528 | 577 | ||
| 529 | static inline int | 578 | static inline int |
| 530 | check_entry(struct ipt_entry *e, const char *name, unsigned int size, | 579 | find_check_entry(struct ipt_entry *e, const char *name, unsigned int size, |
| 531 | unsigned int *i) | 580 | unsigned int *i) |
| 532 | { | 581 | { |
| 533 | struct ipt_entry_target *t; | 582 | struct ipt_entry_target *t; |
| @@ -535,49 +584,32 @@ check_entry(struct ipt_entry *e, const char *name, unsigned int size, | |||
| 535 | int ret; | 584 | int ret; |
| 536 | unsigned int j; | 585 | unsigned int j; |
| 537 | 586 | ||
| 538 | if (!ip_checkentry(&e->ip)) { | 587 | ret = check_entry(e, name); |
| 539 | duprintf("ip_tables: ip check failed %p %s.\n", e, name); | 588 | if (ret) |
| 540 | return -EINVAL; | 589 | return ret; |
| 541 | } | ||
| 542 | |||
| 543 | if (e->target_offset + sizeof(struct ipt_entry_target) > e->next_offset) | ||
| 544 | return -EINVAL; | ||
| 545 | 590 | ||
| 546 | j = 0; | 591 | j = 0; |
| 547 | ret = IPT_MATCH_ITERATE(e, check_match, name, &e->ip, e->comefrom, &j); | 592 | ret = IPT_MATCH_ITERATE(e, find_check_match, name, &e->ip, |
| 593 | e->comefrom, &j); | ||
| 548 | if (ret != 0) | 594 | if (ret != 0) |
| 549 | goto cleanup_matches; | 595 | goto cleanup_matches; |
| 550 | 596 | ||
| 551 | t = ipt_get_target(e); | 597 | t = ipt_get_target(e); |
| 552 | ret = -EINVAL; | ||
| 553 | if (e->target_offset + t->u.target_size > e->next_offset) | ||
| 554 | goto cleanup_matches; | ||
| 555 | target = try_then_request_module(xt_find_target(AF_INET, | 598 | target = try_then_request_module(xt_find_target(AF_INET, |
| 556 | t->u.user.name, | 599 | t->u.user.name, |
| 557 | t->u.user.revision), | 600 | t->u.user.revision), |
| 558 | "ipt_%s", t->u.user.name); | 601 | "ipt_%s", t->u.user.name); |
| 559 | if (IS_ERR(target) || !target) { | 602 | if (IS_ERR(target) || !target) { |
| 560 | duprintf("check_entry: `%s' not found\n", t->u.user.name); | 603 | duprintf("find_check_entry: `%s' not found\n", t->u.user.name); |
| 561 | ret = target ? PTR_ERR(target) : -ENOENT; | 604 | ret = target ? PTR_ERR(target) : -ENOENT; |
| 562 | goto cleanup_matches; | 605 | goto cleanup_matches; |
| 563 | } | 606 | } |
| 564 | t->u.kernel.target = target; | 607 | t->u.kernel.target = target; |
| 565 | 608 | ||
| 566 | ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t), | 609 | ret = check_target(e, name); |
| 567 | name, e->comefrom, e->ip.proto, | ||
| 568 | e->ip.invflags & IPT_INV_PROTO); | ||
| 569 | if (ret) | 610 | if (ret) |
| 570 | goto err; | 611 | goto err; |
| 571 | 612 | ||
| 572 | if (t->u.kernel.target->checkentry | ||
| 573 | && !t->u.kernel.target->checkentry(name, e, target, t->data, | ||
| 574 | e->comefrom)) { | ||
| 575 | duprintf("ip_tables: check failed for `%s'.\n", | ||
| 576 | t->u.kernel.target->name); | ||
| 577 | ret = -EINVAL; | ||
| 578 | goto err; | ||
| 579 | } | ||
| 580 | |||
| 581 | (*i)++; | 613 | (*i)++; |
| 582 | return 0; | 614 | return 0; |
| 583 | err: | 615 | err: |
| @@ -712,7 +744,7 @@ translate_table(const char *name, | |||
| 712 | /* Finally, each sanity check must pass */ | 744 | /* Finally, each sanity check must pass */ |
| 713 | i = 0; | 745 | i = 0; |
| 714 | ret = IPT_ENTRY_ITERATE(entry0, newinfo->size, | 746 | ret = IPT_ENTRY_ITERATE(entry0, newinfo->size, |
| 715 | check_entry, name, size, &i); | 747 | find_check_entry, name, size, &i); |
| 716 | 748 | ||
| 717 | if (ret != 0) { | 749 | if (ret != 0) { |
| 718 | IPT_ENTRY_ITERATE(entry0, newinfo->size, | 750 | IPT_ENTRY_ITERATE(entry0, newinfo->size, |
| @@ -1452,14 +1484,9 @@ check_compat_entry_size_and_hooks(struct ipt_entry *e, | |||
| 1452 | return -EINVAL; | 1484 | return -EINVAL; |
| 1453 | } | 1485 | } |
| 1454 | 1486 | ||
| 1455 | if (!ip_checkentry(&e->ip)) { | 1487 | ret = check_entry(e, name); |
| 1456 | duprintf("ip_tables: ip check failed %p %s.\n", e, name); | 1488 | if (ret) |
| 1457 | return -EINVAL; | 1489 | return ret; |
| 1458 | } | ||
| 1459 | |||
| 1460 | if (e->target_offset + sizeof(struct compat_xt_entry_target) > | ||
| 1461 | e->next_offset) | ||
| 1462 | return -EINVAL; | ||
| 1463 | 1490 | ||
| 1464 | off = 0; | 1491 | off = 0; |
| 1465 | entry_offset = (void *)e - (void *)base; | 1492 | entry_offset = (void *)e - (void *)base; |
| @@ -1470,15 +1497,13 @@ check_compat_entry_size_and_hooks(struct ipt_entry *e, | |||
| 1470 | goto cleanup_matches; | 1497 | goto cleanup_matches; |
| 1471 | 1498 | ||
| 1472 | t = ipt_get_target(e); | 1499 | t = ipt_get_target(e); |
| 1473 | ret = -EINVAL; | ||
| 1474 | if (e->target_offset + t->u.target_size > e->next_offset) | ||
| 1475 | goto cleanup_matches; | ||
| 1476 | target = try_then_request_module(xt_find_target(AF_INET, | 1500 | target = try_then_request_module(xt_find_target(AF_INET, |
| 1477 | t->u.user.name, | 1501 | t->u.user.name, |
| 1478 | t->u.user.revision), | 1502 | t->u.user.revision), |
| 1479 | "ipt_%s", t->u.user.name); | 1503 | "ipt_%s", t->u.user.name); |
| 1480 | if (IS_ERR(target) || !target) { | 1504 | if (IS_ERR(target) || !target) { |
| 1481 | duprintf("check_entry: `%s' not found\n", t->u.user.name); | 1505 | duprintf("check_compat_entry_size_and_hooks: `%s' not found\n", |
| 1506 | t->u.user.name); | ||
| 1482 | ret = target ? PTR_ERR(target) : -ENOENT; | 1507 | ret = target ? PTR_ERR(target) : -ENOENT; |
| 1483 | goto cleanup_matches; | 1508 | goto cleanup_matches; |
| 1484 | } | 1509 | } |
| @@ -1555,57 +1580,15 @@ static int compat_copy_entry_from_user(struct ipt_entry *e, void **dstptr, | |||
| 1555 | return ret; | 1580 | return ret; |
| 1556 | } | 1581 | } |
| 1557 | 1582 | ||
| 1558 | static inline int compat_check_match(struct ipt_entry_match *m, const char *name, | ||
| 1559 | const struct ipt_ip *ip, unsigned int hookmask) | ||
| 1560 | { | ||
| 1561 | struct ipt_match *match; | ||
| 1562 | int ret; | ||
| 1563 | |||
| 1564 | match = m->u.kernel.match; | ||
| 1565 | ret = xt_check_match(match, AF_INET, m->u.match_size - sizeof(*m), | ||
| 1566 | name, hookmask, ip->proto, | ||
| 1567 | ip->invflags & IPT_INV_PROTO); | ||
| 1568 | if (!ret && m->u.kernel.match->checkentry | ||
| 1569 | && !m->u.kernel.match->checkentry(name, ip, match, m->data, | ||
| 1570 | hookmask)) { | ||
| 1571 | duprintf("ip_tables: compat: check failed for `%s'.\n", | ||
| 1572 | m->u.kernel.match->name); | ||
| 1573 | ret = -EINVAL; | ||
| 1574 | } | ||
| 1575 | return ret; | ||
| 1576 | } | ||
| 1577 | |||
| 1578 | static inline int compat_check_target(struct ipt_entry *e, const char *name) | ||
| 1579 | { | ||
| 1580 | struct ipt_entry_target *t; | ||
| 1581 | struct ipt_target *target; | ||
| 1582 | int ret; | ||
| 1583 | |||
| 1584 | t = ipt_get_target(e); | ||
| 1585 | target = t->u.kernel.target; | ||
| 1586 | ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t), | ||
| 1587 | name, e->comefrom, e->ip.proto, | ||
| 1588 | e->ip.invflags & IPT_INV_PROTO); | ||
| 1589 | if (!ret && t->u.kernel.target->checkentry | ||
| 1590 | && !t->u.kernel.target->checkentry(name, e, target, | ||
| 1591 | t->data, e->comefrom)) { | ||
| 1592 | duprintf("ip_tables: compat: check failed for `%s'.\n", | ||
| 1593 | t->u.kernel.target->name); | ||
| 1594 | ret = -EINVAL; | ||
| 1595 | } | ||
| 1596 | return ret; | ||
| 1597 | } | ||
| 1598 | |||
| 1599 | static inline int compat_check_entry(struct ipt_entry *e, const char *name) | 1583 | static inline int compat_check_entry(struct ipt_entry *e, const char *name) |
| 1600 | { | 1584 | { |
| 1601 | int ret; | 1585 | int ret; |
| 1602 | 1586 | ||
| 1603 | ret = IPT_MATCH_ITERATE(e, compat_check_match, name, &e->ip, | 1587 | ret = IPT_MATCH_ITERATE(e, check_match, name, &e->ip, e->comefrom); |
| 1604 | e->comefrom); | ||
| 1605 | if (ret) | 1588 | if (ret) |
| 1606 | return ret; | 1589 | return ret; |
| 1607 | 1590 | ||
| 1608 | return compat_check_target(e, name); | 1591 | return check_target(e, name); |
| 1609 | } | 1592 | } |
| 1610 | 1593 | ||
| 1611 | static int | 1594 | static int |
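The larger ip_tables.c rework splits rule validation into check_entry() for bounds and IP sanity, check_match()/check_target() for the xt core checks plus the extension's own ->checkentry() hook, and find_check_*() for module lookup, so the compat path can reuse the same checks instead of carrying its own copies. A sketch of the extension hook those helpers end up invoking, using the era's signature as it appears in the hunks; the match itself and its table restriction are hypothetical:

#include <linux/string.h>
#include <linux/netfilter/x_tables.h>

/* Hypothetical ->checkentry() for a match extension: called by
 * check_match() above after xt_check_match() has validated sizes,
 * family, protocol and hook mask.  Returning 0 rejects the rule. */
static int example_match_checkentry(const char *tablename, const void *ip,
                                    const struct xt_match *match,
                                    void *matchinfo, unsigned int hook_mask)
{
        /* illustrative restriction: only allow this match in "mangle" */
        if (strcmp(tablename, "mangle") != 0)
                return 0;

        return 1;
}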
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c index fef56ae61abe..b1c11160b9de 100644 --- a/net/ipv4/netfilter/ipt_CLUSTERIP.c +++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
| @@ -447,6 +447,12 @@ checkentry(const char *tablename, | |||
| 447 | cipinfo->config = config; | 447 | cipinfo->config = config; |
| 448 | } | 448 | } |
| 449 | 449 | ||
| 450 | if (nf_ct_l3proto_try_module_get(target->family) < 0) { | ||
| 451 | printk(KERN_WARNING "can't load conntrack support for " | ||
| 452 | "proto=%d\n", target->family); | ||
| 453 | return 0; | ||
| 454 | } | ||
| 455 | |||
| 450 | return 1; | 456 | return 1; |
| 451 | } | 457 | } |
| 452 | 458 | ||
| @@ -460,6 +466,8 @@ static void destroy(const struct xt_target *target, void *targinfo) | |||
| 460 | clusterip_config_entry_put(cipinfo->config); | 466 | clusterip_config_entry_put(cipinfo->config); |
| 461 | 467 | ||
| 462 | clusterip_config_put(cipinfo->config); | 468 | clusterip_config_put(cipinfo->config); |
| 469 | |||
| 470 | nf_ct_l3proto_module_put(target->family); | ||
| 463 | } | 471 | } |
| 464 | 472 | ||
| 465 | static struct ipt_target clusterip_tgt = { | 473 | static struct ipt_target clusterip_tgt = { |
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 1aaff0a2e098..2daa0dc19d33 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c
| @@ -1325,7 +1325,8 @@ void ip_rt_send_redirect(struct sk_buff *skb) | |||
| 1325 | /* Check for load limit; set rate_last to the latest sent | 1325 | /* Check for load limit; set rate_last to the latest sent |
| 1326 | * redirect. | 1326 | * redirect. |
| 1327 | */ | 1327 | */ |
| 1328 | if (time_after(jiffies, | 1328 | if (rt->u.dst.rate_tokens == 0 || |
| 1329 | time_after(jiffies, | ||
| 1329 | (rt->u.dst.rate_last + | 1330 | (rt->u.dst.rate_last + |
| 1330 | (ip_rt_redirect_load << rt->u.dst.rate_tokens)))) { | 1331 | (ip_rt_redirect_load << rt->u.dst.rate_tokens)))) { |
| 1331 | icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway); | 1332 | icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway); |
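The route.c change lets the very first redirect for a destination (rate_tokens == 0) out immediately instead of making it wait for the back-off window. Restated as a hypothetical helper:

#include <linux/jiffies.h>

/* Send if this is the first redirect, or if
 * redirect_load << rate_tokens jiffies have passed since the last one
 * (exponential back-off). */
static int example_may_send_redirect(unsigned long rate_last,
                                     unsigned int rate_tokens,
                                     unsigned long redirect_load)
{
        return rate_tokens == 0 ||
               time_after(jiffies, rate_last + (redirect_load << rate_tokens));
}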
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 090c690627e5..b67e0dd743be 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c
| @@ -2364,8 +2364,9 @@ struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu) | |||
| 2364 | 2364 | ||
| 2365 | EXPORT_SYMBOL(__tcp_get_md5sig_pool); | 2365 | EXPORT_SYMBOL(__tcp_get_md5sig_pool); |
| 2366 | 2366 | ||
| 2367 | void __tcp_put_md5sig_pool(void) { | 2367 | void __tcp_put_md5sig_pool(void) |
| 2368 | __tcp_free_md5sig_pool(tcp_md5sig_pool); | 2368 | { |
| 2369 | tcp_free_md5sig_pool(); | ||
| 2369 | } | 2370 | } |
| 2370 | 2371 | ||
| 2371 | EXPORT_SYMBOL(__tcp_put_md5sig_pool); | 2372 | EXPORT_SYMBOL(__tcp_put_md5sig_pool); |
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index a1222d6968c4..bf7a22412bcb 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c
| @@ -928,6 +928,7 @@ int tcp_v4_md5_do_del(struct sock *sk, __be32 addr) | |||
| 928 | if (tp->md5sig_info->entries4 == 0) { | 928 | if (tp->md5sig_info->entries4 == 0) { |
| 929 | kfree(tp->md5sig_info->keys4); | 929 | kfree(tp->md5sig_info->keys4); |
| 930 | tp->md5sig_info->keys4 = NULL; | 930 | tp->md5sig_info->keys4 = NULL; |
| 931 | tp->md5sig_info->alloced4 = 0; | ||
| 931 | } else if (tp->md5sig_info->entries4 != i) { | 932 | } else if (tp->md5sig_info->entries4 != i) { |
| 932 | /* Need to do some manipulation */ | 933 | /* Need to do some manipulation */ |
| 933 | memcpy(&tp->md5sig_info->keys4[i], | 934 | memcpy(&tp->md5sig_info->keys4[i], |
| @@ -1185,7 +1186,7 @@ done_opts: | |||
| 1185 | return 0; | 1186 | return 0; |
| 1186 | 1187 | ||
| 1187 | if (hash_expected && !hash_location) { | 1188 | if (hash_expected && !hash_location) { |
| 1188 | LIMIT_NETDEBUG(KERN_INFO "MD5 Hash NOT expected but found " | 1189 | LIMIT_NETDEBUG(KERN_INFO "MD5 Hash expected but NOT found " |
| 1189 | "(" NIPQUAD_FMT ", %d)->(" NIPQUAD_FMT ", %d)\n", | 1190 | "(" NIPQUAD_FMT ", %d)->(" NIPQUAD_FMT ", %d)\n", |
| 1190 | NIPQUAD(iph->saddr), ntohs(th->source), | 1191 | NIPQUAD(iph->saddr), ntohs(th->source), |
| 1191 | NIPQUAD(iph->daddr), ntohs(th->dest)); | 1192 | NIPQUAD(iph->daddr), ntohs(th->dest)); |
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 035915fc9ed3..cfff930f2baf 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c
| @@ -165,11 +165,14 @@ int __udp_lib_get_port(struct sock *sk, unsigned short snum, | |||
| 165 | goto gotit; | 165 | goto gotit; |
| 166 | } | 166 | } |
| 167 | size = 0; | 167 | size = 0; |
| 168 | sk_for_each(sk2, node, head) | 168 | sk_for_each(sk2, node, head) { |
| 169 | if (++size < best_size_so_far) { | 169 | if (++size >= best_size_so_far) |
| 170 | best_size_so_far = size; | 170 | goto next; |
| 171 | best = result; | 171 | } |
| 172 | } | 172 | best_size_so_far = size; |
| 173 | best = result; | ||
| 174 | next: | ||
| 175 | ; | ||
| 173 | } | 176 | } |
| 174 | result = best; | 177 | result = best; |
| 175 | for(i = 0; i < (1 << 16) / UDP_HTABLE_SIZE; i++, result += UDP_HTABLE_SIZE) { | 178 | for(i = 0; i < (1 << 16) / UDP_HTABLE_SIZE; i++, result += UDP_HTABLE_SIZE) { |
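The old udp.c loop updated best_size_so_far from a running count while still in the middle of a chain, so a long chain could be recorded as best after being only partially counted. The rewrite abandons a chain as soon as it is already at least as long as the current best, and records a new best only otherwise. A standalone restatement of the corrected selection, with a chain_len[] array standing in for the sk_for_each() walk:

/* Standalone sketch, not kernel code: choose the bucket with the
 * shortest chain by comparing complete chain lengths. */
static unsigned int example_pick_bucket(const unsigned int *chain_len,
                                        unsigned int nr_buckets)
{
        unsigned int best = 0, best_size = ~0u;
        unsigned int i;

        for (i = 0; i < nr_buckets; i++) {
                if (chain_len[i] >= best_size)
                        continue;       /* like the new "goto next" */
                best_size = chain_len[i];
                best = i;
        }
        return best;
}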
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 1eafcfc95e81..352690e2ab82 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c
| @@ -978,12 +978,27 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, | |||
| 978 | break; | 978 | break; |
| 979 | 979 | ||
| 980 | case IPV6_UNICAST_HOPS: | 980 | case IPV6_UNICAST_HOPS: |
| 981 | val = np->hop_limit; | ||
| 982 | break; | ||
| 983 | |||
| 984 | case IPV6_MULTICAST_HOPS: | 981 | case IPV6_MULTICAST_HOPS: |
| 985 | val = np->mcast_hops; | 982 | { |
| 983 | struct dst_entry *dst; | ||
| 984 | |||
| 985 | if (optname == IPV6_UNICAST_HOPS) | ||
| 986 | val = np->hop_limit; | ||
| 987 | else | ||
| 988 | val = np->mcast_hops; | ||
| 989 | |||
| 990 | dst = sk_dst_get(sk); | ||
| 991 | if (dst) { | ||
| 992 | if (val < 0) | ||
| 993 | val = dst_metric(dst, RTAX_HOPLIMIT); | ||
| 994 | if (val < 0) | ||
| 995 | val = ipv6_get_hoplimit(dst->dev); | ||
| 996 | dst_release(dst); | ||
| 997 | } | ||
| 998 | if (val < 0) | ||
| 999 | val = ipv6_devconf.hop_limit; | ||
| 986 | break; | 1000 | break; |
| 1001 | } | ||
| 987 | 1002 | ||
| 988 | case IPV6_MULTICAST_LOOP: | 1003 | case IPV6_MULTICAST_LOOP: |
| 989 | val = np->mc_loop; | 1004 | val = np->mc_loop; |
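getsockopt(IPV6_UNICAST_HOPS/IPV6_MULTICAST_HOPS) now falls back when the per-socket value is unset (negative): route metric, then device, then the global default. A minimal restatement as a hypothetical helper, where a negative value means "not configured at this level":

/* Fallback chain implemented by the new code: per-socket value ->
 * RTAX_HOPLIMIT route metric -> device limit ->
 * global ipv6_devconf.hop_limit default. */
static int example_effective_hoplimit(int sock_val, int route_metric,
                                      int dev_val, int global_default)
{
        int val = sock_val;

        if (val < 0)
                val = route_metric;
        if (val < 0)
                val = dev_val;
        if (val < 0)
                val = global_default;
        return val;
}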
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig index fc3e5eb4bc3f..adcd6131df2a 100644 --- a/net/ipv6/netfilter/Kconfig +++ b/net/ipv6/netfilter/Kconfig
| @@ -7,7 +7,7 @@ menu "IPv6: Netfilter Configuration (EXPERIMENTAL)" | |||
| 7 | 7 | ||
| 8 | config NF_CONNTRACK_IPV6 | 8 | config NF_CONNTRACK_IPV6 |
| 9 | tristate "IPv6 connection tracking support (EXPERIMENTAL)" | 9 | tristate "IPv6 connection tracking support (EXPERIMENTAL)" |
| 10 | depends on EXPERIMENTAL && NF_CONNTRACK | 10 | depends on INET && IPV6 && EXPERIMENTAL && NF_CONNTRACK |
| 11 | ---help--- | 11 | ---help--- |
| 12 | Connection tracking keeps a record of what packets have passed | 12 | Connection tracking keeps a record of what packets have passed |
| 13 | through your machine, in order to figure out how they are related | 13 | through your machine, in order to figure out how they are related |
| @@ -21,6 +21,7 @@ config NF_CONNTRACK_IPV6 | |||
| 21 | 21 | ||
| 22 | config IP6_NF_QUEUE | 22 | config IP6_NF_QUEUE |
| 23 | tristate "IP6 Userspace queueing via NETLINK (OBSOLETE)" | 23 | tristate "IP6 Userspace queueing via NETLINK (OBSOLETE)" |
| 24 | depends on INET && IPV6 && NETFILTER && EXPERIMENTAL | ||
| 24 | ---help--- | 25 | ---help--- |
| 25 | 26 | ||
| 26 | This option adds a queue handler to the kernel for IPv6 | 27 | This option adds a queue handler to the kernel for IPv6 |
| @@ -41,7 +42,7 @@ config IP6_NF_QUEUE | |||
| 41 | 42 | ||
| 42 | config IP6_NF_IPTABLES | 43 | config IP6_NF_IPTABLES |
| 43 | tristate "IP6 tables support (required for filtering)" | 44 | tristate "IP6 tables support (required for filtering)" |
| 44 | depends on NETFILTER_XTABLES | 45 | depends on INET && IPV6 && EXPERIMENTAL && NETFILTER_XTABLES |
| 45 | help | 46 | help |
| 46 | ip6tables is a general, extensible packet identification framework. | 47 | ip6tables is a general, extensible packet identification framework. |
| 47 | Currently only the packet filtering and packet mangling subsystem | 48 | Currently only the packet filtering and packet mangling subsystem |
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 4eec4b3988b8..99502c5da4c4 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c
| @@ -413,6 +413,7 @@ mark_source_chains(struct xt_table_info *newinfo, | |||
| 413 | unsigned int pos = newinfo->hook_entry[hook]; | 413 | unsigned int pos = newinfo->hook_entry[hook]; |
| 414 | struct ip6t_entry *e | 414 | struct ip6t_entry *e |
| 415 | = (struct ip6t_entry *)(entry0 + pos); | 415 | = (struct ip6t_entry *)(entry0 + pos); |
| 416 | int visited = e->comefrom & (1 << hook); | ||
| 416 | 417 | ||
| 417 | if (!(valid_hooks & (1 << hook))) | 418 | if (!(valid_hooks & (1 << hook))) |
| 418 | continue; | 419 | continue; |
| @@ -433,11 +434,11 @@ mark_source_chains(struct xt_table_info *newinfo, | |||
| 433 | |= ((1 << hook) | (1 << NF_IP6_NUMHOOKS)); | 434 | |= ((1 << hook) | (1 << NF_IP6_NUMHOOKS)); |
| 434 | 435 | ||
| 435 | /* Unconditional return/END. */ | 436 | /* Unconditional return/END. */ |
| 436 | if (e->target_offset == sizeof(struct ip6t_entry) | 437 | if ((e->target_offset == sizeof(struct ip6t_entry) |
| 437 | && (strcmp(t->target.u.user.name, | 438 | && (strcmp(t->target.u.user.name, |
| 438 | IP6T_STANDARD_TARGET) == 0) | 439 | IP6T_STANDARD_TARGET) == 0) |
| 439 | && t->verdict < 0 | 440 | && t->verdict < 0 |
| 440 | && unconditional(&e->ipv6)) { | 441 | && unconditional(&e->ipv6)) || visited) { |
| 441 | unsigned int oldpos, size; | 442 | unsigned int oldpos, size; |
| 442 | 443 | ||
| 443 | if (t->verdict < -NF_MAX_VERDICT - 1) { | 444 | if (t->verdict < -NF_MAX_VERDICT - 1) { |
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 9f80518aacbd..8c3d56871b50 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c
| @@ -494,7 +494,7 @@ do { \ | |||
| 494 | goto out; \ | 494 | goto out; \ |
| 495 | pn = fn->parent; \ | 495 | pn = fn->parent; \ |
| 496 | if (FIB6_SUBTREE(pn) && FIB6_SUBTREE(pn) != fn) \ | 496 | if (FIB6_SUBTREE(pn) && FIB6_SUBTREE(pn) != fn) \ |
| 497 | fn = fib6_lookup(pn->subtree, NULL, saddr); \ | 497 | fn = fib6_lookup(FIB6_SUBTREE(pn), NULL, saddr); \ |
| 498 | else \ | 498 | else \ |
| 499 | fn = pn; \ | 499 | fn = pn; \ |
| 500 | if (fn->fn_flags & RTN_RTINFO) \ | 500 | if (fn->fn_flags & RTN_RTINFO) \ |
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index 3a66878a1829..1b853c34d301 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig
| @@ -1,5 +1,5 @@ | |||
| 1 | menu "Core Netfilter Configuration" | 1 | menu "Core Netfilter Configuration" |
| 2 | depends on NET && NETFILTER | 2 | depends on NET && INET && NETFILTER |
| 3 | 3 | ||
| 4 | config NETFILTER_NETLINK | 4 | config NETFILTER_NETLINK |
| 5 | tristate "Netfilter netlink interface" | 5 | tristate "Netfilter netlink interface" |
diff --git a/net/netfilter/xt_CONNMARK.c b/net/netfilter/xt_CONNMARK.c index b5548239d412..0534bfa65cce 100644 --- a/net/netfilter/xt_CONNMARK.c +++ b/net/netfilter/xt_CONNMARK.c
| @@ -96,6 +96,11 @@ checkentry(const char *tablename, | |||
| 96 | { | 96 | { |
| 97 | struct xt_connmark_target_info *matchinfo = targinfo; | 97 | struct xt_connmark_target_info *matchinfo = targinfo; |
| 98 | 98 | ||
| 99 | if (nf_ct_l3proto_try_module_get(target->family) < 0) { | ||
| 100 | printk(KERN_WARNING "can't load conntrack support for " | ||
| 101 | "proto=%d\n", target->family); | ||
| 102 | return 0; | ||
| 103 | } | ||
| 99 | if (matchinfo->mode == XT_CONNMARK_RESTORE) { | 104 | if (matchinfo->mode == XT_CONNMARK_RESTORE) { |
| 100 | if (strcmp(tablename, "mangle") != 0) { | 105 | if (strcmp(tablename, "mangle") != 0) { |
| 101 | printk(KERN_WARNING "CONNMARK: restore can only be " | 106 | printk(KERN_WARNING "CONNMARK: restore can only be " |
| @@ -111,6 +116,12 @@ checkentry(const char *tablename, | |||
| 111 | return 1; | 116 | return 1; |
| 112 | } | 117 | } |
| 113 | 118 | ||
| 119 | static void | ||
| 120 | destroy(const struct xt_target *target, void *targinfo) | ||
| 121 | { | ||
| 122 | nf_ct_l3proto_module_put(target->family); | ||
| 123 | } | ||
| 124 | |||
| 114 | #ifdef CONFIG_COMPAT | 125 | #ifdef CONFIG_COMPAT |
| 115 | struct compat_xt_connmark_target_info { | 126 | struct compat_xt_connmark_target_info { |
| 116 | compat_ulong_t mark, mask; | 127 | compat_ulong_t mark, mask; |
| @@ -147,6 +158,7 @@ static struct xt_target xt_connmark_target[] = { | |||
| 147 | .name = "CONNMARK", | 158 | .name = "CONNMARK", |
| 148 | .family = AF_INET, | 159 | .family = AF_INET, |
| 149 | .checkentry = checkentry, | 160 | .checkentry = checkentry, |
| 161 | .destroy = destroy, | ||
| 150 | .target = target, | 162 | .target = target, |
| 151 | .targetsize = sizeof(struct xt_connmark_target_info), | 163 | .targetsize = sizeof(struct xt_connmark_target_info), |
| 152 | #ifdef CONFIG_COMPAT | 164 | #ifdef CONFIG_COMPAT |
| @@ -160,6 +172,7 @@ static struct xt_target xt_connmark_target[] = { | |||
| 160 | .name = "CONNMARK", | 172 | .name = "CONNMARK", |
| 161 | .family = AF_INET6, | 173 | .family = AF_INET6, |
| 162 | .checkentry = checkentry, | 174 | .checkentry = checkentry, |
| 175 | .destroy = destroy, | ||
| 163 | .target = target, | 176 | .target = target, |
| 164 | .targetsize = sizeof(struct xt_connmark_target_info), | 177 | .targetsize = sizeof(struct xt_connmark_target_info), |
| 165 | .me = THIS_MODULE | 178 | .me = THIS_MODULE |
| @@ -168,7 +181,6 @@ static struct xt_target xt_connmark_target[] = { | |||
| 168 | 181 | ||
| 169 | static int __init xt_connmark_init(void) | 182 | static int __init xt_connmark_init(void) |
| 170 | { | 183 | { |
| 171 | need_conntrack(); | ||
| 172 | return xt_register_targets(xt_connmark_target, | 184 | return xt_register_targets(xt_connmark_target, |
| 173 | ARRAY_SIZE(xt_connmark_target)); | 185 | ARRAY_SIZE(xt_connmark_target)); |
| 174 | } | 186 | } |
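The CONNSECMARK, connbytes, connmark, conntrack, helper and state hunks below apply the same checkentry()/destroy() pairing, so it is sketched only once here, condensed from the hunk above (target-specific validation elided): take a reference on the per-family conntrack module when a rule is added, refuse the rule if that fails, and drop the reference when the rule is removed. This is also why the need_conntrack() calls can disappear from the module init functions: the conntrack dependency is now taken per rule instead of being forced at module load.

    static int checkentry(const char *tablename, const void *entry,
                          const struct xt_target *target, void *targinfo,
                          unsigned int hook_mask)
    {
            /* Pin conntrack support for this family while the rule exists. */
            if (nf_ct_l3proto_try_module_get(target->family) < 0) {
                    printk(KERN_WARNING "can't load conntrack support for "
                           "proto=%d\n", target->family);
                    return 0;
            }
            /* ... target-specific validation ... */
            return 1;
    }

    static void destroy(const struct xt_target *target, void *targinfo)
    {
            /* Drop the reference taken in checkentry(). */
            nf_ct_l3proto_module_put(target->family);
    }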
diff --git a/net/netfilter/xt_CONNSECMARK.c b/net/netfilter/xt_CONNSECMARK.c index 467386266674..a3fe3c334b09 100644 --- a/net/netfilter/xt_CONNSECMARK.c +++ b/net/netfilter/xt_CONNSECMARK.c | |||
| @@ -93,6 +93,11 @@ static int checkentry(const char *tablename, const void *entry, | |||
| 93 | { | 93 | { |
| 94 | struct xt_connsecmark_target_info *info = targinfo; | 94 | struct xt_connsecmark_target_info *info = targinfo; |
| 95 | 95 | ||
| 96 | if (nf_ct_l3proto_try_module_get(target->family) < 0) { | ||
| 97 | printk(KERN_WARNING "can't load conntrack support for " | ||
| 98 | "proto=%d\n", target->family); | ||
| 99 | return 0; | ||
| 100 | } | ||
| 96 | switch (info->mode) { | 101 | switch (info->mode) { |
| 97 | case CONNSECMARK_SAVE: | 102 | case CONNSECMARK_SAVE: |
| 98 | case CONNSECMARK_RESTORE: | 103 | case CONNSECMARK_RESTORE: |
| @@ -106,11 +111,18 @@ static int checkentry(const char *tablename, const void *entry, | |||
| 106 | return 1; | 111 | return 1; |
| 107 | } | 112 | } |
| 108 | 113 | ||
| 114 | static void | ||
| 115 | destroy(const struct xt_target *target, void *targinfo) | ||
| 116 | { | ||
| 117 | nf_ct_l3proto_module_put(target->family); | ||
| 118 | } | ||
| 119 | |||
| 109 | static struct xt_target xt_connsecmark_target[] = { | 120 | static struct xt_target xt_connsecmark_target[] = { |
| 110 | { | 121 | { |
| 111 | .name = "CONNSECMARK", | 122 | .name = "CONNSECMARK", |
| 112 | .family = AF_INET, | 123 | .family = AF_INET, |
| 113 | .checkentry = checkentry, | 124 | .checkentry = checkentry, |
| 125 | .destroy = destroy, | ||
| 114 | .target = target, | 126 | .target = target, |
| 115 | .targetsize = sizeof(struct xt_connsecmark_target_info), | 127 | .targetsize = sizeof(struct xt_connsecmark_target_info), |
| 116 | .table = "mangle", | 128 | .table = "mangle", |
| @@ -120,6 +132,7 @@ static struct xt_target xt_connsecmark_target[] = { | |||
| 120 | .name = "CONNSECMARK", | 132 | .name = "CONNSECMARK", |
| 121 | .family = AF_INET6, | 133 | .family = AF_INET6, |
| 122 | .checkentry = checkentry, | 134 | .checkentry = checkentry, |
| 135 | .destroy = destroy, | ||
| 123 | .target = target, | 136 | .target = target, |
| 124 | .targetsize = sizeof(struct xt_connsecmark_target_info), | 137 | .targetsize = sizeof(struct xt_connsecmark_target_info), |
| 125 | .table = "mangle", | 138 | .table = "mangle", |
| @@ -129,7 +142,6 @@ static struct xt_target xt_connsecmark_target[] = { | |||
| 129 | 142 | ||
| 130 | static int __init xt_connsecmark_init(void) | 143 | static int __init xt_connsecmark_init(void) |
| 131 | { | 144 | { |
| 132 | need_conntrack(); | ||
| 133 | return xt_register_targets(xt_connsecmark_target, | 145 | return xt_register_targets(xt_connsecmark_target, |
| 134 | ARRAY_SIZE(xt_connsecmark_target)); | 146 | ARRAY_SIZE(xt_connsecmark_target)); |
| 135 | } | 147 | } |
diff --git a/net/netfilter/xt_connbytes.c b/net/netfilter/xt_connbytes.c index dcc497ea8183..d93cb096a675 100644 --- a/net/netfilter/xt_connbytes.c +++ b/net/netfilter/xt_connbytes.c | |||
| @@ -139,15 +139,28 @@ static int check(const char *tablename, | |||
| 139 | sinfo->direction != XT_CONNBYTES_DIR_BOTH) | 139 | sinfo->direction != XT_CONNBYTES_DIR_BOTH) |
| 140 | return 0; | 140 | return 0; |
| 141 | 141 | ||
| 142 | if (nf_ct_l3proto_try_module_get(match->family) < 0) { | ||
| 143 | printk(KERN_WARNING "can't load conntrack support for " | ||
| 144 | "proto=%d\n", match->family); | ||
| 145 | return 0; | ||
| 146 | } | ||
| 147 | |||
| 142 | return 1; | 148 | return 1; |
| 143 | } | 149 | } |
| 144 | 150 | ||
| 151 | static void | ||
| 152 | destroy(const struct xt_match *match, void *matchinfo) | ||
| 153 | { | ||
| 154 | nf_ct_l3proto_module_put(match->family); | ||
| 155 | } | ||
| 156 | |||
| 145 | static struct xt_match xt_connbytes_match[] = { | 157 | static struct xt_match xt_connbytes_match[] = { |
| 146 | { | 158 | { |
| 147 | .name = "connbytes", | 159 | .name = "connbytes", |
| 148 | .family = AF_INET, | 160 | .family = AF_INET, |
| 149 | .checkentry = check, | 161 | .checkentry = check, |
| 150 | .match = match, | 162 | .match = match, |
| 163 | .destroy = destroy, | ||
| 151 | .matchsize = sizeof(struct xt_connbytes_info), | 164 | .matchsize = sizeof(struct xt_connbytes_info), |
| 152 | .me = THIS_MODULE | 165 | .me = THIS_MODULE |
| 153 | }, | 166 | }, |
| @@ -156,6 +169,7 @@ static struct xt_match xt_connbytes_match[] = { | |||
| 156 | .family = AF_INET6, | 169 | .family = AF_INET6, |
| 157 | .checkentry = check, | 170 | .checkentry = check, |
| 158 | .match = match, | 171 | .match = match, |
| 172 | .destroy = destroy, | ||
| 159 | .matchsize = sizeof(struct xt_connbytes_info), | 173 | .matchsize = sizeof(struct xt_connbytes_info), |
| 160 | .me = THIS_MODULE | 174 | .me = THIS_MODULE |
| 161 | }, | 175 | }, |
diff --git a/net/netfilter/xt_connmark.c b/net/netfilter/xt_connmark.c index a8f03057dbde..36c2defff238 100644 --- a/net/netfilter/xt_connmark.c +++ b/net/netfilter/xt_connmark.c | |||
| @@ -63,22 +63,18 @@ checkentry(const char *tablename, | |||
| 63 | printk(KERN_WARNING "connmark: only support 32bit mark\n"); | 63 | printk(KERN_WARNING "connmark: only support 32bit mark\n"); |
| 64 | return 0; | 64 | return 0; |
| 65 | } | 65 | } |
| 66 | #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) | ||
| 67 | if (nf_ct_l3proto_try_module_get(match->family) < 0) { | 66 | if (nf_ct_l3proto_try_module_get(match->family) < 0) { |
| 68 | printk(KERN_WARNING "can't load nf_conntrack support for " | 67 | printk(KERN_WARNING "can't load conntrack support for " |
| 69 | "proto=%d\n", match->family); | 68 | "proto=%d\n", match->family); |
| 70 | return 0; | 69 | return 0; |
| 71 | } | 70 | } |
| 72 | #endif | ||
| 73 | return 1; | 71 | return 1; |
| 74 | } | 72 | } |
| 75 | 73 | ||
| 76 | static void | 74 | static void |
| 77 | destroy(const struct xt_match *match, void *matchinfo) | 75 | destroy(const struct xt_match *match, void *matchinfo) |
| 78 | { | 76 | { |
| 79 | #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) | ||
| 80 | nf_ct_l3proto_module_put(match->family); | 77 | nf_ct_l3proto_module_put(match->family); |
| 81 | #endif | ||
| 82 | } | 78 | } |
| 83 | 79 | ||
| 84 | #ifdef CONFIG_COMPAT | 80 | #ifdef CONFIG_COMPAT |
| @@ -140,7 +136,6 @@ static struct xt_match xt_connmark_match[] = { | |||
| 140 | 136 | ||
| 141 | static int __init xt_connmark_init(void) | 137 | static int __init xt_connmark_init(void) |
| 142 | { | 138 | { |
| 143 | need_conntrack(); | ||
| 144 | return xt_register_matches(xt_connmark_match, | 139 | return xt_register_matches(xt_connmark_match, |
| 145 | ARRAY_SIZE(xt_connmark_match)); | 140 | ARRAY_SIZE(xt_connmark_match)); |
| 146 | } | 141 | } |
diff --git a/net/netfilter/xt_conntrack.c b/net/netfilter/xt_conntrack.c index 0ea501a2fda5..3dc2357b8de8 100644 --- a/net/netfilter/xt_conntrack.c +++ b/net/netfilter/xt_conntrack.c | |||
| @@ -20,6 +20,7 @@ | |||
| 20 | 20 | ||
| 21 | #include <linux/netfilter/x_tables.h> | 21 | #include <linux/netfilter/x_tables.h> |
| 22 | #include <linux/netfilter/xt_conntrack.h> | 22 | #include <linux/netfilter/xt_conntrack.h> |
| 23 | #include <net/netfilter/nf_conntrack_compat.h> | ||
| 23 | 24 | ||
| 24 | MODULE_LICENSE("GPL"); | 25 | MODULE_LICENSE("GPL"); |
| 25 | MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>"); | 26 | MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>"); |
| @@ -228,21 +229,17 @@ checkentry(const char *tablename, | |||
| 228 | void *matchinfo, | 229 | void *matchinfo, |
| 229 | unsigned int hook_mask) | 230 | unsigned int hook_mask) |
| 230 | { | 231 | { |
| 231 | #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) | ||
| 232 | if (nf_ct_l3proto_try_module_get(match->family) < 0) { | 232 | if (nf_ct_l3proto_try_module_get(match->family) < 0) { |
| 233 | printk(KERN_WARNING "can't load nf_conntrack support for " | 233 | printk(KERN_WARNING "can't load conntrack support for " |
| 234 | "proto=%d\n", match->family); | 234 | "proto=%d\n", match->family); |
| 235 | return 0; | 235 | return 0; |
| 236 | } | 236 | } |
| 237 | #endif | ||
| 238 | return 1; | 237 | return 1; |
| 239 | } | 238 | } |
| 240 | 239 | ||
| 241 | static void destroy(const struct xt_match *match, void *matchinfo) | 240 | static void destroy(const struct xt_match *match, void *matchinfo) |
| 242 | { | 241 | { |
| 243 | #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) | ||
| 244 | nf_ct_l3proto_module_put(match->family); | 242 | nf_ct_l3proto_module_put(match->family); |
| 245 | #endif | ||
| 246 | } | 243 | } |
| 247 | 244 | ||
| 248 | static struct xt_match conntrack_match = { | 245 | static struct xt_match conntrack_match = { |
| @@ -257,7 +254,6 @@ static struct xt_match conntrack_match = { | |||
| 257 | 254 | ||
| 258 | static int __init xt_conntrack_init(void) | 255 | static int __init xt_conntrack_init(void) |
| 259 | { | 256 | { |
| 260 | need_conntrack(); | ||
| 261 | return xt_register_match(&conntrack_match); | 257 | return xt_register_match(&conntrack_match); |
| 262 | } | 258 | } |
| 263 | 259 | ||
diff --git a/net/netfilter/xt_helper.c b/net/netfilter/xt_helper.c index 5d7818b73e3a..04bc32ba7195 100644 --- a/net/netfilter/xt_helper.c +++ b/net/netfilter/xt_helper.c | |||
| @@ -24,6 +24,7 @@ | |||
| 24 | #endif | 24 | #endif |
| 25 | #include <linux/netfilter/x_tables.h> | 25 | #include <linux/netfilter/x_tables.h> |
| 26 | #include <linux/netfilter/xt_helper.h> | 26 | #include <linux/netfilter/xt_helper.h> |
| 27 | #include <net/netfilter/nf_conntrack_compat.h> | ||
| 27 | 28 | ||
| 28 | MODULE_LICENSE("GPL"); | 29 | MODULE_LICENSE("GPL"); |
| 29 | MODULE_AUTHOR("Martin Josefsson <gandalf@netfilter.org>"); | 30 | MODULE_AUTHOR("Martin Josefsson <gandalf@netfilter.org>"); |
| @@ -143,13 +144,11 @@ static int check(const char *tablename, | |||
| 143 | { | 144 | { |
| 144 | struct xt_helper_info *info = matchinfo; | 145 | struct xt_helper_info *info = matchinfo; |
| 145 | 146 | ||
| 146 | #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) | ||
| 147 | if (nf_ct_l3proto_try_module_get(match->family) < 0) { | 147 | if (nf_ct_l3proto_try_module_get(match->family) < 0) { |
| 148 | printk(KERN_WARNING "can't load nf_conntrack support for " | 148 | printk(KERN_WARNING "can't load conntrack support for " |
| 149 | "proto=%d\n", match->family); | 149 | "proto=%d\n", match->family); |
| 150 | return 0; | 150 | return 0; |
| 151 | } | 151 | } |
| 152 | #endif | ||
| 153 | info->name[29] = '\0'; | 152 | info->name[29] = '\0'; |
| 154 | return 1; | 153 | return 1; |
| 155 | } | 154 | } |
| @@ -157,9 +156,7 @@ static int check(const char *tablename, | |||
| 157 | static void | 156 | static void |
| 158 | destroy(const struct xt_match *match, void *matchinfo) | 157 | destroy(const struct xt_match *match, void *matchinfo) |
| 159 | { | 158 | { |
| 160 | #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) | ||
| 161 | nf_ct_l3proto_module_put(match->family); | 159 | nf_ct_l3proto_module_put(match->family); |
| 162 | #endif | ||
| 163 | } | 160 | } |
| 164 | 161 | ||
| 165 | static struct xt_match xt_helper_match[] = { | 162 | static struct xt_match xt_helper_match[] = { |
| @@ -185,7 +182,6 @@ static struct xt_match xt_helper_match[] = { | |||
| 185 | 182 | ||
| 186 | static int __init xt_helper_init(void) | 183 | static int __init xt_helper_init(void) |
| 187 | { | 184 | { |
| 188 | need_conntrack(); | ||
| 189 | return xt_register_matches(xt_helper_match, | 185 | return xt_register_matches(xt_helper_match, |
| 190 | ARRAY_SIZE(xt_helper_match)); | 186 | ARRAY_SIZE(xt_helper_match)); |
| 191 | } | 187 | } |
diff --git a/net/netfilter/xt_physdev.c b/net/netfilter/xt_physdev.c index fd8f954cded5..b9b3ffc5451d 100644 --- a/net/netfilter/xt_physdev.c +++ b/net/netfilter/xt_physdev.c | |||
| @@ -113,20 +113,16 @@ checkentry(const char *tablename, | |||
| 113 | if (!(info->bitmask & XT_PHYSDEV_OP_MASK) || | 113 | if (!(info->bitmask & XT_PHYSDEV_OP_MASK) || |
| 114 | info->bitmask & ~XT_PHYSDEV_OP_MASK) | 114 | info->bitmask & ~XT_PHYSDEV_OP_MASK) |
| 115 | return 0; | 115 | return 0; |
| 116 | if (brnf_deferred_hooks == 0 && | 116 | if (info->bitmask & XT_PHYSDEV_OP_OUT && |
| 117 | info->bitmask & XT_PHYSDEV_OP_OUT && | ||
| 118 | (!(info->bitmask & XT_PHYSDEV_OP_BRIDGED) || | 117 | (!(info->bitmask & XT_PHYSDEV_OP_BRIDGED) || |
| 119 | info->invert & XT_PHYSDEV_OP_BRIDGED) && | 118 | info->invert & XT_PHYSDEV_OP_BRIDGED) && |
| 120 | hook_mask & ((1 << NF_IP_LOCAL_OUT) | (1 << NF_IP_FORWARD) | | 119 | hook_mask & ((1 << NF_IP_LOCAL_OUT) | (1 << NF_IP_FORWARD) | |
| 121 | (1 << NF_IP_POST_ROUTING))) { | 120 | (1 << NF_IP_POST_ROUTING))) { |
| 122 | printk(KERN_WARNING "physdev match: using --physdev-out in the " | 121 | printk(KERN_WARNING "physdev match: using --physdev-out in the " |
| 123 | "OUTPUT, FORWARD and POSTROUTING chains for non-bridged " | 122 | "OUTPUT, FORWARD and POSTROUTING chains for non-bridged " |
| 124 | "traffic is deprecated and breaks other things, it will " | 123 | "traffic is not supported anymore.\n"); |
| 125 | "be removed in January 2007. See Documentation/" | 124 | if (hook_mask & (1 << NF_IP_LOCAL_OUT)) |
| 126 | "feature-removal-schedule.txt for details. This doesn't " | 125 | return 0; |
| 127 | "affect you in case you're using it for purely bridged " | ||
| 128 | "traffic.\n"); | ||
| 129 | brnf_deferred_hooks = 1; | ||
| 130 | } | 126 | } |
| 131 | return 1; | 127 | return 1; |
| 132 | } | 128 | } |
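In the physdev hunk the old deprecation warning becomes a hard error only when the rule targets a chain set that includes LOCAL_OUT; FORWARD and POSTROUTING still just warn. A stand-alone rendering of that hook-mask decision (the bridged/invert conditions are omitted; hook numbers as in the 2.6 netfilter headers):

    #include <stdio.h>

    #define NF_IP_FORWARD       2
    #define NF_IP_LOCAL_OUT     3
    #define NF_IP_POST_ROUTING  4

    static int physdev_out_ok(unsigned int hook_mask)
    {
            if (hook_mask & ((1 << NF_IP_LOCAL_OUT) | (1 << NF_IP_FORWARD) |
                             (1 << NF_IP_POST_ROUTING))) {
                    printf("warning: --physdev-out on non-bridged traffic\n");
                    if (hook_mask & (1 << NF_IP_LOCAL_OUT))
                            return 0;       /* refuse the rule in OUTPUT */
            }
            return 1;                       /* accept, possibly with a warning */
    }

    int main(void)
    {
            printf("FORWARD only: %d\n", physdev_out_ok(1 << NF_IP_FORWARD));
            printf("with OUTPUT : %d\n", physdev_out_ok(1 << NF_IP_LOCAL_OUT));
            return 0;
    }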
diff --git a/net/netfilter/xt_state.c b/net/netfilter/xt_state.c index d9010b16a1f9..df37b912163a 100644 --- a/net/netfilter/xt_state.c +++ b/net/netfilter/xt_state.c | |||
| @@ -50,22 +50,18 @@ static int check(const char *tablename, | |||
| 50 | void *matchinfo, | 50 | void *matchinfo, |
| 51 | unsigned int hook_mask) | 51 | unsigned int hook_mask) |
| 52 | { | 52 | { |
| 53 | #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) | ||
| 54 | if (nf_ct_l3proto_try_module_get(match->family) < 0) { | 53 | if (nf_ct_l3proto_try_module_get(match->family) < 0) { |
| 55 | printk(KERN_WARNING "can't load nf_conntrack support for " | 54 | printk(KERN_WARNING "can't load conntrack support for " |
| 56 | "proto=%d\n", match->family); | 55 | "proto=%d\n", match->family); |
| 57 | return 0; | 56 | return 0; |
| 58 | } | 57 | } |
| 59 | #endif | ||
| 60 | return 1; | 58 | return 1; |
| 61 | } | 59 | } |
| 62 | 60 | ||
| 63 | static void | 61 | static void |
| 64 | destroy(const struct xt_match *match, void *matchinfo) | 62 | destroy(const struct xt_match *match, void *matchinfo) |
| 65 | { | 63 | { |
| 66 | #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) | ||
| 67 | nf_ct_l3proto_module_put(match->family); | 64 | nf_ct_l3proto_module_put(match->family); |
| 68 | #endif | ||
| 69 | } | 65 | } |
| 70 | 66 | ||
| 71 | static struct xt_match xt_state_match[] = { | 67 | static struct xt_match xt_state_match[] = { |
| @@ -91,7 +87,6 @@ static struct xt_match xt_state_match[] = { | |||
| 91 | 87 | ||
| 92 | static int __init xt_state_init(void) | 88 | static int __init xt_state_init(void) |
| 93 | { | 89 | { |
| 94 | need_conntrack(); | ||
| 95 | return xt_register_matches(xt_state_match, ARRAY_SIZE(xt_state_match)); | 90 | return xt_register_matches(xt_state_match, ARRAY_SIZE(xt_state_match)); |
| 96 | } | 91 | } |
| 97 | 92 | ||
diff --git a/net/netlabel/netlabel_cipso_v4.c b/net/netlabel/netlabel_cipso_v4.c index 743b05734a49..4afc75f9e377 100644 --- a/net/netlabel/netlabel_cipso_v4.c +++ b/net/netlabel/netlabel_cipso_v4.c | |||
| @@ -162,6 +162,7 @@ static int netlbl_cipsov4_add_std(struct genl_info *info) | |||
| 162 | struct nlattr *nla_b; | 162 | struct nlattr *nla_b; |
| 163 | int nla_a_rem; | 163 | int nla_a_rem; |
| 164 | int nla_b_rem; | 164 | int nla_b_rem; |
| 165 | u32 iter; | ||
| 165 | 166 | ||
| 166 | if (!info->attrs[NLBL_CIPSOV4_A_TAGLST] || | 167 | if (!info->attrs[NLBL_CIPSOV4_A_TAGLST] || |
| 167 | !info->attrs[NLBL_CIPSOV4_A_MLSLVLLST]) | 168 | !info->attrs[NLBL_CIPSOV4_A_MLSLVLLST]) |
| @@ -185,20 +186,31 @@ static int netlbl_cipsov4_add_std(struct genl_info *info) | |||
| 185 | ret_val = netlbl_cipsov4_add_common(info, doi_def); | 186 | ret_val = netlbl_cipsov4_add_common(info, doi_def); |
| 186 | if (ret_val != 0) | 187 | if (ret_val != 0) |
| 187 | goto add_std_failure; | 188 | goto add_std_failure; |
| 189 | ret_val = -EINVAL; | ||
| 188 | 190 | ||
| 189 | nla_for_each_nested(nla_a, | 191 | nla_for_each_nested(nla_a, |
| 190 | info->attrs[NLBL_CIPSOV4_A_MLSLVLLST], | 192 | info->attrs[NLBL_CIPSOV4_A_MLSLVLLST], |
| 191 | nla_a_rem) | 193 | nla_a_rem) |
| 192 | if (nla_a->nla_type == NLBL_CIPSOV4_A_MLSLVL) { | 194 | if (nla_a->nla_type == NLBL_CIPSOV4_A_MLSLVL) { |
| 195 | if (nla_validate_nested(nla_a, | ||
| 196 | NLBL_CIPSOV4_A_MAX, | ||
| 197 | netlbl_cipsov4_genl_policy) != 0) | ||
| 198 | goto add_std_failure; | ||
| 193 | nla_for_each_nested(nla_b, nla_a, nla_b_rem) | 199 | nla_for_each_nested(nla_b, nla_a, nla_b_rem) |
| 194 | switch (nla_b->nla_type) { | 200 | switch (nla_b->nla_type) { |
| 195 | case NLBL_CIPSOV4_A_MLSLVLLOC: | 201 | case NLBL_CIPSOV4_A_MLSLVLLOC: |
| 202 | if (nla_get_u32(nla_b) > | ||
| 203 | CIPSO_V4_MAX_LOC_LVLS) | ||
| 204 | goto add_std_failure; | ||
| 196 | if (nla_get_u32(nla_b) >= | 205 | if (nla_get_u32(nla_b) >= |
| 197 | doi_def->map.std->lvl.local_size) | 206 | doi_def->map.std->lvl.local_size) |
| 198 | doi_def->map.std->lvl.local_size = | 207 | doi_def->map.std->lvl.local_size = |
| 199 | nla_get_u32(nla_b) + 1; | 208 | nla_get_u32(nla_b) + 1; |
| 200 | break; | 209 | break; |
| 201 | case NLBL_CIPSOV4_A_MLSLVLREM: | 210 | case NLBL_CIPSOV4_A_MLSLVLREM: |
| 211 | if (nla_get_u32(nla_b) > | ||
| 212 | CIPSO_V4_MAX_REM_LVLS) | ||
| 213 | goto add_std_failure; | ||
| 202 | if (nla_get_u32(nla_b) >= | 214 | if (nla_get_u32(nla_b) >= |
| 203 | doi_def->map.std->lvl.cipso_size) | 215 | doi_def->map.std->lvl.cipso_size) |
| 204 | doi_def->map.std->lvl.cipso_size = | 216 | doi_def->map.std->lvl.cipso_size = |
| @@ -206,9 +218,6 @@ static int netlbl_cipsov4_add_std(struct genl_info *info) | |||
| 206 | break; | 218 | break; |
| 207 | } | 219 | } |
| 208 | } | 220 | } |
| 209 | if (doi_def->map.std->lvl.local_size > CIPSO_V4_MAX_LOC_LVLS || | ||
| 210 | doi_def->map.std->lvl.cipso_size > CIPSO_V4_MAX_REM_LVLS) | ||
| 211 | goto add_std_failure; | ||
| 212 | doi_def->map.std->lvl.local = kcalloc(doi_def->map.std->lvl.local_size, | 221 | doi_def->map.std->lvl.local = kcalloc(doi_def->map.std->lvl.local_size, |
| 213 | sizeof(u32), | 222 | sizeof(u32), |
| 214 | GFP_KERNEL); | 223 | GFP_KERNEL); |
| @@ -223,6 +232,10 @@ static int netlbl_cipsov4_add_std(struct genl_info *info) | |||
| 223 | ret_val = -ENOMEM; | 232 | ret_val = -ENOMEM; |
| 224 | goto add_std_failure; | 233 | goto add_std_failure; |
| 225 | } | 234 | } |
| 235 | for (iter = 0; iter < doi_def->map.std->lvl.local_size; iter++) | ||
| 236 | doi_def->map.std->lvl.local[iter] = CIPSO_V4_INV_LVL; | ||
| 237 | for (iter = 0; iter < doi_def->map.std->lvl.cipso_size; iter++) | ||
| 238 | doi_def->map.std->lvl.cipso[iter] = CIPSO_V4_INV_LVL; | ||
| 226 | nla_for_each_nested(nla_a, | 239 | nla_for_each_nested(nla_a, |
| 227 | info->attrs[NLBL_CIPSOV4_A_MLSLVLLST], | 240 | info->attrs[NLBL_CIPSOV4_A_MLSLVLLST], |
| 228 | nla_a_rem) | 241 | nla_a_rem) |
| @@ -230,11 +243,6 @@ static int netlbl_cipsov4_add_std(struct genl_info *info) | |||
| 230 | struct nlattr *lvl_loc; | 243 | struct nlattr *lvl_loc; |
| 231 | struct nlattr *lvl_rem; | 244 | struct nlattr *lvl_rem; |
| 232 | 245 | ||
| 233 | if (nla_validate_nested(nla_a, | ||
| 234 | NLBL_CIPSOV4_A_MAX, | ||
| 235 | netlbl_cipsov4_genl_policy) != 0) | ||
| 236 | goto add_std_failure; | ||
| 237 | |||
| 238 | lvl_loc = nla_find_nested(nla_a, | 246 | lvl_loc = nla_find_nested(nla_a, |
| 239 | NLBL_CIPSOV4_A_MLSLVLLOC); | 247 | NLBL_CIPSOV4_A_MLSLVLLOC); |
| 240 | lvl_rem = nla_find_nested(nla_a, | 248 | lvl_rem = nla_find_nested(nla_a, |
| @@ -264,12 +272,18 @@ static int netlbl_cipsov4_add_std(struct genl_info *info) | |||
| 264 | nla_for_each_nested(nla_b, nla_a, nla_b_rem) | 272 | nla_for_each_nested(nla_b, nla_a, nla_b_rem) |
| 265 | switch (nla_b->nla_type) { | 273 | switch (nla_b->nla_type) { |
| 266 | case NLBL_CIPSOV4_A_MLSCATLOC: | 274 | case NLBL_CIPSOV4_A_MLSCATLOC: |
| 275 | if (nla_get_u32(nla_b) > | ||
| 276 | CIPSO_V4_MAX_LOC_CATS) | ||
| 277 | goto add_std_failure; | ||
| 267 | if (nla_get_u32(nla_b) >= | 278 | if (nla_get_u32(nla_b) >= |
| 268 | doi_def->map.std->cat.local_size) | 279 | doi_def->map.std->cat.local_size) |
| 269 | doi_def->map.std->cat.local_size = | 280 | doi_def->map.std->cat.local_size = |
| 270 | nla_get_u32(nla_b) + 1; | 281 | nla_get_u32(nla_b) + 1; |
| 271 | break; | 282 | break; |
| 272 | case NLBL_CIPSOV4_A_MLSCATREM: | 283 | case NLBL_CIPSOV4_A_MLSCATREM: |
| 284 | if (nla_get_u32(nla_b) > | ||
| 285 | CIPSO_V4_MAX_REM_CATS) | ||
| 286 | goto add_std_failure; | ||
| 273 | if (nla_get_u32(nla_b) >= | 287 | if (nla_get_u32(nla_b) >= |
| 274 | doi_def->map.std->cat.cipso_size) | 288 | doi_def->map.std->cat.cipso_size) |
| 275 | doi_def->map.std->cat.cipso_size = | 289 | doi_def->map.std->cat.cipso_size = |
| @@ -277,9 +291,6 @@ static int netlbl_cipsov4_add_std(struct genl_info *info) | |||
| 277 | break; | 291 | break; |
| 278 | } | 292 | } |
| 279 | } | 293 | } |
| 280 | if (doi_def->map.std->cat.local_size > CIPSO_V4_MAX_LOC_CATS || | ||
| 281 | doi_def->map.std->cat.cipso_size > CIPSO_V4_MAX_REM_CATS) | ||
| 282 | goto add_std_failure; | ||
| 283 | doi_def->map.std->cat.local = kcalloc( | 294 | doi_def->map.std->cat.local = kcalloc( |
| 284 | doi_def->map.std->cat.local_size, | 295 | doi_def->map.std->cat.local_size, |
| 285 | sizeof(u32), | 296 | sizeof(u32), |
| @@ -296,6 +307,10 @@ static int netlbl_cipsov4_add_std(struct genl_info *info) | |||
| 296 | ret_val = -ENOMEM; | 307 | ret_val = -ENOMEM; |
| 297 | goto add_std_failure; | 308 | goto add_std_failure; |
| 298 | } | 309 | } |
| 310 | for (iter = 0; iter < doi_def->map.std->cat.local_size; iter++) | ||
| 311 | doi_def->map.std->cat.local[iter] = CIPSO_V4_INV_CAT; | ||
| 312 | for (iter = 0; iter < doi_def->map.std->cat.cipso_size; iter++) | ||
| 313 | doi_def->map.std->cat.cipso[iter] = CIPSO_V4_INV_CAT; | ||
| 299 | nla_for_each_nested(nla_a, | 314 | nla_for_each_nested(nla_a, |
| 300 | info->attrs[NLBL_CIPSOV4_A_MLSCATLST], | 315 | info->attrs[NLBL_CIPSOV4_A_MLSCATLST], |
| 301 | nla_a_rem) | 316 | nla_a_rem) |
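The loops added after each kcalloc() fill the freshly allocated level and category tables with CIPSO_V4_INV_LVL / CIPSO_V4_INV_CAT before the netlink attributes are walked a second time. The point is that 0 is itself a legal level/category value, so the zero-fill from kcalloc() cannot double as an "unmapped" marker. A stand-alone illustration of the sentinel-fill pattern (user-space sketch; CIPSO_V4_INV_LVL is assumed to be 0x80000000 as in include/net/cipso_ipv4.h):

    #include <stdio.h>
    #include <stdlib.h>

    #define INV_LVL 0x80000000u     /* stand-in for CIPSO_V4_INV_LVL */

    int main(void)
    {
            unsigned int *map = calloc(8, sizeof(*map));
            unsigned int i;

            if (map == NULL)
                    return 1;
            for (i = 0; i < 8; i++)
                    map[i] = INV_LVL;   /* mark every slot "no mapping" */
            map[3] = 0;                 /* remote level 3 maps to local level 0 */

            for (i = 0; i < 8; i++)
                    printf("remote %u -> %s\n", i,
                           map[i] == INV_LVL ? "rejected" : "mapped");
            free(map);
            return 0;
    }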
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c index 1d50f801f181..43bbe2c9e49a 100644 --- a/net/netrom/af_netrom.c +++ b/net/netrom/af_netrom.c | |||
| @@ -1377,6 +1377,15 @@ static struct notifier_block nr_dev_notifier = { | |||
| 1377 | 1377 | ||
| 1378 | static struct net_device **dev_nr; | 1378 | static struct net_device **dev_nr; |
| 1379 | 1379 | ||
| 1380 | static struct ax25_protocol nr_pid = { | ||
| 1381 | .pid = AX25_P_NETROM, | ||
| 1382 | .func = nr_route_frame | ||
| 1383 | }; | ||
| 1384 | |||
| 1385 | static struct ax25_linkfail nr_linkfail_notifier = { | ||
| 1386 | .func = nr_link_failed, | ||
| 1387 | }; | ||
| 1388 | |||
| 1380 | static int __init nr_proto_init(void) | 1389 | static int __init nr_proto_init(void) |
| 1381 | { | 1390 | { |
| 1382 | int i; | 1391 | int i; |
| @@ -1424,8 +1433,8 @@ static int __init nr_proto_init(void) | |||
| 1424 | 1433 | ||
| 1425 | register_netdevice_notifier(&nr_dev_notifier); | 1434 | register_netdevice_notifier(&nr_dev_notifier); |
| 1426 | 1435 | ||
| 1427 | ax25_protocol_register(AX25_P_NETROM, nr_route_frame); | 1436 | ax25_register_pid(&nr_pid); |
| 1428 | ax25_linkfail_register(nr_link_failed); | 1437 | ax25_linkfail_register(&nr_linkfail_notifier); |
| 1429 | 1438 | ||
| 1430 | #ifdef CONFIG_SYSCTL | 1439 | #ifdef CONFIG_SYSCTL |
| 1431 | nr_register_sysctl(); | 1440 | nr_register_sysctl(); |
| @@ -1474,7 +1483,7 @@ static void __exit nr_exit(void) | |||
| 1474 | nr_unregister_sysctl(); | 1483 | nr_unregister_sysctl(); |
| 1475 | #endif | 1484 | #endif |
| 1476 | 1485 | ||
| 1477 | ax25_linkfail_release(nr_link_failed); | 1486 | ax25_linkfail_release(&nr_linkfail_notifier); |
| 1478 | ax25_protocol_release(AX25_P_NETROM); | 1487 | ax25_protocol_release(AX25_P_NETROM); |
| 1479 | 1488 | ||
| 1480 | unregister_netdevice_notifier(&nr_dev_notifier); | 1489 | unregister_netdevice_notifier(&nr_dev_notifier); |
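af_rose.c below is converted the same way, so once here: the protocol hook is now a statically allocated struct ax25_protocol owned by the registering module and handed to ax25_register_pid(), while release remains keyed by PID. The init/exit pairing, condensed from the two af_netrom.c hunks above:

    static struct ax25_protocol nr_pid = {
            .pid  = AX25_P_NETROM,
            .func = nr_route_frame
    };

    /* module init */
    ax25_register_pid(&nr_pid);             /* caller owns the storage */

    /* module exit */
    ax25_protocol_release(AX25_P_NETROM);   /* lookup and removal are by PID */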
diff --git a/net/netrom/nr_dev.c b/net/netrom/nr_dev.c index 9b8eb54971ab..4700d5225b78 100644 --- a/net/netrom/nr_dev.c +++ b/net/netrom/nr_dev.c | |||
| @@ -128,25 +128,37 @@ static int nr_header(struct sk_buff *skb, struct net_device *dev, unsigned short | |||
| 128 | return -37; | 128 | return -37; |
| 129 | } | 129 | } |
| 130 | 130 | ||
| 131 | static int nr_set_mac_address(struct net_device *dev, void *addr) | 131 | static int __must_check nr_set_mac_address(struct net_device *dev, void *addr) |
| 132 | { | 132 | { |
| 133 | struct sockaddr *sa = addr; | 133 | struct sockaddr *sa = addr; |
| 134 | int err; | ||
| 135 | |||
| 136 | if (!memcmp(dev->dev_addr, sa->sa_data, dev->addr_len)) | ||
| 137 | return 0; | ||
| 138 | |||
| 139 | if (dev->flags & IFF_UP) { | ||
| 140 | err = ax25_listen_register((ax25_address *)sa->sa_data, NULL); | ||
| 141 | if (err) | ||
| 142 | return err; | ||
| 134 | 143 | ||
| 135 | if (dev->flags & IFF_UP) | ||
| 136 | ax25_listen_release((ax25_address *)dev->dev_addr, NULL); | 144 | ax25_listen_release((ax25_address *)dev->dev_addr, NULL); |
| 145 | } | ||
| 137 | 146 | ||
| 138 | memcpy(dev->dev_addr, sa->sa_data, dev->addr_len); | 147 | memcpy(dev->dev_addr, sa->sa_data, dev->addr_len); |
| 139 | 148 | ||
| 140 | if (dev->flags & IFF_UP) | ||
| 141 | ax25_listen_register((ax25_address *)dev->dev_addr, NULL); | ||
| 142 | |||
| 143 | return 0; | 149 | return 0; |
| 144 | } | 150 | } |
| 145 | 151 | ||
| 146 | static int nr_open(struct net_device *dev) | 152 | static int nr_open(struct net_device *dev) |
| 147 | { | 153 | { |
| 154 | int err; | ||
| 155 | |||
| 156 | err = ax25_listen_register((ax25_address *)dev->dev_addr, NULL); | ||
| 157 | if (err) | ||
| 158 | return err; | ||
| 159 | |||
| 148 | netif_start_queue(dev); | 160 | netif_start_queue(dev); |
| 149 | ax25_listen_register((ax25_address *)dev->dev_addr, NULL); | 161 | |
| 150 | return 0; | 162 | return 0; |
| 151 | } | 163 | } |
| 152 | 164 | ||
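The reworked nr_set_mac_address() (and its rose_dev.c counterpart below) orders the operations as register-the-new-address-first, release-the-old-one-second, so a failed ax25_listen_register() leaves the interface with its previous, still-registered address. A stand-alone sketch of that ordering, with stub register/release functions standing in for ax25_listen_register()/ax25_listen_release():

    #include <stdio.h>
    #include <string.h>

    /* Stubs standing in for ax25_listen_register()/ax25_listen_release(). */
    static int  listen_register(const char *addr) { printf("register %s\n", addr); return 0; }
    static void listen_release(const char *addr)  { printf("release  %s\n", addr); }

    static char current_addr[16] = "OLD-CALL";

    static int set_addr(const char *new_addr, int ifup)
    {
            int err;

            if (!strcmp(current_addr, new_addr))
                    return 0;                       /* unchanged: nothing to do */

            if (ifup) {
                    err = listen_register(new_addr);/* grab the new one first...   */
                    if (err)
                            return err;             /* ...so failure keeps the old */
                    listen_release(current_addr);   /* only now drop the old one   */
            }
            snprintf(current_addr, sizeof(current_addr), "%s", new_addr);
            return 0;
    }

    int main(void)
    {
            return set_addr("NEW-CALL", 1);
    }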
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c index 0096105bcd47..8f88964099ef 100644 --- a/net/netrom/nr_route.c +++ b/net/netrom/nr_route.c | |||
| @@ -87,8 +87,9 @@ static void nr_remove_neigh(struct nr_neigh *); | |||
| 87 | * Add a new route to a node, and in the process add the node and the | 87 | * Add a new route to a node, and in the process add the node and the |
| 88 | * neighbour if it is new. | 88 | * neighbour if it is new. |
| 89 | */ | 89 | */ |
| 90 | static int nr_add_node(ax25_address *nr, const char *mnemonic, ax25_address *ax25, | 90 | static int __must_check nr_add_node(ax25_address *nr, const char *mnemonic, |
| 91 | ax25_digi *ax25_digi, struct net_device *dev, int quality, int obs_count) | 91 | ax25_address *ax25, ax25_digi *ax25_digi, struct net_device *dev, |
| 92 | int quality, int obs_count) | ||
| 92 | { | 93 | { |
| 93 | struct nr_node *nr_node; | 94 | struct nr_node *nr_node; |
| 94 | struct nr_neigh *nr_neigh; | 95 | struct nr_neigh *nr_neigh; |
| @@ -406,7 +407,8 @@ static int nr_del_node(ax25_address *callsign, ax25_address *neighbour, struct n | |||
| 406 | /* | 407 | /* |
| 407 | * Lock a neighbour with a quality. | 408 | * Lock a neighbour with a quality. |
| 408 | */ | 409 | */ |
| 409 | static int nr_add_neigh(ax25_address *callsign, ax25_digi *ax25_digi, struct net_device *dev, unsigned int quality) | 410 | static int __must_check nr_add_neigh(ax25_address *callsign, |
| 411 | ax25_digi *ax25_digi, struct net_device *dev, unsigned int quality) | ||
| 410 | { | 412 | { |
| 411 | struct nr_neigh *nr_neigh; | 413 | struct nr_neigh *nr_neigh; |
| 412 | 414 | ||
| @@ -777,9 +779,13 @@ int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25) | |||
| 777 | nr_src = (ax25_address *)(skb->data + 0); | 779 | nr_src = (ax25_address *)(skb->data + 0); |
| 778 | nr_dest = (ax25_address *)(skb->data + 7); | 780 | nr_dest = (ax25_address *)(skb->data + 7); |
| 779 | 781 | ||
| 780 | if (ax25 != NULL) | 782 | if (ax25 != NULL) { |
| 781 | nr_add_node(nr_src, "", &ax25->dest_addr, ax25->digipeat, | 783 | ret = nr_add_node(nr_src, "", &ax25->dest_addr, ax25->digipeat, |
| 782 | ax25->ax25_dev->dev, 0, sysctl_netrom_obsolescence_count_initialiser); | 784 | ax25->ax25_dev->dev, 0, |
| 785 | sysctl_netrom_obsolescence_count_initialiser); | ||
| 786 | if (ret) | ||
| 787 | return ret; | ||
| 788 | } | ||
| 783 | 789 | ||
| 784 | if ((dev = nr_dev_get(nr_dest)) != NULL) { /* Its for me */ | 790 | if ((dev = nr_dev_get(nr_dest)) != NULL) { /* Its for me */ |
| 785 | if (ax25 == NULL) /* Its from me */ | 791 | if (ax25 == NULL) /* Its from me */ |
| @@ -844,6 +850,7 @@ int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25) | |||
| 844 | ret = (nr_neigh->ax25 != NULL); | 850 | ret = (nr_neigh->ax25 != NULL); |
| 845 | nr_node_unlock(nr_node); | 851 | nr_node_unlock(nr_node); |
| 846 | nr_node_put(nr_node); | 852 | nr_node_put(nr_node); |
| 853 | |||
| 847 | return ret; | 854 | return ret; |
| 848 | } | 855 | } |
| 849 | 856 | ||
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index 08a542855654..9e279464c9d1 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c | |||
| @@ -1314,7 +1314,8 @@ static int rose_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) | |||
| 1314 | if (copy_from_user(&rose_callsign, argp, sizeof(ax25_address))) | 1314 | if (copy_from_user(&rose_callsign, argp, sizeof(ax25_address))) |
| 1315 | return -EFAULT; | 1315 | return -EFAULT; |
| 1316 | if (ax25cmp(&rose_callsign, &null_ax25_address) != 0) | 1316 | if (ax25cmp(&rose_callsign, &null_ax25_address) != 0) |
| 1317 | ax25_listen_register(&rose_callsign, NULL); | 1317 | return ax25_listen_register(&rose_callsign, NULL); |
| 1318 | |||
| 1318 | return 0; | 1319 | return 0; |
| 1319 | 1320 | ||
| 1320 | case SIOCRSGL2CALL: | 1321 | case SIOCRSGL2CALL: |
| @@ -1481,6 +1482,15 @@ static struct notifier_block rose_dev_notifier = { | |||
| 1481 | 1482 | ||
| 1482 | static struct net_device **dev_rose; | 1483 | static struct net_device **dev_rose; |
| 1483 | 1484 | ||
| 1485 | static struct ax25_protocol rose_pid = { | ||
| 1486 | .pid = AX25_P_ROSE, | ||
| 1487 | .func = rose_route_frame | ||
| 1488 | }; | ||
| 1489 | |||
| 1490 | static struct ax25_linkfail rose_linkfail_notifier = { | ||
| 1491 | .func = rose_link_failed | ||
| 1492 | }; | ||
| 1493 | |||
| 1484 | static int __init rose_proto_init(void) | 1494 | static int __init rose_proto_init(void) |
| 1485 | { | 1495 | { |
| 1486 | int i; | 1496 | int i; |
| @@ -1530,8 +1540,8 @@ static int __init rose_proto_init(void) | |||
| 1530 | sock_register(&rose_family_ops); | 1540 | sock_register(&rose_family_ops); |
| 1531 | register_netdevice_notifier(&rose_dev_notifier); | 1541 | register_netdevice_notifier(&rose_dev_notifier); |
| 1532 | 1542 | ||
| 1533 | ax25_protocol_register(AX25_P_ROSE, rose_route_frame); | 1543 | ax25_register_pid(&rose_pid); |
| 1534 | ax25_linkfail_register(rose_link_failed); | 1544 | ax25_linkfail_register(&rose_linkfail_notifier); |
| 1535 | 1545 | ||
| 1536 | #ifdef CONFIG_SYSCTL | 1546 | #ifdef CONFIG_SYSCTL |
| 1537 | rose_register_sysctl(); | 1547 | rose_register_sysctl(); |
| @@ -1579,7 +1589,7 @@ static void __exit rose_exit(void) | |||
| 1579 | rose_rt_free(); | 1589 | rose_rt_free(); |
| 1580 | 1590 | ||
| 1581 | ax25_protocol_release(AX25_P_ROSE); | 1591 | ax25_protocol_release(AX25_P_ROSE); |
| 1582 | ax25_linkfail_release(rose_link_failed); | 1592 | ax25_linkfail_release(&rose_linkfail_notifier); |
| 1583 | 1593 | ||
| 1584 | if (ax25cmp(&rose_callsign, &null_ax25_address) != 0) | 1594 | if (ax25cmp(&rose_callsign, &null_ax25_address) != 0) |
| 1585 | ax25_listen_release(&rose_callsign, NULL); | 1595 | ax25_listen_release(&rose_callsign, NULL); |
diff --git a/net/rose/rose_dev.c b/net/rose/rose_dev.c index 7c279e2659ec..50824d345fa6 100644 --- a/net/rose/rose_dev.c +++ b/net/rose/rose_dev.c | |||
| @@ -93,20 +93,34 @@ static int rose_rebuild_header(struct sk_buff *skb) | |||
| 93 | static int rose_set_mac_address(struct net_device *dev, void *addr) | 93 | static int rose_set_mac_address(struct net_device *dev, void *addr) |
| 94 | { | 94 | { |
| 95 | struct sockaddr *sa = addr; | 95 | struct sockaddr *sa = addr; |
| 96 | int err; | ||
| 96 | 97 | ||
| 97 | rose_del_loopback_node((rose_address *)dev->dev_addr); | 98 | if (!memcmp(dev->dev_addr, sa->sa_data, dev->addr_len)) |
| 99 | return 0; | ||
| 98 | 100 | ||
| 99 | memcpy(dev->dev_addr, sa->sa_data, dev->addr_len); | 101 | if (dev->flags & IFF_UP) { |
| 102 | err = rose_add_loopback_node((rose_address *)sa->sa_data); | ||
| 103 | if (err) | ||
| 104 | return err; | ||
| 105 | |||
| 106 | rose_del_loopback_node((rose_address *)dev->dev_addr); | ||
| 107 | } | ||
| 100 | 108 | ||
| 101 | rose_add_loopback_node((rose_address *)dev->dev_addr); | 109 | memcpy(dev->dev_addr, sa->sa_data, dev->addr_len); |
| 102 | 110 | ||
| 103 | return 0; | 111 | return 0; |
| 104 | } | 112 | } |
| 105 | 113 | ||
| 106 | static int rose_open(struct net_device *dev) | 114 | static int rose_open(struct net_device *dev) |
| 107 | { | 115 | { |
| 116 | int err; | ||
| 117 | |||
| 118 | err = rose_add_loopback_node((rose_address *)dev->dev_addr); | ||
| 119 | if (err) | ||
| 120 | return err; | ||
| 121 | |||
| 108 | netif_start_queue(dev); | 122 | netif_start_queue(dev); |
| 109 | rose_add_loopback_node((rose_address *)dev->dev_addr); | 123 | |
| 110 | return 0; | 124 | return 0; |
| 111 | } | 125 | } |
| 112 | 126 | ||
diff --git a/net/rose/rose_loopback.c b/net/rose/rose_loopback.c index 103b4d38f88a..3e41bd93ab9f 100644 --- a/net/rose/rose_loopback.c +++ b/net/rose/rose_loopback.c | |||
| @@ -79,7 +79,8 @@ static void rose_loopback_timer(unsigned long param) | |||
| 79 | 79 | ||
| 80 | skb->h.raw = skb->data; | 80 | skb->h.raw = skb->data; |
| 81 | 81 | ||
| 82 | if ((sk = rose_find_socket(lci_o, rose_loopback_neigh)) != NULL) { | 82 | sk = rose_find_socket(lci_o, &rose_loopback_neigh); |
| 83 | if (sk) { | ||
| 83 | if (rose_process_rx_frame(sk, skb) == 0) | 84 | if (rose_process_rx_frame(sk, skb) == 0) |
| 84 | kfree_skb(skb); | 85 | kfree_skb(skb); |
| 85 | continue; | 86 | continue; |
| @@ -87,7 +88,7 @@ static void rose_loopback_timer(unsigned long param) | |||
| 87 | 88 | ||
| 88 | if (frametype == ROSE_CALL_REQUEST) { | 89 | if (frametype == ROSE_CALL_REQUEST) { |
| 89 | if ((dev = rose_dev_get(dest)) != NULL) { | 90 | if ((dev = rose_dev_get(dest)) != NULL) { |
| 90 | if (rose_rx_call_request(skb, dev, rose_loopback_neigh, lci_o) == 0) | 91 | if (rose_rx_call_request(skb, dev, &rose_loopback_neigh, lci_o) == 0) |
| 91 | kfree_skb(skb); | 92 | kfree_skb(skb); |
| 92 | } else { | 93 | } else { |
| 93 | kfree_skb(skb); | 94 | kfree_skb(skb); |
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c index 7252344779a0..8028c0d425dc 100644 --- a/net/rose/rose_route.c +++ b/net/rose/rose_route.c | |||
| @@ -46,13 +46,13 @@ static DEFINE_SPINLOCK(rose_neigh_list_lock); | |||
| 46 | static struct rose_route *rose_route_list; | 46 | static struct rose_route *rose_route_list; |
| 47 | static DEFINE_SPINLOCK(rose_route_list_lock); | 47 | static DEFINE_SPINLOCK(rose_route_list_lock); |
| 48 | 48 | ||
| 49 | struct rose_neigh *rose_loopback_neigh; | 49 | struct rose_neigh rose_loopback_neigh; |
| 50 | 50 | ||
| 51 | /* | 51 | /* |
| 52 | * Add a new route to a node, and in the process add the node and the | 52 | * Add a new route to a node, and in the process add the node and the |
| 53 | * neighbour if it is new. | 53 | * neighbour if it is new. |
| 54 | */ | 54 | */ |
| 55 | static int rose_add_node(struct rose_route_struct *rose_route, | 55 | static int __must_check rose_add_node(struct rose_route_struct *rose_route, |
| 56 | struct net_device *dev) | 56 | struct net_device *dev) |
| 57 | { | 57 | { |
| 58 | struct rose_node *rose_node, *rose_tmpn, *rose_tmpp; | 58 | struct rose_node *rose_node, *rose_tmpn, *rose_tmpp; |
| @@ -361,33 +361,30 @@ out: | |||
| 361 | /* | 361 | /* |
| 362 | * Add the loopback neighbour. | 362 | * Add the loopback neighbour. |
| 363 | */ | 363 | */ |
| 364 | int rose_add_loopback_neigh(void) | 364 | void rose_add_loopback_neigh(void) |
| 365 | { | 365 | { |
| 366 | if ((rose_loopback_neigh = kmalloc(sizeof(struct rose_neigh), GFP_ATOMIC)) == NULL) | 366 | struct rose_neigh *sn = &rose_loopback_neigh; |
| 367 | return -ENOMEM; | ||
| 368 | 367 | ||
| 369 | rose_loopback_neigh->callsign = null_ax25_address; | 368 | sn->callsign = null_ax25_address; |
| 370 | rose_loopback_neigh->digipeat = NULL; | 369 | sn->digipeat = NULL; |
| 371 | rose_loopback_neigh->ax25 = NULL; | 370 | sn->ax25 = NULL; |
| 372 | rose_loopback_neigh->dev = NULL; | 371 | sn->dev = NULL; |
| 373 | rose_loopback_neigh->count = 0; | 372 | sn->count = 0; |
| 374 | rose_loopback_neigh->use = 0; | 373 | sn->use = 0; |
| 375 | rose_loopback_neigh->dce_mode = 1; | 374 | sn->dce_mode = 1; |
| 376 | rose_loopback_neigh->loopback = 1; | 375 | sn->loopback = 1; |
| 377 | rose_loopback_neigh->number = rose_neigh_no++; | 376 | sn->number = rose_neigh_no++; |
| 378 | rose_loopback_neigh->restarted = 1; | 377 | sn->restarted = 1; |
| 379 | 378 | ||
| 380 | skb_queue_head_init(&rose_loopback_neigh->queue); | 379 | skb_queue_head_init(&sn->queue); |
| 381 | 380 | ||
| 382 | init_timer(&rose_loopback_neigh->ftimer); | 381 | init_timer(&sn->ftimer); |
| 383 | init_timer(&rose_loopback_neigh->t0timer); | 382 | init_timer(&sn->t0timer); |
| 384 | 383 | ||
| 385 | spin_lock_bh(&rose_neigh_list_lock); | 384 | spin_lock_bh(&rose_neigh_list_lock); |
| 386 | rose_loopback_neigh->next = rose_neigh_list; | 385 | sn->next = rose_neigh_list; |
| 387 | rose_neigh_list = rose_loopback_neigh; | 386 | rose_neigh_list = sn; |
| 388 | spin_unlock_bh(&rose_neigh_list_lock); | 387 | spin_unlock_bh(&rose_neigh_list_lock); |
| 389 | |||
| 390 | return 0; | ||
| 391 | } | 388 | } |
| 392 | 389 | ||
| 393 | /* | 390 | /* |
| @@ -421,13 +418,13 @@ int rose_add_loopback_node(rose_address *address) | |||
| 421 | rose_node->mask = 10; | 418 | rose_node->mask = 10; |
| 422 | rose_node->count = 1; | 419 | rose_node->count = 1; |
| 423 | rose_node->loopback = 1; | 420 | rose_node->loopback = 1; |
| 424 | rose_node->neighbour[0] = rose_loopback_neigh; | 421 | rose_node->neighbour[0] = &rose_loopback_neigh; |
| 425 | 422 | ||
| 426 | /* Insert at the head of list. Address is always mask=10 */ | 423 | /* Insert at the head of list. Address is always mask=10 */ |
| 427 | rose_node->next = rose_node_list; | 424 | rose_node->next = rose_node_list; |
| 428 | rose_node_list = rose_node; | 425 | rose_node_list = rose_node; |
| 429 | 426 | ||
| 430 | rose_loopback_neigh->count++; | 427 | rose_loopback_neigh.count++; |
| 431 | 428 | ||
| 432 | out: | 429 | out: |
| 433 | spin_unlock_bh(&rose_node_list_lock); | 430 | spin_unlock_bh(&rose_node_list_lock); |
| @@ -458,7 +455,7 @@ void rose_del_loopback_node(rose_address *address) | |||
| 458 | 455 | ||
| 459 | rose_remove_node(rose_node); | 456 | rose_remove_node(rose_node); |
| 460 | 457 | ||
| 461 | rose_loopback_neigh->count--; | 458 | rose_loopback_neigh.count--; |
| 462 | 459 | ||
| 463 | out: | 460 | out: |
| 464 | spin_unlock_bh(&rose_node_list_lock); | 461 | spin_unlock_bh(&rose_node_list_lock); |
diff --git a/net/sctp/associola.c b/net/sctp/associola.c index ad0057db0f91..5db95caed0a3 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c | |||
| @@ -298,6 +298,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a | |||
| 298 | asoc->default_flags = sp->default_flags; | 298 | asoc->default_flags = sp->default_flags; |
| 299 | asoc->default_context = sp->default_context; | 299 | asoc->default_context = sp->default_context; |
| 300 | asoc->default_timetolive = sp->default_timetolive; | 300 | asoc->default_timetolive = sp->default_timetolive; |
| 301 | asoc->default_rcv_context = sp->default_rcv_context; | ||
| 301 | 302 | ||
| 302 | return asoc; | 303 | return asoc; |
| 303 | 304 | ||
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index 3c3e560087ca..ef36be073a13 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c | |||
| @@ -78,8 +78,44 @@ | |||
| 78 | 78 | ||
| 79 | #include <asm/uaccess.h> | 79 | #include <asm/uaccess.h> |
| 80 | 80 | ||
| 81 | /* Event handler for inet6 address addition/deletion events. */ | ||
| 82 | static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev, | ||
| 83 | void *ptr) | ||
| 84 | { | ||
| 85 | struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr; | ||
| 86 | struct sctp_sockaddr_entry *addr; | ||
| 87 | struct list_head *pos, *temp; | ||
| 88 | |||
| 89 | switch (ev) { | ||
| 90 | case NETDEV_UP: | ||
| 91 | addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC); | ||
| 92 | if (addr) { | ||
| 93 | addr->a.v6.sin6_family = AF_INET6; | ||
| 94 | addr->a.v6.sin6_port = 0; | ||
| 95 | memcpy(&addr->a.v6.sin6_addr, &ifa->addr, | ||
| 96 | sizeof(struct in6_addr)); | ||
| 97 | addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex; | ||
| 98 | list_add_tail(&addr->list, &sctp_local_addr_list); | ||
| 99 | } | ||
| 100 | break; | ||
| 101 | case NETDEV_DOWN: | ||
| 102 | list_for_each_safe(pos, temp, &sctp_local_addr_list) { | ||
| 103 | addr = list_entry(pos, struct sctp_sockaddr_entry, list); | ||
| 104 | if (ipv6_addr_equal(&addr->a.v6.sin6_addr, &ifa->addr)) { | ||
| 105 | list_del(pos); | ||
| 106 | kfree(addr); | ||
| 107 | break; | ||
| 108 | } | ||
| 109 | } | ||
| 110 | |||
| 111 | break; | ||
| 112 | } | ||
| 113 | |||
| 114 | return NOTIFY_DONE; | ||
| 115 | } | ||
| 116 | |||
| 81 | static struct notifier_block sctp_inet6addr_notifier = { | 117 | static struct notifier_block sctp_inet6addr_notifier = { |
| 82 | .notifier_call = sctp_inetaddr_event, | 118 | .notifier_call = sctp_inet6addr_event, |
| 83 | }; | 119 | }; |
| 84 | 120 | ||
| 85 | /* ICMP error handler. */ | 121 | /* ICMP error handler. */ |
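Both new notifier handlers (this one and the IPv4 one in protocol.c below) unlink the matching entry while walking sctp_local_addr_list, which is why they iterate with list_for_each_safe(): the iterator caches the next pointer before the body runs. The NETDEV_DOWN branch, reduced to its shape (match() is a placeholder for the ipv6_addr_equal() / ifa_local comparison in the real handlers):

    list_for_each_safe(pos, temp, &sctp_local_addr_list) {
            addr = list_entry(pos, struct sctp_sockaddr_entry, list);
            if (match(addr)) {          /* the address going away? */
                    list_del(pos);      /* safe: 'temp' already points past it */
                    kfree(addr);
                    break;
            }
    }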
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index f2ba8615895b..225f39b5d595 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c | |||
| @@ -163,7 +163,7 @@ static void sctp_v4_copy_addrlist(struct list_head *addrlist, | |||
| 163 | /* Extract our IP addresses from the system and stash them in the | 163 | /* Extract our IP addresses from the system and stash them in the |
| 164 | * protocol structure. | 164 | * protocol structure. |
| 165 | */ | 165 | */ |
| 166 | static void __sctp_get_local_addr_list(void) | 166 | static void sctp_get_local_addr_list(void) |
| 167 | { | 167 | { |
| 168 | struct net_device *dev; | 168 | struct net_device *dev; |
| 169 | struct list_head *pos; | 169 | struct list_head *pos; |
| @@ -179,17 +179,8 @@ static void __sctp_get_local_addr_list(void) | |||
| 179 | read_unlock(&dev_base_lock); | 179 | read_unlock(&dev_base_lock); |
| 180 | } | 180 | } |
| 181 | 181 | ||
| 182 | static void sctp_get_local_addr_list(void) | ||
| 183 | { | ||
| 184 | unsigned long flags; | ||
| 185 | |||
| 186 | sctp_spin_lock_irqsave(&sctp_local_addr_lock, flags); | ||
| 187 | __sctp_get_local_addr_list(); | ||
| 188 | sctp_spin_unlock_irqrestore(&sctp_local_addr_lock, flags); | ||
| 189 | } | ||
| 190 | |||
| 191 | /* Free the existing local addresses. */ | 182 | /* Free the existing local addresses. */ |
| 192 | static void __sctp_free_local_addr_list(void) | 183 | static void sctp_free_local_addr_list(void) |
| 193 | { | 184 | { |
| 194 | struct sctp_sockaddr_entry *addr; | 185 | struct sctp_sockaddr_entry *addr; |
| 195 | struct list_head *pos, *temp; | 186 | struct list_head *pos, *temp; |
| @@ -201,27 +192,15 @@ static void __sctp_free_local_addr_list(void) | |||
| 201 | } | 192 | } |
| 202 | } | 193 | } |
| 203 | 194 | ||
| 204 | /* Free the existing local addresses. */ | ||
| 205 | static void sctp_free_local_addr_list(void) | ||
| 206 | { | ||
| 207 | unsigned long flags; | ||
| 208 | |||
| 209 | sctp_spin_lock_irqsave(&sctp_local_addr_lock, flags); | ||
| 210 | __sctp_free_local_addr_list(); | ||
| 211 | sctp_spin_unlock_irqrestore(&sctp_local_addr_lock, flags); | ||
| 212 | } | ||
| 213 | |||
| 214 | /* Copy the local addresses which are valid for 'scope' into 'bp'. */ | 195 | /* Copy the local addresses which are valid for 'scope' into 'bp'. */ |
| 215 | int sctp_copy_local_addr_list(struct sctp_bind_addr *bp, sctp_scope_t scope, | 196 | int sctp_copy_local_addr_list(struct sctp_bind_addr *bp, sctp_scope_t scope, |
| 216 | gfp_t gfp, int copy_flags) | 197 | gfp_t gfp, int copy_flags) |
| 217 | { | 198 | { |
| 218 | struct sctp_sockaddr_entry *addr; | 199 | struct sctp_sockaddr_entry *addr; |
| 219 | int error = 0; | 200 | int error = 0; |
| 220 | struct list_head *pos; | 201 | struct list_head *pos, *temp; |
| 221 | unsigned long flags; | ||
| 222 | 202 | ||
| 223 | sctp_spin_lock_irqsave(&sctp_local_addr_lock, flags); | 203 | list_for_each_safe(pos, temp, &sctp_local_addr_list) { |
| 224 | list_for_each(pos, &sctp_local_addr_list) { | ||
| 225 | addr = list_entry(pos, struct sctp_sockaddr_entry, list); | 204 | addr = list_entry(pos, struct sctp_sockaddr_entry, list); |
| 226 | if (sctp_in_scope(&addr->a, scope)) { | 205 | if (sctp_in_scope(&addr->a, scope)) { |
| 227 | /* Now that the address is in scope, check to see if | 206 | /* Now that the address is in scope, check to see if |
| @@ -242,7 +221,6 @@ int sctp_copy_local_addr_list(struct sctp_bind_addr *bp, sctp_scope_t scope, | |||
| 242 | } | 221 | } |
| 243 | 222 | ||
| 244 | end_copy: | 223 | end_copy: |
| 245 | sctp_spin_unlock_irqrestore(&sctp_local_addr_lock, flags); | ||
| 246 | return error; | 224 | return error; |
| 247 | } | 225 | } |
| 248 | 226 | ||
| @@ -622,18 +600,36 @@ static void sctp_v4_seq_dump_addr(struct seq_file *seq, union sctp_addr *addr) | |||
| 622 | seq_printf(seq, "%d.%d.%d.%d ", NIPQUAD(addr->v4.sin_addr)); | 600 | seq_printf(seq, "%d.%d.%d.%d ", NIPQUAD(addr->v4.sin_addr)); |
| 623 | } | 601 | } |
| 624 | 602 | ||
| 625 | /* Event handler for inet address addition/deletion events. | 603 | /* Event handler for inet address addition/deletion events. */ |
| 626 | * Basically, whenever there is an event, we re-build our local address list. | 604 | static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev, |
| 627 | */ | 605 | void *ptr) |
| 628 | int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev, | ||
| 629 | void *ptr) | ||
| 630 | { | 606 | { |
| 631 | unsigned long flags; | 607 | struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; |
| 608 | struct sctp_sockaddr_entry *addr; | ||
| 609 | struct list_head *pos, *temp; | ||
| 632 | 610 | ||
| 633 | sctp_spin_lock_irqsave(&sctp_local_addr_lock, flags); | 611 | switch (ev) { |
| 634 | __sctp_free_local_addr_list(); | 612 | case NETDEV_UP: |
| 635 | __sctp_get_local_addr_list(); | 613 | addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC); |
| 636 | sctp_spin_unlock_irqrestore(&sctp_local_addr_lock, flags); | 614 | if (addr) { |
| 615 | addr->a.v4.sin_family = AF_INET; | ||
| 616 | addr->a.v4.sin_port = 0; | ||
| 617 | addr->a.v4.sin_addr.s_addr = ifa->ifa_local; | ||
| 618 | list_add_tail(&addr->list, &sctp_local_addr_list); | ||
| 619 | } | ||
| 620 | break; | ||
| 621 | case NETDEV_DOWN: | ||
| 622 | list_for_each_safe(pos, temp, &sctp_local_addr_list) { | ||
| 623 | addr = list_entry(pos, struct sctp_sockaddr_entry, list); | ||
| 624 | if (addr->a.v4.sin_addr.s_addr == ifa->ifa_local) { | ||
| 625 | list_del(pos); | ||
| 626 | kfree(addr); | ||
| 627 | break; | ||
| 628 | } | ||
| 629 | } | ||
| 630 | |||
| 631 | break; | ||
| 632 | } | ||
| 637 | 633 | ||
| 638 | return NOTIFY_DONE; | 634 | return NOTIFY_DONE; |
| 639 | } | 635 | } |
| @@ -1172,13 +1168,12 @@ SCTP_STATIC __init int sctp_init(void) | |||
| 1172 | 1168 | ||
| 1173 | /* Initialize the local address list. */ | 1169 | /* Initialize the local address list. */ |
| 1174 | INIT_LIST_HEAD(&sctp_local_addr_list); | 1170 | INIT_LIST_HEAD(&sctp_local_addr_list); |
| 1175 | spin_lock_init(&sctp_local_addr_lock); | 1171 | |
| 1172 | sctp_get_local_addr_list(); | ||
| 1176 | 1173 | ||
| 1177 | /* Register notifier for inet address additions/deletions. */ | 1174 | /* Register notifier for inet address additions/deletions. */ |
| 1178 | register_inetaddr_notifier(&sctp_inetaddr_notifier); | 1175 | register_inetaddr_notifier(&sctp_inetaddr_notifier); |
| 1179 | 1176 | ||
| 1180 | sctp_get_local_addr_list(); | ||
| 1181 | |||
| 1182 | __unsafe(THIS_MODULE); | 1177 | __unsafe(THIS_MODULE); |
| 1183 | status = 0; | 1178 | status = 0; |
| 1184 | out: | 1179 | out: |
| @@ -1263,6 +1258,7 @@ module_exit(sctp_exit); | |||
| 1263 | * __stringify doesn't likes enums, so use IPPROTO_SCTP value (132) directly. | 1258 | * __stringify doesn't likes enums, so use IPPROTO_SCTP value (132) directly. |
| 1264 | */ | 1259 | */ |
| 1265 | MODULE_ALIAS("net-pf-" __stringify(PF_INET) "-proto-132"); | 1260 | MODULE_ALIAS("net-pf-" __stringify(PF_INET) "-proto-132"); |
| 1261 | MODULE_ALIAS("net-pf-" __stringify(PF_INET6) "-proto-132"); | ||
| 1266 | MODULE_AUTHOR("Linux Kernel SCTP developers <lksctp-developers@lists.sourceforge.net>"); | 1262 | MODULE_AUTHOR("Linux Kernel SCTP developers <lksctp-developers@lists.sourceforge.net>"); |
| 1267 | MODULE_DESCRIPTION("Support for the SCTP protocol (RFC2960)"); | 1263 | MODULE_DESCRIPTION("Support for the SCTP protocol (RFC2960)"); |
| 1268 | MODULE_LICENSE("GPL"); | 1264 | MODULE_LICENSE("GPL"); |
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 30927d3a597f..f0bbe36799cf 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c | |||
| @@ -184,7 +184,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc, | |||
| 184 | struct sctp_sock *sp; | 184 | struct sctp_sock *sp; |
| 185 | sctp_supported_addrs_param_t sat; | 185 | sctp_supported_addrs_param_t sat; |
| 186 | __be16 types[2]; | 186 | __be16 types[2]; |
| 187 | sctp_adaption_ind_param_t aiparam; | 187 | sctp_adaptation_ind_param_t aiparam; |
| 188 | 188 | ||
| 189 | /* RFC 2960 3.3.2 Initiation (INIT) (1) | 189 | /* RFC 2960 3.3.2 Initiation (INIT) (1) |
| 190 | * | 190 | * |
| @@ -249,9 +249,9 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc, | |||
| 249 | sctp_addto_chunk(retval, sizeof(ecap_param), &ecap_param); | 249 | sctp_addto_chunk(retval, sizeof(ecap_param), &ecap_param); |
| 250 | if (sctp_prsctp_enable) | 250 | if (sctp_prsctp_enable) |
| 251 | sctp_addto_chunk(retval, sizeof(prsctp_param), &prsctp_param); | 251 | sctp_addto_chunk(retval, sizeof(prsctp_param), &prsctp_param); |
| 252 | aiparam.param_hdr.type = SCTP_PARAM_ADAPTION_LAYER_IND; | 252 | aiparam.param_hdr.type = SCTP_PARAM_ADAPTATION_LAYER_IND; |
| 253 | aiparam.param_hdr.length = htons(sizeof(aiparam)); | 253 | aiparam.param_hdr.length = htons(sizeof(aiparam)); |
| 254 | aiparam.adaption_ind = htonl(sp->adaption_ind); | 254 | aiparam.adaptation_ind = htonl(sp->adaptation_ind); |
| 255 | sctp_addto_chunk(retval, sizeof(aiparam), &aiparam); | 255 | sctp_addto_chunk(retval, sizeof(aiparam), &aiparam); |
| 256 | nodata: | 256 | nodata: |
| 257 | kfree(addrs.v); | 257 | kfree(addrs.v); |
| @@ -269,7 +269,7 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc, | |||
| 269 | sctp_cookie_param_t *cookie; | 269 | sctp_cookie_param_t *cookie; |
| 270 | int cookie_len; | 270 | int cookie_len; |
| 271 | size_t chunksize; | 271 | size_t chunksize; |
| 272 | sctp_adaption_ind_param_t aiparam; | 272 | sctp_adaptation_ind_param_t aiparam; |
| 273 | 273 | ||
| 274 | retval = NULL; | 274 | retval = NULL; |
| 275 | 275 | ||
| @@ -323,9 +323,9 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc, | |||
| 323 | if (asoc->peer.prsctp_capable) | 323 | if (asoc->peer.prsctp_capable) |
| 324 | sctp_addto_chunk(retval, sizeof(prsctp_param), &prsctp_param); | 324 | sctp_addto_chunk(retval, sizeof(prsctp_param), &prsctp_param); |
| 325 | 325 | ||
| 326 | aiparam.param_hdr.type = SCTP_PARAM_ADAPTION_LAYER_IND; | 326 | aiparam.param_hdr.type = SCTP_PARAM_ADAPTATION_LAYER_IND; |
| 327 | aiparam.param_hdr.length = htons(sizeof(aiparam)); | 327 | aiparam.param_hdr.length = htons(sizeof(aiparam)); |
| 328 | aiparam.adaption_ind = htonl(sctp_sk(asoc->base.sk)->adaption_ind); | 328 | aiparam.adaptation_ind = htonl(sctp_sk(asoc->base.sk)->adaptation_ind); |
| 329 | sctp_addto_chunk(retval, sizeof(aiparam), &aiparam); | 329 | sctp_addto_chunk(retval, sizeof(aiparam), &aiparam); |
| 330 | 330 | ||
| 331 | /* We need to remove the const qualifier at this point. */ | 331 | /* We need to remove the const qualifier at this point. */ |
| @@ -1300,8 +1300,8 @@ static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep, | |||
| 1300 | /* Remember PR-SCTP capability. */ | 1300 | /* Remember PR-SCTP capability. */ |
| 1301 | cookie->c.prsctp_capable = asoc->peer.prsctp_capable; | 1301 | cookie->c.prsctp_capable = asoc->peer.prsctp_capable; |
| 1302 | 1302 | ||
| 1303 | /* Save adaption indication in the cookie. */ | 1303 | /* Save adaptation indication in the cookie. */ |
| 1304 | cookie->c.adaption_ind = asoc->peer.adaption_ind; | 1304 | cookie->c.adaptation_ind = asoc->peer.adaptation_ind; |
| 1305 | 1305 | ||
| 1306 | /* Set an expiration time for the cookie. */ | 1306 | /* Set an expiration time for the cookie. */ |
| 1307 | do_gettimeofday(&cookie->c.expiration); | 1307 | do_gettimeofday(&cookie->c.expiration); |
| @@ -1512,7 +1512,7 @@ no_hmac: | |||
| 1512 | retval->addip_serial = retval->c.initial_tsn; | 1512 | retval->addip_serial = retval->c.initial_tsn; |
| 1513 | retval->adv_peer_ack_point = retval->ctsn_ack_point; | 1513 | retval->adv_peer_ack_point = retval->ctsn_ack_point; |
| 1514 | retval->peer.prsctp_capable = retval->c.prsctp_capable; | 1514 | retval->peer.prsctp_capable = retval->c.prsctp_capable; |
| 1515 | retval->peer.adaption_ind = retval->c.adaption_ind; | 1515 | retval->peer.adaptation_ind = retval->c.adaptation_ind; |
| 1516 | 1516 | ||
| 1517 | /* The INIT stuff will be done by the side effects. */ | 1517 | /* The INIT stuff will be done by the side effects. */ |
| 1518 | return retval; | 1518 | return retval; |
| @@ -1743,7 +1743,7 @@ static int sctp_verify_param(const struct sctp_association *asoc, | |||
| 1743 | case SCTP_PARAM_HEARTBEAT_INFO: | 1743 | case SCTP_PARAM_HEARTBEAT_INFO: |
| 1744 | case SCTP_PARAM_UNRECOGNIZED_PARAMETERS: | 1744 | case SCTP_PARAM_UNRECOGNIZED_PARAMETERS: |
| 1745 | case SCTP_PARAM_ECN_CAPABLE: | 1745 | case SCTP_PARAM_ECN_CAPABLE: |
| 1746 | case SCTP_PARAM_ADAPTION_LAYER_IND: | 1746 | case SCTP_PARAM_ADAPTATION_LAYER_IND: |
| 1747 | break; | 1747 | break; |
| 1748 | 1748 | ||
| 1749 | case SCTP_PARAM_HOST_NAME_ADDRESS: | 1749 | case SCTP_PARAM_HOST_NAME_ADDRESS: |
| @@ -2098,8 +2098,8 @@ static int sctp_process_param(struct sctp_association *asoc, | |||
| 2098 | asoc->peer.ecn_capable = 1; | 2098 | asoc->peer.ecn_capable = 1; |
| 2099 | break; | 2099 | break; |
| 2100 | 2100 | ||
| 2101 | case SCTP_PARAM_ADAPTION_LAYER_IND: | 2101 | case SCTP_PARAM_ADAPTATION_LAYER_IND: |
| 2102 | asoc->peer.adaption_ind = param.aind->adaption_ind; | 2102 | asoc->peer.adaptation_ind = param.aind->adaptation_ind; |
| 2103 | break; | 2103 | break; |
| 2104 | 2104 | ||
| 2105 | case SCTP_PARAM_FWD_TSN_SUPPORT: | 2105 | case SCTP_PARAM_FWD_TSN_SUPPORT: |
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 27cc444aaf11..aa51d190bfb2 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c | |||
| @@ -688,12 +688,12 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep, | |||
| 688 | goto nomem_ev; | 688 | goto nomem_ev; |
| 689 | 689 | ||
| 690 | /* Sockets API Draft Section 5.3.1.6 | 690 | /* Sockets API Draft Section 5.3.1.6 |
| 691 | * When a peer sends a Adaption Layer Indication parameter , SCTP | 691 | * When a peer sends a Adaptation Layer Indication parameter , SCTP |
| 692 | * delivers this notification to inform the application that of the | 692 | * delivers this notification to inform the application that of the |
| 693 | * peers requested adaption layer. | 693 | * peers requested adaptation layer. |
| 694 | */ | 694 | */ |
| 695 | if (new_asoc->peer.adaption_ind) { | 695 | if (new_asoc->peer.adaptation_ind) { |
| 696 | ai_ev = sctp_ulpevent_make_adaption_indication(new_asoc, | 696 | ai_ev = sctp_ulpevent_make_adaptation_indication(new_asoc, |
| 697 | GFP_ATOMIC); | 697 | GFP_ATOMIC); |
| 698 | if (!ai_ev) | 698 | if (!ai_ev) |
| 699 | goto nomem_aiev; | 699 | goto nomem_aiev; |
| @@ -820,12 +820,12 @@ sctp_disposition_t sctp_sf_do_5_1E_ca(const struct sctp_endpoint *ep, | |||
| 820 | sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev)); | 820 | sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev)); |
| 821 | 821 | ||
| 822 | /* Sockets API Draft Section 5.3.1.6 | 822 | /* Sockets API Draft Section 5.3.1.6 |
| 823 | * When a peer sends a Adaption Layer Indication parameter , SCTP | 823 | * When a peer sends a Adaptation Layer Indication parameter , SCTP |
| 824 | * delivers this notification to inform the application that of the | 824 | * delivers this notification to inform the application that of the |
| 825 | * peers requested adaption layer. | 825 | * peers requested adaptation layer. |
| 826 | */ | 826 | */ |
| 827 | if (asoc->peer.adaption_ind) { | 827 | if (asoc->peer.adaptation_ind) { |
| 828 | ev = sctp_ulpevent_make_adaption_indication(asoc, GFP_ATOMIC); | 828 | ev = sctp_ulpevent_make_adaptation_indication(asoc, GFP_ATOMIC); |
| 829 | if (!ev) | 829 | if (!ev) |
| 830 | goto nomem; | 830 | goto nomem; |
| 831 | 831 | ||
| @@ -1698,12 +1698,12 @@ static sctp_disposition_t sctp_sf_do_dupcook_b(const struct sctp_endpoint *ep, | |||
| 1698 | sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev)); | 1698 | sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev)); |
| 1699 | 1699 | ||
| 1700 | /* Sockets API Draft Section 5.3.1.6 | 1700 | /* Sockets API Draft Section 5.3.1.6 |
| 1701 | * When a peer sends a Adaption Layer Indication parameter , SCTP | 1701 | * When a peer sends a Adaptation Layer Indication parameter , SCTP |
| 1702 | * delivers this notification to inform the application that of the | 1702 | * delivers this notification to inform the application that of the |
| 1703 | * peers requested adaption layer. | 1703 | * peers requested adaptation layer. |
| 1704 | */ | 1704 | */ |
| 1705 | if (asoc->peer.adaption_ind) { | 1705 | if (asoc->peer.adaptation_ind) { |
| 1706 | ev = sctp_ulpevent_make_adaption_indication(asoc, GFP_ATOMIC); | 1706 | ev = sctp_ulpevent_make_adaptation_indication(asoc, GFP_ATOMIC); |
| 1707 | if (!ev) | 1707 | if (!ev) |
| 1708 | goto nomem_ev; | 1708 | goto nomem_ev; |
| 1709 | 1709 | ||
| @@ -1791,12 +1791,12 @@ static sctp_disposition_t sctp_sf_do_dupcook_d(const struct sctp_endpoint *ep, | |||
| 1791 | goto nomem; | 1791 | goto nomem; |
| 1792 | 1792 | ||
| 1793 | /* Sockets API Draft Section 5.3.1.6 | 1793 | /* Sockets API Draft Section 5.3.1.6 |
| 1794 | * When a peer sends a Adaption Layer Indication parameter, | 1794 | * When a peer sends a Adaptation Layer Indication parameter, |
| 1795 | * SCTP delivers this notification to inform the application | 1795 | * SCTP delivers this notification to inform the application |
| 1796 | * that of the peers requested adaption layer. | 1796 | * that of the peers requested adaptation layer. |
| 1797 | */ | 1797 | */ |
| 1798 | if (asoc->peer.adaption_ind) { | 1798 | if (asoc->peer.adaptation_ind) { |
| 1799 | ai_ev = sctp_ulpevent_make_adaption_indication(asoc, | 1799 | ai_ev = sctp_ulpevent_make_adaptation_indication(asoc, |
| 1800 | GFP_ATOMIC); | 1800 | GFP_ATOMIC); |
| 1801 | if (!ai_ev) | 1801 | if (!ai_ev) |
| 1802 | goto nomem; | 1802 | goto nomem; |
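Editor's note: the four sm_statefuns.c hunks above repeat one guard: the SCTP_ADAPTATION_INDICATION event is built and queued to the ULP only if the peer actually supplied an indication. A condensed sketch of that shared shape (the helper name is hypothetical; the patch keeps the code open-coded in each state function):

    /* Hypothetical helper illustrating the repeated guard above. */
    static struct sctp_ulpevent *
    make_adaptation_event_if_any(const struct sctp_association *asoc, gfp_t gfp)
    {
            if (!asoc->peer.adaptation_ind)
                    return NULL;    /* peer sent no indication: nothing to deliver */
            return sctp_ulpevent_make_adaptation_indication(asoc, gfp);
    }

Callers then either queue the event with SCTP_CMD_EVENT_ULP or take the nomem path when allocation fails, exactly as in the hunks above.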
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 1e8132b8c4d9..388d0fb1a377 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
| @@ -2731,17 +2731,57 @@ static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optva | |||
| 2731 | return err; | 2731 | return err; |
| 2732 | } | 2732 | } |
| 2733 | 2733 | ||
| 2734 | static int sctp_setsockopt_adaption_layer(struct sock *sk, char __user *optval, | 2734 | static int sctp_setsockopt_adaptation_layer(struct sock *sk, char __user *optval, |
| 2735 | int optlen) | 2735 | int optlen) |
| 2736 | { | 2736 | { |
| 2737 | struct sctp_setadaption adaption; | 2737 | struct sctp_setadaptation adaptation; |
| 2738 | 2738 | ||
| 2739 | if (optlen != sizeof(struct sctp_setadaption)) | 2739 | if (optlen != sizeof(struct sctp_setadaptation)) |
| 2740 | return -EINVAL; | 2740 | return -EINVAL; |
| 2741 | if (copy_from_user(&adaption, optval, optlen)) | 2741 | if (copy_from_user(&adaptation, optval, optlen)) |
| 2742 | return -EFAULT; | 2742 | return -EFAULT; |
| 2743 | 2743 | ||
| 2744 | sctp_sk(sk)->adaption_ind = adaption.ssb_adaption_ind; | 2744 | sctp_sk(sk)->adaptation_ind = adaptation.ssb_adaptation_ind; |
| 2745 | |||
| 2746 | return 0; | ||
| 2747 | } | ||
| 2748 | |||
| 2749 | /* | ||
| 2750 | * 7.1.29. Set or Get the default context (SCTP_CONTEXT) | ||
| 2751 | * | ||
| 2752 | * The context field in the sctp_sndrcvinfo structure is normally only | ||
| 2753 | * used when a failed message is retrieved holding the value that was | ||
| 2754 | * sent down on the actual send call. This option allows the setting of | ||
| 2755 | * a default context on an association basis that will be received on | ||
| 2756 | * reading messages from the peer. This is especially helpful in the | ||
| 2757 | * one-2-many model for an application to keep some reference to an | ||
| 2758 | * internal state machine that is processing messages on the | ||
| 2759 | * association. Note that the setting of this value only effects | ||
| 2760 | * received messages from the peer and does not effect the value that is | ||
| 2761 | * saved with outbound messages. | ||
| 2762 | */ | ||
| 2763 | static int sctp_setsockopt_context(struct sock *sk, char __user *optval, | ||
| 2764 | int optlen) | ||
| 2765 | { | ||
| 2766 | struct sctp_assoc_value params; | ||
| 2767 | struct sctp_sock *sp; | ||
| 2768 | struct sctp_association *asoc; | ||
| 2769 | |||
| 2770 | if (optlen != sizeof(struct sctp_assoc_value)) | ||
| 2771 | return -EINVAL; | ||
| 2772 | if (copy_from_user(¶ms, optval, optlen)) | ||
| 2773 | return -EFAULT; | ||
| 2774 | |||
| 2775 | sp = sctp_sk(sk); | ||
| 2776 | |||
| 2777 | if (params.assoc_id != 0) { | ||
| 2778 | asoc = sctp_id2assoc(sk, params.assoc_id); | ||
| 2779 | if (!asoc) | ||
| 2780 | return -EINVAL; | ||
| 2781 | asoc->default_rcv_context = params.assoc_value; | ||
| 2782 | } else { | ||
| 2783 | sp->default_rcv_context = params.assoc_value; | ||
| 2784 | } | ||
| 2745 | 2785 | ||
| 2746 | return 0; | 2786 | return 0; |
| 2747 | } | 2787 | } |
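Editor's note: the new SCTP_CONTEXT option is driven from user space with the generic struct sctp_assoc_value visible in the hunk: assoc_id selects an association (0 sets the socket-wide default on a one-to-many socket) and assoc_value becomes the context later reported in sinfo_context. A hedged usage sketch (error handling trimmed; exact header contents depend on the installed SCTP headers):

    #include <netinet/sctp.h>
    #include <sys/socket.h>

    /* Set a socket-wide default receive context of 0x1234. */
    static int set_default_rcv_context(int sd)
    {
            struct sctp_assoc_value av = {
                    .assoc_id    = 0,        /* 0: applies to the whole socket   */
                    .assoc_value = 0x1234,   /* echoed back in sinfo_context     */
            };

            return setsockopt(sd, IPPROTO_SCTP, SCTP_CONTEXT, &av, sizeof(av));
    }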
| @@ -2854,8 +2894,11 @@ SCTP_STATIC int sctp_setsockopt(struct sock *sk, int level, int optname, | |||
| 2854 | case SCTP_MAXSEG: | 2894 | case SCTP_MAXSEG: |
| 2855 | retval = sctp_setsockopt_maxseg(sk, optval, optlen); | 2895 | retval = sctp_setsockopt_maxseg(sk, optval, optlen); |
| 2856 | break; | 2896 | break; |
| 2857 | case SCTP_ADAPTION_LAYER: | 2897 | case SCTP_ADAPTATION_LAYER: |
| 2858 | retval = sctp_setsockopt_adaption_layer(sk, optval, optlen); | 2898 | retval = sctp_setsockopt_adaptation_layer(sk, optval, optlen); |
| 2899 | break; | ||
| 2900 | case SCTP_CONTEXT: | ||
| 2901 | retval = sctp_setsockopt_context(sk, optval, optlen); | ||
| 2859 | break; | 2902 | break; |
| 2860 | 2903 | ||
| 2861 | default: | 2904 | default: |
| @@ -3016,6 +3059,8 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk) | |||
| 3016 | sp->default_context = 0; | 3059 | sp->default_context = 0; |
| 3017 | sp->default_timetolive = 0; | 3060 | sp->default_timetolive = 0; |
| 3018 | 3061 | ||
| 3062 | sp->default_rcv_context = 0; | ||
| 3063 | |||
| 3019 | /* Initialize default setup parameters. These parameters | 3064 | /* Initialize default setup parameters. These parameters |
| 3020 | * can be modified with the SCTP_INITMSG socket option or | 3065 | * can be modified with the SCTP_INITMSG socket option or |
| 3021 | * overridden by the SCTP_INIT CMSG. | 3066 | * overridden by the SCTP_INIT CMSG. |
| @@ -3078,7 +3123,7 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk) | |||
| 3078 | /* User specified fragmentation limit. */ | 3123 | /* User specified fragmentation limit. */ |
| 3079 | sp->user_frag = 0; | 3124 | sp->user_frag = 0; |
| 3080 | 3125 | ||
| 3081 | sp->adaption_ind = 0; | 3126 | sp->adaptation_ind = 0; |
| 3082 | 3127 | ||
| 3083 | sp->pf = sctp_get_pf_specific(sk->sk_family); | 3128 | sp->pf = sctp_get_pf_specific(sk->sk_family); |
| 3084 | 3129 | ||
| @@ -3821,10 +3866,9 @@ static int sctp_getsockopt_local_addrs_num_old(struct sock *sk, int len, | |||
| 3821 | sctp_assoc_t id; | 3866 | sctp_assoc_t id; |
| 3822 | struct sctp_bind_addr *bp; | 3867 | struct sctp_bind_addr *bp; |
| 3823 | struct sctp_association *asoc; | 3868 | struct sctp_association *asoc; |
| 3824 | struct list_head *pos; | 3869 | struct list_head *pos, *temp; |
| 3825 | struct sctp_sockaddr_entry *addr; | 3870 | struct sctp_sockaddr_entry *addr; |
| 3826 | rwlock_t *addr_lock; | 3871 | rwlock_t *addr_lock; |
| 3827 | unsigned long flags; | ||
| 3828 | int cnt = 0; | 3872 | int cnt = 0; |
| 3829 | 3873 | ||
| 3830 | if (len != sizeof(sctp_assoc_t)) | 3874 | if (len != sizeof(sctp_assoc_t)) |
| @@ -3859,8 +3903,7 @@ static int sctp_getsockopt_local_addrs_num_old(struct sock *sk, int len, | |||
| 3859 | addr = list_entry(bp->address_list.next, | 3903 | addr = list_entry(bp->address_list.next, |
| 3860 | struct sctp_sockaddr_entry, list); | 3904 | struct sctp_sockaddr_entry, list); |
| 3861 | if (sctp_is_any(&addr->a)) { | 3905 | if (sctp_is_any(&addr->a)) { |
| 3862 | sctp_spin_lock_irqsave(&sctp_local_addr_lock, flags); | 3906 | list_for_each_safe(pos, temp, &sctp_local_addr_list) { |
| 3863 | list_for_each(pos, &sctp_local_addr_list) { | ||
| 3864 | addr = list_entry(pos, | 3907 | addr = list_entry(pos, |
| 3865 | struct sctp_sockaddr_entry, | 3908 | struct sctp_sockaddr_entry, |
| 3866 | list); | 3909 | list); |
| @@ -3869,8 +3912,6 @@ static int sctp_getsockopt_local_addrs_num_old(struct sock *sk, int len, | |||
| 3869 | continue; | 3912 | continue; |
| 3870 | cnt++; | 3913 | cnt++; |
| 3871 | } | 3914 | } |
| 3872 | sctp_spin_unlock_irqrestore(&sctp_local_addr_lock, | ||
| 3873 | flags); | ||
| 3874 | } else { | 3915 | } else { |
| 3875 | cnt = 1; | 3916 | cnt = 1; |
| 3876 | } | 3917 | } |
| @@ -3892,15 +3933,13 @@ done: | |||
| 3892 | static int sctp_copy_laddrs_to_user_old(struct sock *sk, __u16 port, int max_addrs, | 3933 | static int sctp_copy_laddrs_to_user_old(struct sock *sk, __u16 port, int max_addrs, |
| 3893 | void __user *to) | 3934 | void __user *to) |
| 3894 | { | 3935 | { |
| 3895 | struct list_head *pos; | 3936 | struct list_head *pos, *next; |
| 3896 | struct sctp_sockaddr_entry *addr; | 3937 | struct sctp_sockaddr_entry *addr; |
| 3897 | unsigned long flags; | ||
| 3898 | union sctp_addr temp; | 3938 | union sctp_addr temp; |
| 3899 | int cnt = 0; | 3939 | int cnt = 0; |
| 3900 | int addrlen; | 3940 | int addrlen; |
| 3901 | 3941 | ||
| 3902 | sctp_spin_lock_irqsave(&sctp_local_addr_lock, flags); | 3942 | list_for_each_safe(pos, next, &sctp_local_addr_list) { |
| 3903 | list_for_each(pos, &sctp_local_addr_list) { | ||
| 3904 | addr = list_entry(pos, struct sctp_sockaddr_entry, list); | 3943 | addr = list_entry(pos, struct sctp_sockaddr_entry, list); |
| 3905 | if ((PF_INET == sk->sk_family) && | 3944 | if ((PF_INET == sk->sk_family) && |
| 3906 | (AF_INET6 == addr->a.sa.sa_family)) | 3945 | (AF_INET6 == addr->a.sa.sa_family)) |
| @@ -3909,16 +3948,13 @@ static int sctp_copy_laddrs_to_user_old(struct sock *sk, __u16 port, int max_add | |||
| 3909 | sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk), | 3948 | sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk), |
| 3910 | &temp); | 3949 | &temp); |
| 3911 | addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len; | 3950 | addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len; |
| 3912 | if (copy_to_user(to, &temp, addrlen)) { | 3951 | if (copy_to_user(to, &temp, addrlen)) |
| 3913 | sctp_spin_unlock_irqrestore(&sctp_local_addr_lock, | ||
| 3914 | flags); | ||
| 3915 | return -EFAULT; | 3952 | return -EFAULT; |
| 3916 | } | 3953 | |
| 3917 | to += addrlen; | 3954 | to += addrlen; |
| 3918 | cnt ++; | 3955 | cnt ++; |
| 3919 | if (cnt >= max_addrs) break; | 3956 | if (cnt >= max_addrs) break; |
| 3920 | } | 3957 | } |
| 3921 | sctp_spin_unlock_irqrestore(&sctp_local_addr_lock, flags); | ||
| 3922 | 3958 | ||
| 3923 | return cnt; | 3959 | return cnt; |
| 3924 | } | 3960 | } |
| @@ -3926,15 +3962,13 @@ static int sctp_copy_laddrs_to_user_old(struct sock *sk, __u16 port, int max_add | |||
| 3926 | static int sctp_copy_laddrs_to_user(struct sock *sk, __u16 port, | 3962 | static int sctp_copy_laddrs_to_user(struct sock *sk, __u16 port, |
| 3927 | void __user **to, size_t space_left) | 3963 | void __user **to, size_t space_left) |
| 3928 | { | 3964 | { |
| 3929 | struct list_head *pos; | 3965 | struct list_head *pos, *next; |
| 3930 | struct sctp_sockaddr_entry *addr; | 3966 | struct sctp_sockaddr_entry *addr; |
| 3931 | unsigned long flags; | ||
| 3932 | union sctp_addr temp; | 3967 | union sctp_addr temp; |
| 3933 | int cnt = 0; | 3968 | int cnt = 0; |
| 3934 | int addrlen; | 3969 | int addrlen; |
| 3935 | 3970 | ||
| 3936 | sctp_spin_lock_irqsave(&sctp_local_addr_lock, flags); | 3971 | list_for_each_safe(pos, next, &sctp_local_addr_list) { |
| 3937 | list_for_each(pos, &sctp_local_addr_list) { | ||
| 3938 | addr = list_entry(pos, struct sctp_sockaddr_entry, list); | 3972 | addr = list_entry(pos, struct sctp_sockaddr_entry, list); |
| 3939 | if ((PF_INET == sk->sk_family) && | 3973 | if ((PF_INET == sk->sk_family) && |
| 3940 | (AF_INET6 == addr->a.sa.sa_family)) | 3974 | (AF_INET6 == addr->a.sa.sa_family)) |
| @@ -3945,16 +3979,13 @@ static int sctp_copy_laddrs_to_user(struct sock *sk, __u16 port, | |||
| 3945 | addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len; | 3979 | addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len; |
| 3946 | if(space_left<addrlen) | 3980 | if(space_left<addrlen) |
| 3947 | return -ENOMEM; | 3981 | return -ENOMEM; |
| 3948 | if (copy_to_user(*to, &temp, addrlen)) { | 3982 | if (copy_to_user(*to, &temp, addrlen)) |
| 3949 | sctp_spin_unlock_irqrestore(&sctp_local_addr_lock, | ||
| 3950 | flags); | ||
| 3951 | return -EFAULT; | 3983 | return -EFAULT; |
| 3952 | } | 3984 | |
| 3953 | *to += addrlen; | 3985 | *to += addrlen; |
| 3954 | cnt ++; | 3986 | cnt ++; |
| 3955 | space_left -= addrlen; | 3987 | space_left -= addrlen; |
| 3956 | } | 3988 | } |
| 3957 | sctp_spin_unlock_irqrestore(&sctp_local_addr_lock, flags); | ||
| 3958 | 3989 | ||
| 3959 | return cnt; | 3990 | return cnt; |
| 3960 | } | 3991 | } |
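Editor's note: the address-walk hunks above drop the sctp_local_addr_lock irqsave protection and switch from list_for_each() to list_for_each_safe(), which samples the next pointer before the loop body runs so the walk tolerates the current entry being unlinked. A minimal sketch of the pattern, assuming the entry type with an embedded list_head as in the diff:

    /* Sketch of the _safe walk used above; 'pos' may be unlinked inside the
     * body because 'next' was read before the body executed.
     */
    struct list_head *pos, *next;
    struct sctp_sockaddr_entry *addr;

    list_for_each_safe(pos, next, &sctp_local_addr_list) {
            addr = list_entry(pos, struct sctp_sockaddr_entry, list);
            /* ... filter by address family, copy to user space, count ... */
    }

The same walk could be written with list_for_each_entry_safe(); the open-coded list_entry() form simply matches the surrounding code.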
| @@ -4179,21 +4210,21 @@ static int sctp_getsockopt_primary_addr(struct sock *sk, int len, | |||
| 4179 | } | 4210 | } |
| 4180 | 4211 | ||
| 4181 | /* | 4212 | /* |
| 4182 | * 7.1.11 Set Adaption Layer Indicator (SCTP_ADAPTION_LAYER) | 4213 | * 7.1.11 Set Adaptation Layer Indicator (SCTP_ADAPTATION_LAYER) |
| 4183 | * | 4214 | * |
| 4184 | * Requests that the local endpoint set the specified Adaption Layer | 4215 | * Requests that the local endpoint set the specified Adaptation Layer |
| 4185 | * Indication parameter for all future INIT and INIT-ACK exchanges. | 4216 | * Indication parameter for all future INIT and INIT-ACK exchanges. |
| 4186 | */ | 4217 | */ |
| 4187 | static int sctp_getsockopt_adaption_layer(struct sock *sk, int len, | 4218 | static int sctp_getsockopt_adaptation_layer(struct sock *sk, int len, |
| 4188 | char __user *optval, int __user *optlen) | 4219 | char __user *optval, int __user *optlen) |
| 4189 | { | 4220 | { |
| 4190 | struct sctp_setadaption adaption; | 4221 | struct sctp_setadaptation adaptation; |
| 4191 | 4222 | ||
| 4192 | if (len != sizeof(struct sctp_setadaption)) | 4223 | if (len != sizeof(struct sctp_setadaptation)) |
| 4193 | return -EINVAL; | 4224 | return -EINVAL; |
| 4194 | 4225 | ||
| 4195 | adaption.ssb_adaption_ind = sctp_sk(sk)->adaption_ind; | 4226 | adaptation.ssb_adaptation_ind = sctp_sk(sk)->adaptation_ind; |
| 4196 | if (copy_to_user(optval, &adaption, len)) | 4227 | if (copy_to_user(optval, &adaptation, len)) |
| 4197 | return -EFAULT; | 4228 | return -EFAULT; |
| 4198 | 4229 | ||
| 4199 | return 0; | 4230 | return 0; |
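Editor's note: from user space only the spelling changes with this rename; the option is still exchanged through the one-field struct shown in the hunk. A hedged sketch of the get side (header availability assumed):

    #include <netinet/sctp.h>
    #include <sys/socket.h>

    static int get_adaptation_ind(int sd, unsigned int *ind)
    {
            struct sctp_setadaptation sa;
            socklen_t len = sizeof(sa);

            if (getsockopt(sd, IPPROTO_SCTP, SCTP_ADAPTATION_LAYER, &sa, &len) < 0)
                    return -1;
            *ind = sa.ssb_adaptation_ind;  /* sent in future INIT/INIT-ACK chunks */
            return 0;
    }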
| @@ -4435,6 +4466,42 @@ static int sctp_getsockopt_mappedv4(struct sock *sk, int len, | |||
| 4435 | } | 4466 | } |
| 4436 | 4467 | ||
| 4437 | /* | 4468 | /* |
| 4469 | * 7.1.29. Set or Get the default context (SCTP_CONTEXT) | ||
| 4470 | * (chapter and verse is quoted at sctp_setsockopt_context()) | ||
| 4471 | */ | ||
| 4472 | static int sctp_getsockopt_context(struct sock *sk, int len, | ||
| 4473 | char __user *optval, int __user *optlen) | ||
| 4474 | { | ||
| 4475 | struct sctp_assoc_value params; | ||
| 4476 | struct sctp_sock *sp; | ||
| 4477 | struct sctp_association *asoc; | ||
| 4478 | |||
| 4479 | if (len != sizeof(struct sctp_assoc_value)) | ||
| 4480 | return -EINVAL; | ||
| 4481 | |||
| 4482 | if (copy_from_user(¶ms, optval, len)) | ||
| 4483 | return -EFAULT; | ||
| 4484 | |||
| 4485 | sp = sctp_sk(sk); | ||
| 4486 | |||
| 4487 | if (params.assoc_id != 0) { | ||
| 4488 | asoc = sctp_id2assoc(sk, params.assoc_id); | ||
| 4489 | if (!asoc) | ||
| 4490 | return -EINVAL; | ||
| 4491 | params.assoc_value = asoc->default_rcv_context; | ||
| 4492 | } else { | ||
| 4493 | params.assoc_value = sp->default_rcv_context; | ||
| 4494 | } | ||
| 4495 | |||
| 4496 | if (put_user(len, optlen)) | ||
| 4497 | return -EFAULT; | ||
| 4498 | if (copy_to_user(optval, ¶ms, len)) | ||
| 4499 | return -EFAULT; | ||
| 4500 | |||
| 4501 | return 0; | ||
| 4502 | } | ||
| 4503 | |||
| 4504 | /* | ||
| 4438 | * 7.1.17 Set the maximum fragrmentation size (SCTP_MAXSEG) | 4505 | * 7.1.17 Set the maximum fragrmentation size (SCTP_MAXSEG) |
| 4439 | * | 4506 | * |
| 4440 | * This socket option specifies the maximum size to put in any outgoing | 4507 | * This socket option specifies the maximum size to put in any outgoing |
| @@ -4568,10 +4635,13 @@ SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname, | |||
| 4568 | retval = sctp_getsockopt_peer_addr_info(sk, len, optval, | 4635 | retval = sctp_getsockopt_peer_addr_info(sk, len, optval, |
| 4569 | optlen); | 4636 | optlen); |
| 4570 | break; | 4637 | break; |
| 4571 | case SCTP_ADAPTION_LAYER: | 4638 | case SCTP_ADAPTATION_LAYER: |
| 4572 | retval = sctp_getsockopt_adaption_layer(sk, len, optval, | 4639 | retval = sctp_getsockopt_adaptation_layer(sk, len, optval, |
| 4573 | optlen); | 4640 | optlen); |
| 4574 | break; | 4641 | break; |
| 4642 | case SCTP_CONTEXT: | ||
| 4643 | retval = sctp_getsockopt_context(sk, len, optval, optlen); | ||
| 4644 | break; | ||
| 4575 | default: | 4645 | default: |
| 4576 | retval = -ENOPROTOOPT; | 4646 | retval = -ENOPROTOOPT; |
| 4577 | break; | 4647 | break; |
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c index e255a709f1b7..445e07a7ac4b 100644 --- a/net/sctp/ulpevent.c +++ b/net/sctp/ulpevent.c | |||
| @@ -609,31 +609,31 @@ fail: | |||
| 609 | return NULL; | 609 | return NULL; |
| 610 | } | 610 | } |
| 611 | 611 | ||
| 612 | /* Create and initialize a SCTP_ADAPTION_INDICATION notification. | 612 | /* Create and initialize a SCTP_ADAPTATION_INDICATION notification. |
| 613 | * | 613 | * |
| 614 | * Socket Extensions for SCTP | 614 | * Socket Extensions for SCTP |
| 615 | * 5.3.1.6 SCTP_ADAPTION_INDICATION | 615 | * 5.3.1.6 SCTP_ADAPTATION_INDICATION |
| 616 | */ | 616 | */ |
| 617 | struct sctp_ulpevent *sctp_ulpevent_make_adaption_indication( | 617 | struct sctp_ulpevent *sctp_ulpevent_make_adaptation_indication( |
| 618 | const struct sctp_association *asoc, gfp_t gfp) | 618 | const struct sctp_association *asoc, gfp_t gfp) |
| 619 | { | 619 | { |
| 620 | struct sctp_ulpevent *event; | 620 | struct sctp_ulpevent *event; |
| 621 | struct sctp_adaption_event *sai; | 621 | struct sctp_adaptation_event *sai; |
| 622 | struct sk_buff *skb; | 622 | struct sk_buff *skb; |
| 623 | 623 | ||
| 624 | event = sctp_ulpevent_new(sizeof(struct sctp_adaption_event), | 624 | event = sctp_ulpevent_new(sizeof(struct sctp_adaptation_event), |
| 625 | MSG_NOTIFICATION, gfp); | 625 | MSG_NOTIFICATION, gfp); |
| 626 | if (!event) | 626 | if (!event) |
| 627 | goto fail; | 627 | goto fail; |
| 628 | 628 | ||
| 629 | skb = sctp_event2skb(event); | 629 | skb = sctp_event2skb(event); |
| 630 | sai = (struct sctp_adaption_event *) | 630 | sai = (struct sctp_adaptation_event *) |
| 631 | skb_put(skb, sizeof(struct sctp_adaption_event)); | 631 | skb_put(skb, sizeof(struct sctp_adaptation_event)); |
| 632 | 632 | ||
| 633 | sai->sai_type = SCTP_ADAPTION_INDICATION; | 633 | sai->sai_type = SCTP_ADAPTATION_INDICATION; |
| 634 | sai->sai_flags = 0; | 634 | sai->sai_flags = 0; |
| 635 | sai->sai_length = sizeof(struct sctp_adaption_event); | 635 | sai->sai_length = sizeof(struct sctp_adaptation_event); |
| 636 | sai->sai_adaption_ind = asoc->peer.adaption_ind; | 636 | sai->sai_adaptation_ind = asoc->peer.adaptation_ind; |
| 637 | sctp_ulpevent_set_owner(event, asoc); | 637 | sctp_ulpevent_set_owner(event, asoc); |
| 638 | sai->sai_assoc_id = sctp_assoc2id(asoc); | 638 | sai->sai_assoc_id = sctp_assoc2id(asoc); |
| 639 | 639 | ||
| @@ -849,8 +849,10 @@ void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event, | |||
| 849 | */ | 849 | */ |
| 850 | sinfo.sinfo_assoc_id = sctp_assoc2id(event->asoc); | 850 | sinfo.sinfo_assoc_id = sctp_assoc2id(event->asoc); |
| 851 | 851 | ||
| 852 | /* context value that is set via SCTP_CONTEXT socket option. */ | ||
| 853 | sinfo.sinfo_context = event->asoc->default_rcv_context; | ||
| 854 | |||
| 852 | /* These fields are not used while receiving. */ | 855 | /* These fields are not used while receiving. */ |
| 853 | sinfo.sinfo_context = 0; | ||
| 854 | sinfo.sinfo_timetolive = 0; | 856 | sinfo.sinfo_timetolive = 0; |
| 855 | 857 | ||
| 856 | put_cmsg(msghdr, IPPROTO_SCTP, SCTP_SNDRCV, | 858 | put_cmsg(msghdr, IPPROTO_SCTP, SCTP_SNDRCV, |
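Editor's note: to applications, the ulpevent.c changes surface as (a) an SCTP_ADAPTATION_INDICATION notification, delivered only when the corresponding event is subscribed, and (b) sinfo_context in the SCTP_SNDRCV ancillary data now carrying the default receive context instead of 0. A hedged consumption sketch; the subscribe field name assumes post-rename user headers and is not taken from this patch:

    #include <netinet/sctp.h>
    #include <string.h>

    /* Enable delivery of adaptation-layer notifications on socket sd. */
    static int subscribe_adaptation_events(int sd)
    {
            struct sctp_event_subscribe ev;

            memset(&ev, 0, sizeof(ev));
            ev.sctp_data_io_event = 1;           /* needed for SCTP_SNDRCV cmsg */
            ev.sctp_adaptation_layer_event = 1;  /* assumed post-rename name    */
            return setsockopt(sd, IPPROTO_SCTP, SCTP_EVENTS, &ev, sizeof(ev));
    }

    /* In the read loop, a MSG_NOTIFICATION buffer can then be inspected as: */
    static void handle_notification(const union sctp_notification *sn)
    {
            if (sn->sn_header.sn_type == SCTP_ADAPTATION_INDICATION)
                    /* peer's 32-bit indication, as stored by the kernel above */
                    (void)sn->sn_adaptation_event.sai_adaptation_ind;
    }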
diff --git a/net/sunrpc/auth_gss/gss_spkm3_mech.c b/net/sunrpc/auth_gss/gss_spkm3_mech.c index 41465072d0b5..8ef3f1c19435 100644 --- a/net/sunrpc/auth_gss/gss_spkm3_mech.c +++ b/net/sunrpc/auth_gss/gss_spkm3_mech.c | |||
| @@ -228,7 +228,7 @@ static int __init init_spkm3_module(void) | |||
| 228 | status = gss_mech_register(&gss_spkm3_mech); | 228 | status = gss_mech_register(&gss_spkm3_mech); |
| 229 | if (status) | 229 | if (status) |
| 230 | printk("Failed to register spkm3 gss mechanism!\n"); | 230 | printk("Failed to register spkm3 gss mechanism!\n"); |
| 231 | return 0; | 231 | return status; |
| 232 | } | 232 | } |
| 233 | 233 | ||
| 234 | static void __exit cleanup_spkm3_module(void) | 234 | static void __exit cleanup_spkm3_module(void) |
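Editor's note: the spkm3 one-liner restores the usual module_init contract: returning the registration status instead of an unconditional 0 lets a failed gss_mech_register() abort the module load rather than leave a half-initialized mechanism behind. Condensed shape as applied above:

    static int __init init_spkm3_module(void)
    {
            int status = gss_mech_register(&gss_spkm3_mech);

            if (status)
                    printk("Failed to register spkm3 gss mechanism!\n");
            return status;  /* non-zero: module load fails and is rolled back */
    }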
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c index 700353b330fd..066c64a97fd8 100644 --- a/net/sunrpc/auth_gss/svcauth_gss.c +++ b/net/sunrpc/auth_gss/svcauth_gss.c | |||
| @@ -804,19 +804,19 @@ unwrap_integ_data(struct xdr_buf *buf, u32 seq, struct gss_ctx *ctx) | |||
| 804 | 804 | ||
| 805 | integ_len = svc_getnl(&buf->head[0]); | 805 | integ_len = svc_getnl(&buf->head[0]); |
| 806 | if (integ_len & 3) | 806 | if (integ_len & 3) |
| 807 | goto out; | 807 | return stat; |
| 808 | if (integ_len > buf->len) | 808 | if (integ_len > buf->len) |
| 809 | goto out; | 809 | return stat; |
| 810 | if (xdr_buf_subsegment(buf, &integ_buf, 0, integ_len)) | 810 | if (xdr_buf_subsegment(buf, &integ_buf, 0, integ_len)) |
| 811 | BUG(); | 811 | BUG(); |
| 812 | /* copy out mic... */ | 812 | /* copy out mic... */ |
| 813 | if (read_u32_from_xdr_buf(buf, integ_len, &mic.len)) | 813 | if (read_u32_from_xdr_buf(buf, integ_len, &mic.len)) |
| 814 | BUG(); | 814 | BUG(); |
| 815 | if (mic.len > RPC_MAX_AUTH_SIZE) | 815 | if (mic.len > RPC_MAX_AUTH_SIZE) |
| 816 | goto out; | 816 | return stat; |
| 817 | mic.data = kmalloc(mic.len, GFP_KERNEL); | 817 | mic.data = kmalloc(mic.len, GFP_KERNEL); |
| 818 | if (!mic.data) | 818 | if (!mic.data) |
| 819 | goto out; | 819 | return stat; |
| 820 | if (read_bytes_from_xdr_buf(buf, integ_len + 4, mic.data, mic.len)) | 820 | if (read_bytes_from_xdr_buf(buf, integ_len + 4, mic.data, mic.len)) |
| 821 | goto out; | 821 | goto out; |
| 822 | maj_stat = gss_verify_mic(ctx, &integ_buf, &mic); | 822 | maj_stat = gss_verify_mic(ctx, &integ_buf, &mic); |
| @@ -826,6 +826,7 @@ unwrap_integ_data(struct xdr_buf *buf, u32 seq, struct gss_ctx *ctx) | |||
| 826 | goto out; | 826 | goto out; |
| 827 | stat = 0; | 827 | stat = 0; |
| 828 | out: | 828 | out: |
| 829 | kfree(mic.data); | ||
| 829 | return stat; | 830 | return stat; |
| 830 | } | 831 | } |
| 831 | 832 | ||
| @@ -1065,7 +1066,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp) | |||
| 1065 | } | 1066 | } |
| 1066 | switch(cache_check(&rsi_cache, &rsip->h, &rqstp->rq_chandle)) { | 1067 | switch(cache_check(&rsi_cache, &rsip->h, &rqstp->rq_chandle)) { |
| 1067 | case -EAGAIN: | 1068 | case -EAGAIN: |
| 1068 | goto drop; | 1069 | case -ETIMEDOUT: |
| 1069 | case -ENOENT: | 1070 | case -ENOENT: |
| 1070 | goto drop; | 1071 | goto drop; |
| 1071 | case 0: | 1072 | case 0: |
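Editor's note: the unwrap_integ_data() hunks change the exit discipline. Paths that run before mic.data is allocated now return directly, so the kfree(mic.data) added at the shared 'out:' label only ever sees a buffer that kmalloc() actually returned, closing the leak on the post-allocation error paths. A generic sketch of that shape, with invented helper-free control flow:

    /* Hypothetical illustration of the exit discipline used above. */
    static int unwrap_blob(struct xdr_buf *buf, unsigned int blob_len)
    {
            void *data;
            int stat = -EINVAL;

            if (blob_len > buf->len)
                    return stat;            /* nothing allocated yet: plain return */

            data = kmalloc(blob_len, GFP_KERNEL);
            if (!data)
                    return stat;

            if (read_bytes_from_xdr_buf(buf, 0, data, blob_len))
                    goto out;               /* allocated: leave through 'out'      */
            stat = 0;
    out:
            kfree(data);                    /* freed on success and failure alike  */
            return stat;
    }

The svcauth_gss_accept() hunk is the matching consumer change: -ETIMEDOUT from cache_check() is now dropped just like -EAGAIN and -ENOENT.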
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c index 80aff0474572..14274490f92e 100644 --- a/net/sunrpc/cache.c +++ b/net/sunrpc/cache.c | |||
| @@ -34,7 +34,7 @@ | |||
| 34 | 34 | ||
| 35 | #define RPCDBG_FACILITY RPCDBG_CACHE | 35 | #define RPCDBG_FACILITY RPCDBG_CACHE |
| 36 | 36 | ||
| 37 | static void cache_defer_req(struct cache_req *req, struct cache_head *item); | 37 | static int cache_defer_req(struct cache_req *req, struct cache_head *item); |
| 38 | static void cache_revisit_request(struct cache_head *item); | 38 | static void cache_revisit_request(struct cache_head *item); |
| 39 | 39 | ||
| 40 | static void cache_init(struct cache_head *h) | 40 | static void cache_init(struct cache_head *h) |
| @@ -185,6 +185,7 @@ static int cache_make_upcall(struct cache_detail *detail, struct cache_head *h); | |||
| 185 | * | 185 | * |
| 186 | * Returns 0 if the cache_head can be used, or cache_puts it and returns | 186 | * Returns 0 if the cache_head can be used, or cache_puts it and returns |
| 187 | * -EAGAIN if upcall is pending, | 187 | * -EAGAIN if upcall is pending, |
| 188 | * -ETIMEDOUT if upcall failed and should be retried, | ||
| 188 | * -ENOENT if cache entry was negative | 189 | * -ENOENT if cache entry was negative |
| 189 | */ | 190 | */ |
| 190 | int cache_check(struct cache_detail *detail, | 191 | int cache_check(struct cache_detail *detail, |
| @@ -236,7 +237,8 @@ int cache_check(struct cache_detail *detail, | |||
| 236 | } | 237 | } |
| 237 | 238 | ||
| 238 | if (rv == -EAGAIN) | 239 | if (rv == -EAGAIN) |
| 239 | cache_defer_req(rqstp, h); | 240 | if (cache_defer_req(rqstp, h) != 0) |
| 241 | rv = -ETIMEDOUT; | ||
| 240 | 242 | ||
| 241 | if (rv) | 243 | if (rv) |
| 242 | cache_put(h, detail); | 244 | cache_put(h, detail); |
| @@ -523,14 +525,21 @@ static LIST_HEAD(cache_defer_list); | |||
| 523 | static struct list_head cache_defer_hash[DFR_HASHSIZE]; | 525 | static struct list_head cache_defer_hash[DFR_HASHSIZE]; |
| 524 | static int cache_defer_cnt; | 526 | static int cache_defer_cnt; |
| 525 | 527 | ||
| 526 | static void cache_defer_req(struct cache_req *req, struct cache_head *item) | 528 | static int cache_defer_req(struct cache_req *req, struct cache_head *item) |
| 527 | { | 529 | { |
| 528 | struct cache_deferred_req *dreq; | 530 | struct cache_deferred_req *dreq; |
| 529 | int hash = DFR_HASH(item); | 531 | int hash = DFR_HASH(item); |
| 530 | 532 | ||
| 533 | if (cache_defer_cnt >= DFR_MAX) { | ||
| 534 | /* too much in the cache, randomly drop this one, | ||
| 535 | * or continue and drop the oldest below | ||
| 536 | */ | ||
| 537 | if (net_random()&1) | ||
| 538 | return -ETIMEDOUT; | ||
| 539 | } | ||
| 531 | dreq = req->defer(req); | 540 | dreq = req->defer(req); |
| 532 | if (dreq == NULL) | 541 | if (dreq == NULL) |
| 533 | return; | 542 | return -ETIMEDOUT; |
| 534 | 543 | ||
| 535 | dreq->item = item; | 544 | dreq->item = item; |
| 536 | dreq->recv_time = get_seconds(); | 545 | dreq->recv_time = get_seconds(); |
| @@ -546,17 +555,8 @@ static void cache_defer_req(struct cache_req *req, struct cache_head *item) | |||
| 546 | /* it is in, now maybe clean up */ | 555 | /* it is in, now maybe clean up */ |
| 547 | dreq = NULL; | 556 | dreq = NULL; |
| 548 | if (++cache_defer_cnt > DFR_MAX) { | 557 | if (++cache_defer_cnt > DFR_MAX) { |
| 549 | /* too much in the cache, randomly drop | 558 | dreq = list_entry(cache_defer_list.prev, |
| 550 | * first or last | 559 | struct cache_deferred_req, recent); |
| 551 | */ | ||
| 552 | if (net_random()&1) | ||
| 553 | dreq = list_entry(cache_defer_list.next, | ||
| 554 | struct cache_deferred_req, | ||
| 555 | recent); | ||
| 556 | else | ||
| 557 | dreq = list_entry(cache_defer_list.prev, | ||
| 558 | struct cache_deferred_req, | ||
| 559 | recent); | ||
| 560 | list_del(&dreq->recent); | 560 | list_del(&dreq->recent); |
| 561 | list_del(&dreq->hash); | 561 | list_del(&dreq->hash); |
| 562 | cache_defer_cnt--; | 562 | cache_defer_cnt--; |
| @@ -571,6 +571,7 @@ static void cache_defer_req(struct cache_req *req, struct cache_head *item) | |||
| 571 | /* must have just been validated... */ | 571 | /* must have just been validated... */ |
| 572 | cache_revisit_request(item); | 572 | cache_revisit_request(item); |
| 573 | } | 573 | } |
| 574 | return 0; | ||
| 574 | } | 575 | } |
| 575 | 576 | ||
| 576 | static void cache_revisit_request(struct cache_head *item) | 577 | static void cache_revisit_request(struct cache_head *item) |
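Editor's note: with cache_defer_req() now returning an int, callers can tell "request parked for a later revisit" (0) apart from "could not defer" (non-zero, either the random early drop at DFR_MAX or a failed req->defer()). cache_check() maps that failure to -ETIMEDOUT, which the RPC auth paths above translate into a drop. The caller side, condensed from the hunk:

    if (rv == -EAGAIN) {
            /* try to park the request until the pending upcall completes */
            if (cache_defer_req(rqstp, h) != 0)
                    rv = -ETIMEDOUT;   /* could not defer: caller drops/retries */
    }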
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index eb44ec929ca1..f3001f3626f6 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c | |||
| @@ -308,7 +308,7 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools, | |||
| 308 | 308 | ||
| 309 | serv->sv_nrpools = npools; | 309 | serv->sv_nrpools = npools; |
| 310 | serv->sv_pools = | 310 | serv->sv_pools = |
| 311 | kcalloc(sizeof(struct svc_pool), serv->sv_nrpools, | 311 | kcalloc(serv->sv_nrpools, sizeof(struct svc_pool), |
| 312 | GFP_KERNEL); | 312 | GFP_KERNEL); |
| 313 | if (!serv->sv_pools) { | 313 | if (!serv->sv_pools) { |
| 314 | kfree(serv); | 314 | kfree(serv); |
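Editor's note: kcalloc() takes (number of elements, element size, gfp flags). The old call had the first two arguments swapped; the product is the same, so behaviour did not change, but the fixed order matches the documented signature and keeps the intent of the overflow check obvious. Corrected shape:

    /* kcalloc(n, size, flags): n zeroed elements of 'size' bytes each. */
    serv->sv_pools = kcalloc(serv->sv_nrpools, sizeof(struct svc_pool),
                             GFP_KERNEL);
    if (!serv->sv_pools) {
            kfree(serv);
            return NULL;
    }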
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c index a0a953a430c2..0d1e8fb83b93 100644 --- a/net/sunrpc/svcauth_unix.c +++ b/net/sunrpc/svcauth_unix.c | |||
| @@ -53,6 +53,10 @@ struct auth_domain *unix_domain_find(char *name) | |||
| 53 | return NULL; | 53 | return NULL; |
| 54 | kref_init(&new->h.ref); | 54 | kref_init(&new->h.ref); |
| 55 | new->h.name = kstrdup(name, GFP_KERNEL); | 55 | new->h.name = kstrdup(name, GFP_KERNEL); |
| 56 | if (new->h.name == NULL) { | ||
| 57 | kfree(new); | ||
| 58 | return NULL; | ||
| 59 | } | ||
| 56 | new->h.flavour = &svcauth_unix; | 60 | new->h.flavour = &svcauth_unix; |
| 57 | new->addr_changes = 0; | 61 | new->addr_changes = 0; |
| 58 | rv = auth_domain_lookup(name, &new->h); | 62 | rv = auth_domain_lookup(name, &new->h); |
| @@ -435,6 +439,7 @@ svcauth_unix_set_client(struct svc_rqst *rqstp) | |||
| 435 | default: | 439 | default: |
| 436 | BUG(); | 440 | BUG(); |
| 437 | case -EAGAIN: | 441 | case -EAGAIN: |
| 442 | case -ETIMEDOUT: | ||
| 438 | return SVC_DROP; | 443 | return SVC_DROP; |
| 439 | case -ENOENT: | 444 | case -ENOENT: |
| 440 | return SVC_DENIED; | 445 | return SVC_DENIED; |
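Editor's note: the svcauth_unix hunk closes a small failure window: kstrdup() can return NULL under memory pressure, and without the check the half-built auth_domain would carry a NULL name and the structure itself would be leaked. The general shape, as applied above:

    new->h.name = kstrdup(name, GFP_KERNEL);
    if (new->h.name == NULL) {
            kfree(new);      /* undo the partial allocation before bailing out */
            return NULL;
    }

The added -ETIMEDOUT case mirrors the cache_check() change: a deferral failure is treated the same as -EAGAIN, i.e. SVC_DROP.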
diff --git a/net/tipc/config.c b/net/tipc/config.c index 458a2c46cef3..baf55c459c8b 100644 --- a/net/tipc/config.c +++ b/net/tipc/config.c | |||
| @@ -208,7 +208,7 @@ static void cfg_cmd_event(struct tipc_cmd_msg *msg, | |||
| 208 | 208 | ||
| 209 | if (mng.link_subscriptions > 64) | 209 | if (mng.link_subscriptions > 64) |
| 210 | break; | 210 | break; |
| 211 | sub = (struct subscr_data *)kmalloc(sizeof(*sub), | 211 | sub = kmalloc(sizeof(*sub), |
| 212 | GFP_ATOMIC); | 212 | GFP_ATOMIC); |
| 213 | if (sub == NULL) { | 213 | if (sub == NULL) { |
| 214 | warn("Memory squeeze; dropped remote link subscription\n"); | 214 | warn("Memory squeeze; dropped remote link subscription\n"); |
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c index 6b381fc0383d..f1cf3402e75c 100644 --- a/net/xfrm/xfrm_algo.c +++ b/net/xfrm/xfrm_algo.c | |||
| @@ -399,7 +399,8 @@ static struct xfrm_algo_desc *xfrm_get_byname(struct xfrm_algo_desc *list, | |||
| 399 | if (!probe) | 399 | if (!probe) |
| 400 | break; | 400 | break; |
| 401 | 401 | ||
| 402 | status = crypto_has_alg(name, type, mask | CRYPTO_ALG_ASYNC); | 402 | status = crypto_has_alg(list[i].name, type, |
| 403 | mask | CRYPTO_ALG_ASYNC); | ||
| 403 | if (!status) | 404 | if (!status) |
| 404 | break; | 405 | break; |
| 405 | 406 | ||
