Diffstat (limited to 'net')
-rw-r--r--  net/bridge/br_stp_if.c      9
-rw-r--r--  net/core/neighbour.c        5
-rw-r--r--  net/core/netpoll.c          7
-rw-r--r--  net/core/skbuff.c          55
-rw-r--r--  net/irda/af_irda.c          3
-rw-r--r--  net/key/af_key.c           90
-rw-r--r--  net/netlink/af_netlink.c    6
-rw-r--r--  net/sctp/socket.c          54
-rw-r--r--  net/sctp/ulpqueue.c         9
-rw-r--r--  net/sunrpc/svcauth_unix.c  21
10 files changed, 171 insertions(+), 88 deletions(-)
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 58d13f2bd121..a285897a2fb4 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -126,7 +126,9 @@ void br_stp_disable_port(struct net_bridge_port *p)
 /* called under bridge lock */
 void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *addr)
 {
-    unsigned char oldaddr[6];
+    /* should be aligned on 2 bytes for compare_ether_addr() */
+    unsigned short oldaddr_aligned[ETH_ALEN >> 1];
+    unsigned char *oldaddr = (unsigned char *)oldaddr_aligned;
     struct net_bridge_port *p;
     int wasroot;
 
@@ -151,11 +153,14 @@ void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *addr)
         br_become_root_bridge(br);
 }
 
-static const unsigned char br_mac_zero[6];
+/* should be aligned on 2 bytes for compare_ether_addr() */
+static const unsigned short br_mac_zero_aligned[ETH_ALEN >> 1];
 
 /* called under bridge lock */
 void br_stp_recalculate_bridge_id(struct net_bridge *br)
 {
+    const unsigned char *br_mac_zero =
+            (const unsigned char *)br_mac_zero_aligned;
     const unsigned char *addr = br_mac_zero;
     struct net_bridge_port *p;
 
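
A note on the bridge change above: compare_ether_addr() compares MAC addresses as three 16-bit words, so both buffers it is given must be at least 2-byte aligned; backing the byte buffer with an unsigned short array is what provides that guarantee. Below is a minimal userspace sketch of the same trick, not part of the patch; the helper mac_equal_u16() and the test addresses are purely illustrative.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

/* Compare two MAC addresses as three 16-bit words; both pointers must be
 * 2-byte aligned, which is what compare_ether_addr() also expects. */
static int mac_equal_u16(const unsigned char *a, const unsigned char *b)
{
    const uint16_t *a16 = (const uint16_t *)a;
    const uint16_t *b16 = (const uint16_t *)b;

    return ((a16[0] ^ b16[0]) | (a16[1] ^ b16[1]) | (a16[2] ^ b16[2])) == 0;
}

int main(void)
{
    /* Backing the byte buffers with unsigned short arrays guarantees
     * at least 2-byte alignment, as in the patched bridge code. */
    unsigned short a_aligned[ETH_ALEN >> 1];
    unsigned short b_aligned[ETH_ALEN >> 1];
    unsigned char *a = (unsigned char *)a_aligned;
    unsigned char *b = (unsigned char *)b_aligned;

    memcpy(a, "\x00\x11\x22\x33\x44\x55", ETH_ALEN);
    memcpy(b, "\x00\x11\x22\x33\x44\x55", ETH_ALEN);

    printf("equal: %d\n", mac_equal_u16(a, b));
    return 0;
}
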
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index cfc60019cf92..841e3f32cab1 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1331,6 +1331,8 @@ void neigh_parms_destroy(struct neigh_parms *parms)
     kfree(parms);
 }
 
+static struct lock_class_key neigh_table_proxy_queue_class;
+
 void neigh_table_init_no_netlink(struct neigh_table *tbl)
 {
     unsigned long now = jiffies;
@@ -1379,7 +1381,8 @@ void neigh_table_init_no_netlink(struct neigh_table *tbl)
     init_timer(&tbl->proxy_timer);
     tbl->proxy_timer.data = (unsigned long)tbl;
     tbl->proxy_timer.function = neigh_proxy_process;
-    skb_queue_head_init(&tbl->proxy_queue);
+    skb_queue_head_init_class(&tbl->proxy_queue,
+            &neigh_table_proxy_queue_class);
 
     tbl->last_flush = now;
     tbl->last_rand = now + tbl->parms.reachable_time * 20;
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index da1019451ccb..4581ece48bb2 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -471,6 +471,13 @@ int __netpoll_rx(struct sk_buff *skb)
     if (skb->len < len || len < iph->ihl*4)
         goto out;
 
+    /*
+     * Our transport medium may have padded the buffer out.
+     * Now We trim to the true length of the frame.
+     */
+    if (pskb_trim_rcsum(skb, len))
+        goto out;
+
     if (iph->protocol != IPPROTO_UDP)
         goto out;
 
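
A note on the netpoll change above: len is the total length taken from the IPv4 header, so the new pskb_trim_rcsum() call cuts off any link-layer padding before the later UDP length checks run. The userspace sketch below shows the same idea on a plain byte buffer; it is not part of the patch and the function name is illustrative.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Return the length an IPv4 packet claims in its header, so the caller can
 * discard any link-layer padding beyond it (0 means the packet is bogus). */
static size_t trim_to_ip_total_len(const unsigned char *pkt, size_t caplen)
{
    uint16_t tot_len;

    if (caplen < 20)                            /* minimal IPv4 header */
        return 0;

    memcpy(&tot_len, pkt + 2, sizeof(tot_len)); /* iph->tot_len */
    tot_len = ntohs(tot_len);

    if (tot_len > caplen)                       /* claims more than captured */
        return 0;

    return tot_len;                             /* the rest is padding */
}

int main(void)
{
    /* A stub IPv4 header claiming 28 bytes, padded out to 60 on the wire. */
    unsigned char frame[60] = { 0x45, 0x00, 0x00, 0x1c };

    printf("true length: %zu of %zu captured bytes\n",
           trim_to_ip_total_len(frame, sizeof(frame)), sizeof(frame));
    return 0;
}
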
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 87573ae35b02..336958fbbcb2 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -197,61 +197,6 @@ nodata:
 }
 
 /**
- * alloc_skb_from_cache - allocate a network buffer
- * @cp: kmem_cache from which to allocate the data area
- *     (object size must be big enough for @size bytes + skb overheads)
- * @size: size to allocate
- * @gfp_mask: allocation mask
- *
- * Allocate a new &sk_buff. The returned buffer has no headroom and
- * tail room of size bytes. The object has a reference count of one.
- * The return is the buffer. On a failure the return is %NULL.
- *
- * Buffers may only be allocated from interrupts using a @gfp_mask of
- * %GFP_ATOMIC.
- */
-struct sk_buff *alloc_skb_from_cache(struct kmem_cache *cp,
-                                     unsigned int size,
-                                     gfp_t gfp_mask)
-{
-    struct sk_buff *skb;
-    u8 *data;
-
-    /* Get the HEAD */
-    skb = kmem_cache_alloc(skbuff_head_cache,
-                           gfp_mask & ~__GFP_DMA);
-    if (!skb)
-        goto out;
-
-    /* Get the DATA. */
-    size = SKB_DATA_ALIGN(size);
-    data = kmem_cache_alloc(cp, gfp_mask);
-    if (!data)
-        goto nodata;
-
-    memset(skb, 0, offsetof(struct sk_buff, truesize));
-    skb->truesize = size + sizeof(struct sk_buff);
-    atomic_set(&skb->users, 1);
-    skb->head = data;
-    skb->data = data;
-    skb->tail = data;
-    skb->end = data + size;
-
-    atomic_set(&(skb_shinfo(skb)->dataref), 1);
-    skb_shinfo(skb)->nr_frags = 0;
-    skb_shinfo(skb)->gso_size = 0;
-    skb_shinfo(skb)->gso_segs = 0;
-    skb_shinfo(skb)->gso_type = 0;
-    skb_shinfo(skb)->frag_list = NULL;
-out:
-    return skb;
-nodata:
-    kmem_cache_free(skbuff_head_cache, skb);
-    skb = NULL;
-    goto out;
-}
-
-/**
  * __netdev_alloc_skb - allocate an skbuff for rx on a specific device
  * @dev: network device to receive on
  * @length: length to allocate
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index eabd6838f50a..0eb7d596d470 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -138,7 +138,6 @@ static void irda_disconnect_indication(void *instance, void *sap,
     sk->sk_shutdown |= SEND_SHUTDOWN;
 
     sk->sk_state_change(sk);
-    sock_orphan(sk);
     release_sock(sk);
 
     /* Close our TSAP.
@@ -1446,7 +1445,7 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock,
              */
             ret = sock_error(sk);
             if (ret)
-                break;
+                ;
             else if (sk->sk_shutdown & RCV_SHUTDOWN)
                 ;
             else if (noblock)
diff --git a/net/key/af_key.c b/net/key/af_key.c
index a4e7e2db0ff3..345019345f09 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -630,6 +630,35 @@ pfkey_sockaddr_size(sa_family_t family)
     /* NOTREACHED */
 }
 
+static inline int pfkey_mode_from_xfrm(int mode)
+{
+    switch(mode) {
+    case XFRM_MODE_TRANSPORT:
+        return IPSEC_MODE_TRANSPORT;
+    case XFRM_MODE_TUNNEL:
+        return IPSEC_MODE_TUNNEL;
+    case XFRM_MODE_BEET:
+        return IPSEC_MODE_BEET;
+    default:
+        return -1;
+    }
+}
+
+static inline int pfkey_mode_to_xfrm(int mode)
+{
+    switch(mode) {
+    case IPSEC_MODE_ANY: /*XXX*/
+    case IPSEC_MODE_TRANSPORT:
+        return XFRM_MODE_TRANSPORT;
+    case IPSEC_MODE_TUNNEL:
+        return XFRM_MODE_TUNNEL;
+    case IPSEC_MODE_BEET:
+        return XFRM_MODE_BEET;
+    default:
+        return -1;
+    }
+}
+
 static struct sk_buff * pfkey_xfrm_state2msg(struct xfrm_state *x, int add_keys, int hsc)
 {
     struct sk_buff *skb;
@@ -651,6 +680,7 @@ static struct sk_buff * pfkey_xfrm_state2msg(struct xfrm_state *x, int add_keys,
     int encrypt_key_size = 0;
     int sockaddr_size;
     struct xfrm_encap_tmpl *natt = NULL;
+    int mode;
 
     /* address family check */
     sockaddr_size = pfkey_sockaddr_size(x->props.family);
@@ -928,7 +958,11 @@ static struct sk_buff * pfkey_xfrm_state2msg(struct xfrm_state *x, int add_keys,
     sa2 = (struct sadb_x_sa2 *) skb_put(skb, sizeof(struct sadb_x_sa2));
     sa2->sadb_x_sa2_len = sizeof(struct sadb_x_sa2)/sizeof(uint64_t);
     sa2->sadb_x_sa2_exttype = SADB_X_EXT_SA2;
-    sa2->sadb_x_sa2_mode = x->props.mode + 1;
+    if ((mode = pfkey_mode_from_xfrm(x->props.mode)) < 0) {
+        kfree_skb(skb);
+        return ERR_PTR(-EINVAL);
+    }
+    sa2->sadb_x_sa2_mode = mode;
     sa2->sadb_x_sa2_reserved1 = 0;
     sa2->sadb_x_sa2_reserved2 = 0;
     sa2->sadb_x_sa2_sequence = 0;
@@ -1155,9 +1189,12 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct sadb_msg *hdr,
 
     if (ext_hdrs[SADB_X_EXT_SA2-1]) {
         struct sadb_x_sa2 *sa2 = (void*)ext_hdrs[SADB_X_EXT_SA2-1];
-        x->props.mode = sa2->sadb_x_sa2_mode;
-        if (x->props.mode)
-            x->props.mode--;
+        int mode = pfkey_mode_to_xfrm(sa2->sadb_x_sa2_mode);
+        if (mode < 0) {
+            err = -EINVAL;
+            goto out;
+        }
+        x->props.mode = mode;
         x->props.reqid = sa2->sadb_x_sa2_reqid;
     }
 
@@ -1218,7 +1255,7 @@ static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, struct sadb_msg *h
     struct sadb_address *saddr, *daddr;
     struct sadb_msg *out_hdr;
     struct xfrm_state *x = NULL;
-    u8 mode;
+    int mode;
     u32 reqid;
     u8 proto;
     unsigned short family;
@@ -1233,7 +1270,9 @@ static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, struct sadb_msg *h
         return -EINVAL;
 
     if ((sa2 = ext_hdrs[SADB_X_EXT_SA2-1]) != NULL) {
-        mode = sa2->sadb_x_sa2_mode - 1;
+        mode = pfkey_mode_to_xfrm(sa2->sadb_x_sa2_mode);
+        if (mode < 0)
+            return -EINVAL;
         reqid = sa2->sadb_x_sa2_reqid;
     } else {
         mode = 0;
@@ -1756,6 +1795,7 @@ parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq)
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
     struct sockaddr_in6 *sin6;
 #endif
+    int mode;
 
     if (xp->xfrm_nr >= XFRM_MAX_DEPTH)
         return -ELOOP;
@@ -1764,7 +1804,9 @@ parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq)
         return -EINVAL;
 
     t->id.proto = rq->sadb_x_ipsecrequest_proto; /* XXX check proto */
-    t->mode = rq->sadb_x_ipsecrequest_mode-1;
+    if ((mode = pfkey_mode_to_xfrm(rq->sadb_x_ipsecrequest_mode)) < 0)
+        return -EINVAL;
+    t->mode = mode;
     if (rq->sadb_x_ipsecrequest_level == IPSEC_LEVEL_USE)
         t->optional = 1;
     else if (rq->sadb_x_ipsecrequest_level == IPSEC_LEVEL_UNIQUE) {
@@ -1877,7 +1919,7 @@ static struct sk_buff * pfkey_xfrm_policy2msg_prep(struct xfrm_policy *xp)
     return skb;
 }
 
-static void pfkey_xfrm_policy2msg(struct sk_buff *skb, struct xfrm_policy *xp, int dir)
+static int pfkey_xfrm_policy2msg(struct sk_buff *skb, struct xfrm_policy *xp, int dir)
 {
     struct sadb_msg *hdr;
     struct sadb_address *addr;
@@ -2014,6 +2056,7 @@ static void pfkey_xfrm_policy2msg(struct sk_buff *skb, struct xfrm_policy *xp, i
         struct sadb_x_ipsecrequest *rq;
         struct xfrm_tmpl *t = xp->xfrm_vec + i;
         int req_size;
+        int mode;
 
         req_size = sizeof(struct sadb_x_ipsecrequest);
         if (t->mode == XFRM_MODE_TUNNEL)
@@ -2027,7 +2070,9 @@ static void pfkey_xfrm_policy2msg(struct sk_buff *skb, struct xfrm_policy *xp, i
         memset(rq, 0, sizeof(*rq));
         rq->sadb_x_ipsecrequest_len = req_size;
         rq->sadb_x_ipsecrequest_proto = t->id.proto;
-        rq->sadb_x_ipsecrequest_mode = t->mode+1;
+        if ((mode = pfkey_mode_from_xfrm(t->mode)) < 0)
+            return -EINVAL;
+        rq->sadb_x_ipsecrequest_mode = mode;
         rq->sadb_x_ipsecrequest_level = IPSEC_LEVEL_REQUIRE;
         if (t->reqid)
             rq->sadb_x_ipsecrequest_level = IPSEC_LEVEL_UNIQUE;
@@ -2089,6 +2134,8 @@ static void pfkey_xfrm_policy2msg(struct sk_buff *skb, struct xfrm_policy *xp, i
 
     hdr->sadb_msg_len = size / sizeof(uint64_t);
     hdr->sadb_msg_reserved = atomic_read(&xp->refcnt);
+
+    return 0;
 }
 
 static int key_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *c)
@@ -2102,7 +2149,9 @@ static int key_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *c
         err = PTR_ERR(out_skb);
         goto out;
     }
-    pfkey_xfrm_policy2msg(out_skb, xp, dir);
+    err = pfkey_xfrm_policy2msg(out_skb, xp, dir);
+    if (err < 0)
+        return err;
 
     out_hdr = (struct sadb_msg *) out_skb->data;
     out_hdr->sadb_msg_version = PF_KEY_V2;
@@ -2327,7 +2376,9 @@ static int key_pol_get_resp(struct sock *sk, struct xfrm_policy *xp, struct sadb
         err = PTR_ERR(out_skb);
         goto out;
     }
-    pfkey_xfrm_policy2msg(out_skb, xp, dir);
+    err = pfkey_xfrm_policy2msg(out_skb, xp, dir);
+    if (err < 0)
+        goto out;
 
     out_hdr = (struct sadb_msg *) out_skb->data;
     out_hdr->sadb_msg_version = hdr->sadb_msg_version;
@@ -2409,6 +2460,7 @@ static int ipsecrequests_to_migrate(struct sadb_x_ipsecrequest *rq1, int len,
 {
     int err;
     struct sadb_x_ipsecrequest *rq2;
+    int mode;
 
     if (len <= sizeof(struct sadb_x_ipsecrequest) ||
         len < rq1->sadb_x_ipsecrequest_len)
@@ -2439,7 +2491,9 @@ static int ipsecrequests_to_migrate(struct sadb_x_ipsecrequest *rq1, int len,
         return -EINVAL;
 
     m->proto = rq1->sadb_x_ipsecrequest_proto;
-    m->mode = rq1->sadb_x_ipsecrequest_mode - 1;
+    if ((mode = pfkey_mode_to_xfrm(rq1->sadb_x_ipsecrequest_mode)) < 0)
+        return -EINVAL;
+    m->mode = mode;
     m->reqid = rq1->sadb_x_ipsecrequest_reqid;
 
     return ((int)(rq1->sadb_x_ipsecrequest_len +
@@ -2579,12 +2633,15 @@ static int dump_sp(struct xfrm_policy *xp, int dir, int count, void *ptr)
     struct pfkey_dump_data *data = ptr;
     struct sk_buff *out_skb;
     struct sadb_msg *out_hdr;
+    int err;
 
     out_skb = pfkey_xfrm_policy2msg_prep(xp);
     if (IS_ERR(out_skb))
         return PTR_ERR(out_skb);
 
-    pfkey_xfrm_policy2msg(out_skb, xp, dir);
+    err = pfkey_xfrm_policy2msg(out_skb, xp, dir);
+    if (err < 0)
+        return err;
 
     out_hdr = (struct sadb_msg *) out_skb->data;
     out_hdr->sadb_msg_version = data->hdr->sadb_msg_version;
@@ -3513,7 +3570,10 @@ static int pfkey_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
 
     for (i = 0, mp = m; i < num_bundles; i++, mp++) {
         /* old ipsecrequest */
-        if (set_ipsecrequest(skb, mp->proto, mp->mode + 1,
+        int mode = pfkey_mode_from_xfrm(mp->mode);
+        if (mode < 0)
+            return -EINVAL;
+        if (set_ipsecrequest(skb, mp->proto, mode,
                              (mp->reqid ? IPSEC_LEVEL_UNIQUE : IPSEC_LEVEL_REQUIRE),
                              mp->reqid, mp->old_family,
                              &mp->old_saddr, &mp->old_daddr) < 0) {
@@ -3521,7 +3581,7 @@ static int pfkey_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
         }
 
         /* new ipsecrequest */
-        if (set_ipsecrequest(skb, mp->proto, mp->mode + 1,
+        if (set_ipsecrequest(skb, mp->proto, mode,
                              (mp->reqid ? IPSEC_LEVEL_UNIQUE : IPSEC_LEVEL_REQUIRE),
                              mp->reqid, mp->new_family,
                              &mp->new_saddr, &mp->new_daddr) < 0) {
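
A note on the af_key changes above: the old code converted between the kernel's XFRM_MODE_* values and the PF_KEY IPSEC_MODE_* values with "+ 1" / "- 1" arithmetic, which only works while the two sets of constants happen to be offset by exactly one, and silently accepts anything else. The new pfkey_mode_from_xfrm()/pfkey_mode_to_xfrm() helpers map the values explicitly and return -1 for anything unknown, and their callers now fail with -EINVAL. The standalone sketch below illustrates the difference; it is not part of the patch, and the constants are local copies reflecting my reading of the headers of this era.

#include <stdio.h>

/* Local stand-ins for the kernel constants (values as I read them in the
 * 2.6-era headers; defined here only so the example is self-contained). */
enum { XFRM_MODE_TRANSPORT = 0, XFRM_MODE_TUNNEL = 1, XFRM_MODE_BEET = 4 };
enum { IPSEC_MODE_ANY = 0, IPSEC_MODE_TRANSPORT = 1,
       IPSEC_MODE_TUNNEL = 2, IPSEC_MODE_BEET = 3 };

/* Explicit table, mirroring the patch's pfkey_mode_from_xfrm(). */
static int mode_from_xfrm(int mode)
{
    switch (mode) {
    case XFRM_MODE_TRANSPORT: return IPSEC_MODE_TRANSPORT;
    case XFRM_MODE_TUNNEL:    return IPSEC_MODE_TUNNEL;
    case XFRM_MODE_BEET:      return IPSEC_MODE_BEET;
    default:                  return -1;  /* unknown: refuse, don't guess */
    }
}

int main(void)
{
    int modes[] = { XFRM_MODE_TRANSPORT, XFRM_MODE_TUNNEL, XFRM_MODE_BEET };
    unsigned int i;

    for (i = 0; i < sizeof(modes) / sizeof(modes[0]); i++)
        printf("xfrm %d -> naive \"+1\" gives %d, explicit table gives %d\n",
               modes[i], modes[i] + 1, mode_from_xfrm(modes[i]));
    /* For BEET the naive arithmetic yields 5, not IPSEC_MODE_BEET (3). */
    return 0;
}
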
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index e73d8f546c6b..c48b0f49f003 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -443,6 +443,7 @@ static int netlink_release(struct socket *sock)
         return 0;
 
     netlink_remove(sk);
+    sock_orphan(sk);
     nlk = nlk_sk(sk);
 
     spin_lock(&nlk->cb_lock);
@@ -457,7 +458,6 @@ static int netlink_release(struct socket *sock)
     /* OK. Socket is unlinked, and, therefore,
        no new packets will arrive */
 
-    sock_orphan(sk);
     sock->sk = NULL;
     wake_up_interruptible_all(&nlk->wait);
 
@@ -1412,9 +1412,9 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
         return -ECONNREFUSED;
     }
     nlk = nlk_sk(sk);
-    /* A dump is in progress... */
+    /* A dump or destruction is in progress... */
     spin_lock(&nlk->cb_lock);
-    if (nlk->cb) {
+    if (nlk->cb || sock_flag(sk, SOCK_DEAD)) {
         spin_unlock(&nlk->cb_lock);
         netlink_destroy_callback(cb);
         sock_put(sk);
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 536298c2eda2..a1d026f12b0e 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -627,6 +627,12 @@ int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt)
             retval = -EINVAL;
             goto err_bindx_rem;
         }
+
+        if (!af->addr_valid(sa_addr, sp, NULL)) {
+            retval = -EADDRNOTAVAIL;
+            goto err_bindx_rem;
+        }
+
         if (sa_addr->v4.sin_port != htons(bp->port)) {
             retval = -EINVAL;
             goto err_bindx_rem;
@@ -5638,6 +5644,36 @@ void sctp_wait_for_close(struct sock *sk, long timeout)
     finish_wait(sk->sk_sleep, &wait);
 }
 
+static void sctp_sock_rfree_frag(struct sk_buff *skb)
+{
+    struct sk_buff *frag;
+
+    if (!skb->data_len)
+        goto done;
+
+    /* Don't forget the fragments. */
+    for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next)
+        sctp_sock_rfree_frag(frag);
+
+done:
+    sctp_sock_rfree(skb);
+}
+
+static void sctp_skb_set_owner_r_frag(struct sk_buff *skb, struct sock *sk)
+{
+    struct sk_buff *frag;
+
+    if (!skb->data_len)
+        goto done;
+
+    /* Don't forget the fragments. */
+    for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next)
+        sctp_skb_set_owner_r_frag(frag, sk);
+
+done:
+    sctp_skb_set_owner_r(skb, sk);
+}
+
 /* Populate the fields of the newsk from the oldsk and migrate the assoc
  * and its messages to the newsk.
  */
@@ -5692,10 +5728,10 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
     sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) {
         event = sctp_skb2event(skb);
         if (event->asoc == assoc) {
-            sctp_sock_rfree(skb);
+            sctp_sock_rfree_frag(skb);
             __skb_unlink(skb, &oldsk->sk_receive_queue);
             __skb_queue_tail(&newsk->sk_receive_queue, skb);
-            sctp_skb_set_owner_r(skb, newsk);
+            sctp_skb_set_owner_r_frag(skb, newsk);
         }
     }
 
@@ -5723,10 +5759,10 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
         sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) {
             event = sctp_skb2event(skb);
             if (event->asoc == assoc) {
-                sctp_sock_rfree(skb);
+                sctp_sock_rfree_frag(skb);
                 __skb_unlink(skb, &oldsp->pd_lobby);
                 __skb_queue_tail(queue, skb);
-                sctp_skb_set_owner_r(skb, newsk);
+                sctp_skb_set_owner_r_frag(skb, newsk);
             }
         }
 
@@ -5738,6 +5774,16 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
 
     }
 
+    sctp_skb_for_each(skb, &assoc->ulpq.reasm, tmp) {
+        sctp_sock_rfree_frag(skb);
+        sctp_skb_set_owner_r_frag(skb, newsk);
+    }
+
+    sctp_skb_for_each(skb, &assoc->ulpq.lobby, tmp) {
+        sctp_sock_rfree_frag(skb);
+        sctp_skb_set_owner_r_frag(skb, newsk);
+    }
+
     /* Set the type of socket to indicate that it is peeled off from the
      * original UDP-style socket or created with the accept() call on a
      * TCP-style socket..
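
A note on the sctp_sock_migrate() changes above: when an event skb carries its data in a frag_list chain, the receive-memory accounting has to be released on the old socket and charged to the new one for every fragment, not just the head skb, which is what the recursive *_frag() helpers do. The toy model below shows that walk in userspace; it is not part of the patch, and the struct and function names are made up for illustration.

#include <stddef.h>
#include <stdio.h>

/* Toy stand-ins for an skb with a fragment list and for a socket that
 * tracks how much receive memory has been charged to it. */
struct buf {
    size_t     truesize;    /* bytes charged to the owner */
    struct buf *frags;      /* head of the fragment list, or NULL */
    struct buf *next;       /* next fragment in the list */
};

struct sock_model {
    size_t rmem;            /* total charged receive memory */
};

/* Charge a buffer and every fragment hanging off it to a new owner,
 * mirroring how the patch walks frag_list so the accounting moved between
 * sockets covers the fragments as well as the head. */
static void set_owner_r_frag(const struct buf *b, struct sock_model *sk)
{
    const struct buf *f;

    for (f = b->frags; f; f = f->next)
        set_owner_r_frag(f, sk);
    sk->rmem += b->truesize;
}

int main(void)
{
    struct buf f2 = { 200, NULL, NULL };
    struct buf f1 = { 300, NULL, &f2 };
    struct buf head = { 100, &f1, NULL };
    struct sock_model newsk = { 0 };

    set_owner_r_frag(&head, &newsk);
    printf("charged to new owner: %zu bytes\n", newsk.rmem);    /* 600 */
    return 0;
}
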
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index bfb197e37da3..b29e3e4b72c9 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -190,7 +190,14 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
     if (!sctp_sk(sk)->pd_mode) {
         queue = &sk->sk_receive_queue;
     } else if (ulpq->pd_mode) {
-        if (event->msg_flags & MSG_NOTIFICATION)
+        /* If the association is in partial delivery, we
+         * need to finish delivering the partially processed
+         * packet before passing any other data. This is
+         * because we don't truly support stream interleaving.
+         */
+        if ((event->msg_flags & MSG_NOTIFICATION) ||
+            (SCTP_DATA_NOT_FRAG ==
+                (event->msg_flags & SCTP_DATA_FRAG_MASK)))
             queue = &sctp_sk(sk)->pd_lobby;
         else {
             clear_pd = event->msg_flags & MSG_EOR;
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 9bae4090254c..2bd23ea2aa8b 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -383,7 +383,10 @@ void svcauth_unix_purge(void)
 static inline struct ip_map *
 ip_map_cached_get(struct svc_rqst *rqstp)
 {
-    struct ip_map *ipm = rqstp->rq_sock->sk_info_authunix;
+    struct ip_map *ipm;
+    struct svc_sock *svsk = rqstp->rq_sock;
+    spin_lock_bh(&svsk->sk_defer_lock);
+    ipm = svsk->sk_info_authunix;
     if (ipm != NULL) {
         if (!cache_valid(&ipm->h)) {
             /*
@@ -391,12 +394,14 @@ ip_map_cached_get(struct svc_rqst *rqstp)
              * remembered, e.g. by a second mount from the
              * same IP address.
              */
-            rqstp->rq_sock->sk_info_authunix = NULL;
+            svsk->sk_info_authunix = NULL;
+            spin_unlock_bh(&svsk->sk_defer_lock);
             cache_put(&ipm->h, &ip_map_cache);
             return NULL;
         }
         cache_get(&ipm->h);
     }
+    spin_unlock_bh(&svsk->sk_defer_lock);
     return ipm;
 }
 
@@ -405,9 +410,15 @@ ip_map_cached_put(struct svc_rqst *rqstp, struct ip_map *ipm)
 {
     struct svc_sock *svsk = rqstp->rq_sock;
 
-    if (svsk->sk_sock->type == SOCK_STREAM && svsk->sk_info_authunix == NULL)
-        svsk->sk_info_authunix = ipm; /* newly cached, keep the reference */
-    else
+    spin_lock_bh(&svsk->sk_defer_lock);
+    if (svsk->sk_sock->type == SOCK_STREAM &&
+        svsk->sk_info_authunix == NULL) {
+        /* newly cached, keep the reference */
+        svsk->sk_info_authunix = ipm;
+        ipm = NULL;
+    }
+    spin_unlock_bh(&svsk->sk_defer_lock);
+    if (ipm)
         cache_put(&ipm->h, &ip_map_cache);
 }
 
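
A note on the svcauth_unix changes above: ip_map_cached_put() now publishes the ip_map into the per-socket cache slot under sk_defer_lock and transfers its reference to the slot; only when the slot is already occupied is the caller's reference dropped, and that happens after the lock is released. The userspace sketch below shows the same publish-under-lock / put-outside-the-lock pattern; it is not part of the patch, the names are invented, and the refcounting is deliberately simplified (build with "cc -pthread").

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy refcounted object standing in for struct ip_map. */
struct obj {
    int refs;
};

static pthread_mutex_t slot_lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj *slot;        /* stands in for svsk->sk_info_authunix */

static void obj_put(struct obj *o)
{
    if (--o->refs == 0)
        free(o);
}

/* Publish the object into the cache slot under the lock; drop our
 * reference outside the lock only if someone else got there first. */
static void cached_put(struct obj *o)
{
    pthread_mutex_lock(&slot_lock);
    if (slot == NULL) {
        slot = o;               /* newly cached: the slot keeps the reference */
        o = NULL;
    }
    pthread_mutex_unlock(&slot_lock);

    if (o)                      /* slot already occupied: release ours */
        obj_put(o);
}

int main(void)
{
    struct obj *a = calloc(1, sizeof(*a));
    struct obj *b = calloc(1, sizeof(*b));

    a->refs = b->refs = 1;
    cached_put(a);              /* slot was empty: it now owns a */
    cached_put(b);              /* slot busy: b's reference is dropped */
    printf("slot holds an object with %d reference(s)\n", slot->refs);
    return 0;
}
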
