Diffstat (limited to 'net')
 net/caif/cfserl.c               |  6
 net/core/datagram.c             |  6
 net/core/drop_monitor.c         | 12
 net/core/neighbour.c            |  1
 net/core/rtnetlink.c            | 26
 net/core/skbuff.c               | 42
 net/core/sock.c                 | 33
 net/ipv4/ipmr.c                 |  2
 net/ipv4/netfilter/ip_tables.c  |  2
 net/ipv4/tcp_input.c            |  4
 net/ipv4/udp.c                  | 18
 net/ipv6/ip6_output.c           |  2
 net/ipv6/ip6mr.c                |  2
 net/ipv6/netfilter/ip6_tables.c |  2
 net/ipv6/route.c                |  2
 net/ipv6/udp.c                  |  5
 net/iucv/af_iucv.c              |  2
 net/iucv/iucv.c                 |  9
 net/mac80211/chan.c             |  2
 net/netfilter/x_tables.c        | 17
 net/netfilter/xt_TEE.c          |  4
 net/phonet/pep.c                |  6
 net/rds/ib_cm.c                 |  1
 net/rds/iw_cm.c                 |  1
 net/sunrpc/xprtsock.c           | 29
 25 files changed, 155 insertions(+), 81 deletions(-)
diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c
index cb4325a3dc83..965c5baace40 100644
--- a/net/caif/cfserl.c
+++ b/net/caif/cfserl.c
@@ -59,16 +59,18 @@ static int cfserl_receive(struct cflayer *l, struct cfpkt *newpkt)
         u8 stx = CFSERL_STX;
         int ret;
         u16 expectlen = 0;
+
         caif_assert(newpkt != NULL);
         spin_lock(&layr->sync);
 
         if (layr->incomplete_frm != NULL) {
-
                 layr->incomplete_frm =
                         cfpkt_append(layr->incomplete_frm, newpkt, expectlen);
                 pkt = layr->incomplete_frm;
-                if (pkt == NULL)
+                if (pkt == NULL) {
+                        spin_unlock(&layr->sync);
                         return -ENOMEM;
+                }
         } else {
                 pkt = newpkt;
         }
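
The change above closes a lock leak: the -ENOMEM return used to leave layr->sync held. A minimal userspace analogue of the idiom, with hypothetical names, just to show the shape:

#include <errno.h>
#include <pthread.h>
#include <stddef.h>

/* Hypothetical analogue of the cfserl.c fix: every early return taken
 * while the lock is held must drop the lock first. */
struct layer {
        pthread_mutex_t sync;
};

static void *merge_pending(struct layer *l, void *pkt)
{
        (void)l;
        return pkt;
}

static int consume_locked(struct layer *l, void *pkt)
{
        pthread_mutex_lock(&l->sync);
        pkt = merge_pending(l, pkt);            /* may fail while locked */
        if (pkt == NULL) {
                pthread_mutex_unlock(&l->sync); /* the bug: this unlock was missing */
                return -ENOMEM;
        }
        /* ... process pkt ... */
        pthread_mutex_unlock(&l->sync);
        return 0;
}
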
diff --git a/net/core/datagram.c b/net/core/datagram.c
index e0097531417a..f5b6f43a4c2e 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -229,15 +229,17 @@ EXPORT_SYMBOL(skb_free_datagram);
 
 void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb)
 {
+        bool slow;
+
         if (likely(atomic_read(&skb->users) == 1))
                 smp_rmb();
         else if (likely(!atomic_dec_and_test(&skb->users)))
                 return;
 
-        lock_sock_bh(sk);
+        slow = lock_sock_fast(sk);
         skb_orphan(skb);
         sk_mem_reclaim_partial(sk);
-        unlock_sock_bh(sk);
+        unlock_sock_fast(sk, slow);
 
         /* skb is now orphaned, can be freed outside of locked section */
         __kfree_skb(skb);
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index cf208d8042b1..ad41529fb60f 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -172,12 +172,12 @@ out:
         return;
 }
 
-static void trace_kfree_skb_hit(struct sk_buff *skb, void *location)
+static void trace_kfree_skb_hit(void *ignore, struct sk_buff *skb, void *location)
 {
         trace_drop_common(skb, location);
 }
 
-static void trace_napi_poll_hit(struct napi_struct *napi)
+static void trace_napi_poll_hit(void *ignore, struct napi_struct *napi)
 {
         struct dm_hw_stat_delta *new_stat;
 
@@ -225,12 +225,12 @@ static int set_all_monitor_traces(int state)
 
         switch (state) {
         case TRACE_ON:
-                rc |= register_trace_kfree_skb(trace_kfree_skb_hit);
-                rc |= register_trace_napi_poll(trace_napi_poll_hit);
+                rc |= register_trace_kfree_skb(trace_kfree_skb_hit, NULL);
+                rc |= register_trace_napi_poll(trace_napi_poll_hit, NULL);
                 break;
         case TRACE_OFF:
-                rc |= unregister_trace_kfree_skb(trace_kfree_skb_hit);
-                rc |= unregister_trace_napi_poll(trace_napi_poll_hit);
+                rc |= unregister_trace_kfree_skb(trace_kfree_skb_hit, NULL);
+                rc |= unregister_trace_napi_poll(trace_napi_poll_hit, NULL);
 
                 tracepoint_synchronize_unregister();
 
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index bff37908bd55..6ba1c0eece03 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -934,6 +934,7 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
                                 kfree_skb(buff);
                                 NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
                         }
+                        skb_dst_force(skb);
                         __skb_queue_tail(&neigh->arp_queue, skb);
                 }
                 rc = 1;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 7ab86f3a1ea4..1a2af24e9e3d 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -650,11 +650,12 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev)
         if (dev->dev.parent && dev_is_pci(dev->dev.parent)) {
 
                 int num_vfs = dev_num_vf(dev->dev.parent);
-                size_t size = nlmsg_total_size(sizeof(struct nlattr));
-                size += nlmsg_total_size(num_vfs * sizeof(struct nlattr));
-                size += num_vfs * (sizeof(struct ifla_vf_mac) +
-                                   sizeof(struct ifla_vf_vlan) +
-                                   sizeof(struct ifla_vf_tx_rate));
+                size_t size = nla_total_size(sizeof(struct nlattr));
+                size += nla_total_size(num_vfs * sizeof(struct nlattr));
+                size += num_vfs *
+                        (nla_total_size(sizeof(struct ifla_vf_mac)) +
+                         nla_total_size(sizeof(struct ifla_vf_vlan)) +
+                         nla_total_size(sizeof(struct ifla_vf_tx_rate)));
                 return size;
         } else
                 return 0;
@@ -722,14 +723,13 @@ static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
 
         for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) {
                 vf_port = nla_nest_start(skb, IFLA_VF_PORT);
-                if (!vf_port) {
-                        nla_nest_cancel(skb, vf_ports);
-                        return -EMSGSIZE;
-                }
+                if (!vf_port)
+                        goto nla_put_failure;
                 NLA_PUT_U32(skb, IFLA_PORT_VF, vf);
                 err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb);
+                if (err == -EMSGSIZE)
+                        goto nla_put_failure;
                 if (err) {
-nla_put_failure:
                         nla_nest_cancel(skb, vf_port);
                         continue;
                 }
@@ -739,6 +739,10 @@ nla_put_failure:
         nla_nest_end(skb, vf_ports);
 
         return 0;
+
+nla_put_failure:
+        nla_nest_cancel(skb, vf_ports);
+        return -EMSGSIZE;
 }
 
 static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
@@ -753,7 +757,7 @@ static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
         err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb);
         if (err) {
                 nla_nest_cancel(skb, port_self);
-                return err;
+                return (err == -EMSGSIZE) ? err : 0;
         }
 
         nla_nest_end(skb, port_self);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index f8abf68e3988..9f07e749d7b1 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -482,22 +482,22 @@ EXPORT_SYMBOL(consume_skb);
  *      reference count dropping and cleans up the skbuff as if it
  *      just came from __alloc_skb().
  */
-int skb_recycle_check(struct sk_buff *skb, int skb_size)
+bool skb_recycle_check(struct sk_buff *skb, int skb_size)
 {
         struct skb_shared_info *shinfo;
 
         if (irqs_disabled())
-                return 0;
+                return false;
 
         if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
-                return 0;
+                return false;
 
         skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
         if (skb_end_pointer(skb) - skb->head < skb_size)
-                return 0;
+                return false;
 
         if (skb_shared(skb) || skb_cloned(skb))
-                return 0;
+                return false;
 
         skb_release_head_state(skb);
 
@@ -509,7 +509,7 @@ int skb_recycle_check(struct sk_buff *skb, int skb_size)
         skb->data = skb->head + NET_SKB_PAD;
         skb_reset_tail_pointer(skb);
 
-        return 1;
+        return true;
 }
 EXPORT_SYMBOL(skb_recycle_check);
 
@@ -2965,6 +2965,34 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
 }
 EXPORT_SYMBOL_GPL(skb_cow_data);
 
+static void sock_rmem_free(struct sk_buff *skb)
+{
+        struct sock *sk = skb->sk;
+
+        atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
+}
+
+/*
+ * Note: We dont mem charge error packets (no sk_forward_alloc changes)
+ */
+int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
+{
+        if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
+            (unsigned)sk->sk_rcvbuf)
+                return -ENOMEM;
+
+        skb_orphan(skb);
+        skb->sk = sk;
+        skb->destructor = sock_rmem_free;
+        atomic_add(skb->truesize, &sk->sk_rmem_alloc);
+
+        skb_queue_tail(&sk->sk_error_queue, skb);
+        if (!sock_flag(sk, SOCK_DEAD))
+                sk->sk_data_ready(sk, skb->len);
+        return 0;
+}
+EXPORT_SYMBOL(sock_queue_err_skb);
+
 void skb_tstamp_tx(struct sk_buff *orig_skb,
                    struct skb_shared_hwtstamps *hwtstamps)
 {
@@ -2996,7 +3024,9 @@ void skb_tstamp_tx(struct sk_buff *orig_skb,
         memset(serr, 0, sizeof(*serr));
         serr->ee.ee_errno = ENOMSG;
         serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
+
         err = sock_queue_err_skb(sk, skb);
+
         if (err)
                 kfree_skb(skb);
 }
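
The new sock_queue_err_skb() charges the skb against sk_rmem_alloc and installs sock_rmem_free as the destructor so the charge is undone whenever the skb is freed. Callers follow the pattern visible in the skb_tstamp_tx() hunk; a hedged sketch with a hypothetical caller name:

#include <net/sock.h>

/* Hypothetical caller of sock_queue_err_skb(): on failure the skb was
 * neither charged nor queued, so the caller still owns it and frees it. */
static void queue_error_report(struct sock *sk, struct sk_buff *skb)
{
        if (sock_queue_err_skb(sk, skb))
                kfree_skb(skb); /* would exceed sk_rcvbuf: drop */
}
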
diff --git a/net/core/sock.c b/net/core/sock.c
index 37fe9b6adade..2cf7f9f7e775 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2007,6 +2007,39 @@ void release_sock(struct sock *sk)
 }
 EXPORT_SYMBOL(release_sock);
 
+/**
+ * lock_sock_fast - fast version of lock_sock
+ * @sk: socket
+ *
+ * This version should be used for very small section, where process wont block
+ * return false if fast path is taken
+ *   sk_lock.slock locked, owned = 0, BH disabled
+ * return true if slow path is taken
+ *   sk_lock.slock unlocked, owned = 1, BH enabled
+ */
+bool lock_sock_fast(struct sock *sk)
+{
+        might_sleep();
+        spin_lock_bh(&sk->sk_lock.slock);
+
+        if (!sk->sk_lock.owned)
+                /*
+                 * Note : We must disable BH
+                 */
+                return false;
+
+        __lock_sock(sk);
+        sk->sk_lock.owned = 1;
+        spin_unlock(&sk->sk_lock.slock);
+        /*
+         * The sk_lock has mutex_lock() semantics here:
+         */
+        mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
+        local_bh_enable();
+        return true;
+}
+EXPORT_SYMBOL(lock_sock_fast);
+
 int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
 {
         struct timeval tv;
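
The intended calling pattern for the new primitive appears in the datagram.c and udp.c hunks: the caller keeps the returned bool and hands it back so unlock_sock_fast() knows whether to drop the spinlock (fast path) or do a release_sock()-style unlock (slow path). A minimal sketch of a caller, mirroring udp_destroy_sock() above (hypothetical function name):

#include <net/sock.h>

/* Short, non-blocking critical section under lock_sock_fast(). */
static void example_reclaim(struct sock *sk)
{
        bool slow = lock_sock_fast(sk); /* false: slock held, BH disabled */

        sk_mem_reclaim_partial(sk);     /* must not sleep on the fast path */
        unlock_sock_fast(sk, slow);     /* undoes whichever path was taken */
}
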
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 45889103b3e2..856123fe32f9 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1911,7 +1911,7 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
         struct rtattr *mp_head;
 
         /* If cache is unresolved, don't try to parse IIF and OIF */
-        if (c->mfc_parent > MAXVIFS)
+        if (c->mfc_parent >= MAXVIFS)
                 return -ENOENT;
 
         if (VIF_EXISTS(mrt, c->mfc_parent))
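
This (and the matching ip6mr.c hunk below) is an off-by-one fix: the vif table has MAXVIFS entries indexed 0..MAXVIFS-1, so a parent index equal to MAXVIFS is already out of range and '>' let it through. The corrected predicate, isolated as a sketch:

#include <linux/mroute.h>
#include <linux/types.h>

/* Valid indexes into a MAXVIFS-sized table are 0 .. MAXVIFS-1, hence
 * the change from '> MAXVIFS' to '>= MAXVIFS' in the rejection test. */
static bool vif_parent_valid(unsigned short parent)
{
        return parent < MAXVIFS;
}
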
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 63958f3394a5..4b6c5ca610fc 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -336,7 +336,7 @@ ipt_do_table(struct sk_buff *skb,
         cpu = smp_processor_id();
         table_base = private->entries[cpu];
         jumpstack = (struct ipt_entry **)private->jumpstack[cpu];
-        stackptr = &private->stackptr[cpu];
+        stackptr = per_cpu_ptr(private->stackptr, cpu);
         origptr = *stackptr;
 
         e = get_entry(table_base, private->hook_entry[hook]);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 3e6dafcb1071..548d575e6cc6 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2639,7 +2639,7 @@ static void DBGUNDO(struct sock *sk, const char *msg)
         if (sk->sk_family == AF_INET) {
                 printk(KERN_DEBUG "Undo %s %pI4/%u c%u l%u ss%u/%u p%u\n",
                        msg,
-                       &inet->daddr, ntohs(inet->dport),
+                       &inet->inet_daddr, ntohs(inet->inet_dport),
                        tp->snd_cwnd, tcp_left_out(tp),
                        tp->snd_ssthresh, tp->prior_ssthresh,
                        tp->packets_out);
@@ -2649,7 +2649,7 @@ static void DBGUNDO(struct sock *sk, const char *msg)
                 struct ipv6_pinfo *np = inet6_sk(sk);
                 printk(KERN_DEBUG "Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n",
                        msg,
-                       &np->daddr, ntohs(inet->dport),
+                       &np->daddr, ntohs(inet->inet_dport),
                        tp->snd_cwnd, tcp_left_out(tp),
                        tp->snd_ssthresh, tp->prior_ssthresh,
                        tp->packets_out);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index baeec29fe0f1..eec4ff456e33 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -633,9 +633,9 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
         if (!inet->recverr) {
                 if (!harderr || sk->sk_state != TCP_ESTABLISHED)
                         goto out;
-        } else {
+        } else
                 ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1));
-        }
+
         sk->sk_err = err;
         sk->sk_error_report(sk);
 out:
@@ -1063,10 +1063,11 @@ static unsigned int first_packet_length(struct sock *sk)
         spin_unlock_bh(&rcvq->lock);
 
         if (!skb_queue_empty(&list_kill)) {
-                lock_sock_bh(sk);
+                bool slow = lock_sock_fast(sk);
+
                 __skb_queue_purge(&list_kill);
                 sk_mem_reclaim_partial(sk);
-                unlock_sock_bh(sk);
+                unlock_sock_fast(sk, slow);
         }
         return res;
 }
@@ -1123,6 +1124,7 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
         int peeked;
         int err;
         int is_udplite = IS_UDPLITE(sk);
+        bool slow;
 
         /*
          * Check any passed addresses
@@ -1197,10 +1199,10 @@ out:
         return err;
 
 csum_copy_err:
-        lock_sock_bh(sk);
+        slow = lock_sock_fast(sk);
         if (!skb_kill_datagram(sk, skb, flags))
                 UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
-        unlock_sock_bh(sk);
+        unlock_sock_fast(sk, slow);
 
         if (noblock)
                 return -EAGAIN;
@@ -1625,9 +1627,9 @@ int udp_rcv(struct sk_buff *skb)
 
 void udp_destroy_sock(struct sock *sk)
 {
-        lock_sock_bh(sk);
+        bool slow = lock_sock_fast(sk);
         udp_flush_pending_frames(sk);
-        unlock_sock_bh(sk);
+        unlock_sock_fast(sk, slow);
 }
 
 /*
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index cd963f64e27c..89425af0684c 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -507,7 +507,7 @@ int ip6_forward(struct sk_buff *skb)
         if (mtu < IPV6_MIN_MTU)
                 mtu = IPV6_MIN_MTU;
 
-        if (skb->len > mtu) {
+        if (skb->len > mtu && !skb_is_gso(skb)) {
                 /* Again, force OUTPUT device used as source address */
                 skb->dev = dst->dev;
                 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
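
A GSO skb legitimately carries more than one MTU's worth of data while inside the stack; it is segmented into MTU-sized frames at transmit time, so the forwarding path should not bounce it with ICMPV6_PKT_TOOBIG. The test, isolated as a sketch:

#include <linux/skbuff.h>

/* Sketch of the corrected forward-path check: oversized GSO skbs pass,
 * since GSO segmentation to MTU-sized frames happens later at transmit. */
static bool ip6_fwd_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
{
        return skb->len > mtu && !skb_is_gso(skb);
}
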
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index bd9e7d3e9c8e..073071f2b75b 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -2017,7 +2017,7 @@ static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
         struct rtattr *mp_head;
 
         /* If cache is unresolved, don't try to parse IIF and OIF */
-        if (c->mf6c_parent > MAXMIFS)
+        if (c->mf6c_parent >= MAXMIFS)
                 return -ENOENT;
 
         if (MIF_EXISTS(mrt, c->mf6c_parent))
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 6f517bd83692..9d2d68f0e605 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -363,7 +363,7 @@ ip6t_do_table(struct sk_buff *skb,
         cpu = smp_processor_id();
         table_base = private->entries[cpu];
         jumpstack = (struct ip6t_entry **)private->jumpstack[cpu];
-        stackptr = &private->stackptr[cpu];
+        stackptr = per_cpu_ptr(private->stackptr, cpu);
         origptr = *stackptr;
 
         e = get_entry(table_base, private->hook_entry[hook]);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 294cbe8b0725..252d76199c41 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -814,7 +814,7 @@ struct dst_entry * ip6_route_output(struct net *net, struct sock *sk,
 {
         int flags = 0;
 
-        if (fl->oif || rt6_need_strict(&fl->fl6_dst))
+        if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl->fl6_dst))
                 flags |= RT6_LOOKUP_F_IFACE;
 
         if (!ipv6_addr_any(&fl->fl6_src))
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 3d7a2c0b836a..87be58673b55 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -328,6 +328,7 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
         int err;
         int is_udplite = IS_UDPLITE(sk);
         int is_udp4;
+        bool slow;
 
         if (addr_len)
                 *addr_len=sizeof(struct sockaddr_in6);
@@ -424,7 +425,7 @@ out:
         return err;
 
 csum_copy_err:
-        lock_sock_bh(sk);
+        slow = lock_sock_fast(sk);
         if (!skb_kill_datagram(sk, skb, flags)) {
                 if (is_udp4)
                         UDP_INC_STATS_USER(sock_net(sk),
@@ -433,7 +434,7 @@ csum_copy_err:
                 UDP6_INC_STATS_USER(sock_net(sk),
                                 UDP_MIB_INERRORS, is_udplite);
         }
-        unlock_sock_bh(sk);
+        unlock_sock_fast(sk, slow);
 
         if (flags & MSG_DONTWAIT)
                 return -EAGAIN;
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index c8b4599a752e..9637e45744fa 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -1619,7 +1619,7 @@ static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
 save_message:
         save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
         if (!save_msg)
-                return;
+                goto out_unlock;
         save_msg->path = path;
         save_msg->msg = *msg;
 
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index fd8b28361a64..f28ad2cc8428 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -632,13 +632,14 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
                 iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data),
                                 GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
                 if (!iucv_irq_data[cpu])
-                        return NOTIFY_BAD;
+                        return notifier_from_errno(-ENOMEM);
+
                 iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param),
                                 GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
                 if (!iucv_param[cpu]) {
                         kfree(iucv_irq_data[cpu]);
                         iucv_irq_data[cpu] = NULL;
-                        return NOTIFY_BAD;
+                        return notifier_from_errno(-ENOMEM);
                 }
                 iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param),
                                 GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
@@ -647,7 +648,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
                         iucv_param[cpu] = NULL;
                         kfree(iucv_irq_data[cpu]);
                         iucv_irq_data[cpu] = NULL;
-                        return NOTIFY_BAD;
+                        return notifier_from_errno(-ENOMEM);
                 }
                 break;
         case CPU_UP_CANCELED:
@@ -677,7 +678,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
                 cpu_clear(cpu, cpumask);
                 if (cpus_empty(cpumask))
                         /* Can't offline last IUCV enabled cpu. */
-                        return NOTIFY_BAD;
+                        return notifier_from_errno(-EINVAL);
                 smp_call_function_single(cpu, iucv_retrieve_cpu, NULL, 1);
                 if (cpus_empty(iucv_irq_cpumask))
                         smp_call_function_single(first_cpu(iucv_buffer_cpumask),
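
notifier_from_errno() encodes a negative errno into the notifier return value (alongside NOTIFY_STOP_MASK), so the notifier caller can recover the precise error with notifier_to_errno() instead of seeing an opaque NOTIFY_BAD. The round trip, as a sketch with hypothetical function names:

#include <linux/errno.h>
#include <linux/notifier.h>

/* Round trip of the encoding used above. */
static int example_callback(void)
{
        return notifier_from_errno(-ENOMEM); /* NOTIFY_STOP_MASK | encoded errno */
}

static int example_caller(void)
{
        int err = notifier_to_errno(example_callback());

        return err; /* -ENOMEM again; 0 for NOTIFY_OK/NOTIFY_DONE */
}
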
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index 5d218c530a4e..32be11e4c4d9 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -5,7 +5,7 @@
 #include <linux/nl80211.h>
 #include "ieee80211_i.h"
 
-enum ieee80211_chan_mode
+static enum ieee80211_chan_mode
 __ieee80211_get_channel_mode(struct ieee80211_local *local,
                              struct ieee80211_sub_if_data *ignore)
 {
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 445de702b8b7..e34622fa0003 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -699,10 +699,8 @@ void xt_free_table_info(struct xt_table_info *info)
                 vfree(info->jumpstack);
         else
                 kfree(info->jumpstack);
-        if (sizeof(unsigned int) * nr_cpu_ids > PAGE_SIZE)
-                vfree(info->stackptr);
-        else
-                kfree(info->stackptr);
+
+        free_percpu(info->stackptr);
 
         kfree(info);
 }
@@ -753,14 +751,9 @@ static int xt_jumpstack_alloc(struct xt_table_info *i)
         unsigned int size;
         int cpu;
 
-        size = sizeof(unsigned int) * nr_cpu_ids;
-        if (size > PAGE_SIZE)
-                i->stackptr = vmalloc(size);
-        else
-                i->stackptr = kmalloc(size, GFP_KERNEL);
+        i->stackptr = alloc_percpu(unsigned int);
         if (i->stackptr == NULL)
                 return -ENOMEM;
-        memset(i->stackptr, 0, size);
 
         size = sizeof(void **) * nr_cpu_ids;
         if (size > PAGE_SIZE)
@@ -844,10 +837,6 @@ struct xt_table *xt_register_table(struct net *net,
         struct xt_table_info *private;
         struct xt_table *t, *table;
 
-        ret = xt_jumpstack_alloc(newinfo);
-        if (ret < 0)
-                return ERR_PTR(ret);
-
         /* Don't add one object to multiple lists. */
         table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
         if (!table) {
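
The x_tables.c conversion swaps a hand-sized nr_cpu_ids array (vmalloc'd or kmalloc'd depending on size) for the per-cpu allocator; alloc_percpu() returns zeroed storage, which is why the old memset() disappears too. The general shape, sketched with a hypothetical struct:

#include <linux/errno.h>
#include <linux/percpu.h>

/* Per-cpu counter pattern used by the conversion above. */
struct example_ctx {
        unsigned int __percpu *stackptr;
};

static int example_init(struct example_ctx *c)
{
        c->stackptr = alloc_percpu(unsigned int); /* zeroed on allocation */
        if (c->stackptr == NULL)
                return -ENOMEM;
        return 0;
}

static unsigned int *example_cpu_slot(struct example_ctx *c, unsigned int cpu)
{
        return per_cpu_ptr(c->stackptr, cpu); /* as in ipt_do_table() */
}

static void example_exit(struct example_ctx *c)
{
        free_percpu(c->stackptr);
}
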
diff --git a/net/netfilter/xt_TEE.c b/net/netfilter/xt_TEE.c
index d7920d9f49e9..859d9fd429c8 100644
--- a/net/netfilter/xt_TEE.c
+++ b/net/netfilter/xt_TEE.c
@@ -76,7 +76,7 @@ tee_tg_route4(struct sk_buff *skb, const struct xt_tee_tginfo *info)
         if (ip_route_output_key(net, &rt, &fl) != 0)
                 return false;
 
-        dst_release(skb_dst(skb));
+        skb_dst_drop(skb);
         skb_dst_set(skb, &rt->u.dst);
         skb->dev = rt->u.dst.dev;
         skb->protocol = htons(ETH_P_IP);
@@ -157,7 +157,7 @@ tee_tg_route6(struct sk_buff *skb, const struct xt_tee_tginfo *info)
         if (dst == NULL)
                 return false;
 
-        dst_release(skb_dst(skb));
+        skb_dst_drop(skb);
         skb_dst_set(skb, dst);
         skb->dev = dst->dev;
         skb->protocol = htons(ETH_P_IPV6);
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index 7b048a35ca58..94d72e85a475 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -1045,12 +1045,12 @@ static void pep_sock_unhash(struct sock *sk)
         lock_sock(sk);
         if ((1 << sk->sk_state) & ~(TCPF_CLOSE|TCPF_LISTEN)) {
                 skparent = pn->listener;
-                sk_del_node_init(sk);
                 release_sock(sk);
 
-                sk = skparent;
                 pn = pep_sk(skparent);
-                lock_sock(sk);
+                lock_sock(skparent);
+                sk_del_node_init(sk);
+                sk = skparent;
         }
         /* Unhash a listening sock only when it is closed
          * and all of its active connected pipes are closed. */
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index 10ed0d55f759..f68832798db2 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -475,6 +475,7 @@ int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
         err = rds_ib_setup_qp(conn);
         if (err) {
                 rds_ib_conn_error(conn, "rds_ib_setup_qp failed (%d)\n", err);
+                mutex_unlock(&conn->c_cm_lock);
                 goto out;
         }
 
diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
index a9d951b4fbae..b5dd6ac39be8 100644
--- a/net/rds/iw_cm.c
+++ b/net/rds/iw_cm.c
@@ -452,6 +452,7 @@ int rds_iw_cm_handle_connect(struct rdma_cm_id *cm_id,
         err = rds_iw_setup_qp(conn);
         if (err) {
                 rds_iw_conn_error(conn, "rds_iw_setup_qp failed (%d)\n", err);
+                mutex_unlock(&conn->c_cm_lock);
                 goto out;
         }
 
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index b7cd8cccbe72..2a9675136c68 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -2293,6 +2293,7 @@ static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
         struct sockaddr *addr = args->dstaddr;
         struct rpc_xprt *xprt;
         struct sock_xprt *transport;
+        struct rpc_xprt *ret;
 
         xprt = xs_setup_xprt(args, xprt_udp_slot_table_entries);
         if (IS_ERR(xprt))
@@ -2330,8 +2331,8 @@ static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
                 xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP6);
                 break;
         default:
-                kfree(xprt);
-                return ERR_PTR(-EAFNOSUPPORT);
+                ret = ERR_PTR(-EAFNOSUPPORT);
+                goto out_err;
         }
 
         if (xprt_bound(xprt))
@@ -2346,10 +2347,11 @@ static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
 
         if (try_module_get(THIS_MODULE))
                 return xprt;
-
+        ret = ERR_PTR(-EINVAL);
+out_err:
         kfree(xprt->slot);
         kfree(xprt);
-        return ERR_PTR(-EINVAL);
+        return ret;
 }
 
 static const struct rpc_timeout xs_tcp_default_timeout = {
@@ -2368,6 +2370,7 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
         struct sockaddr *addr = args->dstaddr;
         struct rpc_xprt *xprt;
         struct sock_xprt *transport;
+        struct rpc_xprt *ret;
 
         xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries);
         if (IS_ERR(xprt))
@@ -2403,8 +2406,8 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
                 xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP6);
                 break;
         default:
-                kfree(xprt);
-                return ERR_PTR(-EAFNOSUPPORT);
+                ret = ERR_PTR(-EAFNOSUPPORT);
+                goto out_err;
         }
 
         if (xprt_bound(xprt))
@@ -2420,10 +2423,11 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
 
         if (try_module_get(THIS_MODULE))
                 return xprt;
-
+        ret = ERR_PTR(-EINVAL);
+out_err:
         kfree(xprt->slot);
         kfree(xprt);
-        return ERR_PTR(-EINVAL);
+        return ret;
 }
 
 /**
@@ -2437,6 +2441,7 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
         struct rpc_xprt *xprt;
         struct sock_xprt *transport;
         struct svc_sock *bc_sock;
+        struct rpc_xprt *ret;
 
         xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries);
         if (IS_ERR(xprt))
@@ -2476,8 +2481,8 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
                                          RPCBIND_NETID_TCP6);
                 break;
         default:
-                kfree(xprt);
-                return ERR_PTR(-EAFNOSUPPORT);
+                ret = ERR_PTR(-EAFNOSUPPORT);
+                goto out_err;
         }
 
         if (xprt_bound(xprt))
@@ -2499,9 +2504,11 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
 
         if (try_module_get(THIS_MODULE))
                 return xprt;
+        ret = ERR_PTR(-EINVAL);
+out_err:
         kfree(xprt->slot);
         kfree(xprt);
-        return ERR_PTR(-EINVAL);
+        return ret;
 }
 
 static struct xprt_class xs_udp_transport = {
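
All three xs_setup_*() functions get the same refactor: the duplicated kfree()+return pairs collapse into a single out_err exit that frees in reverse allocation order and returns whatever error ret carries. Reduced to a hedged minimal sketch (hypothetical function, same shape as the hunks above):

/* Centralized-exit pattern from the xprtsock.c hunks: record the error
 * in ret, jump to one cleanup label, free everything, return ret. */
static struct rpc_xprt *example_setup(struct xprt_create *args)
{
        struct rpc_xprt *xprt;
        struct rpc_xprt *ret;

        xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries);
        if (IS_ERR(xprt))
                return xprt;

        if (args->dstaddr->sa_family != AF_INET) {
                ret = ERR_PTR(-EAFNOSUPPORT);   /* record error, share cleanup */
                goto out_err;
        }

        if (try_module_get(THIS_MODULE))
                return xprt;
        ret = ERR_PTR(-EINVAL);
out_err:
        kfree(xprt->slot);
        kfree(xprt);
        return ret;
}
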