Diffstat (limited to 'net')

 -rw-r--r--  net/caif/cfserl.c                |  6
 -rw-r--r--  net/core/skbuff.c                | 42
 -rw-r--r--  net/ipv4/netfilter/ip_tables.c   |  2
 -rw-r--r--  net/ipv4/tcp_input.c             |  4
 -rw-r--r--  net/ipv4/udp.c                   |  4
 -rw-r--r--  net/ipv6/netfilter/ip6_tables.c  |  2
 -rw-r--r--  net/ipv6/route.c                 |  2
 -rw-r--r--  net/mac80211/chan.c              |  2
 -rw-r--r--  net/netfilter/x_tables.c         | 17
 -rw-r--r--  net/phonet/pep.c                 |  6
 -rw-r--r--  net/rds/ib_cm.c                  |  1
 -rw-r--r--  net/rds/iw_cm.c                  |  1

12 files changed, 56 insertions, 33 deletions
diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c
index cb4325a3dc83..965c5baace40 100644
--- a/net/caif/cfserl.c
+++ b/net/caif/cfserl.c
@@ -59,16 +59,18 @@ static int cfserl_receive(struct cflayer *l, struct cfpkt *newpkt)
         u8 stx = CFSERL_STX;
         int ret;
         u16 expectlen = 0;
+
         caif_assert(newpkt != NULL);
         spin_lock(&layr->sync);
 
         if (layr->incomplete_frm != NULL) {
-
                 layr->incomplete_frm =
                         cfpkt_append(layr->incomplete_frm, newpkt, expectlen);
                 pkt = layr->incomplete_frm;
-                if (pkt == NULL)
+                if (pkt == NULL) {
+                        spin_unlock(&layr->sync);
                         return -ENOMEM;
+                }
         } else {
                 pkt = newpkt;
         }
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index f8abf68e3988..9f07e749d7b1 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -482,22 +482,22 @@ EXPORT_SYMBOL(consume_skb);
  * reference count dropping and cleans up the skbuff as if it
  * just came from __alloc_skb().
  */
-int skb_recycle_check(struct sk_buff *skb, int skb_size)
+bool skb_recycle_check(struct sk_buff *skb, int skb_size)
 {
         struct skb_shared_info *shinfo;
 
         if (irqs_disabled())
-                return 0;
+                return false;
 
         if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
-                return 0;
+                return false;
 
         skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
         if (skb_end_pointer(skb) - skb->head < skb_size)
-                return 0;
+                return false;
 
         if (skb_shared(skb) || skb_cloned(skb))
-                return 0;
+                return false;
 
         skb_release_head_state(skb);
 
@@ -509,7 +509,7 @@ int skb_recycle_check(struct sk_buff *skb, int skb_size)
         skb->data = skb->head + NET_SKB_PAD;
         skb_reset_tail_pointer(skb);
 
-        return 1;
+        return true;
 }
 EXPORT_SYMBOL(skb_recycle_check);
 
@@ -2965,6 +2965,34 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
 }
 EXPORT_SYMBOL_GPL(skb_cow_data);
 
+static void sock_rmem_free(struct sk_buff *skb)
+{
+        struct sock *sk = skb->sk;
+
+        atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
+}
+
+/*
+ * Note: We dont mem charge error packets (no sk_forward_alloc changes)
+ */
+int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
+{
+        if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
+            (unsigned)sk->sk_rcvbuf)
+                return -ENOMEM;
+
+        skb_orphan(skb);
+        skb->sk = sk;
+        skb->destructor = sock_rmem_free;
+        atomic_add(skb->truesize, &sk->sk_rmem_alloc);
+
+        skb_queue_tail(&sk->sk_error_queue, skb);
+        if (!sock_flag(sk, SOCK_DEAD))
+                sk->sk_data_ready(sk, skb->len);
+        return 0;
+}
+EXPORT_SYMBOL(sock_queue_err_skb);
+
 void skb_tstamp_tx(struct sk_buff *orig_skb,
                    struct skb_shared_hwtstamps *hwtstamps)
 {
@@ -2996,7 +3024,9 @@ void skb_tstamp_tx(struct sk_buff *orig_skb,
         memset(serr, 0, sizeof(*serr));
         serr->ee.ee_errno = ENOMSG;
         serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
+
         err = sock_queue_err_skb(sk, skb);
+
         if (err)
                 kfree_skb(skb);
 }
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 63958f3394a5..4b6c5ca610fc 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -336,7 +336,7 @@ ipt_do_table(struct sk_buff *skb,
         cpu        = smp_processor_id();
         table_base = private->entries[cpu];
         jumpstack  = (struct ipt_entry **)private->jumpstack[cpu];
-        stackptr   = &private->stackptr[cpu];
+        stackptr   = per_cpu_ptr(private->stackptr, cpu);
         origptr    = *stackptr;
 
         e = get_entry(table_base, private->hook_entry[hook]);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 3e6dafcb1071..548d575e6cc6 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2639,7 +2639,7 @@ static void DBGUNDO(struct sock *sk, const char *msg)
         if (sk->sk_family == AF_INET) {
                 printk(KERN_DEBUG "Undo %s %pI4/%u c%u l%u ss%u/%u p%u\n",
                        msg,
-                       &inet->daddr, ntohs(inet->dport),
+                       &inet->inet_daddr, ntohs(inet->inet_dport),
                        tp->snd_cwnd, tcp_left_out(tp),
                        tp->snd_ssthresh, tp->prior_ssthresh,
                        tp->packets_out);
@@ -2649,7 +2649,7 @@ static void DBGUNDO(struct sock *sk, const char *msg)
                 struct ipv6_pinfo *np = inet6_sk(sk);
                 printk(KERN_DEBUG "Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n",
                        msg,
-                       &np->daddr, ntohs(inet->dport),
+                       &np->daddr, ntohs(inet->inet_dport),
                        tp->snd_cwnd, tcp_left_out(tp),
                        tp->snd_ssthresh, tp->prior_ssthresh,
                        tp->packets_out);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 58585748bdac..eec4ff456e33 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -633,9 +633,9 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
         if (!inet->recverr) {
                 if (!harderr || sk->sk_state != TCP_ESTABLISHED)
                         goto out;
-        } else {
+        } else
                 ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1));
-        }
+
         sk->sk_err = err;
         sk->sk_error_report(sk);
 out:
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 6f517bd83692..9d2d68f0e605 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -363,7 +363,7 @@ ip6t_do_table(struct sk_buff *skb,
         cpu        = smp_processor_id();
         table_base = private->entries[cpu];
         jumpstack  = (struct ip6t_entry **)private->jumpstack[cpu];
-        stackptr   = &private->stackptr[cpu];
+        stackptr   = per_cpu_ptr(private->stackptr, cpu);
         origptr    = *stackptr;
 
         e = get_entry(table_base, private->hook_entry[hook]);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 294cbe8b0725..252d76199c41 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -814,7 +814,7 @@ struct dst_entry * ip6_route_output(struct net *net, struct sock *sk,
 {
         int flags = 0;
 
-        if (fl->oif || rt6_need_strict(&fl->fl6_dst))
+        if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl->fl6_dst))
                 flags |= RT6_LOOKUP_F_IFACE;
 
         if (!ipv6_addr_any(&fl->fl6_src))
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index 5d218c530a4e..32be11e4c4d9 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -5,7 +5,7 @@
 #include <linux/nl80211.h>
 #include "ieee80211_i.h"
 
-enum ieee80211_chan_mode
+static enum ieee80211_chan_mode
 __ieee80211_get_channel_mode(struct ieee80211_local *local,
                              struct ieee80211_sub_if_data *ignore)
 {
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 445de702b8b7..e34622fa0003 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -699,10 +699,8 @@ void xt_free_table_info(struct xt_table_info *info)
                 vfree(info->jumpstack);
         else
                 kfree(info->jumpstack);
-        if (sizeof(unsigned int) * nr_cpu_ids > PAGE_SIZE)
-                vfree(info->stackptr);
-        else
-                kfree(info->stackptr);
+
+        free_percpu(info->stackptr);
 
         kfree(info);
 }
@@ -753,14 +751,9 @@ static int xt_jumpstack_alloc(struct xt_table_info *i)
         unsigned int size;
         int cpu;
 
-        size = sizeof(unsigned int) * nr_cpu_ids;
-        if (size > PAGE_SIZE)
-                i->stackptr = vmalloc(size);
-        else
-                i->stackptr = kmalloc(size, GFP_KERNEL);
+        i->stackptr = alloc_percpu(unsigned int);
         if (i->stackptr == NULL)
                 return -ENOMEM;
-        memset(i->stackptr, 0, size);
 
         size = sizeof(void **) * nr_cpu_ids;
         if (size > PAGE_SIZE)
@@ -844,10 +837,6 @@ struct xt_table *xt_register_table(struct net *net,
         struct xt_table_info *private;
         struct xt_table *t, *table;
 
-        ret = xt_jumpstack_alloc(newinfo);
-        if (ret < 0)
-                return ERR_PTR(ret);
-
         /* Don't add one object to multiple lists. */
         table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
         if (!table) {
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index 7b048a35ca58..94d72e85a475 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -1045,12 +1045,12 @@ static void pep_sock_unhash(struct sock *sk)
         lock_sock(sk);
         if ((1 << sk->sk_state) & ~(TCPF_CLOSE|TCPF_LISTEN)) {
                 skparent = pn->listener;
-                sk_del_node_init(sk);
                 release_sock(sk);
 
-                sk = skparent;
                 pn = pep_sk(skparent);
-                lock_sock(sk);
+                lock_sock(skparent);
+                sk_del_node_init(sk);
+                sk = skparent;
         }
         /* Unhash a listening sock only when it is closed
          * and all of its active connected pipes are closed. */
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index 10ed0d55f759..f68832798db2 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -475,6 +475,7 @@ int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
         err = rds_ib_setup_qp(conn);
         if (err) {
                 rds_ib_conn_error(conn, "rds_ib_setup_qp failed (%d)\n", err);
+                mutex_unlock(&conn->c_cm_lock);
                 goto out;
         }
 
diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
index a9d951b4fbae..b5dd6ac39be8 100644
--- a/net/rds/iw_cm.c
+++ b/net/rds/iw_cm.c
@@ -452,6 +452,7 @@ int rds_iw_cm_handle_connect(struct rdma_cm_id *cm_id,
         err = rds_iw_setup_qp(conn);
         if (err) {
                 rds_iw_conn_error(conn, "rds_iw_setup_qp failed (%d)\n", err);
+                mutex_unlock(&conn->c_cm_lock);
                 goto out;
         }
 
