Diffstat (limited to 'net')
-rw-r--r--  net/8021q/vlan_dev.c               3
-rw-r--r--  net/ax25/ax25_ds_timer.c           2
-rw-r--r--  net/bridge/br_netfilter.c          2
-rw-r--r--  net/caif/cfpkt_skbuff.c            2
-rw-r--r--  net/caif/cfrfml.c                  2
-rw-r--r--  net/can/bcm.c                     41
-rw-r--r--  net/core/dev.c                     2
-rw-r--r--  net/dsa/Kconfig                    2
-rw-r--r--  net/ipv4/netfilter/arp_tables.c    5
-rw-r--r--  net/ipv4/netfilter/ip_tables.c     5
-rw-r--r--  net/ipv4/tcp.c                    32
-rw-r--r--  net/ipv4/tcp_cong.c                5
-rw-r--r--  net/ipv4/tcp_timer.c               8
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c    5
-rw-r--r--  net/ipv6/route.c                   4
-rw-r--r--  net/irda/af_irda.c                 4
-rw-r--r--  net/irda/irlan/irlan_eth.c         4
-rw-r--r--  net/l2tp/l2tp_eth.c                2
-rw-r--r--  net/netlink/af_netlink.c          78
-rw-r--r--  net/rds/recv.c                     2
-rw-r--r--  net/sched/act_gact.c              21
-rw-r--r--  net/sched/act_mirred.c            15
-rw-r--r--  net/sched/act_nat.c               22
-rw-r--r--  net/sched/act_simple.c            11
-rw-r--r--  net/sched/act_skbedit.c           11
-rw-r--r--  net/sched/sch_api.c               22
-rw-r--r--  net/sched/sch_atm.c                4
-rw-r--r--  net/sched/sch_sfq.c               14
-rw-r--r--  net/sched/sch_tbf.c                4
-rw-r--r--  net/sched/sch_teql.c               2
-rw-r--r--  net/xfrm/xfrm_user.c               2
31 files changed, 193 insertions, 145 deletions
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 3d59c9bf8feb..3bccdd12a264 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -510,7 +510,8 @@ static int vlan_dev_open(struct net_device *dev)
 	if (vlan->flags & VLAN_FLAG_GVRP)
 		vlan_gvrp_request_join(dev);
 
-	netif_carrier_on(dev);
+	if (netif_carrier_ok(real_dev))
+		netif_carrier_on(dev);
 	return 0;
 
 clear_allmulti:
diff --git a/net/ax25/ax25_ds_timer.c b/net/ax25/ax25_ds_timer.c
index 2ce79df00680..c7d81436213d 100644
--- a/net/ax25/ax25_ds_timer.c
+++ b/net/ax25/ax25_ds_timer.c
@@ -112,8 +112,8 @@ void ax25_ds_heartbeat_expiry(ax25_cb *ax25)
 	if (sk) {
 		sock_hold(sk);
 		ax25_destroy_socket(ax25);
-		sock_put(sk);
 		bh_unlock_sock(sk);
+		sock_put(sk);
 	} else
 		ax25_destroy_socket(ax25);
 	return;
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 2c911c0759c2..5ed00bd7009f 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -162,8 +162,8 @@ static inline struct nf_bridge_info *nf_bridge_unshare(struct sk_buff *skb)
 		if (tmp) {
 			memcpy(tmp, nf_bridge, sizeof(struct nf_bridge_info));
 			atomic_set(&tmp->use, 1);
-			nf_bridge_put(nf_bridge);
 		}
+		nf_bridge_put(nf_bridge);
 		nf_bridge = tmp;
 	}
 	return nf_bridge;
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
index 01f238ff2346..c49a6695793a 100644
--- a/net/caif/cfpkt_skbuff.c
+++ b/net/caif/cfpkt_skbuff.c
@@ -9,7 +9,7 @@
 #include <linux/hardirq.h>
 #include <net/caif/cfpkt.h>
 
-#define PKT_PREFIX  16
+#define PKT_PREFIX  48
 #define PKT_POSTFIX 2
 #define PKT_LEN_WHEN_EXTENDING 128
 #define PKT_ERROR(pkt, errmsg) do { \
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c
index eb1602022ac0..9a699242d104 100644
--- a/net/caif/cfrfml.c
+++ b/net/caif/cfrfml.c
@@ -7,7 +7,7 @@
 #include <linux/stddef.h>
 #include <linux/spinlock.h>
 #include <linux/slab.h>
-#include <linux/unaligned/le_byteshift.h>
+#include <asm/unaligned.h>
 #include <net/caif/caif_layer.h>
 #include <net/caif/cfsrvl.h>
 #include <net/caif/cfpkt.h>
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 9c65e9deb9c3..08ffe9e4be20 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -60,6 +60,13 @@
 #include <net/sock.h>
 #include <net/net_namespace.h>
 
+/*
+ * To send multiple CAN frame content within TX_SETUP or to filter
+ * CAN messages with multiplex index within RX_SETUP, the number of
+ * different filters is limited to 256 due to the one byte index value.
+ */
+#define MAX_NFRAMES 256
+
 /* use of last_frames[index].can_dlc */
 #define RX_RECV    0x40 /* received data for this element */
 #define RX_THR     0x80 /* element not been sent due to throttle feature */
@@ -89,16 +96,16 @@ struct bcm_op {
 	struct list_head list;
 	int ifindex;
 	canid_t can_id;
-	int flags;
+	u32 flags;
 	unsigned long frames_abs, frames_filtered;
 	struct timeval ival1, ival2;
 	struct hrtimer timer, thrtimer;
 	struct tasklet_struct tsklet, thrtsklet;
 	ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg;
 	int rx_ifindex;
-	int count;
-	int nframes;
-	int currframe;
+	u32 count;
+	u32 nframes;
+	u32 currframe;
 	struct can_frame *frames;
 	struct can_frame *last_frames;
 	struct can_frame sframe;
@@ -175,7 +182,7 @@ static int bcm_proc_show(struct seq_file *m, void *v)
 
 		seq_printf(m, "rx_op: %03X %-5s ",
 				op->can_id, bcm_proc_getifname(ifname, op->ifindex));
-		seq_printf(m, "[%d]%c ", op->nframes,
+		seq_printf(m, "[%u]%c ", op->nframes,
 				(op->flags & RX_CHECK_DLC)?'d':' ');
 		if (op->kt_ival1.tv64)
 			seq_printf(m, "timeo=%lld ",
@@ -198,7 +205,7 @@ static int bcm_proc_show(struct seq_file *m, void *v)
 
 	list_for_each_entry(op, &bo->tx_ops, list) {
 
-		seq_printf(m, "tx_op: %03X %s [%d] ",
+		seq_printf(m, "tx_op: %03X %s [%u] ",
 				op->can_id,
 				bcm_proc_getifname(ifname, op->ifindex),
 				op->nframes);
@@ -283,7 +290,7 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
 	struct can_frame *firstframe;
 	struct sockaddr_can *addr;
 	struct sock *sk = op->sk;
-	int datalen = head->nframes * CFSIZ;
+	unsigned int datalen = head->nframes * CFSIZ;
 	int err;
 
 	skb = alloc_skb(sizeof(*head) + datalen, gfp_any());
@@ -468,7 +475,7 @@ rx_changed_settime:
  * bcm_rx_cmp_to_index - (bit)compares the currently received data to formerly
  *                       received data stored in op->last_frames[]
  */
-static void bcm_rx_cmp_to_index(struct bcm_op *op, int index,
+static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
 				const struct can_frame *rxdata)
 {
 	/*
@@ -554,7 +561,8 @@ static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
 /*
  * bcm_rx_do_flush - helper for bcm_rx_thr_flush
  */
-static inline int bcm_rx_do_flush(struct bcm_op *op, int update, int index)
+static inline int bcm_rx_do_flush(struct bcm_op *op, int update,
+				  unsigned int index)
 {
 	if ((op->last_frames) && (op->last_frames[index].can_dlc & RX_THR)) {
 		if (update)
@@ -575,7 +583,7 @@ static int bcm_rx_thr_flush(struct bcm_op *op, int update)
 	int updated = 0;
 
 	if (op->nframes > 1) {
-		int i;
+		unsigned int i;
 
 		/* for MUX filter we start at index 1 */
 		for (i = 1; i < op->nframes; i++)
@@ -624,7 +632,7 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data)
 {
 	struct bcm_op *op = (struct bcm_op *)data;
 	const struct can_frame *rxframe = (struct can_frame *)skb->data;
-	int i;
+	unsigned int i;
 
 	/* disable timeout */
 	hrtimer_cancel(&op->timer);
@@ -822,14 +830,15 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
 {
 	struct bcm_sock *bo = bcm_sk(sk);
 	struct bcm_op *op;
-	int i, err;
+	unsigned int i;
+	int err;
 
 	/* we need a real device to send frames */
 	if (!ifindex)
 		return -ENODEV;
 
-	/* we need at least one can_frame */
-	if (msg_head->nframes < 1)
+	/* check nframes boundaries - we need at least one can_frame */
+	if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES)
 		return -EINVAL;
 
 	/* check the given can_id */
@@ -993,6 +1002,10 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
 		msg_head->nframes = 0;
 	}
 
+	/* the first element contains the mux-mask => MAX_NFRAMES + 1 */
+	if (msg_head->nframes > MAX_NFRAMES + 1)
+		return -EINVAL;
+
 	if ((msg_head->flags & RX_RTR_FRAME) &&
 	    ((msg_head->nframes != 1) ||
 	     (!(msg_head->can_id & CAN_RTR_FLAG))))
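Note on the bcm.c hunks above: nframes is user-supplied and is multiplied by CFSIZ to size kernel buffers, so an unchecked value can wrap the size calculation. The standalone userspace sketch below (not kernel code; it assumes CFSIZ is 16, i.e. sizeof(struct can_frame) on this kernel) only illustrates the arithmetic that the new MAX_NFRAMES check rules out.

/* sketch: why bcm.c now bounds nframes before allocating */
#include <stdint.h>
#include <stdio.h>

#define CFSIZ       16u   /* assumed sizeof(struct can_frame) */
#define MAX_NFRAMES 256u

int main(void)
{
	uint32_t nframes = 0x10000001u;      /* hostile, user-supplied value */
	uint32_t datalen = nframes * CFSIZ;  /* wraps to 16 in 32-bit math */

	printf("unchecked: nframes=%u -> datalen=%u bytes\n",
	       (unsigned)nframes, (unsigned)datalen);

	if (nframes < 1 || nframes > MAX_NFRAMES)
		printf("bounded:   nframes=%u rejected with -EINVAL\n",
		       (unsigned)nframes);
	return 0;
}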
diff --git a/net/core/dev.c b/net/core/dev.c
index 1ae654391442..3721fbb9a83c 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3143,7 +3143,7 @@ pull:
 			put_page(skb_shinfo(skb)->frags[0].page);
 			memmove(skb_shinfo(skb)->frags,
 				skb_shinfo(skb)->frags + 1,
-				--skb_shinfo(skb)->nr_frags);
+				--skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
 		}
 	}
 
diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig
index 11201784d29a..87bb5f4de0e8 100644
--- a/net/dsa/Kconfig
+++ b/net/dsa/Kconfig
@@ -1,7 +1,7 @@
 menuconfig NET_DSA
 	bool "Distributed Switch Architecture support"
 	default n
-	depends on EXPERIMENTAL && NET_ETHERNET && !S390
+	depends on EXPERIMENTAL && NETDEVICES && !S390
 	select PHYLIB
 	---help---
 	  This allows you to use hardware switch chips that use
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 6bccba31d132..e8f4f9a57f12 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -735,6 +735,7 @@ static void get_counters(const struct xt_table_info *t,
 		if (cpu == curcpu)
 			continue;
 		i = 0;
+		local_bh_disable();
 		xt_info_wrlock(cpu);
 		xt_entry_foreach(iter, t->entries[cpu], t->size) {
 			ADD_COUNTER(counters[i], iter->counters.bcnt,
@@ -742,6 +743,7 @@ static void get_counters(const struct xt_table_info *t,
 			++i;
 		}
 		xt_info_wrunlock(cpu);
+		local_bh_enable();
 	}
 	put_cpu();
 }
@@ -1418,6 +1420,9 @@ static int translate_compat_table(const char *name,
 		if (ret != 0)
 			break;
 		++i;
+		if (strcmp(arpt_get_target(iter1)->u.user.name,
+		    XT_ERROR_TARGET) == 0)
+			++newinfo->stacksize;
 	}
 	if (ret) {
 		/*
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index c439721b165a..d163f2e3b2e9 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -909,6 +909,7 @@ get_counters(const struct xt_table_info *t,
 		if (cpu == curcpu)
 			continue;
 		i = 0;
+		local_bh_disable();
 		xt_info_wrlock(cpu);
 		xt_entry_foreach(iter, t->entries[cpu], t->size) {
 			ADD_COUNTER(counters[i], iter->counters.bcnt,
@@ -916,6 +917,7 @@ get_counters(const struct xt_table_info *t,
 			++i; /* macro does multi eval of i */
 		}
 		xt_info_wrunlock(cpu);
+		local_bh_enable();
 	}
 	put_cpu();
 }
@@ -1749,6 +1751,9 @@ translate_compat_table(struct net *net,
 		if (ret != 0)
 			break;
 		++i;
+		if (strcmp(ipt_get_target(iter1)->u.user.name,
+		    XT_ERROR_TARGET) == 0)
+			++newinfo->stacksize;
 	}
 	if (ret) {
 		/*
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 176e11aaea77..3fb1428e526e 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -451,7 +451,8 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 				if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
 					mask |= POLLOUT | POLLWRNORM;
 			}
-		}
+		} else
+			mask |= POLLOUT | POLLWRNORM;
 
 		if (tp->urg_data & TCP_URG_VALID)
 			mask |= POLLPRI;
@@ -2011,11 +2012,8 @@ adjudge_to_death:
 		}
 	}
 	if (sk->sk_state != TCP_CLOSE) {
-		int orphan_count = percpu_counter_read_positive(
-			sk->sk_prot->orphan_count);
-
 		sk_mem_reclaim(sk);
-		if (tcp_too_many_orphans(sk, orphan_count)) {
+		if (tcp_too_many_orphans(sk, 0)) {
 			if (net_ratelimit())
 				printk(KERN_INFO "TCP: too many of orphaned "
 				       "sockets\n");
@@ -3212,7 +3210,7 @@ void __init tcp_init(void)
 {
 	struct sk_buff *skb = NULL;
 	unsigned long nr_pages, limit;
-	int order, i, max_share;
+	int i, max_share, cnt;
 	unsigned long jiffy = jiffies;
 
 	BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb));
@@ -3261,22 +3259,12 @@ void __init tcp_init(void)
 		INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
 	}
 
-	/* Try to be a bit smarter and adjust defaults depending
-	 * on available memory.
-	 */
-	for (order = 0; ((1 << order) << PAGE_SHIFT) <
-			(tcp_hashinfo.bhash_size * sizeof(struct inet_bind_hashbucket));
-			order++)
-		;
-	if (order >= 4) {
-		tcp_death_row.sysctl_max_tw_buckets = 180000;
-		sysctl_tcp_max_orphans = 4096 << (order - 4);
-		sysctl_max_syn_backlog = 1024;
-	} else if (order < 3) {
-		tcp_death_row.sysctl_max_tw_buckets >>= (3 - order);
-		sysctl_tcp_max_orphans >>= (3 - order);
-		sysctl_max_syn_backlog = 128;
-	}
+
+	cnt = tcp_hashinfo.ehash_mask + 1;
+
+	tcp_death_row.sysctl_max_tw_buckets = cnt / 2;
+	sysctl_tcp_max_orphans = cnt / 2;
+	sysctl_max_syn_backlog = max(128, cnt / 256);
 
 	/* Set the pressure threshold to be a fraction of global memory that
 	 * is up to 1/2 at 256 MB, decreasing toward zero with the amount of
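The tcp.c hunk above replaces the old page-order heuristic with limits derived directly from the established-hash size. The standalone sketch below (not kernel code) just reproduces that arithmetic for a hypothetical ehash_mask value, so the resulting defaults are easy to see.

/* sketch: new tcp_init() sizing rule, for an assumed 64K-bucket ehash */
#include <stdio.h>

#define max(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	unsigned int ehash_mask = 65535;   /* hypothetical; kernel derives it from RAM */
	unsigned int cnt = ehash_mask + 1;

	printf("max_tw_buckets  = %u\n", cnt / 2);              /* 32768 */
	printf("max_orphans     = %u\n", cnt / 2);              /* 32768 */
	printf("max_syn_backlog = %u\n", max(128u, cnt / 256)); /* 256 */
	return 0;
}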
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 0ec9bd0ae94f..850c737e08e2 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -196,10 +196,10 @@ void tcp_get_allowed_congestion_control(char *buf, size_t maxlen)
 int tcp_set_allowed_congestion_control(char *val)
 {
 	struct tcp_congestion_ops *ca;
-	char *clone, *name;
+	char *saved_clone, *clone, *name;
 	int ret = 0;
 
-	clone = kstrdup(val, GFP_USER);
+	saved_clone = clone = kstrdup(val, GFP_USER);
 	if (!clone)
 		return -ENOMEM;
 
@@ -226,6 +226,7 @@ int tcp_set_allowed_congestion_control(char *val)
 	}
 out:
 	spin_unlock(&tcp_cong_list_lock);
+	kfree(saved_clone);
 
 	return ret;
 }
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 808bb920c9f5..c35b469e851c 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -66,18 +66,18 @@ static void tcp_write_err(struct sock *sk)
 static int tcp_out_of_resources(struct sock *sk, int do_reset)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	int orphans = percpu_counter_read_positive(&tcp_orphan_count);
+	int shift = 0;
 
 	/* If peer does not open window for long time, or did not transmit
 	 * anything for long time, penalize it. */
 	if ((s32)(tcp_time_stamp - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
-		orphans <<= 1;
+		shift++;
 
 	/* If some dubious ICMP arrived, penalize even more. */
 	if (sk->sk_err_soft)
-		orphans <<= 1;
+		shift++;
 
-	if (tcp_too_many_orphans(sk, orphans)) {
+	if (tcp_too_many_orphans(sk, shift)) {
 		if (net_ratelimit())
 			printk(KERN_INFO "Out of socket memory\n");
 
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 5359ef4daac5..8e754be92c24 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -922,6 +922,7 @@ get_counters(const struct xt_table_info *t,
 		if (cpu == curcpu)
 			continue;
 		i = 0;
+		local_bh_disable();
 		xt_info_wrlock(cpu);
 		xt_entry_foreach(iter, t->entries[cpu], t->size) {
 			ADD_COUNTER(counters[i], iter->counters.bcnt,
@@ -929,6 +930,7 @@ get_counters(const struct xt_table_info *t,
 			++i;
 		}
 		xt_info_wrunlock(cpu);
+		local_bh_enable();
 	}
 	put_cpu();
 }
@@ -1764,6 +1766,9 @@ translate_compat_table(struct net *net,
 		if (ret != 0)
 			break;
 		++i;
+		if (strcmp(ip6t_get_target(iter1)->u.user.name,
+		    XT_ERROR_TARGET) == 0)
+			++newinfo->stacksize;
 	}
 	if (ret) {
 		/*
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 8f2d0400cf8a..d126365ac046 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2580,7 +2580,7 @@ ctl_table ipv6_route_table_template[] = {
 		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
 		.maxlen		=	sizeof(int),
 		.mode		=	0644,
-		.proc_handler	=	proc_dointvec_jiffies,
+		.proc_handler	=	proc_dointvec,
 	},
 	{
 		.procname	=	"mtu_expires",
@@ -2594,7 +2594,7 @@ ctl_table ipv6_route_table_template[] = {
 		.data		=	&init_net.ipv6.sysctl.ip6_rt_min_advmss,
 		.maxlen		=	sizeof(int),
 		.mode		=	0644,
-		.proc_handler	=	proc_dointvec_jiffies,
+		.proc_handler	=	proc_dointvec,
 	},
 	{
 		.procname	=	"gc_min_interval_ms",
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index 79986a674f6e..fd55b5135de5 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -824,8 +824,8 @@ static int irda_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 
 	err = irda_open_tsap(self, addr->sir_lsap_sel, addr->sir_name);
 	if (err < 0) {
-		kfree(self->ias_obj->name);
-		kfree(self->ias_obj);
+		irias_delete_object(self->ias_obj);
+		self->ias_obj = NULL;
 		goto out;
 	}
 
diff --git a/net/irda/irlan/irlan_eth.c b/net/irda/irlan/irlan_eth.c
index 9616c32d1076..5bb8353105cc 100644
--- a/net/irda/irlan/irlan_eth.c
+++ b/net/irda/irlan/irlan_eth.c
@@ -169,6 +169,7 @@ static netdev_tx_t irlan_eth_xmit(struct sk_buff *skb,
 {
 	struct irlan_cb *self = netdev_priv(dev);
 	int ret;
+	unsigned int len;
 
 	/* skb headroom large enough to contain all IrDA-headers? */
 	if ((skb_headroom(skb) < self->max_header_size) || (skb_shared(skb))) {
@@ -188,6 +189,7 @@ static netdev_tx_t irlan_eth_xmit(struct sk_buff *skb,
 
 	dev->trans_start = jiffies;
 
+	len = skb->len;
 	/* Now queue the packet in the transport layer */
 	if (self->use_udata)
 		ret = irttp_udata_request(self->tsap_data, skb);
@@ -209,7 +211,7 @@ static netdev_tx_t irlan_eth_xmit(struct sk_buff *skb,
 		self->stats.tx_dropped++;
 	} else {
 		self->stats.tx_packets++;
-		self->stats.tx_bytes += skb->len;
+		self->stats.tx_bytes += len;
 	}
 
 	return NETDEV_TX_OK;
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index 58c6c4cda73b..1ae697681bc7 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -132,7 +132,7 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
 		printk("\n");
 	}
 
-	if (data_len < ETH_HLEN)
+	if (!pskb_may_pull(skb, sizeof(ETH_HLEN)))
 		goto error;
 
 	secpath_reset(skb);
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 2cbf380377d5..cd96ed3ccee4 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1406,7 +1406,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
 	struct netlink_sock *nlk = nlk_sk(sk);
 	int noblock = flags&MSG_DONTWAIT;
 	size_t copied;
-	struct sk_buff *skb;
+	struct sk_buff *skb, *data_skb;
 	int err;
 
 	if (flags&MSG_OOB)
@@ -1418,59 +1418,35 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
 	if (skb == NULL)
 		goto out;
 
+	data_skb = skb;
+
 #ifdef CONFIG_COMPAT_NETLINK_MESSAGES
 	if (unlikely(skb_shinfo(skb)->frag_list)) {
-		bool need_compat = !!(flags & MSG_CMSG_COMPAT);
-
 		/*
-		 * If this skb has a frag_list, then here that means that
-		 * we will have to use the frag_list skb for compat tasks
-		 * and the regular skb for non-compat tasks.
+		 * If this skb has a frag_list, then here that means that we
+		 * will have to use the frag_list skb's data for compat tasks
+		 * and the regular skb's data for normal (non-compat) tasks.
 		 *
-		 * The skb might (and likely will) be cloned, so we can't
-		 * just reset frag_list and go on with things -- we need to
-		 * keep that. For the compat case that's easy -- simply get
-		 * a reference to the compat skb and free the regular one
-		 * including the frag. For the non-compat case, we need to
-		 * avoid sending the frag to the user -- so assign NULL but
-		 * restore it below before freeing the skb.
+		 * If we need to send the compat skb, assign it to the
+		 * 'data_skb' variable so that it will be used below for data
+		 * copying. We keep 'skb' for everything else, including
+		 * freeing both later.
 		 */
-		if (need_compat) {
-			struct sk_buff *compskb = skb_shinfo(skb)->frag_list;
-			skb_get(compskb);
-			kfree_skb(skb);
-			skb = compskb;
-		} else {
-			/*
-			 * Before setting frag_list to NULL, we must get a
-			 * private copy of skb if shared (because of MSG_PEEK)
-			 */
-			if (skb_shared(skb)) {
-				struct sk_buff *nskb;
-
-				nskb = pskb_copy(skb, GFP_KERNEL);
-				kfree_skb(skb);
-				skb = nskb;
-				err = -ENOMEM;
-				if (!skb)
-					goto out;
-			}
-			kfree_skb(skb_shinfo(skb)->frag_list);
-			skb_shinfo(skb)->frag_list = NULL;
-		}
+		if (flags & MSG_CMSG_COMPAT)
+			data_skb = skb_shinfo(skb)->frag_list;
 	}
 #endif
 
 	msg->msg_namelen = 0;
 
-	copied = skb->len;
+	copied = data_skb->len;
 	if (len < copied) {
 		msg->msg_flags |= MSG_TRUNC;
 		copied = len;
 	}
 
-	skb_reset_transport_header(skb);
-	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+	skb_reset_transport_header(data_skb);
+	err = skb_copy_datagram_iovec(data_skb, 0, msg->msg_iov, copied);
 
 	if (msg->msg_name) {
 		struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name;
@@ -1490,7 +1466,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
 	}
 	siocb->scm->creds = *NETLINK_CREDS(skb);
 	if (flags & MSG_TRUNC)
-		copied = skb->len;
+		copied = data_skb->len;
 
 	skb_free_datagram(sk, skb);
 
@@ -2126,6 +2102,26 @@ static void __net_exit netlink_net_exit(struct net *net)
 #endif
 }
 
+static void __init netlink_add_usersock_entry(void)
+{
+	unsigned long *listeners;
+	int groups = 32;
+
+	listeners = kzalloc(NLGRPSZ(groups) + sizeof(struct listeners_rcu_head),
+			    GFP_KERNEL);
+	if (!listeners)
+		panic("netlink_add_usersock_entry: Cannot allocate listneres\n");
+
+	netlink_table_grab();
+
+	nl_table[NETLINK_USERSOCK].groups = groups;
+	nl_table[NETLINK_USERSOCK].listeners = listeners;
+	nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
+	nl_table[NETLINK_USERSOCK].registered = 1;
+
+	netlink_table_ungrab();
+}
+
 static struct pernet_operations __net_initdata netlink_net_ops = {
 	.init = netlink_net_init,
 	.exit = netlink_net_exit,
@@ -2174,6 +2170,8 @@ static int __init netlink_proto_init(void)
 		hash->rehash_time = jiffies;
 	}
 
+	netlink_add_usersock_entry();
+
 	sock_register(&netlink_family_ops);
 	register_pernet_subsys(&netlink_net_ops);
 	/* The netlink device handler may be needed early. */
diff --git a/net/rds/recv.c b/net/rds/recv.c
index 795a00b7f2cb..c93588c2d553 100644
--- a/net/rds/recv.c
+++ b/net/rds/recv.c
@@ -297,7 +297,7 @@ static int rds_still_queued(struct rds_sock *rs, struct rds_incoming *inc,
 int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msghdr)
 {
 	struct rds_notifier *notifier;
-	struct rds_rdma_notify cmsg;
+	struct rds_rdma_notify cmsg = { 0 }; /* fill holes with zero */
 	unsigned int count = 0, max_messages = ~0U;
 	unsigned long flags;
 	LIST_HEAD(copy);
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index 8406c6654990..c2ed90a4c0b4 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -152,21 +152,24 @@ static int tcf_gact(struct sk_buff *skb, struct tc_action *a, struct tcf_result
 static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
 {
 	unsigned char *b = skb_tail_pointer(skb);
-	struct tc_gact opt;
 	struct tcf_gact *gact = a->priv;
+	struct tc_gact opt = {
+		.index = gact->tcf_index,
+		.refcnt = gact->tcf_refcnt - ref,
+		.bindcnt = gact->tcf_bindcnt - bind,
+		.action = gact->tcf_action,
+	};
 	struct tcf_t t;
 
-	opt.index = gact->tcf_index;
-	opt.refcnt = gact->tcf_refcnt - ref;
-	opt.bindcnt = gact->tcf_bindcnt - bind;
-	opt.action = gact->tcf_action;
 	NLA_PUT(skb, TCA_GACT_PARMS, sizeof(opt), &opt);
 #ifdef CONFIG_GACT_PROB
 	if (gact->tcfg_ptype) {
-		struct tc_gact_p p_opt;
-		p_opt.paction = gact->tcfg_paction;
-		p_opt.pval = gact->tcfg_pval;
-		p_opt.ptype = gact->tcfg_ptype;
+		struct tc_gact_p p_opt = {
+			.paction = gact->tcfg_paction,
+			.pval = gact->tcfg_pval,
+			.ptype = gact->tcfg_ptype,
+		};
+
 		NLA_PUT(skb, TCA_GACT_PROB, sizeof(p_opt), &p_opt);
 	}
 #endif
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 11f195af2da0..0c311be92827 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -219,15 +219,16 @@ static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind, i
 {
 	unsigned char *b = skb_tail_pointer(skb);
 	struct tcf_mirred *m = a->priv;
-	struct tc_mirred opt;
+	struct tc_mirred opt = {
+		.index = m->tcf_index,
+		.action = m->tcf_action,
+		.refcnt = m->tcf_refcnt - ref,
+		.bindcnt = m->tcf_bindcnt - bind,
+		.eaction = m->tcfm_eaction,
+		.ifindex = m->tcfm_ifindex,
+	};
 	struct tcf_t t;
 
-	opt.index = m->tcf_index;
-	opt.action = m->tcf_action;
-	opt.refcnt = m->tcf_refcnt - ref;
-	opt.bindcnt = m->tcf_bindcnt - bind;
-	opt.eaction = m->tcfm_eaction;
-	opt.ifindex = m->tcfm_ifindex;
 	NLA_PUT(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt);
 	t.install = jiffies_to_clock_t(jiffies - m->tcf_tm.install);
 	t.lastuse = jiffies_to_clock_t(jiffies - m->tcf_tm.lastuse);
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 509a2d53a99d..186eb837e600 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -272,19 +272,19 @@ static int tcf_nat_dump(struct sk_buff *skb, struct tc_action *a,
 {
 	unsigned char *b = skb_tail_pointer(skb);
 	struct tcf_nat *p = a->priv;
-	struct tc_nat opt;
+	struct tc_nat opt = {
+		.old_addr = p->old_addr,
+		.new_addr = p->new_addr,
+		.mask = p->mask,
+		.flags = p->flags,
+
+		.index = p->tcf_index,
+		.action = p->tcf_action,
+		.refcnt = p->tcf_refcnt - ref,
+		.bindcnt = p->tcf_bindcnt - bind,
+	};
 	struct tcf_t t;
 
-	opt.old_addr = p->old_addr;
-	opt.new_addr = p->new_addr;
-	opt.mask = p->mask;
-	opt.flags = p->flags;
-
-	opt.index = p->tcf_index;
-	opt.action = p->tcf_action;
-	opt.refcnt = p->tcf_refcnt - ref;
-	opt.bindcnt = p->tcf_bindcnt - bind;
-
 	NLA_PUT(skb, TCA_NAT_PARMS, sizeof(opt), &opt);
 	t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
 	t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 4a1d640b0cf1..97e84f3ee775 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -164,13 +164,14 @@ static inline int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a,
 {
 	unsigned char *b = skb_tail_pointer(skb);
 	struct tcf_defact *d = a->priv;
-	struct tc_defact opt;
+	struct tc_defact opt = {
+		.index = d->tcf_index,
+		.refcnt = d->tcf_refcnt - ref,
+		.bindcnt = d->tcf_bindcnt - bind,
+		.action = d->tcf_action,
+	};
 	struct tcf_t t;
 
-	opt.index = d->tcf_index;
-	opt.refcnt = d->tcf_refcnt - ref;
-	opt.bindcnt = d->tcf_bindcnt - bind;
-	opt.action = d->tcf_action;
 	NLA_PUT(skb, TCA_DEF_PARMS, sizeof(opt), &opt);
 	NLA_PUT_STRING(skb, TCA_DEF_DATA, d->tcfd_defdata);
 	t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install);
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index e9607fe55b58..66cbf4eb8855 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -159,13 +159,14 @@ static inline int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
 {
 	unsigned char *b = skb_tail_pointer(skb);
 	struct tcf_skbedit *d = a->priv;
-	struct tc_skbedit opt;
+	struct tc_skbedit opt = {
+		.index = d->tcf_index,
+		.refcnt = d->tcf_refcnt - ref,
+		.bindcnt = d->tcf_bindcnt - bind,
+		.action = d->tcf_action,
+	};
 	struct tcf_t t;
 
-	opt.index = d->tcf_index;
-	opt.refcnt = d->tcf_refcnt - ref;
-	opt.bindcnt = d->tcf_bindcnt - bind;
-	opt.action = d->tcf_action;
 	NLA_PUT(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt);
 	if (d->flags & SKBEDIT_F_PRIORITY)
 		NLA_PUT(skb, TCA_SKBEDIT_PRIORITY, sizeof(d->priority),
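The five act_* hunks above share one pattern: the on-stack option struct is now built with a designated initializer before it is copied to userspace via NLA_PUT, so every member that is not explicitly listed is zero-initialized instead of carrying stale stack bytes. The standalone userspace sketch below (not kernel code; struct demo_opt is hypothetical, not a real TC structure) demonstrates that guarantee.

/* sketch: designated initializers zero the members you do not name */
#include <stdio.h>

struct demo_opt {
	unsigned int index;
	int refcnt;
	int bindcnt;
	int action;
	int spare;      /* a member the old field-by-field code never set */
};

int main(void)
{
	struct demo_opt opt = {
		.index  = 1,
		.refcnt = 2,
	};

	/* .bindcnt, .action and .spare are guaranteed to read as 0 */
	printf("bindcnt=%d action=%d spare=%d\n",
	       opt.bindcnt, opt.action, opt.spare);
	return 0;
}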
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index b9e8c3b7d406..408eea7086aa 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -150,22 +150,34 @@ int register_qdisc(struct Qdisc_ops *qops)
 	if (qops->enqueue == NULL)
 		qops->enqueue = noop_qdisc_ops.enqueue;
 	if (qops->peek == NULL) {
-		if (qops->dequeue == NULL) {
+		if (qops->dequeue == NULL)
 			qops->peek = noop_qdisc_ops.peek;
-		} else {
-			rc = -EINVAL;
-			goto out;
-		}
+		else
+			goto out_einval;
 	}
 	if (qops->dequeue == NULL)
 		qops->dequeue = noop_qdisc_ops.dequeue;
 
+	if (qops->cl_ops) {
+		const struct Qdisc_class_ops *cops = qops->cl_ops;
+
+		if (!(cops->get && cops->put && cops->walk && cops->leaf))
+			goto out_einval;
+
+		if (cops->tcf_chain && !(cops->bind_tcf && cops->unbind_tcf))
+			goto out_einval;
+	}
+
 	qops->next = NULL;
 	*qp = qops;
 	rc = 0;
 out:
 	write_unlock(&qdisc_mod_lock);
 	return rc;
+
+out_einval:
+	rc = -EINVAL;
+	goto out;
 }
 EXPORT_SYMBOL(register_qdisc);
 
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index e114f23d5eae..340662789529 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -418,7 +418,7 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	}
 
 	ret = qdisc_enqueue(skb, flow->q);
-	if (ret != 0) {
+	if (ret != NET_XMIT_SUCCESS) {
 drop: __maybe_unused
 		if (net_xmit_drop_count(ret)) {
 			sch->qstats.drops++;
@@ -442,7 +442,7 @@ drop: __maybe_unused
 	 */
 	if (flow == &p->link) {
 		sch->q.qlen++;
-		return 0;
+		return NET_XMIT_SUCCESS;
 	}
 	tasklet_schedule(&p->task);
 	return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 534f33231c17..201cbac2b32c 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -334,7 +334,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	if (++sch->q.qlen <= q->limit) {
 		sch->bstats.bytes += qdisc_pkt_len(skb);
 		sch->bstats.packets++;
-		return 0;
+		return NET_XMIT_SUCCESS;
 	}
 
 	sfq_drop(sch);
@@ -508,6 +508,11 @@ nla_put_failure:
 	return -1;
 }
 
+static struct Qdisc *sfq_leaf(struct Qdisc *sch, unsigned long arg)
+{
+	return NULL;
+}
+
 static unsigned long sfq_get(struct Qdisc *sch, u32 classid)
 {
 	return 0;
@@ -519,6 +524,10 @@ static unsigned long sfq_bind(struct Qdisc *sch, unsigned long parent,
 	return 0;
 }
 
+static void sfq_put(struct Qdisc *q, unsigned long cl)
+{
+}
+
 static struct tcf_proto **sfq_find_tcf(struct Qdisc *sch, unsigned long cl)
 {
 	struct sfq_sched_data *q = qdisc_priv(sch);
@@ -571,9 +580,12 @@ static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
 }
 
 static const struct Qdisc_class_ops sfq_class_ops = {
+	.leaf		=	sfq_leaf,
 	.get		=	sfq_get,
+	.put		=	sfq_put,
 	.tcf_chain	=	sfq_find_tcf,
 	.bind_tcf	=	sfq_bind,
+	.unbind_tcf	=	sfq_put,
 	.dump		=	sfq_dump_class,
 	.dump_stats	=	sfq_dump_class_stats,
 	.walk		=	sfq_walk,
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 0991c640cd3e..641a30d64635 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -127,7 +127,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 		return qdisc_reshape_fail(skb, sch);
 
 	ret = qdisc_enqueue(skb, q->qdisc);
-	if (ret != 0) {
+	if (ret != NET_XMIT_SUCCESS) {
 		if (net_xmit_drop_count(ret))
 			sch->qstats.drops++;
 		return ret;
@@ -136,7 +136,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 	sch->q.qlen++;
 	sch->bstats.bytes += qdisc_pkt_len(skb);
 	sch->bstats.packets++;
-	return 0;
+	return NET_XMIT_SUCCESS;
 }
 
 static unsigned int tbf_drop(struct Qdisc* sch)
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 807643bdcbac..feaabc103ce6 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -85,7 +85,7 @@ teql_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 		__skb_queue_tail(&q->q, skb);
 		sch->bstats.bytes += qdisc_pkt_len(skb);
 		sch->bstats.packets++;
-		return 0;
+		return NET_XMIT_SUCCESS;
 	}
 
 	kfree_skb(skb);
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index ba59983aaffe..b14ed4b1f27c 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -2504,7 +2504,7 @@ static struct xfrm_policy *xfrm_compile_policy(struct sock *sk, int opt,
 	if (p->dir > XFRM_POLICY_OUT)
 		return NULL;
 
-	xp = xfrm_policy_alloc(net, GFP_KERNEL);
+	xp = xfrm_policy_alloc(net, GFP_ATOMIC);
 	if (xp == NULL) {
 		*dir = -ENOBUFS;
 		return NULL;