Diffstat (limited to 'net')
-rw-r--r-- | net/802/tr.c | 2
-rw-r--r-- | net/8021q/vlan_core.c | 10
-rw-r--r-- | net/8021q/vlan_dev.c | 3
-rw-r--r-- | net/core/dev.c | 67
-rw-r--r-- | net/core/net-sysfs.c | 4
-rw-r--r-- | net/core/net_namespace.c | 89
-rw-r--r-- | net/core/skbuff.c | 8
-rw-r--r-- | net/core/sock.c | 3
-rw-r--r-- | net/ipv4/cipso_ipv4.c | 9
-rw-r--r-- | net/ipv4/icmp.c | 2
-rw-r--r-- | net/ipv4/tcp_input.c | 9
-rw-r--r-- | net/ipv4/tcp_ipv4.c | 2
-rw-r--r-- | net/ipv4/tcp_output.c | 1
-rw-r--r-- | net/ipv4/tcp_scalable.c | 2
-rw-r--r-- | net/ipv6/addrconf.c | 53
-rw-r--r-- | net/ipv6/af_inet6.c | 21
-rw-r--r-- | net/ipv6/inet6_hashtables.c | 4
-rw-r--r-- | net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c | 5
-rw-r--r-- | net/netfilter/nfnetlink_log.c | 8
-rw-r--r-- | net/netfilter/x_tables.c | 199
-rw-r--r-- | net/netfilter/xt_recent.c | 2
-rw-r--r-- | net/netlink/af_netlink.c | 10
-rw-r--r-- | net/sched/act_police.c | 13
-rw-r--r-- | net/sched/sch_drr.c | 6
-rw-r--r-- | net/sctp/protocol.c | 16
-rw-r--r-- | net/sctp/sm_sideeffect.c | 54
-rw-r--r-- | net/sctp/sm_statefuns.c | 16
-rw-r--r-- | net/wireless/reg.c | 3
28 files changed, 380 insertions, 241 deletions
diff --git a/net/802/tr.c b/net/802/tr.c
index 158150fee462..f47ae289d83b 100644
--- a/net/802/tr.c
+++ b/net/802/tr.c
@@ -668,3 +668,5 @@ module_init(rif_init); | |||
668 | 668 | ||
669 | EXPORT_SYMBOL(tr_type_trans); | 669 | EXPORT_SYMBOL(tr_type_trans); |
670 | EXPORT_SYMBOL(alloc_trdev); | 670 | EXPORT_SYMBOL(alloc_trdev); |
671 | |||
672 | MODULE_LICENSE("GPL"); | ||
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index e9db889d6222..2886d2fb9ab5 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -1,12 +1,16 @@ | |||
1 | #include <linux/skbuff.h> | 1 | #include <linux/skbuff.h> |
2 | #include <linux/netdevice.h> | 2 | #include <linux/netdevice.h> |
3 | #include <linux/if_vlan.h> | 3 | #include <linux/if_vlan.h> |
4 | #include <linux/netpoll.h> | ||
4 | #include "vlan.h" | 5 | #include "vlan.h" |
5 | 6 | ||
6 | /* VLAN rx hw acceleration helper. This acts like netif_{rx,receive_skb}(). */ | 7 | /* VLAN rx hw acceleration helper. This acts like netif_{rx,receive_skb}(). */ |
7 | int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp, | 8 | int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp, |
8 | u16 vlan_tci, int polling) | 9 | u16 vlan_tci, int polling) |
9 | { | 10 | { |
11 | if (netpoll_rx(skb)) | ||
12 | return NET_RX_DROP; | ||
13 | |||
10 | if (skb_bond_should_drop(skb)) | 14 | if (skb_bond_should_drop(skb)) |
11 | goto drop; | 15 | goto drop; |
12 | 16 | ||
@@ -100,6 +104,9 @@ int vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp, | |||
100 | { | 104 | { |
101 | int err = NET_RX_SUCCESS; | 105 | int err = NET_RX_SUCCESS; |
102 | 106 | ||
107 | if (netpoll_receive_skb(skb)) | ||
108 | return NET_RX_DROP; | ||
109 | |||
103 | switch (vlan_gro_common(napi, grp, vlan_tci, skb)) { | 110 | switch (vlan_gro_common(napi, grp, vlan_tci, skb)) { |
104 | case -1: | 111 | case -1: |
105 | return netif_receive_skb(skb); | 112 | return netif_receive_skb(skb); |
@@ -126,6 +133,9 @@ int vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp, | |||
126 | if (!skb) | 133 | if (!skb) |
127 | goto out; | 134 | goto out; |
128 | 135 | ||
136 | if (netpoll_receive_skb(skb)) | ||
137 | goto out; | ||
138 | |||
129 | err = NET_RX_SUCCESS; | 139 | err = NET_RX_SUCCESS; |
130 | 140 | ||
131 | switch (vlan_gro_common(napi, grp, vlan_tci, skb)) { | 141 | switch (vlan_gro_common(napi, grp, vlan_tci, skb)) { |
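The vlan_core.c hunks above all apply the same pattern: each VLAN receive entry point bails out early when netpoll has already consumed the skb, so netconsole/netpoll traffic is not fed through the normal receive path a second time. Below is a minimal sketch of that pattern; the helpers (netpoll_rx, netif_receive_skb) are the ones used in the hunks, while the surrounding function is purely illustrative.

    #include <linux/netdevice.h>
    #include <linux/netpoll.h>
    #include <linux/skbuff.h>

    /* Illustrative only: drop packets that netpoll has already consumed
     * before doing any further receive processing.
     */
    static int example_vlan_rx(struct sk_buff *skb)
    {
            if (netpoll_rx(skb))            /* consumed by netpoll/netconsole */
                    return NET_RX_DROP;

            return netif_receive_skb(skb);
    }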
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 4a19acd3a32b..1b34135cf990 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -553,7 +553,7 @@ static int vlan_dev_neigh_setup(struct net_device *dev, struct neigh_parms *pa) | |||
553 | int err = 0; | 553 | int err = 0; |
554 | 554 | ||
555 | if (netif_device_present(real_dev) && ops->ndo_neigh_setup) | 555 | if (netif_device_present(real_dev) && ops->ndo_neigh_setup) |
556 | err = ops->ndo_neigh_setup(dev, pa); | 556 | err = ops->ndo_neigh_setup(real_dev, pa); |
557 | 557 | ||
558 | return err; | 558 | return err; |
559 | } | 559 | } |
@@ -639,6 +639,7 @@ static int vlan_dev_init(struct net_device *dev) | |||
639 | dev->hard_header_len = real_dev->hard_header_len + VLAN_HLEN; | 639 | dev->hard_header_len = real_dev->hard_header_len + VLAN_HLEN; |
640 | dev->netdev_ops = &vlan_netdev_ops; | 640 | dev->netdev_ops = &vlan_netdev_ops; |
641 | } | 641 | } |
642 | netdev_resync_ops(dev); | ||
642 | 643 | ||
643 | if (is_vlan_dev(real_dev)) | 644 | if (is_vlan_dev(real_dev)) |
644 | subclass = 1; | 645 | subclass = 1; |
diff --git a/net/core/dev.c b/net/core/dev.c
index a17e00662363..f1129706ce7b 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2267,12 +2267,6 @@ int netif_receive_skb(struct sk_buff *skb) | |||
2267 | 2267 | ||
2268 | rcu_read_lock(); | 2268 | rcu_read_lock(); |
2269 | 2269 | ||
2270 | /* Don't receive packets in an exiting network namespace */ | ||
2271 | if (!net_alive(dev_net(skb->dev))) { | ||
2272 | kfree_skb(skb); | ||
2273 | goto out; | ||
2274 | } | ||
2275 | |||
2276 | #ifdef CONFIG_NET_CLS_ACT | 2270 | #ifdef CONFIG_NET_CLS_ACT |
2277 | if (skb->tc_verd & TC_NCLS) { | 2271 | if (skb->tc_verd & TC_NCLS) { |
2278 | skb->tc_verd = CLR_TC_NCLS(skb->tc_verd); | 2272 | skb->tc_verd = CLR_TC_NCLS(skb->tc_verd); |
@@ -2488,6 +2482,9 @@ static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) | |||
2488 | 2482 | ||
2489 | int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) | 2483 | int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) |
2490 | { | 2484 | { |
2485 | if (netpoll_receive_skb(skb)) | ||
2486 | return NET_RX_DROP; | ||
2487 | |||
2491 | switch (__napi_gro_receive(napi, skb)) { | 2488 | switch (__napi_gro_receive(napi, skb)) { |
2492 | case -1: | 2489 | case -1: |
2493 | return netif_receive_skb(skb); | 2490 | return netif_receive_skb(skb); |
@@ -2558,6 +2555,9 @@ int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info) | |||
2558 | if (!skb) | 2555 | if (!skb) |
2559 | goto out; | 2556 | goto out; |
2560 | 2557 | ||
2558 | if (netpoll_receive_skb(skb)) | ||
2559 | goto out; | ||
2560 | |||
2561 | err = NET_RX_SUCCESS; | 2561 | err = NET_RX_SUCCESS; |
2562 | 2562 | ||
2563 | switch (__napi_gro_receive(napi, skb)) { | 2563 | switch (__napi_gro_receive(napi, skb)) { |
@@ -4282,6 +4282,39 @@ unsigned long netdev_fix_features(unsigned long features, const char *name) | |||
4282 | } | 4282 | } |
4283 | EXPORT_SYMBOL(netdev_fix_features); | 4283 | EXPORT_SYMBOL(netdev_fix_features); |
4284 | 4284 | ||
4285 | /* Some devices need to (re-)set their netdev_ops inside | ||
4286 | * ->init() or similar. If that happens, we have to setup | ||
4287 | * the compat pointers again. | ||
4288 | */ | ||
4289 | void netdev_resync_ops(struct net_device *dev) | ||
4290 | { | ||
4291 | #ifdef CONFIG_COMPAT_NET_DEV_OPS | ||
4292 | const struct net_device_ops *ops = dev->netdev_ops; | ||
4293 | |||
4294 | dev->init = ops->ndo_init; | ||
4295 | dev->uninit = ops->ndo_uninit; | ||
4296 | dev->open = ops->ndo_open; | ||
4297 | dev->change_rx_flags = ops->ndo_change_rx_flags; | ||
4298 | dev->set_rx_mode = ops->ndo_set_rx_mode; | ||
4299 | dev->set_multicast_list = ops->ndo_set_multicast_list; | ||
4300 | dev->set_mac_address = ops->ndo_set_mac_address; | ||
4301 | dev->validate_addr = ops->ndo_validate_addr; | ||
4302 | dev->do_ioctl = ops->ndo_do_ioctl; | ||
4303 | dev->set_config = ops->ndo_set_config; | ||
4304 | dev->change_mtu = ops->ndo_change_mtu; | ||
4305 | dev->neigh_setup = ops->ndo_neigh_setup; | ||
4306 | dev->tx_timeout = ops->ndo_tx_timeout; | ||
4307 | dev->get_stats = ops->ndo_get_stats; | ||
4308 | dev->vlan_rx_register = ops->ndo_vlan_rx_register; | ||
4309 | dev->vlan_rx_add_vid = ops->ndo_vlan_rx_add_vid; | ||
4310 | dev->vlan_rx_kill_vid = ops->ndo_vlan_rx_kill_vid; | ||
4311 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
4312 | dev->poll_controller = ops->ndo_poll_controller; | ||
4313 | #endif | ||
4314 | #endif | ||
4315 | } | ||
4316 | EXPORT_SYMBOL(netdev_resync_ops); | ||
4317 | |||
4285 | /** | 4318 | /** |
4286 | * register_netdevice - register a network device | 4319 | * register_netdevice - register a network device |
4287 | * @dev: device to register | 4320 | * @dev: device to register |
@@ -4326,27 +4359,7 @@ int register_netdevice(struct net_device *dev) | |||
4326 | * This is temporary until all network devices are converted. | 4359 | * This is temporary until all network devices are converted. |
4327 | */ | 4360 | */ |
4328 | if (dev->netdev_ops) { | 4361 | if (dev->netdev_ops) { |
4329 | const struct net_device_ops *ops = dev->netdev_ops; | 4362 | netdev_resync_ops(dev); |
4330 | |||
4331 | dev->init = ops->ndo_init; | ||
4332 | dev->uninit = ops->ndo_uninit; | ||
4333 | dev->open = ops->ndo_open; | ||
4334 | dev->change_rx_flags = ops->ndo_change_rx_flags; | ||
4335 | dev->set_rx_mode = ops->ndo_set_rx_mode; | ||
4336 | dev->set_multicast_list = ops->ndo_set_multicast_list; | ||
4337 | dev->set_mac_address = ops->ndo_set_mac_address; | ||
4338 | dev->validate_addr = ops->ndo_validate_addr; | ||
4339 | dev->do_ioctl = ops->ndo_do_ioctl; | ||
4340 | dev->set_config = ops->ndo_set_config; | ||
4341 | dev->change_mtu = ops->ndo_change_mtu; | ||
4342 | dev->tx_timeout = ops->ndo_tx_timeout; | ||
4343 | dev->get_stats = ops->ndo_get_stats; | ||
4344 | dev->vlan_rx_register = ops->ndo_vlan_rx_register; | ||
4345 | dev->vlan_rx_add_vid = ops->ndo_vlan_rx_add_vid; | ||
4346 | dev->vlan_rx_kill_vid = ops->ndo_vlan_rx_kill_vid; | ||
4347 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
4348 | dev->poll_controller = ops->ndo_poll_controller; | ||
4349 | #endif | ||
4350 | } else { | 4363 | } else { |
4351 | char drivername[64]; | 4364 | char drivername[64]; |
4352 | pr_info("%s (%s): not using net_device_ops yet\n", | 4365 | pr_info("%s (%s): not using net_device_ops yet\n", |
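The new netdev_resync_ops() helper factors the CONFIG_COMPAT_NET_DEV_OPS pointer copying out of register_netdevice() so that code which swaps dev->netdev_ops after the initial setup (as vlan_dev_init() does above) can refresh the legacy fields. A rough sketch of the intended call site follows; the driver, the predicate and the alternate ops table are hypothetical.

    #include <linux/netdevice.h>

    /* Hypothetical ->ndo_init(): if the ops table is replaced here, the
     * compat pointers copied at register time are stale and must be
     * re-synced.
     */
    static int example_dev_init(struct net_device *dev)
    {
            if (example_needs_alt_ops(dev))         /* hypothetical check */
                    dev->netdev_ops = &example_alt_netdev_ops;

            netdev_resync_ops(dev);                 /* re-copy compat pointers */
            return 0;
    }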
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 6ac29a46e23e..484f58750eba 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -77,7 +77,9 @@ static ssize_t netdev_store(struct device *dev, struct device_attribute *attr, | |||
77 | if (endp == buf) | 77 | if (endp == buf) |
78 | goto err; | 78 | goto err; |
79 | 79 | ||
80 | rtnl_lock(); | 80 | if (!rtnl_trylock()) |
81 | return -ERESTARTSYS; | ||
82 | |||
81 | if (dev_isalive(net)) { | 83 | if (dev_isalive(net)) { |
82 | if ((ret = (*set)(net, new)) == 0) | 84 | if ((ret = (*set)(net, new)) == 0) |
83 | ret = len; | 85 | ret = len; |
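With the change above, a netdev sysfs store no longer sleeps waiting for the RTNL; presumably this avoids deadlocking against a device being unregistered under the RTNL while unregister in turn waits for the sysfs attribute to go away. Instead the handler backs off with -ERESTARTSYS and leaves it to the syscall restart machinery (or the caller) to retry the write. A condensed sketch of the pattern, with a hypothetical update helper:

    #include <linux/errno.h>
    #include <linux/netdevice.h>
    #include <linux/rtnetlink.h>

    static ssize_t example_store(struct net_device *netdev, unsigned long new)
    {
            ssize_t ret;

            if (!rtnl_trylock())
                    return -ERESTARTSYS;    /* back off rather than block */

            ret = do_the_update(netdev, new);       /* hypothetical helper */
            rtnl_unlock();
            return ret;
    }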
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 55151faaf90c..e3bebd36f053 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -32,24 +32,14 @@ static __net_init int setup_net(struct net *net) | |||
32 | { | 32 | { |
33 | /* Must be called with net_mutex held */ | 33 | /* Must be called with net_mutex held */ |
34 | struct pernet_operations *ops; | 34 | struct pernet_operations *ops; |
35 | int error; | 35 | int error = 0; |
36 | struct net_generic *ng; | ||
37 | 36 | ||
38 | atomic_set(&net->count, 1); | 37 | atomic_set(&net->count, 1); |
38 | |||
39 | #ifdef NETNS_REFCNT_DEBUG | 39 | #ifdef NETNS_REFCNT_DEBUG |
40 | atomic_set(&net->use_count, 0); | 40 | atomic_set(&net->use_count, 0); |
41 | #endif | 41 | #endif |
42 | 42 | ||
43 | error = -ENOMEM; | ||
44 | ng = kzalloc(sizeof(struct net_generic) + | ||
45 | INITIAL_NET_GEN_PTRS * sizeof(void *), GFP_KERNEL); | ||
46 | if (ng == NULL) | ||
47 | goto out; | ||
48 | |||
49 | ng->len = INITIAL_NET_GEN_PTRS; | ||
50 | rcu_assign_pointer(net->gen, ng); | ||
51 | |||
52 | error = 0; | ||
53 | list_for_each_entry(ops, &pernet_list, list) { | 43 | list_for_each_entry(ops, &pernet_list, list) { |
54 | if (ops->init) { | 44 | if (ops->init) { |
55 | error = ops->init(net); | 45 | error = ops->init(net); |
@@ -70,24 +60,50 @@ out_undo: | |||
70 | } | 60 | } |
71 | 61 | ||
72 | rcu_barrier(); | 62 | rcu_barrier(); |
73 | kfree(ng); | ||
74 | goto out; | 63 | goto out; |
75 | } | 64 | } |
76 | 65 | ||
66 | static struct net_generic *net_alloc_generic(void) | ||
67 | { | ||
68 | struct net_generic *ng; | ||
69 | size_t generic_size = sizeof(struct net_generic) + | ||
70 | INITIAL_NET_GEN_PTRS * sizeof(void *); | ||
71 | |||
72 | ng = kzalloc(generic_size, GFP_KERNEL); | ||
73 | if (ng) | ||
74 | ng->len = INITIAL_NET_GEN_PTRS; | ||
75 | |||
76 | return ng; | ||
77 | } | ||
78 | |||
77 | #ifdef CONFIG_NET_NS | 79 | #ifdef CONFIG_NET_NS |
78 | static struct kmem_cache *net_cachep; | 80 | static struct kmem_cache *net_cachep; |
79 | static struct workqueue_struct *netns_wq; | 81 | static struct workqueue_struct *netns_wq; |
80 | 82 | ||
81 | static struct net *net_alloc(void) | 83 | static struct net *net_alloc(void) |
82 | { | 84 | { |
83 | return kmem_cache_zalloc(net_cachep, GFP_KERNEL); | 85 | struct net *net = NULL; |
86 | struct net_generic *ng; | ||
87 | |||
88 | ng = net_alloc_generic(); | ||
89 | if (!ng) | ||
90 | goto out; | ||
91 | |||
92 | net = kmem_cache_zalloc(net_cachep, GFP_KERNEL); | ||
93 | if (!net) | ||
94 | goto out_free; | ||
95 | |||
96 | rcu_assign_pointer(net->gen, ng); | ||
97 | out: | ||
98 | return net; | ||
99 | |||
100 | out_free: | ||
101 | kfree(ng); | ||
102 | goto out; | ||
84 | } | 103 | } |
85 | 104 | ||
86 | static void net_free(struct net *net) | 105 | static void net_free(struct net *net) |
87 | { | 106 | { |
88 | if (!net) | ||
89 | return; | ||
90 | |||
91 | #ifdef NETNS_REFCNT_DEBUG | 107 | #ifdef NETNS_REFCNT_DEBUG |
92 | if (unlikely(atomic_read(&net->use_count) != 0)) { | 108 | if (unlikely(atomic_read(&net->use_count) != 0)) { |
93 | printk(KERN_EMERG "network namespace not free! Usage: %d\n", | 109 | printk(KERN_EMERG "network namespace not free! Usage: %d\n", |
@@ -112,27 +128,28 @@ struct net *copy_net_ns(unsigned long flags, struct net *old_net) | |||
112 | err = -ENOMEM; | 128 | err = -ENOMEM; |
113 | new_net = net_alloc(); | 129 | new_net = net_alloc(); |
114 | if (!new_net) | 130 | if (!new_net) |
115 | goto out; | 131 | goto out_err; |
116 | 132 | ||
117 | mutex_lock(&net_mutex); | 133 | mutex_lock(&net_mutex); |
118 | err = setup_net(new_net); | 134 | err = setup_net(new_net); |
119 | if (err) | 135 | if (!err) { |
120 | goto out_unlock; | 136 | rtnl_lock(); |
121 | 137 | list_add_tail(&new_net->list, &net_namespace_list); | |
122 | rtnl_lock(); | 138 | rtnl_unlock(); |
123 | list_add_tail(&new_net->list, &net_namespace_list); | 139 | } |
124 | rtnl_unlock(); | ||
125 | |||
126 | |||
127 | out_unlock: | ||
128 | mutex_unlock(&net_mutex); | 140 | mutex_unlock(&net_mutex); |
141 | |||
142 | if (err) | ||
143 | goto out_free; | ||
129 | out: | 144 | out: |
130 | put_net(old_net); | 145 | put_net(old_net); |
131 | if (err) { | ||
132 | net_free(new_net); | ||
133 | new_net = ERR_PTR(err); | ||
134 | } | ||
135 | return new_net; | 146 | return new_net; |
147 | |||
148 | out_free: | ||
149 | net_free(new_net); | ||
150 | out_err: | ||
151 | new_net = ERR_PTR(err); | ||
152 | goto out; | ||
136 | } | 153 | } |
137 | 154 | ||
138 | static void cleanup_net(struct work_struct *work) | 155 | static void cleanup_net(struct work_struct *work) |
@@ -140,9 +157,6 @@ static void cleanup_net(struct work_struct *work) | |||
140 | struct pernet_operations *ops; | 157 | struct pernet_operations *ops; |
141 | struct net *net; | 158 | struct net *net; |
142 | 159 | ||
143 | /* Be very certain incoming network packets will not find us */ | ||
144 | rcu_barrier(); | ||
145 | |||
146 | net = container_of(work, struct net, work); | 160 | net = container_of(work, struct net, work); |
147 | 161 | ||
148 | mutex_lock(&net_mutex); | 162 | mutex_lock(&net_mutex); |
@@ -188,6 +202,7 @@ struct net *copy_net_ns(unsigned long flags, struct net *old_net) | |||
188 | 202 | ||
189 | static int __init net_ns_init(void) | 203 | static int __init net_ns_init(void) |
190 | { | 204 | { |
205 | struct net_generic *ng; | ||
191 | int err; | 206 | int err; |
192 | 207 | ||
193 | printk(KERN_INFO "net_namespace: %zd bytes\n", sizeof(struct net)); | 208 | printk(KERN_INFO "net_namespace: %zd bytes\n", sizeof(struct net)); |
@@ -202,6 +217,12 @@ static int __init net_ns_init(void) | |||
202 | panic("Could not create netns workq"); | 217 | panic("Could not create netns workq"); |
203 | #endif | 218 | #endif |
204 | 219 | ||
220 | ng = net_alloc_generic(); | ||
221 | if (!ng) | ||
222 | panic("Could not allocate generic netns"); | ||
223 | |||
224 | rcu_assign_pointer(init_net.gen, ng); | ||
225 | |||
205 | mutex_lock(&net_mutex); | 226 | mutex_lock(&net_mutex); |
206 | err = setup_net(&init_net); | 227 | err = setup_net(&init_net); |
207 | 228 | ||
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index da74b844f4ea..c6a6b166f8d6 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -143,14 +143,6 @@ void skb_under_panic(struct sk_buff *skb, int sz, void *here) | |||
143 | BUG(); | 143 | BUG(); |
144 | } | 144 | } |
145 | 145 | ||
146 | void skb_truesize_bug(struct sk_buff *skb) | ||
147 | { | ||
148 | WARN(net_ratelimit(), KERN_ERR "SKB BUG: Invalid truesize (%u) " | ||
149 | "len=%u, sizeof(sk_buff)=%Zd\n", | ||
150 | skb->truesize, skb->len, sizeof(struct sk_buff)); | ||
151 | } | ||
152 | EXPORT_SYMBOL(skb_truesize_bug); | ||
153 | |||
154 | /* Allocate a new skbuff. We do this ourselves so we can fill in a few | 146 | /* Allocate a new skbuff. We do this ourselves so we can fill in a few |
155 | * 'private' fields and also do memory statistics to find all the | 147 | * 'private' fields and also do memory statistics to find all the |
156 | * [BEEP] leaks. | 148 | * [BEEP] leaks. |
diff --git a/net/core/sock.c b/net/core/sock.c
index 6f2e1337975d..5f97caa158e8 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -696,7 +696,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname, | |||
696 | if (len < 0) | 696 | if (len < 0) |
697 | return -EINVAL; | 697 | return -EINVAL; |
698 | 698 | ||
699 | v.val = 0; | 699 | memset(&v, 0, sizeof(v)); |
700 | 700 | ||
701 | switch(optname) { | 701 | switch(optname) { |
702 | case SO_DEBUG: | 702 | case SO_DEBUG: |
@@ -1137,7 +1137,6 @@ void sock_rfree(struct sk_buff *skb) | |||
1137 | { | 1137 | { |
1138 | struct sock *sk = skb->sk; | 1138 | struct sock *sk = skb->sk; |
1139 | 1139 | ||
1140 | skb_truesize_check(skb); | ||
1141 | atomic_sub(skb->truesize, &sk->sk_rmem_alloc); | 1140 | atomic_sub(skb->truesize, &sk->sk_rmem_alloc); |
1142 | sk_mem_uncharge(skb->sk, skb->truesize); | 1141 | sk_mem_uncharge(skb->sk, skb->truesize); |
1143 | } | 1142 | } |
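The sock.c change replaces the single-member initialization with a memset() because v is a union that is later copied back to userspace: zeroing only v.val leaves the bytes of the larger members as uninitialized kernel stack, which would be disclosed for options that copy out more than an int. A tiny standalone illustration of the idea (the union and its members are made up):

    #include <string.h>

    union example_reply {
            int val;
            struct {
                    char name[32];
                    int flags;
            } big;                          /* larger than val */
    };

    static void fill_reply(union example_reply *v)
    {
            /* Clear the whole union first so no stale stack bytes can
             * escape when it is copied out later.
             */
            memset(v, 0, sizeof(*v));
            v->val = 1;                     /* then set the member that applies */
    }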
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 6bb2635b5ded..7bc992976d29 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -3,11 +3,16 @@ | |||
3 | * | 3 | * |
4 | * This is an implementation of the CIPSO 2.2 protocol as specified in | 4 | * This is an implementation of the CIPSO 2.2 protocol as specified in |
5 | * draft-ietf-cipso-ipsecurity-01.txt with additional tag types as found in | 5 | * draft-ietf-cipso-ipsecurity-01.txt with additional tag types as found in |
6 | * FIPS-188, copies of both documents can be found in the Documentation | 6 | * FIPS-188. While CIPSO never became a full IETF RFC standard many vendors |
7 | * directory. While CIPSO never became a full IETF RFC standard many vendors | ||
8 | * have chosen to adopt the protocol and over the years it has become a | 7 | * have chosen to adopt the protocol and over the years it has become a |
9 | * de-facto standard for labeled networking. | 8 | * de-facto standard for labeled networking. |
10 | * | 9 | * |
10 | * The CIPSO draft specification can be found in the kernel's Documentation | ||
11 | * directory as well as the following URL: | ||
12 | * http://netlabel.sourceforge.net/files/draft-ietf-cipso-ipsecurity-01.txt | ||
13 | * The FIPS-188 specification can be found at the following URL: | ||
14 | * http://www.itl.nist.gov/fipspubs/fip188.htm | ||
15 | * | ||
11 | * Author: Paul Moore <paul.moore@hp.com> | 16 | * Author: Paul Moore <paul.moore@hp.com> |
12 | * | 17 | * |
13 | */ | 18 | */ |
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 705b33b184a3..fc562d29cc46 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -1205,7 +1205,7 @@ static struct pernet_operations __net_initdata icmp_sk_ops = { | |||
1205 | 1205 | ||
1206 | int __init icmp_init(void) | 1206 | int __init icmp_init(void) |
1207 | { | 1207 | { |
1208 | return register_pernet_device(&icmp_sk_ops); | 1208 | return register_pernet_subsys(&icmp_sk_ops); |
1209 | } | 1209 | } |
1210 | 1210 | ||
1211 | EXPORT_SYMBOL(icmp_err_convert); | 1211 | EXPORT_SYMBOL(icmp_err_convert); |
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index a6961d75c7ea..c28976a7e596 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1374,7 +1374,8 @@ static u8 tcp_sacktag_one(struct sk_buff *skb, struct sock *sk, | |||
1374 | 1374 | ||
1375 | static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb, | 1375 | static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb, |
1376 | struct tcp_sacktag_state *state, | 1376 | struct tcp_sacktag_state *state, |
1377 | unsigned int pcount, int shifted, int mss) | 1377 | unsigned int pcount, int shifted, int mss, |
1378 | int dup_sack) | ||
1378 | { | 1379 | { |
1379 | struct tcp_sock *tp = tcp_sk(sk); | 1380 | struct tcp_sock *tp = tcp_sk(sk); |
1380 | struct sk_buff *prev = tcp_write_queue_prev(sk, skb); | 1381 | struct sk_buff *prev = tcp_write_queue_prev(sk, skb); |
@@ -1410,7 +1411,7 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb, | |||
1410 | } | 1411 | } |
1411 | 1412 | ||
1412 | /* We discard results */ | 1413 | /* We discard results */ |
1413 | tcp_sacktag_one(skb, sk, state, 0, pcount); | 1414 | tcp_sacktag_one(skb, sk, state, dup_sack, pcount); |
1414 | 1415 | ||
1415 | /* Difference in this won't matter, both ACKed by the same cumul. ACK */ | 1416 | /* Difference in this won't matter, both ACKed by the same cumul. ACK */ |
1416 | TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS); | 1417 | TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS); |
@@ -1561,7 +1562,7 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb, | |||
1561 | 1562 | ||
1562 | if (!skb_shift(prev, skb, len)) | 1563 | if (!skb_shift(prev, skb, len)) |
1563 | goto fallback; | 1564 | goto fallback; |
1564 | if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss)) | 1565 | if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss, dup_sack)) |
1565 | goto out; | 1566 | goto out; |
1566 | 1567 | ||
1567 | /* Hole filled allows collapsing with the next as well, this is very | 1568 | /* Hole filled allows collapsing with the next as well, this is very |
@@ -1580,7 +1581,7 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb, | |||
1580 | len = skb->len; | 1581 | len = skb->len; |
1581 | if (skb_shift(prev, skb, len)) { | 1582 | if (skb_shift(prev, skb, len)) { |
1582 | pcount += tcp_skb_pcount(skb); | 1583 | pcount += tcp_skb_pcount(skb); |
1583 | tcp_shifted_skb(sk, skb, state, tcp_skb_pcount(skb), len, mss); | 1584 | tcp_shifted_skb(sk, skb, state, tcp_skb_pcount(skb), len, mss, 0); |
1584 | } | 1585 | } |
1585 | 1586 | ||
1586 | out: | 1587 | out: |
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 19d7b429a262..cf74c416831a 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2443,7 +2443,7 @@ static struct pernet_operations __net_initdata tcp_sk_ops = { | |||
2443 | void __init tcp_v4_init(void) | 2443 | void __init tcp_v4_init(void) |
2444 | { | 2444 | { |
2445 | inet_hashinfo_init(&tcp_hashinfo); | 2445 | inet_hashinfo_init(&tcp_hashinfo); |
2446 | if (register_pernet_device(&tcp_sk_ops)) | 2446 | if (register_pernet_subsys(&tcp_sk_ops)) |
2447 | panic("Failed to create the TCP control socket.\n"); | 2447 | panic("Failed to create the TCP control socket.\n"); |
2448 | } | 2448 | } |
2449 | 2449 | ||
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index dda42f0bd7a3..da2c3b8794f2 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2023,7 +2023,6 @@ void tcp_xmit_retransmit_queue(struct sock *sk) | |||
2023 | last_lost = tp->snd_una; | 2023 | last_lost = tp->snd_una; |
2024 | } | 2024 | } |
2025 | 2025 | ||
2026 | /* First pass: retransmit lost packets. */ | ||
2027 | tcp_for_write_queue_from(skb, sk) { | 2026 | tcp_for_write_queue_from(skb, sk) { |
2028 | __u8 sacked = TCP_SKB_CB(skb)->sacked; | 2027 | __u8 sacked = TCP_SKB_CB(skb)->sacked; |
2029 | 2028 | ||
diff --git a/net/ipv4/tcp_scalable.c b/net/ipv4/tcp_scalable.c
index 2747ec7bfb63..4660b088a8ce 100644
--- a/net/ipv4/tcp_scalable.c
+++ b/net/ipv4/tcp_scalable.c
@@ -1,6 +1,6 @@ | |||
1 | /* Tom Kelly's Scalable TCP | 1 | /* Tom Kelly's Scalable TCP |
2 | * | 2 | * |
3 | * See htt://www-lce.eng.cam.ac.uk/~ctk21/scalable/ | 3 | * See http://www.deneholme.net/tom/scalable/ |
4 | * | 4 | * |
5 | * John Heffner <jheffner@sc.edu> | 5 | * John Heffner <jheffner@sc.edu> |
6 | */ | 6 | */ |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index f9afb452249c..1220e2c7831e 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -493,15 +493,17 @@ static void addrconf_forward_change(struct net *net, __s32 newf) | |||
493 | read_unlock(&dev_base_lock); | 493 | read_unlock(&dev_base_lock); |
494 | } | 494 | } |
495 | 495 | ||
496 | static void addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old) | 496 | static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old) |
497 | { | 497 | { |
498 | struct net *net; | 498 | struct net *net; |
499 | 499 | ||
500 | net = (struct net *)table->extra2; | 500 | net = (struct net *)table->extra2; |
501 | if (p == &net->ipv6.devconf_dflt->forwarding) | 501 | if (p == &net->ipv6.devconf_dflt->forwarding) |
502 | return; | 502 | return 0; |
503 | |||
504 | if (!rtnl_trylock()) | ||
505 | return -ERESTARTSYS; | ||
503 | 506 | ||
504 | rtnl_lock(); | ||
505 | if (p == &net->ipv6.devconf_all->forwarding) { | 507 | if (p == &net->ipv6.devconf_all->forwarding) { |
506 | __s32 newf = net->ipv6.devconf_all->forwarding; | 508 | __s32 newf = net->ipv6.devconf_all->forwarding; |
507 | net->ipv6.devconf_dflt->forwarding = newf; | 509 | net->ipv6.devconf_dflt->forwarding = newf; |
@@ -512,6 +514,7 @@ static void addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old) | |||
512 | 514 | ||
513 | if (*p) | 515 | if (*p) |
514 | rt6_purge_dflt_routers(net); | 516 | rt6_purge_dflt_routers(net); |
517 | return 1; | ||
515 | } | 518 | } |
516 | #endif | 519 | #endif |
517 | 520 | ||
@@ -2608,9 +2611,6 @@ static int addrconf_ifdown(struct net_device *dev, int how) | |||
2608 | 2611 | ||
2609 | ASSERT_RTNL(); | 2612 | ASSERT_RTNL(); |
2610 | 2613 | ||
2611 | if ((dev->flags & IFF_LOOPBACK) && how == 1) | ||
2612 | how = 0; | ||
2613 | |||
2614 | rt6_ifdown(net, dev); | 2614 | rt6_ifdown(net, dev); |
2615 | neigh_ifdown(&nd_tbl, dev); | 2615 | neigh_ifdown(&nd_tbl, dev); |
2616 | 2616 | ||
@@ -3983,7 +3983,7 @@ int addrconf_sysctl_forward(ctl_table *ctl, int write, struct file * filp, | |||
3983 | ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos); | 3983 | ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos); |
3984 | 3984 | ||
3985 | if (write) | 3985 | if (write) |
3986 | addrconf_fixup_forwarding(ctl, valp, val); | 3986 | ret = addrconf_fixup_forwarding(ctl, valp, val); |
3987 | return ret; | 3987 | return ret; |
3988 | } | 3988 | } |
3989 | 3989 | ||
@@ -4019,8 +4019,7 @@ static int addrconf_sysctl_forward_strategy(ctl_table *table, | |||
4019 | } | 4019 | } |
4020 | 4020 | ||
4021 | *valp = new; | 4021 | *valp = new; |
4022 | addrconf_fixup_forwarding(table, valp, val); | 4022 | return addrconf_fixup_forwarding(table, valp, val); |
4023 | return 1; | ||
4024 | } | 4023 | } |
4025 | 4024 | ||
4026 | static struct addrconf_sysctl_table | 4025 | static struct addrconf_sysctl_table |
@@ -4446,25 +4445,6 @@ int unregister_inet6addr_notifier(struct notifier_block *nb) | |||
4446 | 4445 | ||
4447 | EXPORT_SYMBOL(unregister_inet6addr_notifier); | 4446 | EXPORT_SYMBOL(unregister_inet6addr_notifier); |
4448 | 4447 | ||
4449 | static void addrconf_net_exit(struct net *net) | ||
4450 | { | ||
4451 | struct net_device *dev; | ||
4452 | |||
4453 | rtnl_lock(); | ||
4454 | /* clean dev list */ | ||
4455 | for_each_netdev(net, dev) { | ||
4456 | if (__in6_dev_get(dev) == NULL) | ||
4457 | continue; | ||
4458 | addrconf_ifdown(dev, 1); | ||
4459 | } | ||
4460 | addrconf_ifdown(net->loopback_dev, 2); | ||
4461 | rtnl_unlock(); | ||
4462 | } | ||
4463 | |||
4464 | static struct pernet_operations addrconf_net_ops = { | ||
4465 | .exit = addrconf_net_exit, | ||
4466 | }; | ||
4467 | |||
4468 | /* | 4448 | /* |
4469 | * Init / cleanup code | 4449 | * Init / cleanup code |
4470 | */ | 4450 | */ |
@@ -4506,10 +4486,6 @@ int __init addrconf_init(void) | |||
4506 | if (err) | 4486 | if (err) |
4507 | goto errlo; | 4487 | goto errlo; |
4508 | 4488 | ||
4509 | err = register_pernet_device(&addrconf_net_ops); | ||
4510 | if (err) | ||
4511 | return err; | ||
4512 | |||
4513 | register_netdevice_notifier(&ipv6_dev_notf); | 4489 | register_netdevice_notifier(&ipv6_dev_notf); |
4514 | 4490 | ||
4515 | addrconf_verify(0); | 4491 | addrconf_verify(0); |
@@ -4539,15 +4515,22 @@ errlo: | |||
4539 | void addrconf_cleanup(void) | 4515 | void addrconf_cleanup(void) |
4540 | { | 4516 | { |
4541 | struct inet6_ifaddr *ifa; | 4517 | struct inet6_ifaddr *ifa; |
4518 | struct net_device *dev; | ||
4542 | int i; | 4519 | int i; |
4543 | 4520 | ||
4544 | unregister_netdevice_notifier(&ipv6_dev_notf); | 4521 | unregister_netdevice_notifier(&ipv6_dev_notf); |
4545 | unregister_pernet_device(&addrconf_net_ops); | ||
4546 | |||
4547 | unregister_pernet_subsys(&addrconf_ops); | 4522 | unregister_pernet_subsys(&addrconf_ops); |
4548 | 4523 | ||
4549 | rtnl_lock(); | 4524 | rtnl_lock(); |
4550 | 4525 | ||
4526 | /* clean dev list */ | ||
4527 | for_each_netdev(&init_net, dev) { | ||
4528 | if (__in6_dev_get(dev) == NULL) | ||
4529 | continue; | ||
4530 | addrconf_ifdown(dev, 1); | ||
4531 | } | ||
4532 | addrconf_ifdown(init_net.loopback_dev, 2); | ||
4533 | |||
4551 | /* | 4534 | /* |
4552 | * Check hash table. | 4535 | * Check hash table. |
4553 | */ | 4536 | */ |
@@ -4568,6 +4551,4 @@ void addrconf_cleanup(void) | |||
4568 | 4551 | ||
4569 | del_timer(&addr_chk_timer); | 4552 | del_timer(&addr_chk_timer); |
4570 | rtnl_unlock(); | 4553 | rtnl_unlock(); |
4571 | |||
4572 | unregister_pernet_subsys(&addrconf_net_ops); | ||
4573 | } | 4554 | } |
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index c802bc1658a8..da944eca2ca6 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -72,6 +72,10 @@ MODULE_LICENSE("GPL"); | |||
72 | static struct list_head inetsw6[SOCK_MAX]; | 72 | static struct list_head inetsw6[SOCK_MAX]; |
73 | static DEFINE_SPINLOCK(inetsw6_lock); | 73 | static DEFINE_SPINLOCK(inetsw6_lock); |
74 | 74 | ||
75 | static int disable_ipv6 = 0; | ||
76 | module_param_named(disable, disable_ipv6, int, 0); | ||
77 | MODULE_PARM_DESC(disable, "Disable IPv6 such that it is non-functional"); | ||
78 | |||
75 | static __inline__ struct ipv6_pinfo *inet6_sk_generic(struct sock *sk) | 79 | static __inline__ struct ipv6_pinfo *inet6_sk_generic(struct sock *sk) |
76 | { | 80 | { |
77 | const int offset = sk->sk_prot->obj_size - sizeof(struct ipv6_pinfo); | 81 | const int offset = sk->sk_prot->obj_size - sizeof(struct ipv6_pinfo); |
@@ -991,10 +995,21 @@ static int __init inet6_init(void) | |||
991 | { | 995 | { |
992 | struct sk_buff *dummy_skb; | 996 | struct sk_buff *dummy_skb; |
993 | struct list_head *r; | 997 | struct list_head *r; |
994 | int err; | 998 | int err = 0; |
995 | 999 | ||
996 | BUILD_BUG_ON(sizeof(struct inet6_skb_parm) > sizeof(dummy_skb->cb)); | 1000 | BUILD_BUG_ON(sizeof(struct inet6_skb_parm) > sizeof(dummy_skb->cb)); |
997 | 1001 | ||
1002 | /* Register the socket-side information for inet6_create. */ | ||
1003 | for(r = &inetsw6[0]; r < &inetsw6[SOCK_MAX]; ++r) | ||
1004 | INIT_LIST_HEAD(r); | ||
1005 | |||
1006 | if (disable_ipv6) { | ||
1007 | printk(KERN_INFO | ||
1008 | "IPv6: Loaded, but administratively disabled, " | ||
1009 | "reboot required to enable\n"); | ||
1010 | goto out; | ||
1011 | } | ||
1012 | |||
998 | err = proto_register(&tcpv6_prot, 1); | 1013 | err = proto_register(&tcpv6_prot, 1); |
999 | if (err) | 1014 | if (err) |
1000 | goto out; | 1015 | goto out; |
@@ -1012,10 +1027,6 @@ static int __init inet6_init(void) | |||
1012 | goto out_unregister_udplite_proto; | 1027 | goto out_unregister_udplite_proto; |
1013 | 1028 | ||
1014 | 1029 | ||
1015 | /* Register the socket-side information for inet6_create. */ | ||
1016 | for(r = &inetsw6[0]; r < &inetsw6[SOCK_MAX]; ++r) | ||
1017 | INIT_LIST_HEAD(r); | ||
1018 | |||
1019 | /* We MUST register RAW sockets before we create the ICMP6, | 1030 | /* We MUST register RAW sockets before we create the ICMP6, |
1020 | * IGMP6, or NDISC control sockets. | 1031 | * IGMP6, or NDISC control sockets. |
1021 | */ | 1032 | */ |
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index 8fe267feb81e..1bcc3431859e 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -258,11 +258,11 @@ unique: | |||
258 | 258 | ||
259 | if (twp != NULL) { | 259 | if (twp != NULL) { |
260 | *twp = tw; | 260 | *twp = tw; |
261 | NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITRECYCLED); | 261 | NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED); |
262 | } else if (tw != NULL) { | 262 | } else if (tw != NULL) { |
263 | /* Silly. Should hash-dance instead... */ | 263 | /* Silly. Should hash-dance instead... */ |
264 | inet_twsk_deschedule(tw, death_row); | 264 | inet_twsk_deschedule(tw, death_row); |
265 | NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITRECYCLED); | 265 | NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED); |
266 | 266 | ||
267 | inet_twsk_put(tw); | 267 | inet_twsk_put(tw); |
268 | } | 268 | } |
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index c323643ffcf9..72dbb6d1a6b3 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -201,8 +201,9 @@ icmpv6_error(struct net *net, struct sk_buff *skb, unsigned int dataoff, | |||
201 | 201 | ||
202 | if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING && | 202 | if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING && |
203 | nf_ip6_checksum(skb, hooknum, dataoff, IPPROTO_ICMPV6)) { | 203 | nf_ip6_checksum(skb, hooknum, dataoff, IPPROTO_ICMPV6)) { |
204 | nf_log_packet(PF_INET6, 0, skb, NULL, NULL, NULL, | 204 | if (LOG_INVALID(net, IPPROTO_ICMPV6)) |
205 | "nf_ct_icmpv6: ICMPv6 checksum failed\n"); | 205 | nf_log_packet(PF_INET6, 0, skb, NULL, NULL, NULL, |
206 | "nf_ct_icmpv6: ICMPv6 checksum failed "); | ||
206 | return -NF_ACCEPT; | 207 | return -NF_ACCEPT; |
207 | } | 208 | } |
208 | 209 | ||
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index fa49dc7fe100..c712e9fc6bba 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -39,7 +39,7 @@ | |||
39 | #endif | 39 | #endif |
40 | 40 | ||
41 | #define NFULNL_NLBUFSIZ_DEFAULT NLMSG_GOODSIZE | 41 | #define NFULNL_NLBUFSIZ_DEFAULT NLMSG_GOODSIZE |
42 | #define NFULNL_TIMEOUT_DEFAULT HZ /* every second */ | 42 | #define NFULNL_TIMEOUT_DEFAULT 100 /* every second */ |
43 | #define NFULNL_QTHRESH_DEFAULT 100 /* 100 packets */ | 43 | #define NFULNL_QTHRESH_DEFAULT 100 /* 100 packets */ |
44 | #define NFULNL_COPY_RANGE_MAX 0xFFFF /* max packet size is limited by 16-bit struct nfattr nfa_len field */ | 44 | #define NFULNL_COPY_RANGE_MAX 0xFFFF /* max packet size is limited by 16-bit struct nfattr nfa_len field */ |
45 | 45 | ||
@@ -590,8 +590,10 @@ nfulnl_log_packet(u_int8_t pf, | |||
590 | 590 | ||
591 | qthreshold = inst->qthreshold; | 591 | qthreshold = inst->qthreshold; |
592 | /* per-rule qthreshold overrides per-instance */ | 592 | /* per-rule qthreshold overrides per-instance */ |
593 | if (qthreshold > li->u.ulog.qthreshold) | 593 | if (li->u.ulog.qthreshold) |
594 | qthreshold = li->u.ulog.qthreshold; | 594 | if (qthreshold > li->u.ulog.qthreshold) |
595 | qthreshold = li->u.ulog.qthreshold; | ||
596 | |||
595 | 597 | ||
596 | switch (inst->copy_mode) { | 598 | switch (inst->copy_mode) { |
597 | case NFULNL_COPY_META: | 599 | case NFULNL_COPY_META: |
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index bfbf521f6ea5..5baccfa5a0de 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -827,59 +827,143 @@ static const struct file_operations xt_table_ops = { | |||
827 | .release = seq_release_net, | 827 | .release = seq_release_net, |
828 | }; | 828 | }; |
829 | 829 | ||
830 | static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos) | 830 | /* |
831 | * Traverse state for ip{,6}_{tables,matches} for helping crossing | ||
832 | * the multi-AF mutexes. | ||
833 | */ | ||
834 | struct nf_mttg_trav { | ||
835 | struct list_head *head, *curr; | ||
836 | uint8_t class, nfproto; | ||
837 | }; | ||
838 | |||
839 | enum { | ||
840 | MTTG_TRAV_INIT, | ||
841 | MTTG_TRAV_NFP_UNSPEC, | ||
842 | MTTG_TRAV_NFP_SPEC, | ||
843 | MTTG_TRAV_DONE, | ||
844 | }; | ||
845 | |||
846 | static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos, | ||
847 | bool is_target) | ||
831 | { | 848 | { |
832 | struct proc_dir_entry *pde = (struct proc_dir_entry *)seq->private; | 849 | static const uint8_t next_class[] = { |
833 | u_int16_t af = (unsigned long)pde->data; | 850 | [MTTG_TRAV_NFP_UNSPEC] = MTTG_TRAV_NFP_SPEC, |
851 | [MTTG_TRAV_NFP_SPEC] = MTTG_TRAV_DONE, | ||
852 | }; | ||
853 | struct nf_mttg_trav *trav = seq->private; | ||
854 | |||
855 | switch (trav->class) { | ||
856 | case MTTG_TRAV_INIT: | ||
857 | trav->class = MTTG_TRAV_NFP_UNSPEC; | ||
858 | mutex_lock(&xt[NFPROTO_UNSPEC].mutex); | ||
859 | trav->head = trav->curr = is_target ? | ||
860 | &xt[NFPROTO_UNSPEC].target : &xt[NFPROTO_UNSPEC].match; | ||
861 | break; | ||
862 | case MTTG_TRAV_NFP_UNSPEC: | ||
863 | trav->curr = trav->curr->next; | ||
864 | if (trav->curr != trav->head) | ||
865 | break; | ||
866 | mutex_unlock(&xt[NFPROTO_UNSPEC].mutex); | ||
867 | mutex_lock(&xt[trav->nfproto].mutex); | ||
868 | trav->head = trav->curr = is_target ? | ||
869 | &xt[trav->nfproto].target : &xt[trav->nfproto].match; | ||
870 | trav->class = next_class[trav->class]; | ||
871 | break; | ||
872 | case MTTG_TRAV_NFP_SPEC: | ||
873 | trav->curr = trav->curr->next; | ||
874 | if (trav->curr != trav->head) | ||
875 | break; | ||
876 | /* fallthru, _stop will unlock */ | ||
877 | default: | ||
878 | return NULL; | ||
879 | } | ||
834 | 880 | ||
835 | mutex_lock(&xt[af].mutex); | 881 | if (ppos != NULL) |
836 | return seq_list_start(&xt[af].match, *pos); | 882 | ++*ppos; |
883 | return trav; | ||
837 | } | 884 | } |
838 | 885 | ||
839 | static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *pos) | 886 | static void *xt_mttg_seq_start(struct seq_file *seq, loff_t *pos, |
887 | bool is_target) | ||
840 | { | 888 | { |
841 | struct proc_dir_entry *pde = (struct proc_dir_entry *)seq->private; | 889 | struct nf_mttg_trav *trav = seq->private; |
842 | u_int16_t af = (unsigned long)pde->data; | 890 | unsigned int j; |
843 | 891 | ||
844 | return seq_list_next(v, &xt[af].match, pos); | 892 | trav->class = MTTG_TRAV_INIT; |
893 | for (j = 0; j < *pos; ++j) | ||
894 | if (xt_mttg_seq_next(seq, NULL, NULL, is_target) == NULL) | ||
895 | return NULL; | ||
896 | return trav; | ||
845 | } | 897 | } |
846 | 898 | ||
847 | static void xt_match_seq_stop(struct seq_file *seq, void *v) | 899 | static void xt_mttg_seq_stop(struct seq_file *seq, void *v) |
848 | { | 900 | { |
849 | struct proc_dir_entry *pde = seq->private; | 901 | struct nf_mttg_trav *trav = seq->private; |
850 | u_int16_t af = (unsigned long)pde->data; | 902 | |
903 | switch (trav->class) { | ||
904 | case MTTG_TRAV_NFP_UNSPEC: | ||
905 | mutex_unlock(&xt[NFPROTO_UNSPEC].mutex); | ||
906 | break; | ||
907 | case MTTG_TRAV_NFP_SPEC: | ||
908 | mutex_unlock(&xt[trav->nfproto].mutex); | ||
909 | break; | ||
910 | } | ||
911 | } | ||
851 | 912 | ||
852 | mutex_unlock(&xt[af].mutex); | 913 | static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos) |
914 | { | ||
915 | return xt_mttg_seq_start(seq, pos, false); | ||
853 | } | 916 | } |
854 | 917 | ||
855 | static int xt_match_seq_show(struct seq_file *seq, void *v) | 918 | static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *ppos) |
856 | { | 919 | { |
857 | struct xt_match *match = list_entry(v, struct xt_match, list); | 920 | return xt_mttg_seq_next(seq, v, ppos, false); |
921 | } | ||
858 | 922 | ||
859 | if (strlen(match->name)) | 923 | static int xt_match_seq_show(struct seq_file *seq, void *v) |
860 | return seq_printf(seq, "%s\n", match->name); | 924 | { |
861 | else | 925 | const struct nf_mttg_trav *trav = seq->private; |
862 | return 0; | 926 | const struct xt_match *match; |
927 | |||
928 | switch (trav->class) { | ||
929 | case MTTG_TRAV_NFP_UNSPEC: | ||
930 | case MTTG_TRAV_NFP_SPEC: | ||
931 | if (trav->curr == trav->head) | ||
932 | return 0; | ||
933 | match = list_entry(trav->curr, struct xt_match, list); | ||
934 | return (*match->name == '\0') ? 0 : | ||
935 | seq_printf(seq, "%s\n", match->name); | ||
936 | } | ||
937 | return 0; | ||
863 | } | 938 | } |
864 | 939 | ||
865 | static const struct seq_operations xt_match_seq_ops = { | 940 | static const struct seq_operations xt_match_seq_ops = { |
866 | .start = xt_match_seq_start, | 941 | .start = xt_match_seq_start, |
867 | .next = xt_match_seq_next, | 942 | .next = xt_match_seq_next, |
868 | .stop = xt_match_seq_stop, | 943 | .stop = xt_mttg_seq_stop, |
869 | .show = xt_match_seq_show, | 944 | .show = xt_match_seq_show, |
870 | }; | 945 | }; |
871 | 946 | ||
872 | static int xt_match_open(struct inode *inode, struct file *file) | 947 | static int xt_match_open(struct inode *inode, struct file *file) |
873 | { | 948 | { |
949 | struct seq_file *seq; | ||
950 | struct nf_mttg_trav *trav; | ||
874 | int ret; | 951 | int ret; |
875 | 952 | ||
876 | ret = seq_open(file, &xt_match_seq_ops); | 953 | trav = kmalloc(sizeof(*trav), GFP_KERNEL); |
877 | if (!ret) { | 954 | if (trav == NULL) |
878 | struct seq_file *seq = file->private_data; | 955 | return -ENOMEM; |
879 | 956 | ||
880 | seq->private = PDE(inode); | 957 | ret = seq_open(file, &xt_match_seq_ops); |
958 | if (ret < 0) { | ||
959 | kfree(trav); | ||
960 | return ret; | ||
881 | } | 961 | } |
882 | return ret; | 962 | |
963 | seq = file->private_data; | ||
964 | seq->private = trav; | ||
965 | trav->nfproto = (unsigned long)PDE(inode)->data; | ||
966 | return 0; | ||
883 | } | 967 | } |
884 | 968 | ||
885 | static const struct file_operations xt_match_ops = { | 969 | static const struct file_operations xt_match_ops = { |
@@ -887,62 +971,63 @@ static const struct file_operations xt_match_ops = { | |||
887 | .open = xt_match_open, | 971 | .open = xt_match_open, |
888 | .read = seq_read, | 972 | .read = seq_read, |
889 | .llseek = seq_lseek, | 973 | .llseek = seq_lseek, |
890 | .release = seq_release, | 974 | .release = seq_release_private, |
891 | }; | 975 | }; |
892 | 976 | ||
893 | static void *xt_target_seq_start(struct seq_file *seq, loff_t *pos) | 977 | static void *xt_target_seq_start(struct seq_file *seq, loff_t *pos) |
894 | { | 978 | { |
895 | struct proc_dir_entry *pde = (struct proc_dir_entry *)seq->private; | 979 | return xt_mttg_seq_start(seq, pos, true); |
896 | u_int16_t af = (unsigned long)pde->data; | ||
897 | |||
898 | mutex_lock(&xt[af].mutex); | ||
899 | return seq_list_start(&xt[af].target, *pos); | ||
900 | } | 980 | } |
901 | 981 | ||
902 | static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *pos) | 982 | static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *ppos) |
903 | { | 983 | { |
904 | struct proc_dir_entry *pde = (struct proc_dir_entry *)seq->private; | 984 | return xt_mttg_seq_next(seq, v, ppos, true); |
905 | u_int16_t af = (unsigned long)pde->data; | ||
906 | |||
907 | return seq_list_next(v, &xt[af].target, pos); | ||
908 | } | ||
909 | |||
910 | static void xt_target_seq_stop(struct seq_file *seq, void *v) | ||
911 | { | ||
912 | struct proc_dir_entry *pde = seq->private; | ||
913 | u_int16_t af = (unsigned long)pde->data; | ||
914 | |||
915 | mutex_unlock(&xt[af].mutex); | ||
916 | } | 985 | } |
917 | 986 | ||
918 | static int xt_target_seq_show(struct seq_file *seq, void *v) | 987 | static int xt_target_seq_show(struct seq_file *seq, void *v) |
919 | { | 988 | { |
920 | struct xt_target *target = list_entry(v, struct xt_target, list); | 989 | const struct nf_mttg_trav *trav = seq->private; |
921 | 990 | const struct xt_target *target; | |
922 | if (strlen(target->name)) | 991 | |
923 | return seq_printf(seq, "%s\n", target->name); | 992 | switch (trav->class) { |
924 | else | 993 | case MTTG_TRAV_NFP_UNSPEC: |
925 | return 0; | 994 | case MTTG_TRAV_NFP_SPEC: |
995 | if (trav->curr == trav->head) | ||
996 | return 0; | ||
997 | target = list_entry(trav->curr, struct xt_target, list); | ||
998 | return (*target->name == '\0') ? 0 : | ||
999 | seq_printf(seq, "%s\n", target->name); | ||
1000 | } | ||
1001 | return 0; | ||
926 | } | 1002 | } |
927 | 1003 | ||
928 | static const struct seq_operations xt_target_seq_ops = { | 1004 | static const struct seq_operations xt_target_seq_ops = { |
929 | .start = xt_target_seq_start, | 1005 | .start = xt_target_seq_start, |
930 | .next = xt_target_seq_next, | 1006 | .next = xt_target_seq_next, |
931 | .stop = xt_target_seq_stop, | 1007 | .stop = xt_mttg_seq_stop, |
932 | .show = xt_target_seq_show, | 1008 | .show = xt_target_seq_show, |
933 | }; | 1009 | }; |
934 | 1010 | ||
935 | static int xt_target_open(struct inode *inode, struct file *file) | 1011 | static int xt_target_open(struct inode *inode, struct file *file) |
936 | { | 1012 | { |
1013 | struct seq_file *seq; | ||
1014 | struct nf_mttg_trav *trav; | ||
937 | int ret; | 1015 | int ret; |
938 | 1016 | ||
939 | ret = seq_open(file, &xt_target_seq_ops); | 1017 | trav = kmalloc(sizeof(*trav), GFP_KERNEL); |
940 | if (!ret) { | 1018 | if (trav == NULL) |
941 | struct seq_file *seq = file->private_data; | 1019 | return -ENOMEM; |
942 | 1020 | ||
943 | seq->private = PDE(inode); | 1021 | ret = seq_open(file, &xt_target_seq_ops); |
1022 | if (ret < 0) { | ||
1023 | kfree(trav); | ||
1024 | return ret; | ||
944 | } | 1025 | } |
945 | return ret; | 1026 | |
1027 | seq = file->private_data; | ||
1028 | seq->private = trav; | ||
1029 | trav->nfproto = (unsigned long)PDE(inode)->data; | ||
1030 | return 0; | ||
946 | } | 1031 | } |
947 | 1032 | ||
948 | static const struct file_operations xt_target_ops = { | 1033 | static const struct file_operations xt_target_ops = { |
@@ -950,7 +1035,7 @@ static const struct file_operations xt_target_ops = { | |||
950 | .open = xt_target_open, | 1035 | .open = xt_target_open, |
951 | .read = seq_read, | 1036 | .read = seq_read, |
952 | .llseek = seq_lseek, | 1037 | .llseek = seq_lseek, |
953 | .release = seq_release, | 1038 | .release = seq_release_private, |
954 | }; | 1039 | }; |
955 | 1040 | ||
956 | #define FORMAT_TABLES "_tables_names" | 1041 | #define FORMAT_TABLES "_tables_names" |
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index fe80b614a400..791e030ea903 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -542,7 +542,7 @@ recent_mt_proc_write(struct file *file, const char __user *input, | |||
542 | struct recent_entry *e; | 542 | struct recent_entry *e; |
543 | char buf[sizeof("+b335:1d35:1e55:dead:c0de:1715:5afe:c0de")]; | 543 | char buf[sizeof("+b335:1d35:1e55:dead:c0de:1715:5afe:c0de")]; |
544 | const char *c = buf; | 544 | const char *c = buf; |
545 | union nf_inet_addr addr; | 545 | union nf_inet_addr addr = {}; |
546 | u_int16_t family; | 546 | u_int16_t family; |
547 | bool add, succ; | 547 | bool add, succ; |
548 | 548 | ||
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 9eb895c7a2a9..3ae3cb816563 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1084,6 +1084,13 @@ out: | |||
1084 | return 0; | 1084 | return 0; |
1085 | } | 1085 | } |
1086 | 1086 | ||
1087 | /** | ||
1088 | * netlink_set_err - report error to broadcast listeners | ||
1089 | * @ssk: the kernel netlink socket, as returned by netlink_kernel_create() | ||
1090 | * @pid: the PID of a process that we want to skip (if any) | ||
1091 | * @groups: the broadcast group that will notice the error | ||
1092 | * @code: error code, must be negative (as usual in kernelspace) | ||
1093 | */ | ||
1087 | void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code) | 1094 | void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code) |
1088 | { | 1095 | { |
1089 | struct netlink_set_err_data info; | 1096 | struct netlink_set_err_data info; |
@@ -1093,7 +1100,8 @@ void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code) | |||
1093 | info.exclude_sk = ssk; | 1100 | info.exclude_sk = ssk; |
1094 | info.pid = pid; | 1101 | info.pid = pid; |
1095 | info.group = group; | 1102 | info.group = group; |
1096 | info.code = code; | 1103 | /* sk->sk_err wants a positive error value */ |
1104 | info.code = -code; | ||
1097 | 1105 | ||
1098 | read_lock(&nl_table_lock); | 1106 | read_lock(&nl_table_lock); |
1099 | 1107 | ||
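The kernel-doc and negation added above establish the calling convention for netlink_set_err(): callers pass a negative errno, and each socket in the broadcast group then sees the positive value via sk->sk_err (i.e. SO_ERROR). A short usage sketch; the caller, the kernel socket and the multicast group are hypothetical.

    #include <linux/errno.h>
    #include <linux/netlink.h>
    #include <net/sock.h>

    #define EXAMPLE_GRP 1           /* hypothetical multicast group */

    /* Hypothetical notifier: report a lost-message condition to every
     * listener of EXAMPLE_GRP.  Per the kernel-doc above, the code is a
     * negative errno; listeners read positive ENOBUFS from SO_ERROR.
     */
    static void example_report_overrun(struct sock *example_kernel_nlsk)
    {
            netlink_set_err(example_kernel_nlsk, 0 /* skip no pid */,
                            EXAMPLE_GRP, -ENOBUFS);
    }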
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 5c72a116b1a4..f8f047b61245 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -183,13 +183,6 @@ override: | |||
183 | if (R_tab == NULL) | 183 | if (R_tab == NULL) |
184 | goto failure; | 184 | goto failure; |
185 | 185 | ||
186 | if (!est && (ret == ACT_P_CREATED || | ||
187 | !gen_estimator_active(&police->tcf_bstats, | ||
188 | &police->tcf_rate_est))) { | ||
189 | err = -EINVAL; | ||
190 | goto failure; | ||
191 | } | ||
192 | |||
193 | if (parm->peakrate.rate) { | 186 | if (parm->peakrate.rate) { |
194 | P_tab = qdisc_get_rtab(&parm->peakrate, | 187 | P_tab = qdisc_get_rtab(&parm->peakrate, |
195 | tb[TCA_POLICE_PEAKRATE]); | 188 | tb[TCA_POLICE_PEAKRATE]); |
@@ -205,6 +198,12 @@ override: | |||
205 | &police->tcf_lock, est); | 198 | &police->tcf_lock, est); |
206 | if (err) | 199 | if (err) |
207 | goto failure_unlock; | 200 | goto failure_unlock; |
201 | } else if (tb[TCA_POLICE_AVRATE] && | ||
202 | (ret == ACT_P_CREATED || | ||
203 | !gen_estimator_active(&police->tcf_bstats, | ||
204 | &police->tcf_rate_est))) { | ||
205 | err = -EINVAL; | ||
206 | goto failure_unlock; | ||
208 | } | 207 | } |
209 | 208 | ||
210 | /* No failure allowed after this point */ | 209 | /* No failure allowed after this point */ |
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index f6b4fa97df70..e36e94ab4e10 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -66,11 +66,15 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid, | |||
66 | { | 66 | { |
67 | struct drr_sched *q = qdisc_priv(sch); | 67 | struct drr_sched *q = qdisc_priv(sch); |
68 | struct drr_class *cl = (struct drr_class *)*arg; | 68 | struct drr_class *cl = (struct drr_class *)*arg; |
69 | struct nlattr *opt = tca[TCA_OPTIONS]; | ||
69 | struct nlattr *tb[TCA_DRR_MAX + 1]; | 70 | struct nlattr *tb[TCA_DRR_MAX + 1]; |
70 | u32 quantum; | 71 | u32 quantum; |
71 | int err; | 72 | int err; |
72 | 73 | ||
73 | err = nla_parse_nested(tb, TCA_DRR_MAX, tca[TCA_OPTIONS], drr_policy); | 74 | if (!opt) |
75 | return -EINVAL; | ||
76 | |||
77 | err = nla_parse_nested(tb, TCA_DRR_MAX, opt, drr_policy); | ||
74 | if (err < 0) | 78 | if (err < 0) |
75 | return err; | 79 | return err; |
76 | 80 | ||
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index b78e3be69013..c4986d0f7419 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -717,15 +717,20 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev, | |||
717 | static int sctp_ctl_sock_init(void) | 717 | static int sctp_ctl_sock_init(void) |
718 | { | 718 | { |
719 | int err; | 719 | int err; |
720 | sa_family_t family; | 720 | sa_family_t family = PF_INET; |
721 | 721 | ||
722 | if (sctp_get_pf_specific(PF_INET6)) | 722 | if (sctp_get_pf_specific(PF_INET6)) |
723 | family = PF_INET6; | 723 | family = PF_INET6; |
724 | else | ||
725 | family = PF_INET; | ||
726 | 724 | ||
727 | err = inet_ctl_sock_create(&sctp_ctl_sock, family, | 725 | err = inet_ctl_sock_create(&sctp_ctl_sock, family, |
728 | SOCK_SEQPACKET, IPPROTO_SCTP, &init_net); | 726 | SOCK_SEQPACKET, IPPROTO_SCTP, &init_net); |
727 | |||
728 | /* If IPv6 socket could not be created, try the IPv4 socket */ | ||
729 | if (err < 0 && family == PF_INET6) | ||
730 | err = inet_ctl_sock_create(&sctp_ctl_sock, AF_INET, | ||
731 | SOCK_SEQPACKET, IPPROTO_SCTP, | ||
732 | &init_net); | ||
733 | |||
729 | if (err < 0) { | 734 | if (err < 0) { |
730 | printk(KERN_ERR | 735 | printk(KERN_ERR |
731 | "SCTP: Failed to create the SCTP control socket.\n"); | 736 | "SCTP: Failed to create the SCTP control socket.\n"); |
@@ -1322,9 +1327,8 @@ SCTP_STATIC __init int sctp_init(void) | |||
1322 | out: | 1327 | out: |
1323 | return status; | 1328 | return status; |
1324 | err_v6_add_protocol: | 1329 | err_v6_add_protocol: |
1325 | sctp_v6_del_protocol(); | ||
1326 | err_add_protocol: | ||
1327 | sctp_v4_del_protocol(); | 1330 | sctp_v4_del_protocol(); |
1331 | err_add_protocol: | ||
1328 | inet_ctl_sock_destroy(sctp_ctl_sock); | 1332 | inet_ctl_sock_destroy(sctp_ctl_sock); |
1329 | err_ctl_sock_init: | 1333 | err_ctl_sock_init: |
1330 | sctp_v6_protosw_exit(); | 1334 | sctp_v6_protosw_exit(); |
@@ -1335,7 +1339,6 @@ err_protosw_init: | |||
1335 | sctp_v4_pf_exit(); | 1339 | sctp_v4_pf_exit(); |
1336 | sctp_v6_pf_exit(); | 1340 | sctp_v6_pf_exit(); |
1337 | sctp_sysctl_unregister(); | 1341 | sctp_sysctl_unregister(); |
1338 | list_del(&sctp_af_inet.list); | ||
1339 | free_pages((unsigned long)sctp_port_hashtable, | 1342 | free_pages((unsigned long)sctp_port_hashtable, |
1340 | get_order(sctp_port_hashsize * | 1343 | get_order(sctp_port_hashsize * |
1341 | sizeof(struct sctp_bind_hashbucket))); | 1344 | sizeof(struct sctp_bind_hashbucket))); |
@@ -1383,7 +1386,6 @@ SCTP_STATIC __exit void sctp_exit(void) | |||
1383 | sctp_v4_pf_exit(); | 1386 | sctp_v4_pf_exit(); |
1384 | 1387 | ||
1385 | sctp_sysctl_unregister(); | 1388 | sctp_sysctl_unregister(); |
1386 | list_del(&sctp_af_inet.list); | ||
1387 | 1389 | ||
1388 | free_pages((unsigned long)sctp_assoc_hashtable, | 1390 | free_pages((unsigned long)sctp_assoc_hashtable, |
1389 | get_order(sctp_assoc_hashsize * | 1391 | get_order(sctp_assoc_hashsize * |
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index e1d6076b4f59..b5495aecab60 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -787,36 +787,48 @@ static void sctp_cmd_process_operr(sctp_cmd_seq_t *cmds, | |||
787 | struct sctp_association *asoc, | 787 | struct sctp_association *asoc, |
788 | struct sctp_chunk *chunk) | 788 | struct sctp_chunk *chunk) |
789 | { | 789 | { |
790 | struct sctp_operr_chunk *operr_chunk; | ||
791 | struct sctp_errhdr *err_hdr; | 790 | struct sctp_errhdr *err_hdr; |
791 | struct sctp_ulpevent *ev; | ||
792 | 792 | ||
793 | operr_chunk = (struct sctp_operr_chunk *)chunk->chunk_hdr; | 793 | while (chunk->chunk_end > chunk->skb->data) { |
794 | err_hdr = &operr_chunk->err_hdr; | 794 | err_hdr = (struct sctp_errhdr *)(chunk->skb->data); |
795 | 795 | ||
796 | switch (err_hdr->cause) { | 796 | ev = sctp_ulpevent_make_remote_error(asoc, chunk, 0, |
797 | case SCTP_ERROR_UNKNOWN_CHUNK: | 797 | GFP_ATOMIC); |
798 | { | 798 | if (!ev) |
799 | struct sctp_chunkhdr *unk_chunk_hdr; | 799 | return; |
800 | 800 | ||
801 | unk_chunk_hdr = (struct sctp_chunkhdr *)err_hdr->variable; | 801 | sctp_ulpq_tail_event(&asoc->ulpq, ev); |
802 | switch (unk_chunk_hdr->type) { | 802 | |
803 | /* ADDIP 4.1 A9) If the peer responds to an ASCONF with an | 803 | switch (err_hdr->cause) { |
804 | * ERROR chunk reporting that it did not recognized the ASCONF | 804 | case SCTP_ERROR_UNKNOWN_CHUNK: |
805 | * chunk type, the sender of the ASCONF MUST NOT send any | 805 | { |
806 | * further ASCONF chunks and MUST stop its T-4 timer. | 806 | sctp_chunkhdr_t *unk_chunk_hdr; |
807 | */ | 807 | |
808 | case SCTP_CID_ASCONF: | 808 | unk_chunk_hdr = (sctp_chunkhdr_t *)err_hdr->variable; |
809 | asoc->peer.asconf_capable = 0; | 809 | switch (unk_chunk_hdr->type) { |
810 | sctp_add_cmd_sf(cmds, SCTP_CMD_TIMER_STOP, | 810 | /* ADDIP 4.1 A9) If the peer responds to an ASCONF with |
811 | * an ERROR chunk reporting that it did not recognized | ||
812 | * the ASCONF chunk type, the sender of the ASCONF MUST | ||
813 | * NOT send any further ASCONF chunks and MUST stop its | ||
814 | * T-4 timer. | ||
815 | */ | ||
816 | case SCTP_CID_ASCONF: | ||
817 | if (asoc->peer.asconf_capable == 0) | ||
818 | break; | ||
819 | |||
820 | asoc->peer.asconf_capable = 0; | ||
821 | sctp_add_cmd_sf(cmds, SCTP_CMD_TIMER_STOP, | ||
811 | SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO)); | 822 | SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO)); |
823 | break; | ||
824 | default: | ||
825 | break; | ||
826 | } | ||
812 | break; | 827 | break; |
828 | } | ||
813 | default: | 829 | default: |
814 | break; | 830 | break; |
815 | } | 831 | } |
816 | break; | ||
817 | } | ||
818 | default: | ||
819 | break; | ||
820 | } | 832 | } |
821 | } | 833 | } |
822 | 834 | ||
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 3a0cd075914f..f88dfded0e3a 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -3163,7 +3163,6 @@ sctp_disposition_t sctp_sf_operr_notify(const struct sctp_endpoint *ep, | |||
3163 | sctp_cmd_seq_t *commands) | 3163 | sctp_cmd_seq_t *commands) |
3164 | { | 3164 | { |
3165 | struct sctp_chunk *chunk = arg; | 3165 | struct sctp_chunk *chunk = arg; |
3166 | struct sctp_ulpevent *ev; | ||
3167 | 3166 | ||
3168 | if (!sctp_vtag_verify(chunk, asoc)) | 3167 | if (!sctp_vtag_verify(chunk, asoc)) |
3169 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | 3168 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); |
@@ -3173,21 +3172,10 @@ sctp_disposition_t sctp_sf_operr_notify(const struct sctp_endpoint *ep, | |||
3173 | return sctp_sf_violation_chunklen(ep, asoc, type, arg, | 3172 | return sctp_sf_violation_chunklen(ep, asoc, type, arg, |
3174 | commands); | 3173 | commands); |
3175 | 3174 | ||
3176 | while (chunk->chunk_end > chunk->skb->data) { | 3175 | sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_OPERR, |
3177 | ev = sctp_ulpevent_make_remote_error(asoc, chunk, 0, | 3176 | SCTP_CHUNK(chunk)); |
3178 | GFP_ATOMIC); | ||
3179 | if (!ev) | ||
3180 | goto nomem; | ||
3181 | 3177 | ||
3182 | sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, | ||
3183 | SCTP_ULPEVENT(ev)); | ||
3184 | sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_OPERR, | ||
3185 | SCTP_CHUNK(chunk)); | ||
3186 | } | ||
3187 | return SCTP_DISPOSITION_CONSUME; | 3178 | return SCTP_DISPOSITION_CONSUME; |
3188 | |||
3189 | nomem: | ||
3190 | return SCTP_DISPOSITION_NOMEM; | ||
3191 | } | 3179 | } |
3192 | 3180 | ||
3193 | /* | 3181 | /* |
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 85c9034c59b2..bd0a16c3de5e 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -380,7 +380,8 @@ static bool is_valid_reg_rule(const struct ieee80211_reg_rule *rule) | |||
380 | 380 | ||
381 | freq_diff = freq_range->end_freq_khz - freq_range->start_freq_khz; | 381 | freq_diff = freq_range->end_freq_khz - freq_range->start_freq_khz; |
382 | 382 | ||
383 | if (freq_diff <= 0 || freq_range->max_bandwidth_khz > freq_diff) | 383 | if (freq_range->end_freq_khz <= freq_range->start_freq_khz || |
384 | freq_range->max_bandwidth_khz > freq_diff) | ||
384 | return false; | 385 | return false; |
385 | 386 | ||
386 | return true; | 387 | return true; |
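The reg.c fix addresses an unsigned-arithmetic pitfall: freq_diff is computed from what appear to be unsigned 32-bit fields, so when end_freq_khz is smaller than start_freq_khz the subtraction wraps to a huge positive number and the old "freq_diff <= 0" test only caught the exactly-equal case. Comparing the endpoints directly, as the patch does, is the robust form. A small standalone sketch of the same check:

    #include <stdbool.h>
    #include <stdint.h>

    static bool example_range_valid(uint32_t start_khz, uint32_t end_khz,
                                    uint32_t max_bw_khz)
    {
            if (end_khz <= start_khz)       /* empty or inverted range */
                    return false;

            /* now end_khz - start_khz cannot wrap around */
            return max_bw_khz <= end_khz - start_khz;
    }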