Diffstat (limited to 'net')
-rw-r--r--  net/atm/signaling.c              |  10
-rw-r--r--  net/bridge/br_if.c               |  33
-rw-r--r--  net/bridge/br_stp_if.c           |   2
-rw-r--r--  net/bridge/netfilter/Makefile    |   2
-rw-r--r--  net/bridge/netfilter/ebt_log.c   |   7
-rw-r--r--  net/core/request_sock.c          |   1
-rw-r--r--  net/dccp/ccids/ccid3.c           |  10
-rw-r--r--  net/ieee80211/ieee80211_rx.c     |  16
-rw-r--r--  net/ipv4/esp4.c                  | 185
-rw-r--r--  net/ipv4/ip_output.c             |   7
-rw-r--r--  net/ipv4/netfilter/arp_tables.c  |   2
-rw-r--r--  net/ipv4/netfilter/ip_queue.c    |   2
-rw-r--r--  net/ipv4/netfilter/ipt_LOG.c     |   7
-rw-r--r--  net/ipv4/tcp_highspeed.c         |   2
-rw-r--r--  net/ipv4/tcp_output.c            |   4
-rw-r--r--  net/ipv6/addrconf.c              |   2
-rw-r--r--  net/ipv6/ah6.c                   |   2
-rw-r--r--  net/ipv6/inet6_hashtables.c      |   6
-rw-r--r--  net/ipv6/ip6_output.c            |   7
-rw-r--r--  net/ipv6/netfilter/ip6_queue.c   |   2
-rw-r--r--  net/ipv6/netfilter/ip6t_LOG.c    |   7
-rw-r--r--  net/netfilter/nf_queue.c         |  42
-rw-r--r--  net/netfilter/nfnetlink_queue.c  |   6
-rw-r--r--  net/netlink/af_netlink.c         |   5
-rw-r--r--  net/sched/act_api.c              |   2
-rw-r--r--  net/sunrpc/clnt.c                |   3
-rw-r--r--  net/sunrpc/sched.c               |  13
-rw-r--r--  net/unix/af_unix.c               |   2
-rw-r--r--  net/xfrm/xfrm_policy.c           |   7
29 files changed, 177 insertions, 219 deletions
diff --git a/net/atm/signaling.c b/net/atm/signaling.c
index 93ad59a28ef5..31d98b57e1de 100644
--- a/net/atm/signaling.c
+++ b/net/atm/signaling.c
@@ -39,25 +39,19 @@ static DECLARE_WAIT_QUEUE_HEAD(sigd_sleep);
 static void sigd_put_skb(struct sk_buff *skb)
 {
 #ifdef WAIT_FOR_DEMON
-	static unsigned long silence;
 	DECLARE_WAITQUEUE(wait,current);
 
 	add_wait_queue(&sigd_sleep,&wait);
 	while (!sigd) {
 		set_current_state(TASK_UNINTERRUPTIBLE);
-		if (time_after(jiffies, silence) || silence == 0) {
-			printk(KERN_INFO "atmsvc: waiting for signaling demon "
-			    "...\n");
-			silence = (jiffies+30*HZ)|1;
-		}
+		DPRINTK("atmsvc: waiting for signaling demon...\n");
 		schedule();
 	}
 	current->state = TASK_RUNNING;
 	remove_wait_queue(&sigd_sleep,&wait);
 #else
 	if (!sigd) {
-		if (net_ratelimit())
-			printk(KERN_WARNING "atmsvc: no signaling demon\n");
+		DPRINTK("atmsvc: no signaling demon\n");
 		kfree_skb(skb);
 		return;
 	}
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 7fa3a5a9971f..f36b35edd60c 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -81,26 +81,27 @@ static void port_carrier_check(void *arg)
 {
 	struct net_device *dev = arg;
 	struct net_bridge_port *p;
+	struct net_bridge *br;
 
 	rtnl_lock();
 	p = dev->br_port;
 	if (!p)
 		goto done;
+	br = p->br;
 
-	if (netif_carrier_ok(p->dev)) {
-		u32 cost = port_cost(p->dev);
-
-		spin_lock_bh(&p->br->lock);
-		if (p->state == BR_STATE_DISABLED) {
-			p->path_cost = cost;
-			br_stp_enable_port(p);
+	if (netif_carrier_ok(dev))
+		p->path_cost = port_cost(dev);
+
+	if (br->dev->flags & IFF_UP) {
+		spin_lock_bh(&br->lock);
+		if (netif_carrier_ok(dev)) {
+			if (p->state == BR_STATE_DISABLED)
+				br_stp_enable_port(p);
+		} else {
+			if (p->state != BR_STATE_DISABLED)
+				br_stp_disable_port(p);
 		}
-		spin_unlock_bh(&p->br->lock);
-	} else {
-		spin_lock_bh(&p->br->lock);
-		if (p->state != BR_STATE_DISABLED)
-			br_stp_disable_port(p);
-		spin_unlock_bh(&p->br->lock);
+		spin_unlock_bh(&br->lock);
 	}
 done:
 	rtnl_unlock();
@@ -168,6 +169,7 @@ static void del_nbp(struct net_bridge_port *p)
 
 	rcu_assign_pointer(dev->br_port, NULL);
 
+	kobject_uevent(&p->kobj, KOBJ_REMOVE);
 	kobject_del(&p->kobj);
 
 	call_rcu(&p->rcu, destroy_nbp_rcu);
@@ -276,8 +278,9 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br,
 	br_init_port(p);
 	p->state = BR_STATE_DISABLED;
 	INIT_WORK(&p->carrier_check, port_carrier_check, dev);
-	kobject_init(&p->kobj);
+	br_stp_port_timer_init(p);
 
+	kobject_init(&p->kobj);
 	kobject_set_name(&p->kobj, SYSFS_BRIDGE_PORT_ATTR);
 	p->kobj.ktype = &brport_ktype;
 	p->kobj.parent = &(dev->class_dev.kobj);
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 35cf3a074087..23dea1422c9a 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -39,8 +39,6 @@ void br_init_port(struct net_bridge_port *p)
 	p->state = BR_STATE_BLOCKING;
 	p->topology_change_ack = 0;
 	p->config_pending = 0;
-
-	br_stp_port_timer_init(p);
 }
 
 /* called under bridge lock */
diff --git a/net/bridge/netfilter/Makefile b/net/bridge/netfilter/Makefile
index 8bf6d9f6e9d3..905087e0d485 100644
--- a/net/bridge/netfilter/Makefile
+++ b/net/bridge/netfilter/Makefile
@@ -29,4 +29,4 @@ obj-$(CONFIG_BRIDGE_EBT_SNAT) += ebt_snat.o
 
 # watchers
 obj-$(CONFIG_BRIDGE_EBT_LOG) += ebt_log.o
-obj-$(CONFIG_BRIDGE_EBT_LOG) += ebt_ulog.o
+obj-$(CONFIG_BRIDGE_EBT_ULOG) += ebt_ulog.o
diff --git a/net/bridge/netfilter/ebt_log.c b/net/bridge/netfilter/ebt_log.c
index 0128fbbe2328..288ff1d4ccc4 100644
--- a/net/bridge/netfilter/ebt_log.c
+++ b/net/bridge/netfilter/ebt_log.c
@@ -166,7 +166,12 @@ static void ebt_log(const struct sk_buff *skb, unsigned int hooknr,
 	li.u.log.level = info->loglevel;
 	li.u.log.logflags = info->bitmask;
 
-	nf_log_packet(PF_BRIDGE, hooknr, skb, in, out, &li, info->prefix);
+	if (info->bitmask & EBT_LOG_NFLOG)
+		nf_log_packet(PF_BRIDGE, hooknr, skb, in, out, &li,
+			      info->prefix);
+	else
+		ebt_log_packet(PF_BRIDGE, hooknr, skb, in, out, &li,
+			       info->prefix);
 }
 
 static struct ebt_watcher log =
diff --git a/net/core/request_sock.c b/net/core/request_sock.c
index b8203de5ff07..98f0fc923f91 100644
--- a/net/core/request_sock.c
+++ b/net/core/request_sock.c
@@ -52,7 +52,6 @@ int reqsk_queue_alloc(struct request_sock_queue *queue,
 	get_random_bytes(&lopt->hash_rnd, sizeof(lopt->hash_rnd));
 	rwlock_init(&queue->syn_wait_lock);
 	queue->rskq_accept_head = queue->rskq_accept_head = NULL;
-	queue->rskq_defer_accept = 0;
 	lopt->nr_table_entries = nr_table_entries;
 
 	write_lock_bh(&queue->syn_wait_lock);
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index aa68e0ab274d..35d1d347541c 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -2,7 +2,7 @@
  * net/dccp/ccids/ccid3.c
  *
  * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand.
- * Copyright (c) 2005 Ian McDonald <iam4@cs.waikato.ac.nz>
+ * Copyright (c) 2005-6 Ian McDonald <imcdnzl@gmail.com>
  *
  * An implementation of the DCCP protocol
  *
@@ -1033,9 +1033,13 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
 	p_prev = hcrx->ccid3hcrx_p;
 
 	/* Calculate loss event rate */
-	if (!list_empty(&hcrx->ccid3hcrx_li_hist))
+	if (!list_empty(&hcrx->ccid3hcrx_li_hist)) {
+		u32 i_mean = dccp_li_hist_calc_i_mean(&hcrx->ccid3hcrx_li_hist);
+
 		/* Scaling up by 1000000 as fixed decimal */
-		hcrx->ccid3hcrx_p = 1000000 / dccp_li_hist_calc_i_mean(&hcrx->ccid3hcrx_li_hist);
+		if (i_mean != 0)
+			hcrx->ccid3hcrx_p = 1000000 / i_mean;
+	}
 
 	if (hcrx->ccid3hcrx_p > p_prev) {
 		ccid3_hc_rx_send_feedback(sk);
diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c
index 960aa78cdb97..b410ab8bcf7a 100644
--- a/net/ieee80211/ieee80211_rx.c
+++ b/net/ieee80211/ieee80211_rx.c
@@ -1301,7 +1301,7 @@ static void update_network(struct ieee80211_network *dst,
 	/* dst->last_associate is not overwritten */
 }
 
-static inline int is_beacon(int fc)
+static inline int is_beacon(__le16 fc)
 {
 	return (WLAN_FC_GET_STYPE(le16_to_cpu(fc)) == IEEE80211_STYPE_BEACON);
 }
@@ -1348,9 +1348,7 @@ static void ieee80211_process_probe_response(struct ieee80211_device
 			     escape_essid(info_element->data,
 					  info_element->len),
 			     MAC_ARG(beacon->header.addr3),
-			     is_beacon(le16_to_cpu
-				       (beacon->header.
-					frame_ctl)) ?
+			     is_beacon(beacon->header.frame_ctl) ?
 			     "BEACON" : "PROBE RESPONSE");
 		return;
 	}
@@ -1400,9 +1398,7 @@ static void ieee80211_process_probe_response(struct ieee80211_device
 			     escape_essid(network.ssid,
 					  network.ssid_len),
 			     MAC_ARG(network.bssid),
-			     is_beacon(le16_to_cpu
-				       (beacon->header.
-					frame_ctl)) ?
+			     is_beacon(beacon->header.frame_ctl) ?
 			     "BEACON" : "PROBE RESPONSE");
 #endif
 		memcpy(target, &network, sizeof(*target));
@@ -1412,16 +1408,14 @@ static void ieee80211_process_probe_response(struct ieee80211_device
 			     escape_essid(target->ssid,
 					  target->ssid_len),
 			     MAC_ARG(target->bssid),
-			     is_beacon(le16_to_cpu
-				       (beacon->header.
-					frame_ctl)) ?
+			     is_beacon(beacon->header.frame_ctl) ?
 			     "BEACON" : "PROBE RESPONSE");
 		update_network(target, &network);
 	}
 
 	spin_unlock_irqrestore(&ieee->lock, flags);
 
-	if (is_beacon(le16_to_cpu(beacon->header.frame_ctl))) {
+	if (is_beacon(beacon->header.frame_ctl)) {
 		if (ieee->handle_beacon != NULL)
 			ieee->handle_beacon(dev, beacon, &network);
 	} else {
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 73bfcae8af9c..09590f356086 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -12,13 +12,6 @@
 #include <net/protocol.h>
 #include <net/udp.h>
 
-/* decapsulation data for use when post-processing */
-struct esp_decap_data {
-	xfrm_address_t saddr;
-	__u16 sport;
-	__u8 proto;
-};
-
 static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 {
 	int err;
@@ -150,6 +143,10 @@ static int esp_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struc
 	int elen = skb->len - sizeof(struct ip_esp_hdr) - esp->conf.ivlen - alen;
 	int nfrags;
 	int encap_len = 0;
+	u8 nexthdr[2];
+	struct scatterlist *sg;
+	u8 workbuf[60];
+	int padlen;
 
 	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr)))
 		goto out;
@@ -185,122 +182,82 @@ static int esp_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struc
 	if (esp->conf.ivlen)
 		crypto_cipher_set_iv(esp->conf.tfm, esph->enc_data, crypto_tfm_alg_ivsize(esp->conf.tfm));
 
-	{
-		u8 nexthdr[2];
-		struct scatterlist *sg = &esp->sgbuf[0];
-		u8 workbuf[60];
-		int padlen;
-
-		if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
-			sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC);
-			if (!sg)
-				goto out;
-		}
-		skb_to_sgvec(skb, sg, sizeof(struct ip_esp_hdr) + esp->conf.ivlen, elen);
-		crypto_cipher_decrypt(esp->conf.tfm, sg, sg, elen);
-		if (unlikely(sg != &esp->sgbuf[0]))
-			kfree(sg);
-
-		if (skb_copy_bits(skb, skb->len-alen-2, nexthdr, 2))
-			BUG();
-
-		padlen = nexthdr[0];
-		if (padlen+2 >= elen)
-			goto out;
-
-		/* ... check padding bits here. Silly. :-) */
-
-		if (x->encap && decap && decap->decap_type) {
-			struct esp_decap_data *encap_data;
-			struct udphdr *uh = (struct udphdr *) (iph+1);
-
-			encap_data = (struct esp_decap_data *) (decap->decap_data);
-			encap_data->proto = 0;
-
-			switch (decap->decap_type) {
-			case UDP_ENCAP_ESPINUDP:
-			case UDP_ENCAP_ESPINUDP_NON_IKE:
-				encap_data->proto = AF_INET;
-				encap_data->saddr.a4 = iph->saddr;
-				encap_data->sport = uh->source;
-				encap_len = (void*)esph - (void*)uh;
-				break;
-
-			default:
-				goto out;
-			}
-		}
-
-		iph->protocol = nexthdr[1];
-		pskb_trim(skb, skb->len - alen - padlen - 2);
-		memcpy(workbuf, skb->nh.raw, iph->ihl*4);
-		skb->h.raw = skb_pull(skb, sizeof(struct ip_esp_hdr) + esp->conf.ivlen);
-		skb->nh.raw += encap_len + sizeof(struct ip_esp_hdr) + esp->conf.ivlen;
-		memcpy(skb->nh.raw, workbuf, iph->ihl*4);
-		skb->nh.iph->tot_len = htons(skb->len);
-	}
-
-	return 0;
-
-out:
-	return -EINVAL;
-}
-
-static int esp_post_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struct sk_buff *skb)
-{
-
-	if (x->encap) {
-		struct xfrm_encap_tmpl *encap;
-		struct esp_decap_data *decap_data;
-
-		encap = x->encap;
-		decap_data = (struct esp_decap_data *)(decap->decap_data);
-
-		/* first, make sure that the decap type == the encap type */
-		if (encap->encap_type != decap->decap_type)
-			return -EINVAL;
-
-		switch (encap->encap_type) {
-		default:
-		case UDP_ENCAP_ESPINUDP:
-		case UDP_ENCAP_ESPINUDP_NON_IKE:
-			/*
-			 * 1) if the NAT-T peer's IP or port changed then
-			 *    advertize the change to the keying daemon.
-			 *    This is an inbound SA, so just compare
-			 *    SRC ports.
-			 */
-			if (decap_data->proto == AF_INET &&
-			    (decap_data->saddr.a4 != x->props.saddr.a4 ||
-			     decap_data->sport != encap->encap_sport)) {
-				xfrm_address_t ipaddr;
-
-				ipaddr.a4 = decap_data->saddr.a4;
-				km_new_mapping(x, &ipaddr, decap_data->sport);
-
-				/* XXX: perhaps add an extra
-				 * policy check here, to see
-				 * if we should allow or
-				 * reject a packet from a
-				 * different source
-				 * address/port.
-				 */
-			}
-
-			/*
-			 * 2) ignore UDP/TCP checksums in case
-			 *    of NAT-T in Transport Mode, or
-			 *    perform other post-processing fixes
-			 *    as per * draft-ietf-ipsec-udp-encaps-06,
-			 *    section 3.1.2
-			 */
-			if (!x->props.mode)
-				skb->ip_summed = CHECKSUM_UNNECESSARY;
-
-			break;
-		}
-	}
-	return 0;
-}
-
+	sg = &esp->sgbuf[0];
+
+	if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
+		sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC);
+		if (!sg)
+			goto out;
+	}
+	skb_to_sgvec(skb, sg, sizeof(struct ip_esp_hdr) + esp->conf.ivlen, elen);
+	crypto_cipher_decrypt(esp->conf.tfm, sg, sg, elen);
+	if (unlikely(sg != &esp->sgbuf[0]))
+		kfree(sg);
+
+	if (skb_copy_bits(skb, skb->len-alen-2, nexthdr, 2))
+		BUG();
+
+	padlen = nexthdr[0];
+	if (padlen+2 >= elen)
+		goto out;
+
+	/* ... check padding bits here. Silly. :-) */
+
+	if (x->encap) {
+		struct xfrm_encap_tmpl *encap = x->encap;
+		struct udphdr *uh;
+
+		if (encap->encap_type != decap->decap_type)
+			goto out;
+
+		uh = (struct udphdr *)(iph + 1);
+		encap_len = (void*)esph - (void*)uh;
+
+		/*
+		 * 1) if the NAT-T peer's IP or port changed then
+		 *    advertize the change to the keying daemon.
+		 *    This is an inbound SA, so just compare
+		 *    SRC ports.
+		 */
+		if (iph->saddr != x->props.saddr.a4 ||
+		    uh->source != encap->encap_sport) {
+			xfrm_address_t ipaddr;
+
+			ipaddr.a4 = iph->saddr;
+			km_new_mapping(x, &ipaddr, uh->source);
+
+			/* XXX: perhaps add an extra
+			 * policy check here, to see
+			 * if we should allow or
+			 * reject a packet from a
+			 * different source
+			 * address/port.
+			 */
+		}
+
+		/*
+		 * 2) ignore UDP/TCP checksums in case
+		 *    of NAT-T in Transport Mode, or
+		 *    perform other post-processing fixes
+		 *    as per draft-ietf-ipsec-udp-encaps-06,
+		 *    section 3.1.2
+		 */
+		if (!x->props.mode)
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+	}
+
+	iph->protocol = nexthdr[1];
+	pskb_trim(skb, skb->len - alen - padlen - 2);
+	memcpy(workbuf, skb->nh.raw, iph->ihl*4);
+	skb->h.raw = skb_pull(skb, sizeof(struct ip_esp_hdr) + esp->conf.ivlen);
+	skb->nh.raw += encap_len + sizeof(struct ip_esp_hdr) + esp->conf.ivlen;
+	memcpy(skb->nh.raw, workbuf, iph->ihl*4);
+	skb->nh.iph->tot_len = htons(skb->len);
+
+	return 0;
+
+out:
+	return -EINVAL;
+}
+
 static u32 esp4_get_max_size(struct xfrm_state *x, int mtu)
305 262
306static u32 esp4_get_max_size(struct xfrm_state *x, int mtu) 263static u32 esp4_get_max_size(struct xfrm_state *x, int mtu)
@@ -458,7 +415,6 @@ static struct xfrm_type esp_type =
 	.destructor = esp_destroy,
 	.get_max_size = esp4_get_max_size,
 	.input = esp_input,
-	.post_input = esp_post_input,
 	.output = esp_output
 };
 
@@ -470,15 +426,6 @@ static struct net_protocol esp4_protocol = {
 
 static int __init esp4_init(void)
 {
-	struct xfrm_decap_state decap;
-
-	if (sizeof(struct esp_decap_data) >
-	    sizeof(decap.decap_data)) {
-		extern void decap_data_too_small(void);
-
-		decap_data_too_small();
-	}
-
 	if (xfrm_register_type(&esp_type, AF_INET) < 0) {
 		printk(KERN_INFO "ip esp init: can't add xfrm type\n");
 		return -EAGAIN;
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 57d290d89ec2..8ee4d016740d 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -847,10 +847,11 @@ int ip_append_data(struct sock *sk,
 	if (((length > mtu) && (sk->sk_protocol == IPPROTO_UDP)) &&
 	    (rt->u.dst.dev->features & NETIF_F_UFO)) {
 
-		if(ip_ufo_append_data(sk, getfrag, from, length, hh_len,
-			fragheaderlen, transhdrlen, mtu, flags))
+		err = ip_ufo_append_data(sk, getfrag, from, length, hh_len,
+					 fragheaderlen, transhdrlen, mtu,
+					 flags);
+		if (err)
 			goto error;
-
 		return 0;
 	}
 
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index dd1048be8a01..7d7ab94a7a2e 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -771,7 +771,7 @@ static int get_entries(const struct arpt_get_entries *entries,
 	struct arpt_table *t;
 
 	t = xt_find_table_lock(NF_ARP, entries->name);
-	if (t || !IS_ERR(t)) {
+	if (t && !IS_ERR(t)) {
 		struct xt_table_info *private = t->private;
 		duprintf("t->private->number = %u\n",
 			 private->number);
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
index 36339eb39e17..08f80e2ea2aa 100644
--- a/net/ipv4/netfilter/ip_queue.c
+++ b/net/ipv4/netfilter/ip_queue.c
@@ -524,7 +524,7 @@ ipq_rcv_skb(struct sk_buff *skb)
 	write_unlock_bh(&queue_lock);
 
 	status = ipq_receive_peer(NLMSG_DATA(nlh), type,
-				  skblen - NLMSG_LENGTH(0));
+				  nlmsglen - NLMSG_LENGTH(0));
 	if (status < 0)
 		RCV_SKB_FAIL(status);
 
diff --git a/net/ipv4/netfilter/ipt_LOG.c b/net/ipv4/netfilter/ipt_LOG.c
index 6606ddb66a29..cc27545ff97f 100644
--- a/net/ipv4/netfilter/ipt_LOG.c
+++ b/net/ipv4/netfilter/ipt_LOG.c
@@ -425,7 +425,12 @@ ipt_log_target(struct sk_buff **pskb,
 	li.u.log.level = loginfo->level;
 	li.u.log.logflags = loginfo->logflags;
 
-	nf_log_packet(PF_INET, hooknum, *pskb, in, out, &li, loginfo->prefix);
+	if (loginfo->logflags & IPT_LOG_NFLOG)
+		nf_log_packet(PF_INET, hooknum, *pskb, in, out, &li,
+			      loginfo->prefix);
+	else
+		ipt_log_packet(PF_INET, hooknum, *pskb, in, out, &li,
+			       loginfo->prefix);
 
 	return IPT_CONTINUE;
 }
diff --git a/net/ipv4/tcp_highspeed.c b/net/ipv4/tcp_highspeed.c
index 63cf7e540847..e0e9d1383c7c 100644
--- a/net/ipv4/tcp_highspeed.c
+++ b/net/ipv4/tcp_highspeed.c
@@ -125,7 +125,7 @@ static void hstcp_cong_avoid(struct sock *sk, u32 adk, u32 rtt,
 		/* Update AIMD parameters */
 		if (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd) {
 			while (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd &&
-			       ca->ai < HSTCP_AIMD_MAX)
+			       ca->ai < HSTCP_AIMD_MAX - 1)
 				ca->ai++;
 		} else if (tp->snd_cwnd < hstcp_aimd_vals[ca->ai].cwnd) {
 			while (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd &&
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index a7623ead39a8..9f498a6c8895 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1036,6 +1036,10 @@ static int tcp_tso_should_defer(struct sock *sk, struct tcp_sock *tp, struct sk_
 
 	limit = min(send_win, cong_win);
 
+	/* If a full-sized TSO skb can be sent, do it. */
+	if (limit >= 65536)
+		return 0;
+
 	if (sysctl_tcp_tso_win_divisor) {
 		u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
 
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index b7d8822c1be4..19727d941962 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -822,7 +822,7 @@ struct ipv6_saddr_score {
 	int		addr_type;
 	unsigned int	attrs;
 	int		matchlen;
-	unsigned int	scope;
+	int		scope;
 	unsigned int	rule;
 };
 
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index c7932cb420a5..84963749ab77 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -279,7 +279,7 @@ static int ah6_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struc
 		goto out;
 	memcpy(tmp_hdr, skb->nh.raw, hdr_len);
 	if (ipv6_clear_mutable_options(skb->nh.ipv6h, hdr_len))
-		goto out;
+		goto free_out;
 	skb->nh.ipv6h->priority = 0;
 	skb->nh.ipv6h->flow_lbl[0] = 0;
 	skb->nh.ipv6h->flow_lbl[1] = 0;
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index 4154f3a8b6cf..bb8ffb8a14c5 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -87,7 +87,7 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row,
 				     struct inet_timewait_sock **twp)
 {
 	struct inet_hashinfo *hinfo = death_row->hashinfo;
-	const struct inet_sock *inet = inet_sk(sk);
+	struct inet_sock *inet = inet_sk(sk);
 	const struct ipv6_pinfo *np = inet6_sk(sk);
 	const struct in6_addr *daddr = &np->rcv_saddr;
 	const struct in6_addr *saddr = &np->daddr;
@@ -129,6 +129,10 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row,
 	}
 
 unique:
+	/* Must record num and sport now. Otherwise we will see
+	 * in hash table socket with a funny identity. */
+	inet->num = lport;
+	inet->sport = htons(lport);
 	BUG_TRAP(sk_unhashed(sk));
 	__sk_add_node(sk, &head->chain);
 	sk->sk_hash = hash;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index f999edd846a9..5bf70b1442ea 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -944,10 +944,11 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
 	if (((length > mtu) && (sk->sk_protocol == IPPROTO_UDP)) &&
 	    (rt->u.dst.dev->features & NETIF_F_UFO)) {
 
-		if(ip6_ufo_append_data(sk, getfrag, from, length, hh_len,
-			fragheaderlen, transhdrlen, mtu, flags))
+		err = ip6_ufo_append_data(sk, getfrag, from, length, hh_len,
+					  fragheaderlen, transhdrlen, mtu,
+					  flags);
+		if (err)
 			goto error;
-
 		return 0;
 	}
 
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
index 5027bbe6415e..af0635084df8 100644
--- a/net/ipv6/netfilter/ip6_queue.c
+++ b/net/ipv6/netfilter/ip6_queue.c
@@ -522,7 +522,7 @@ ipq_rcv_skb(struct sk_buff *skb)
 	write_unlock_bh(&queue_lock);
 
 	status = ipq_receive_peer(NLMSG_DATA(nlh), type,
-				  skblen - NLMSG_LENGTH(0));
+				  nlmsglen - NLMSG_LENGTH(0));
 	if (status < 0)
 		RCV_SKB_FAIL(status);
 
diff --git a/net/ipv6/netfilter/ip6t_LOG.c b/net/ipv6/netfilter/ip6t_LOG.c
index 77c725832dec..6b930efa9fb9 100644
--- a/net/ipv6/netfilter/ip6t_LOG.c
+++ b/net/ipv6/netfilter/ip6t_LOG.c
@@ -436,7 +436,12 @@ ip6t_log_target(struct sk_buff **pskb,
 	li.u.log.level = loginfo->level;
 	li.u.log.logflags = loginfo->logflags;
 
-	nf_log_packet(PF_INET6, hooknum, *pskb, in, out, &li, loginfo->prefix);
+	if (loginfo->logflags & IP6T_LOG_NFLOG)
+		nf_log_packet(PF_INET6, hooknum, *pskb, in, out, &li,
+			      loginfo->prefix);
+	else
+		ip6t_log_packet(PF_INET6, hooknum, *pskb, in, out, &li,
+				loginfo->prefix);
 
 	return IP6T_CONTINUE;
 }
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index d3a4f30a7f22..d9f0d7ef103b 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -6,6 +6,7 @@
 #include <linux/skbuff.h>
 #include <linux/netfilter.h>
 #include <linux/seq_file.h>
+#include <linux/rcupdate.h>
 #include <net/protocol.h>
 
 #include "nf_internals.h"
@@ -16,7 +17,7 @@
  * for queueing and must reinject all packets it receives, no matter what.
  */
 static struct nf_queue_handler *queue_handler[NPROTO];
-static struct nf_queue_rerouter *queue_rerouter;
+static struct nf_queue_rerouter *queue_rerouter[NPROTO];
 
 static DEFINE_RWLOCK(queue_handler_lock);
 
@@ -64,7 +65,7 @@ int nf_register_queue_rerouter(int pf, struct nf_queue_rerouter *rer)
 		return -EINVAL;
 
 	write_lock_bh(&queue_handler_lock);
-	memcpy(&queue_rerouter[pf], rer, sizeof(queue_rerouter[pf]));
+	rcu_assign_pointer(queue_rerouter[pf], rer);
 	write_unlock_bh(&queue_handler_lock);
 
 	return 0;
@@ -77,8 +78,9 @@ int nf_unregister_queue_rerouter(int pf)
 		return -EINVAL;
 
 	write_lock_bh(&queue_handler_lock);
-	memset(&queue_rerouter[pf], 0, sizeof(queue_rerouter[pf]));
+	rcu_assign_pointer(queue_rerouter[pf], NULL);
 	write_unlock_bh(&queue_handler_lock);
+	synchronize_rcu();
 	return 0;
 }
 EXPORT_SYMBOL_GPL(nf_unregister_queue_rerouter);
@@ -114,16 +116,17 @@ int nf_queue(struct sk_buff **skb,
 	struct net_device *physindev = NULL;
 	struct net_device *physoutdev = NULL;
 #endif
+	struct nf_queue_rerouter *rerouter;
 
 	/* QUEUE == DROP if noone is waiting, to be safe. */
 	read_lock(&queue_handler_lock);
-	if (!queue_handler[pf] || !queue_handler[pf]->outfn) {
+	if (!queue_handler[pf]) {
 		read_unlock(&queue_handler_lock);
 		kfree_skb(*skb);
 		return 1;
 	}
 
-	info = kmalloc(sizeof(*info)+queue_rerouter[pf].rer_size, GFP_ATOMIC);
+	info = kmalloc(sizeof(*info)+queue_rerouter[pf]->rer_size, GFP_ATOMIC);
 	if (!info) {
 		if (net_ratelimit())
 			printk(KERN_ERR "OOM queueing packet %p\n",
@@ -155,15 +158,13 @@ int nf_queue(struct sk_buff **skb,
 		if (physoutdev) dev_hold(physoutdev);
 	}
 #endif
-	if (queue_rerouter[pf].save)
-		queue_rerouter[pf].save(*skb, info);
+	rerouter = rcu_dereference(queue_rerouter[pf]);
+	if (rerouter)
+		rerouter->save(*skb, info);
 
 	status = queue_handler[pf]->outfn(*skb, info, queuenum,
 					  queue_handler[pf]->data);
 
-	if (status >= 0 && queue_rerouter[pf].reroute)
-		status = queue_rerouter[pf].reroute(skb, info);
-
 	read_unlock(&queue_handler_lock);
 
 	if (status < 0) {
@@ -189,6 +190,7 @@ void nf_reinject(struct sk_buff *skb, struct nf_info *info,
 {
 	struct list_head *elem = &info->elem->list;
 	struct list_head *i;
+	struct nf_queue_rerouter *rerouter;
 
 	rcu_read_lock();
 
@@ -212,7 +214,7 @@ void nf_reinject(struct sk_buff *skb, struct nf_info *info,
 			break;
 	}
 
-	if (elem == &nf_hooks[info->pf][info->hook]) {
+	if (i == &nf_hooks[info->pf][info->hook]) {
 		/* The module which sent it to userspace is gone. */
 		NFDEBUG("%s: module disappeared, dropping packet.\n",
 			__FUNCTION__);
@@ -226,6 +228,12 @@ void nf_reinject(struct sk_buff *skb, struct nf_info *info,
 	}
 
 	if (verdict == NF_ACCEPT) {
+		rerouter = rcu_dereference(queue_rerouter[info->pf]);
+		if (rerouter && rerouter->reroute(&skb, info) < 0)
+			verdict = NF_DROP;
+	}
+
+	if (verdict == NF_ACCEPT) {
 	next_hook:
 		verdict = nf_iterate(&nf_hooks[info->pf][info->hook],
 				     &skb, info->hook,
@@ -322,22 +330,12 @@ int __init netfilter_queue_init(void)
 {
 #ifdef CONFIG_PROC_FS
 	struct proc_dir_entry *pde;
-#endif
-	queue_rerouter = kmalloc(NPROTO * sizeof(struct nf_queue_rerouter),
-				 GFP_KERNEL);
-	if (!queue_rerouter)
-		return -ENOMEM;
 
-#ifdef CONFIG_PROC_FS
 	pde = create_proc_entry("nf_queue", S_IRUGO, proc_net_netfilter);
-	if (!pde) {
-		kfree(queue_rerouter);
+	if (!pde)
 		return -1;
-	}
 	pde->proc_fops = &nfqueue_file_ops;
 #endif
-	memset(queue_rerouter, 0, NPROTO * sizeof(struct nf_queue_rerouter));
-
 	return 0;
 }
 
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index cac38b2e147a..2cf5fb8322c4 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -928,8 +928,12 @@ nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
 
 	if (nfqa[NFQA_CFG_PARAMS-1]) {
 		struct nfqnl_msg_config_params *params;
-		params = NFA_DATA(nfqa[NFQA_CFG_PARAMS-1]);
 
+		if (!queue) {
+			ret = -ENOENT;
+			goto out_put;
+		}
+		params = NFA_DATA(nfqa[NFQA_CFG_PARAMS-1]);
 		nfqnl_set_mode(queue, params->copy_mode,
 			       ntohl(params->copy_range));
 	}
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 6b9772d95872..59dc7d140600 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1194,6 +1194,9 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
 		msg->msg_namelen = sizeof(*addr);
 	}
 
+	if (nlk->flags & NETLINK_RECV_PKTINFO)
+		netlink_cmsg_recv_pktinfo(msg, skb);
+
 	if (NULL == siocb->scm) {
 		memset(&scm, 0, sizeof(scm));
 		siocb->scm = &scm;
@@ -1205,8 +1208,6 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
 		netlink_dump(sk);
 
 	scm_recv(sock, msg, siocb->scm, flags);
-	if (nlk->flags & NETLINK_RECV_PKTINFO)
-		netlink_cmsg_recv_pktinfo(msg, skb);
 
 out:
 	netlink_rcv_wake(sk);
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 792ce59940ec..2ffa11c6e8de 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -707,7 +707,7 @@ static int tcf_add_notify(struct tc_action *a, u32 pid, u32 seq, int event,
 
 rtattr_failure:
 nlmsg_failure:
-	skb_trim(skb, b - skb->data);
+	kfree_skb(skb);
 	return -1;
 }
 
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index d2f0550c4ba0..d78479782045 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -113,7 +113,7 @@ rpc_new_client(struct rpc_xprt *xprt, char *servname,
 
 	err = -EINVAL;
 	if (!xprt)
-		goto out_err;
+		goto out_no_xprt;
 	if (vers >= program->nrvers || !(version = program->version[vers]))
 		goto out_err;
 
@@ -182,6 +182,7 @@ out_no_path:
 	kfree(clnt);
 out_err:
 	xprt_destroy(xprt);
+out_no_xprt:
 	return ERR_PTR(err);
 }
 
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 802d4fe0f55c..e838d042f7f5 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -515,16 +515,14 @@ struct rpc_task * rpc_wake_up_next(struct rpc_wait_queue *queue)
  */
 void rpc_wake_up(struct rpc_wait_queue *queue)
 {
-	struct rpc_task *task;
-
+	struct rpc_task *task, *next;
 	struct list_head *head;
+
 	spin_lock_bh(&queue->lock);
 	head = &queue->tasks[queue->maxpriority];
 	for (;;) {
-		while (!list_empty(head)) {
-			task = list_entry(head->next, struct rpc_task, u.tk_wait.list);
+		list_for_each_entry_safe(task, next, head, u.tk_wait.list)
 			__rpc_wake_up_task(task);
-		}
 		if (head == &queue->tasks[0])
 			break;
 		head--;
@@ -541,14 +539,13 @@ void rpc_wake_up(struct rpc_wait_queue *queue)
  */
 void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
 {
+	struct rpc_task *task, *next;
 	struct list_head *head;
-	struct rpc_task *task;
 
 	spin_lock_bh(&queue->lock);
 	head = &queue->tasks[queue->maxpriority];
 	for (;;) {
-		while (!list_empty(head)) {
-			task = list_entry(head->next, struct rpc_task, u.tk_wait.list);
+		list_for_each_entry_safe(task, next, head, u.tk_wait.list) {
 			task->tk_status = status;
 			__rpc_wake_up_task(task);
 		}
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 1b5989b1b670..c323cc6a28b0 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -547,7 +547,7 @@ static struct sock * unix_create1(struct socket *sock)
 	struct sock *sk = NULL;
 	struct unix_sock *u;
 
-	if (atomic_read(&unix_nr_socks) >= 2*files_stat.max_files)
+	if (atomic_read(&unix_nr_socks) >= 2*get_max_files())
 		goto out;
 
 	sk = sk_alloc(PF_UNIX, GFP_KERNEL, &unix_proto, 1);
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 8206025d8e46..ae62054a9fc4 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -996,13 +996,6 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
 			struct sec_decap_state *xvec = &(skb->sp->x[i]);
 			if (!xfrm_selector_match(&xvec->xvec->sel, &fl, family))
 				return 0;
-
-			/* If there is a post_input processor, try running it */
-			if (xvec->xvec->type->post_input &&
-			    (xvec->xvec->type->post_input)(xvec->xvec,
-							   &(xvec->decap),
-							   skb) != 0)
-				return 0;
 		}
 	}
 