Diffstat (limited to 'net')
-rw-r--r--  net/8021q/vlan_core.c | 2
-rw-r--r--  net/9p/trans_virtio.c | 17
-rw-r--r--  net/atm/br2684.c | 7
-rw-r--r--  net/batman-adv/soft-interface.c | 10
-rw-r--r--  net/bluetooth/af_bluetooth.c | 6
-rw-r--r--  net/bluetooth/bnep/bnep.h | 1
-rw-r--r--  net/bluetooth/bnep/core.c | 13
-rw-r--r--  net/bluetooth/cmtp/capi.c | 3
-rw-r--r--  net/bluetooth/cmtp/cmtp.h | 1
-rw-r--r--  net/bluetooth/cmtp/core.c | 20
-rw-r--r--  net/bluetooth/hci_core.c | 8
-rw-r--r--  net/bluetooth/hci_event.c | 17
-rw-r--r--  net/bluetooth/hidp/core.c | 19
-rw-r--r--  net/bluetooth/l2cap_core.c | 6
-rw-r--r--  net/bluetooth/l2cap_sock.c | 30
-rw-r--r--  net/bluetooth/rfcomm/core.c | 17
-rw-r--r--  net/bluetooth/rfcomm/sock.c | 28
-rw-r--r--  net/bluetooth/sco.c | 28
-rw-r--r--  net/bridge/br_if.c | 6
-rw-r--r--  net/bridge/br_multicast.c | 21
-rw-r--r--  net/bridge/netfilter/Kconfig | 2
-rw-r--r--  net/caif/caif_dev.c | 6
-rw-r--r--  net/can/af_can.c | 2
-rw-r--r--  net/can/bcm.c | 53
-rw-r--r--  net/ceph/ceph_common.c | 1
-rw-r--r--  net/ceph/messenger.c | 1
-rw-r--r--  net/ceph/msgpool.c | 40
-rw-r--r--  net/ceph/osd_client.c | 26
-rw-r--r--  net/ceph/osdmap.c | 84
-rw-r--r--  net/core/dev.c | 8
-rw-r--r--  net/core/fib_rules.c | 4
-rw-r--r--  net/core/flow.c | 36
-rw-r--r--  net/core/neighbour.c | 8
-rw-r--r--  net/core/netpoll.c | 4
-rw-r--r--  net/core/skbuff.c | 22
-rw-r--r--  net/ethernet/eth.c | 2
-rw-r--r--  net/ipv4/af_inet.c | 7
-rw-r--r--  net/ipv4/fib_semantics.c | 10
-rw-r--r--  net/ipv4/igmp.c | 2
-rw-r--r--  net/ipv4/netfilter/ip_queue.c | 12
-rw-r--r--  net/ipv4/proc.c | 2
-rw-r--r--  net/ipv4/tcp_input.c | 2
-rw-r--r--  net/ipv4/tcp_ipv4.c | 49
-rw-r--r--  net/ipv6/addrconf.c | 4
-rw-r--r--  net/ipv6/datagram.c | 5
-rw-r--r--  net/ipv6/ip6_flowlabel.c | 8
-rw-r--r--  net/ipv6/ip6mr.c | 8
-rw-r--r--  net/ipv6/ipv6_sockglue.c | 11
-rw-r--r--  net/ipv6/mcast.c | 2
-rw-r--r--  net/ipv6/netfilter/ip6_queue.c | 12
-rw-r--r--  net/ipv6/raw.c | 4
-rw-r--r--  net/ipv6/route.c | 37
-rw-r--r--  net/ipv6/sit.c | 3
-rw-r--r--  net/ipv6/tcp_ipv6.c | 34
-rw-r--r--  net/ipv6/udp.c | 4
-rw-r--r--  net/irda/irsysctl.c | 6
-rw-r--r--  net/irda/qos.c | 6
-rw-r--r--  net/mac80211/main.c | 2
-rw-r--r--  net/mac80211/sta_info.c | 2
-rw-r--r--  net/netfilter/nf_conntrack_pptp.c | 1
-rw-r--r--  net/netfilter/nf_conntrack_proto_tcp.c | 6
-rw-r--r--  net/netfilter/nfnetlink_queue.c | 4
-rw-r--r--  net/netfilter/xt_rateest.c | 9
-rw-r--r--  net/packet/af_packet.c | 5
-rw-r--r--  net/rds/iw_rdma.c | 13
-rw-r--r--  net/sched/act_mirred.c | 3
-rw-r--r--  net/sched/cls_rsvp.h | 27
-rw-r--r--  net/sctp/sm_sideeffect.c | 5
-rw-r--r--  net/sctp/sm_statefuns.c | 6
-rw-r--r--  net/socket.c | 10
-rw-r--r--  net/wireless/core.c | 7
-rw-r--r--  net/wireless/nl80211.c | 5
-rw-r--r--  net/wireless/reg.c | 1
-rw-r--r--  net/wireless/sme.c | 2
-rw-r--r--  net/wireless/sysfs.c | 6
-rw-r--r--  net/xfrm/xfrm_input.c | 5
-rw-r--r--  net/xfrm/xfrm_policy.c | 10
77 files changed, 535 insertions(+), 381 deletions(-)
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 5f27f8e30254..f1f2f7bb6661 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -167,6 +167,8 @@ struct sk_buff *vlan_untag(struct sk_buff *skb)
     if (unlikely(!skb))
         goto err_free;
 
+    skb_reset_network_header(skb);
+    skb_reset_transport_header(skb);
     return skb;
 
 err_free:
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 175b5135bdcf..e317583fcc73 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -263,7 +263,6 @@ p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
 {
     int in, out, inp, outp;
     struct virtio_chan *chan = client->trans;
-    char *rdata = (char *)req->rc+sizeof(struct p9_fcall);
     unsigned long flags;
     size_t pdata_off = 0;
     struct trans_rpage_info *rpinfo = NULL;
@@ -346,7 +345,8 @@ req_retry_pinned:
         * Arrange in such a way that server places header in the
         * alloced memory and payload onto the user buffer.
         */
-        inp = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, rdata, 11);
+        inp = pack_sg_list(chan->sg, out,
+                           VIRTQUEUE_NUM, req->rc->sdata, 11);
         /*
         * Running executables in the filesystem may result in
         * a read request with kernel buffer as opposed to user buffer.
@@ -366,8 +366,8 @@ req_retry_pinned:
         }
         in += inp;
     } else {
-        in = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, rdata,
-                          req->rc->capacity);
+        in = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM,
+                          req->rc->sdata, req->rc->capacity);
     }
 
     err = virtqueue_add_buf(chan->vq, chan->sg, out, in, req->tc);
@@ -592,7 +592,14 @@ static struct p9_trans_module p9_virtio_trans = {
     .close = p9_virtio_close,
     .request = p9_virtio_request,
     .cancel = p9_virtio_cancel,
-    .maxsize = PAGE_SIZE*VIRTQUEUE_NUM,
+
+    /*
+     * We leave one entry for input and one entry for response
+     * headers. We also skip one more entry to accomodate, address
+     * that are not at page boundary, that can result in an extra
+     * page in zero copy.
+     */
+    .maxsize = PAGE_SIZE * (VIRTQUEUE_NUM - 3),
     .pref = P9_TRANS_PREF_PAYLOAD_SEP,
     .def = 0,
     .owner = THIS_MODULE,
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index 52cfd0c3ea71..d07223c834af 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -558,12 +558,13 @@ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg)
     spin_unlock_irqrestore(&rq->lock, flags);
 
     skb_queue_walk_safe(&queue, skb, tmp) {
-        struct net_device *dev = skb->dev;
+        struct net_device *dev;
+
+        br2684_push(atmvcc, skb);
+        dev = skb->dev;
 
         dev->stats.rx_bytes -= skb->len;
         dev->stats.rx_packets--;
-
-        br2684_push(atmvcc, skb);
     }
 
     /* initialize netdev carrier state */
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 3e2f91ffa4e2..05dd35114a27 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -565,7 +565,7 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
     struct orig_node *orig_node = NULL;
     int data_len = skb->len, ret;
     short vid = -1;
-    bool do_bcast = false;
+    bool do_bcast;
 
     if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
         goto dropped;
@@ -598,15 +598,15 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
     tt_local_add(soft_iface, ethhdr->h_source);
 
     orig_node = transtable_search(bat_priv, ethhdr->h_dest);
-    if (is_multicast_ether_addr(ethhdr->h_dest) ||
-        (orig_node && orig_node->gw_flags)) {
+    do_bcast = is_multicast_ether_addr(ethhdr->h_dest);
+    if (do_bcast || (orig_node && orig_node->gw_flags)) {
         ret = gw_is_target(bat_priv, skb, orig_node);
 
         if (ret < 0)
             goto dropped;
 
-        if (ret == 0)
-            do_bcast = true;
+        if (ret)
+            do_bcast = false;
     }
 
     /* ethernet packet should be broadcasted */
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 8add9b499912..117e0d161780 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -494,9 +494,8 @@ int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo)
     BT_DBG("sk %p", sk);
 
     add_wait_queue(sk_sleep(sk), &wait);
+    set_current_state(TASK_INTERRUPTIBLE);
     while (sk->sk_state != state) {
-        set_current_state(TASK_INTERRUPTIBLE);
-
         if (!timeo) {
             err = -EINPROGRESS;
             break;
@@ -510,12 +509,13 @@ int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo)
         release_sock(sk);
         timeo = schedule_timeout(timeo);
         lock_sock(sk);
+        set_current_state(TASK_INTERRUPTIBLE);
 
         err = sock_error(sk);
         if (err)
             break;
     }
-    set_current_state(TASK_RUNNING);
+    __set_current_state(TASK_RUNNING);
     remove_wait_queue(sk_sleep(sk), &wait);
     return err;
 }
diff --git a/net/bluetooth/bnep/bnep.h b/net/bluetooth/bnep/bnep.h
index 8e6c06158f8e..e7ee5314f39a 100644
--- a/net/bluetooth/bnep/bnep.h
+++ b/net/bluetooth/bnep/bnep.h
@@ -155,6 +155,7 @@ struct bnep_session {
     unsigned int role;
     unsigned long state;
     unsigned long flags;
+    atomic_t terminate;
     struct task_struct *task;
 
     struct ethhdr eh;
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index ca39fcf010ce..d9edfe8bf9d6 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -484,9 +484,11 @@ static int bnep_session(void *arg)
 
     init_waitqueue_entry(&wait, current);
     add_wait_queue(sk_sleep(sk), &wait);
-    while (!kthread_should_stop()) {
+    while (1) {
         set_current_state(TASK_INTERRUPTIBLE);
 
+        if (atomic_read(&s->terminate))
+            break;
         /* RX */
         while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
             skb_orphan(skb);
@@ -504,7 +506,7 @@ static int bnep_session(void *arg)
 
         schedule();
     }
-    set_current_state(TASK_RUNNING);
+    __set_current_state(TASK_RUNNING);
     remove_wait_queue(sk_sleep(sk), &wait);
 
     /* Cleanup session */
@@ -640,9 +642,10 @@ int bnep_del_connection(struct bnep_conndel_req *req)
     down_read(&bnep_session_sem);
 
     s = __bnep_get_session(req->dst);
-    if (s)
-        kthread_stop(s->task);
-    else
+    if (s) {
+        atomic_inc(&s->terminate);
+        wake_up_process(s->task);
+    } else
         err = -ENOENT;
 
     up_read(&bnep_session_sem);
diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c
index 040f67b12978..50f0d135eb8f 100644
--- a/net/bluetooth/cmtp/capi.c
+++ b/net/bluetooth/cmtp/capi.c
@@ -386,7 +386,8 @@ static void cmtp_reset_ctr(struct capi_ctr *ctrl)
 
     capi_ctr_down(ctrl);
 
-    kthread_stop(session->task);
+    atomic_inc(&session->terminate);
+    wake_up_process(session->task);
 }
 
 static void cmtp_register_appl(struct capi_ctr *ctrl, __u16 appl, capi_register_params *rp)
diff --git a/net/bluetooth/cmtp/cmtp.h b/net/bluetooth/cmtp/cmtp.h
index db43b54ac9af..c32638dddbf9 100644
--- a/net/bluetooth/cmtp/cmtp.h
+++ b/net/bluetooth/cmtp/cmtp.h
@@ -81,6 +81,7 @@ struct cmtp_session {
 
     char name[BTNAMSIZ];
 
+    atomic_t terminate;
     struct task_struct *task;
 
     wait_queue_head_t wait;
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index c5b11af908be..521baa4fe835 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -292,9 +292,11 @@ static int cmtp_session(void *arg)
 
     init_waitqueue_entry(&wait, current);
     add_wait_queue(sk_sleep(sk), &wait);
-    while (!kthread_should_stop()) {
+    while (1) {
         set_current_state(TASK_INTERRUPTIBLE);
 
+        if (atomic_read(&session->terminate))
+            break;
         if (sk->sk_state != BT_CONNECTED)
             break;
 
@@ -307,7 +309,7 @@
 
         schedule();
     }
-    set_current_state(TASK_RUNNING);
+    __set_current_state(TASK_RUNNING);
     remove_wait_queue(sk_sleep(sk), &wait);
 
     down_write(&cmtp_session_sem);
@@ -380,16 +382,17 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
 
     if (!(session->flags & (1 << CMTP_LOOPBACK))) {
         err = cmtp_attach_device(session);
-        if (err < 0)
-            goto detach;
+        if (err < 0) {
+            atomic_inc(&session->terminate);
+            wake_up_process(session->task);
+            up_write(&cmtp_session_sem);
+            return err;
+        }
     }
 
     up_write(&cmtp_session_sem);
     return 0;
 
-detach:
-    cmtp_detach_device(session);
-
 unlink:
     __cmtp_unlink_session(session);
 
@@ -414,7 +417,8 @@ int cmtp_del_connection(struct cmtp_conndel_req *req)
         skb_queue_purge(&session->transmit);
 
         /* Stop session thread */
-        kthread_stop(session->task);
+        atomic_inc(&session->terminate);
+        wake_up_process(session->task);
     } else
         err = -ENOENT;
 
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index ec0bc3f60f2e..56943add45cc 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -1209,7 +1209,6 @@ static void hci_cmd_timer(unsigned long arg)
 
     BT_ERR("%s command tx timeout", hdev->name);
     atomic_set(&hdev->cmd_cnt, 1);
-    clear_bit(HCI_RESET, &hdev->flags);
     tasklet_schedule(&hdev->cmd_task);
 }
 
@@ -1327,7 +1326,7 @@ int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
 
     entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
     if (!entry) {
-        return -ENOMEM;
+        err = -ENOMEM;
         goto err;
     }
 
@@ -2408,7 +2407,10 @@ static void hci_cmd_task(unsigned long arg)
     if (hdev->sent_cmd) {
         atomic_dec(&hdev->cmd_cnt);
         hci_send_frame(skb);
-        mod_timer(&hdev->cmd_timer,
-                  jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
+        if (test_bit(HCI_RESET, &hdev->flags))
+            del_timer(&hdev->cmd_timer);
+        else
+            mod_timer(&hdev->cmd_timer,
+                      jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
     } else {
         skb_queue_head(&hdev->cmd_q, skb);
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index a40170e022e8..7ef4eb4435fb 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -58,8 +58,8 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
     if (status)
         return;
 
-    if (test_bit(HCI_MGMT, &hdev->flags) &&
-            test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
+    if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) &&
+            test_bit(HCI_MGMT, &hdev->flags))
         mgmt_discovering(hdev->id, 0);
 
     hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
@@ -76,8 +76,8 @@ static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
     if (status)
         return;
 
-    if (test_bit(HCI_MGMT, &hdev->flags) &&
-            test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
+    if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) &&
+            test_bit(HCI_MGMT, &hdev->flags))
         mgmt_discovering(hdev->id, 0);
 
     hci_conn_check_pending(hdev);
@@ -959,9 +959,8 @@ static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
         return;
     }
 
-    if (test_bit(HCI_MGMT, &hdev->flags) &&
-            !test_and_set_bit(HCI_INQUIRY,
-                              &hdev->flags))
+    if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags) &&
+            test_bit(HCI_MGMT, &hdev->flags))
         mgmt_discovering(hdev->id, 1);
 }
 
@@ -1340,8 +1339,8 @@ static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff
 
     BT_DBG("%s status %d", hdev->name, status);
 
-    if (test_bit(HCI_MGMT, &hdev->flags) &&
-            test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
+    if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) &&
+            test_bit(HCI_MGMT, &hdev->flags))
         mgmt_discovering(hdev->id, 0);
 
     hci_req_complete(hdev, HCI_OP_INQUIRY, status);
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 43b4c2deb7cc..fb68f344c34a 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -764,6 +764,7 @@ static int hidp_session(void *arg)
 
     up_write(&hidp_session_sem);
 
+    kfree(session->rd_data);
     kfree(session);
     return 0;
 }
@@ -841,7 +842,8 @@ static int hidp_setup_input(struct hidp_session *session,
 
     err = input_register_device(input);
     if (err < 0) {
-        hci_conn_put_device(session->conn);
+        input_free_device(input);
+        session->input = NULL;
         return err;
     }
 
@@ -1044,8 +1046,12 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
     }
 
     err = hid_add_device(session->hid);
-    if (err < 0)
-        goto err_add_device;
+    if (err < 0) {
+        atomic_inc(&session->terminate);
+        wake_up_process(session->task);
+        up_write(&hidp_session_sem);
+        return err;
+    }
 
     if (session->input) {
         hidp_send_ctrl_message(session,
@@ -1059,12 +1065,6 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
     up_write(&hidp_session_sem);
     return 0;
 
-err_add_device:
-    hid_destroy_device(session->hid);
-    session->hid = NULL;
-    atomic_inc(&session->terminate);
-    wake_up_process(session->task);
-
 unlink:
     hidp_del_timer(session);
 
@@ -1090,7 +1090,6 @@ purge:
 failed:
     up_write(&hidp_session_sem);
 
-    input_free_device(session->input);
     kfree(session);
     return err;
 }
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 3204ba8a701c..b3bdb482bbe6 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -1159,9 +1159,8 @@ int __l2cap_wait_ack(struct sock *sk)
     int timeo = HZ/5;
 
     add_wait_queue(sk_sleep(sk), &wait);
-    while ((chan->unacked_frames > 0 && chan->conn)) {
-        set_current_state(TASK_INTERRUPTIBLE);
-
+    set_current_state(TASK_INTERRUPTIBLE);
+    while (chan->unacked_frames > 0 && chan->conn) {
         if (!timeo)
             timeo = HZ/5;
 
@@ -1173,6 +1172,7 @@ int __l2cap_wait_ack(struct sock *sk)
         release_sock(sk);
         timeo = schedule_timeout(timeo);
         lock_sock(sk);
+        set_current_state(TASK_INTERRUPTIBLE);
 
         err = sock_error(sk);
         if (err)
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 5c36b3e8739c..61f1f623091d 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -235,30 +235,26 @@ static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int fl
 
     lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
 
-    if (sk->sk_state != BT_LISTEN) {
-        err = -EBADFD;
-        goto done;
-    }
-
     timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
 
     BT_DBG("sk %p timeo %ld", sk, timeo);
 
     /* Wait for an incoming connection. (wake-one). */
     add_wait_queue_exclusive(sk_sleep(sk), &wait);
-    while (!(nsk = bt_accept_dequeue(sk, newsock))) {
+    while (1) {
         set_current_state(TASK_INTERRUPTIBLE);
-        if (!timeo) {
-            err = -EAGAIN;
+
+        if (sk->sk_state != BT_LISTEN) {
+            err = -EBADFD;
             break;
         }
 
-        release_sock(sk);
-        timeo = schedule_timeout(timeo);
-        lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+        nsk = bt_accept_dequeue(sk, newsock);
+        if (nsk)
+            break;
 
-        if (sk->sk_state != BT_LISTEN) {
-            err = -EBADFD;
+        if (!timeo) {
+            err = -EAGAIN;
             break;
         }
 
@@ -266,8 +262,12 @@ static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int fl
             err = sock_intr_errno(timeo);
             break;
         }
+
+        release_sock(sk);
+        timeo = schedule_timeout(timeo);
+        lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
     }
-    set_current_state(TASK_RUNNING);
+    __set_current_state(TASK_RUNNING);
     remove_wait_queue(sk_sleep(sk), &wait);
 
     if (err)
@@ -993,7 +993,7 @@ static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int p
     INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
 
     sk->sk_destruct = l2cap_sock_destruct;
-    sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
+    sk->sk_sndtimeo = L2CAP_CONN_TIMEOUT;
 
     sock_reset_flag(sk, SOCK_ZAPPED);
 
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 5759bb7054f7..5ba3f6df665c 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -62,7 +62,6 @@ static DEFINE_MUTEX(rfcomm_mutex);
 #define rfcomm_lock()   mutex_lock(&rfcomm_mutex)
 #define rfcomm_unlock() mutex_unlock(&rfcomm_mutex)
 
-static unsigned long rfcomm_event;
 
 static LIST_HEAD(session_list);
 
@@ -120,7 +119,6 @@ static inline void rfcomm_schedule(void)
 {
     if (!rfcomm_thread)
         return;
-    set_bit(RFCOMM_SCHED_WAKEUP, &rfcomm_event);
     wake_up_process(rfcomm_thread);
 }
 
@@ -2038,19 +2036,18 @@ static int rfcomm_run(void *unused)
 
     rfcomm_add_listener(BDADDR_ANY);
 
-    while (!kthread_should_stop()) {
+    while (1) {
         set_current_state(TASK_INTERRUPTIBLE);
-        if (!test_bit(RFCOMM_SCHED_WAKEUP, &rfcomm_event)) {
-            /* No pending events. Let's sleep.
-             * Incoming connections and data will wake us up. */
-            schedule();
-        }
-        set_current_state(TASK_RUNNING);
+
+        if (kthread_should_stop())
+            break;
 
         /* Process stuff */
-        clear_bit(RFCOMM_SCHED_WAKEUP, &rfcomm_event);
         rfcomm_process_sessions();
+
+        schedule();
     }
+    __set_current_state(TASK_RUNNING);
 
     rfcomm_kill_listener();
 
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 8f01e6b11a70..482722bbc7a0 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -485,11 +485,6 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f
 
     lock_sock(sk);
 
-    if (sk->sk_state != BT_LISTEN) {
-        err = -EBADFD;
-        goto done;
-    }
-
     if (sk->sk_type != SOCK_STREAM) {
         err = -EINVAL;
         goto done;
@@ -501,19 +496,20 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f
 
     /* Wait for an incoming connection. (wake-one). */
     add_wait_queue_exclusive(sk_sleep(sk), &wait);
-    while (!(nsk = bt_accept_dequeue(sk, newsock))) {
+    while (1) {
         set_current_state(TASK_INTERRUPTIBLE);
-        if (!timeo) {
-            err = -EAGAIN;
+
+        if (sk->sk_state != BT_LISTEN) {
+            err = -EBADFD;
             break;
         }
 
-        release_sock(sk);
-        timeo = schedule_timeout(timeo);
-        lock_sock(sk);
+        nsk = bt_accept_dequeue(sk, newsock);
+        if (nsk)
+            break;
 
-        if (sk->sk_state != BT_LISTEN) {
-            err = -EBADFD;
+        if (!timeo) {
+            err = -EAGAIN;
             break;
         }
 
@@ -521,8 +517,12 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f
             err = sock_intr_errno(timeo);
             break;
         }
+
+        release_sock(sk);
+        timeo = schedule_timeout(timeo);
+        lock_sock(sk);
     }
-    set_current_state(TASK_RUNNING);
+    __set_current_state(TASK_RUNNING);
     remove_wait_queue(sk_sleep(sk), &wait);
 
     if (err)
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 4c3621b5e0aa..8270f05e3f1f 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -564,30 +564,26 @@ static int sco_sock_accept(struct socket *sock, struct socket *newsock, int flag
 
     lock_sock(sk);
 
-    if (sk->sk_state != BT_LISTEN) {
-        err = -EBADFD;
-        goto done;
-    }
-
     timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
 
     BT_DBG("sk %p timeo %ld", sk, timeo);
 
     /* Wait for an incoming connection. (wake-one). */
     add_wait_queue_exclusive(sk_sleep(sk), &wait);
-    while (!(ch = bt_accept_dequeue(sk, newsock))) {
+    while (1) {
         set_current_state(TASK_INTERRUPTIBLE);
-        if (!timeo) {
-            err = -EAGAIN;
+
+        if (sk->sk_state != BT_LISTEN) {
+            err = -EBADFD;
             break;
         }
 
-        release_sock(sk);
-        timeo = schedule_timeout(timeo);
-        lock_sock(sk);
+        ch = bt_accept_dequeue(sk, newsock);
+        if (ch)
+            break;
 
-        if (sk->sk_state != BT_LISTEN) {
-            err = -EBADFD;
+        if (!timeo) {
+            err = -EAGAIN;
             break;
         }
 
@@ -595,8 +591,12 @@ static int sco_sock_accept(struct socket *sock, struct socket *newsock, int flag
             err = sock_intr_errno(timeo);
             break;
         }
+
+        release_sock(sk);
+        timeo = schedule_timeout(timeo);
+        lock_sock(sk);
     }
-    set_current_state(TASK_RUNNING);
+    __set_current_state(TASK_RUNNING);
     remove_wait_queue(sk_sleep(sk), &wait);
 
     if (err)
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 2cdf0070419f..e73815456adf 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -231,6 +231,7 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br,
 int br_add_bridge(struct net *net, const char *name)
 {
     struct net_device *dev;
+    int res;
 
     dev = alloc_netdev(sizeof(struct net_bridge), name,
                        br_dev_setup);
@@ -240,7 +241,10 @@ int br_add_bridge(struct net *net, const char *name)
 
     dev_net_set(dev, net);
 
-    return register_netdev(dev);
+    res = register_netdev(dev);
+    if (res)
+        free_netdev(dev);
+    return res;
 }
 
 int br_del_bridge(struct net *net, const char *name)
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 2d85ca7111d3..995cbe0ac0b2 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1456,7 +1456,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
 {
     struct sk_buff *skb2;
     const struct ipv6hdr *ip6h;
-    struct icmp6hdr *icmp6h;
+    u8 icmp6_type;
     u8 nexthdr;
     unsigned len;
     int offset;
@@ -1502,9 +1502,9 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
     __skb_pull(skb2, offset);
     skb_reset_transport_header(skb2);
 
-    icmp6h = icmp6_hdr(skb2);
+    icmp6_type = icmp6_hdr(skb2)->icmp6_type;
 
-    switch (icmp6h->icmp6_type) {
+    switch (icmp6_type) {
     case ICMPV6_MGM_QUERY:
     case ICMPV6_MGM_REPORT:
     case ICMPV6_MGM_REDUCTION:
@@ -1520,16 +1520,23 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
         err = pskb_trim_rcsum(skb2, len);
         if (err)
             goto out;
+        err = -EINVAL;
     }
 
+    ip6h = ipv6_hdr(skb2);
+
     switch (skb2->ip_summed) {
     case CHECKSUM_COMPLETE:
-        if (!csum_fold(skb2->csum))
+        if (!csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, skb2->len,
+                             IPPROTO_ICMPV6, skb2->csum))
             break;
         /*FALLTHROUGH*/
     case CHECKSUM_NONE:
-        skb2->csum = 0;
-        if (skb_checksum_complete(skb2))
+        skb2->csum = ~csum_unfold(csum_ipv6_magic(&ip6h->saddr,
+                                                  &ip6h->daddr,
+                                                  skb2->len,
+                                                  IPPROTO_ICMPV6, 0));
+        if (__skb_checksum_complete(skb2))
             goto out;
     }
 
@@ -1537,7 +1544,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
 
     BR_INPUT_SKB_CB(skb)->igmp = 1;
 
-    switch (icmp6h->icmp6_type) {
+    switch (icmp6_type) {
     case ICMPV6_MGM_REPORT:
     {
         struct mld_msg *mld;
diff --git a/net/bridge/netfilter/Kconfig b/net/bridge/netfilter/Kconfig
index ba6f73eb06c6..a9aff9c7d027 100644
--- a/net/bridge/netfilter/Kconfig
+++ b/net/bridge/netfilter/Kconfig
@@ -4,7 +4,7 @@
 
 menuconfig BRIDGE_NF_EBTABLES
     tristate "Ethernet Bridge tables (ebtables) support"
-    depends on BRIDGE && BRIDGE_NETFILTER
+    depends on BRIDGE && NETFILTER
     select NETFILTER_XTABLES
     help
       ebtables is a general, extensible frame/packet identification
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index 7c2fa0a08148..7f9ac0742d19 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -93,10 +93,14 @@ static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
     caifdevs = caif_device_list(dev_net(dev));
     BUG_ON(!caifdevs);
 
-    caifd = kzalloc(sizeof(*caifd), GFP_ATOMIC);
+    caifd = kzalloc(sizeof(*caifd), GFP_KERNEL);
     if (!caifd)
         return NULL;
     caifd->pcpu_refcnt = alloc_percpu(int);
+    if (!caifd->pcpu_refcnt) {
+        kfree(caifd);
+        return NULL;
+    }
     caifd->netdev = dev;
     dev_hold(dev);
     return caifd;
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 8ce926d3b2cb..9b0c32a2690c 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -857,7 +857,7 @@ static __exit void can_exit(void)
     struct net_device *dev;
 
     if (stats_timer)
-        del_timer(&can_stattimer);
+        del_timer_sync(&can_stattimer);
 
     can_remove_proc();
 
diff --git a/net/can/bcm.c b/net/can/bcm.c
index d6c8ae5b2e6a..c84963d2dee6 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -344,6 +344,18 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
     }
 }
 
+static void bcm_tx_start_timer(struct bcm_op *op)
+{
+    if (op->kt_ival1.tv64 && op->count)
+        hrtimer_start(&op->timer,
+                      ktime_add(ktime_get(), op->kt_ival1),
+                      HRTIMER_MODE_ABS);
+    else if (op->kt_ival2.tv64)
+        hrtimer_start(&op->timer,
+                      ktime_add(ktime_get(), op->kt_ival2),
+                      HRTIMER_MODE_ABS);
+}
+
 static void bcm_tx_timeout_tsklet(unsigned long data)
 {
     struct bcm_op *op = (struct bcm_op *)data;
@@ -365,26 +377,12 @@ static void bcm_tx_timeout_tsklet(unsigned long data)
 
             bcm_send_to_user(op, &msg_head, NULL, 0);
         }
-    }
-
-    if (op->kt_ival1.tv64 && (op->count > 0)) {
-
-        /* send (next) frame */
         bcm_can_tx(op);
-        hrtimer_start(&op->timer,
-                      ktime_add(ktime_get(), op->kt_ival1),
-                      HRTIMER_MODE_ABS);
 
-    } else {
-        if (op->kt_ival2.tv64) {
+    } else if (op->kt_ival2.tv64)
+        bcm_can_tx(op);
 
-            /* send (next) frame */
-            bcm_can_tx(op);
-            hrtimer_start(&op->timer,
-                          ktime_add(ktime_get(), op->kt_ival2),
-                          HRTIMER_MODE_ABS);
-        }
-    }
+    bcm_tx_start_timer(op);
 }
 
 /*
@@ -964,23 +962,20 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
             hrtimer_cancel(&op->timer);
     }
 
-    if ((op->flags & STARTTIMER) &&
-        ((op->kt_ival1.tv64 && op->count) || op->kt_ival2.tv64)) {
-
+    if (op->flags & STARTTIMER) {
+        hrtimer_cancel(&op->timer);
         /* spec: send can_frame when starting timer */
         op->flags |= TX_ANNOUNCE;
-
-        if (op->kt_ival1.tv64 && (op->count > 0)) {
-            /* op->count-- is done in bcm_tx_timeout_handler */
-            hrtimer_start(&op->timer, op->kt_ival1,
-                          HRTIMER_MODE_REL);
-        } else
-            hrtimer_start(&op->timer, op->kt_ival2,
-                          HRTIMER_MODE_REL);
     }
 
-    if (op->flags & TX_ANNOUNCE)
+    if (op->flags & TX_ANNOUNCE) {
         bcm_can_tx(op);
+        if (op->count)
+            op->count--;
+    }
+
+    if (op->flags & STARTTIMER)
+        bcm_tx_start_timer(op);
 
     return msg_head->nframes * CFSIZ + MHSIZ;
 }
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index 132963abc266..2883ea01e680 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -232,6 +232,7 @@ void ceph_destroy_options(struct ceph_options *opt)
         ceph_crypto_key_destroy(opt->key);
         kfree(opt->key);
     }
+    kfree(opt->mon_addr);
     kfree(opt);
 }
 EXPORT_SYMBOL(ceph_destroy_options);
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index c340e2e0765b..9918e9eb276e 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -2307,6 +2307,7 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags)
     m->front_max = front_len;
     m->front_is_vmalloc = false;
     m->more_to_follow = false;
+    m->ack_stamp = 0;
     m->pool = NULL;
 
     /* middle */
diff --git a/net/ceph/msgpool.c b/net/ceph/msgpool.c
index d5f2d97ac05c..1f4cb30a42c5 100644
--- a/net/ceph/msgpool.c
+++ b/net/ceph/msgpool.c
@@ -7,27 +7,37 @@
 
 #include <linux/ceph/msgpool.h>
 
-static void *alloc_fn(gfp_t gfp_mask, void *arg)
+static void *msgpool_alloc(gfp_t gfp_mask, void *arg)
 {
     struct ceph_msgpool *pool = arg;
-    void *p;
+    struct ceph_msg *msg;
 
-    p = ceph_msg_new(0, pool->front_len, gfp_mask);
-    if (!p)
-        pr_err("msgpool %s alloc failed\n", pool->name);
-    return p;
+    msg = ceph_msg_new(0, pool->front_len, gfp_mask);
+    if (!msg) {
+        dout("msgpool_alloc %s failed\n", pool->name);
+    } else {
+        dout("msgpool_alloc %s %p\n", pool->name, msg);
+        msg->pool = pool;
+    }
+    return msg;
 }
 
-static void free_fn(void *element, void *arg)
+static void msgpool_free(void *element, void *arg)
 {
-    ceph_msg_put(element);
+    struct ceph_msgpool *pool = arg;
+    struct ceph_msg *msg = element;
+
+    dout("msgpool_release %s %p\n", pool->name, msg);
+    msg->pool = NULL;
+    ceph_msg_put(msg);
 }
 
 int ceph_msgpool_init(struct ceph_msgpool *pool,
                       int front_len, int size, bool blocking, const char *name)
 {
+    dout("msgpool %s init\n", name);
     pool->front_len = front_len;
-    pool->pool = mempool_create(size, alloc_fn, free_fn, pool);
+    pool->pool = mempool_create(size, msgpool_alloc, msgpool_free, pool);
     if (!pool->pool)
         return -ENOMEM;
     pool->name = name;
@@ -36,14 +46,17 @@ int ceph_msgpool_init(struct ceph_msgpool *pool,
 
 void ceph_msgpool_destroy(struct ceph_msgpool *pool)
 {
+    dout("msgpool %s destroy\n", pool->name);
     mempool_destroy(pool->pool);
 }
 
 struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool,
                                   int front_len)
 {
+    struct ceph_msg *msg;
+
     if (front_len > pool->front_len) {
-        pr_err("msgpool_get pool %s need front %d, pool size is %d\n",
+        dout("msgpool_get %s need front %d, pool size is %d\n",
              pool->name, front_len, pool->front_len);
         WARN_ON(1);
 
@@ -51,14 +64,19 @@ struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool,
         return ceph_msg_new(0, front_len, GFP_NOFS);
     }
 
-    return mempool_alloc(pool->pool, GFP_NOFS);
+    msg = mempool_alloc(pool->pool, GFP_NOFS);
+    dout("msgpool_get %s %p\n", pool->name, msg);
+    return msg;
 }
 
 void ceph_msgpool_put(struct ceph_msgpool *pool, struct ceph_msg *msg)
 {
+    dout("msgpool_put %s %p\n", pool->name, msg);
+
     /* reset msg front_len; user may have changed it */
     msg->front.iov_len = pool->front_len;
     msg->hdr.front_len = cpu_to_le32(pool->front_len);
 
     kref_init(&msg->kref); /* retake single ref */
+    mempool_free(msg, pool->pool);
 }
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index ce310eee708d..88ad8a2501b5 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -217,6 +217,7 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
     INIT_LIST_HEAD(&req->r_unsafe_item);
     INIT_LIST_HEAD(&req->r_linger_item);
     INIT_LIST_HEAD(&req->r_linger_osd);
+    INIT_LIST_HEAD(&req->r_req_lru_item);
     req->r_flags = flags;
 
     WARN_ON((flags & (CEPH_OSD_FLAG_READ|CEPH_OSD_FLAG_WRITE)) == 0);
@@ -685,6 +686,18 @@ static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
     put_osd(osd);
 }
 
+static void remove_all_osds(struct ceph_osd_client *osdc)
+{
+    dout("__remove_old_osds %p\n", osdc);
+    mutex_lock(&osdc->request_mutex);
+    while (!RB_EMPTY_ROOT(&osdc->osds)) {
+        struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
+                                        struct ceph_osd, o_node);
+        __remove_osd(osdc, osd);
+    }
+    mutex_unlock(&osdc->request_mutex);
+}
+
 static void __move_osd_to_lru(struct ceph_osd_client *osdc,
                               struct ceph_osd *osd)
 {
@@ -701,14 +714,14 @@ static void __remove_osd_from_lru(struct ceph_osd *osd)
         list_del_init(&osd->o_osd_lru);
 }
 
-static void remove_old_osds(struct ceph_osd_client *osdc, int remove_all)
+static void remove_old_osds(struct ceph_osd_client *osdc)
 {
     struct ceph_osd *osd, *nosd;
 
     dout("__remove_old_osds %p\n", osdc);
     mutex_lock(&osdc->request_mutex);
     list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
-        if (!remove_all && time_before(jiffies, osd->lru_ttl))
+        if (time_before(jiffies, osd->lru_ttl))
             break;
         __remove_osd(osdc, osd);
     }
@@ -751,6 +764,7 @@ static void __insert_osd(struct ceph_osd_client *osdc, struct ceph_osd *new)
     struct rb_node *parent = NULL;
     struct ceph_osd *osd = NULL;
 
+    dout("__insert_osd %p osd%d\n", new, new->o_osd);
     while (*p) {
         parent = *p;
         osd = rb_entry(parent, struct ceph_osd, o_node);
@@ -803,13 +817,10 @@ static void __register_request(struct ceph_osd_client *osdc,
 {
     req->r_tid = ++osdc->last_tid;
     req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
-    INIT_LIST_HEAD(&req->r_req_lru_item);
-
     dout("__register_request %p tid %lld\n", req, req->r_tid);
     __insert_request(osdc, req);
     ceph_osdc_get_request(req);
     osdc->num_requests++;
-
     if (osdc->num_requests == 1) {
         dout(" first request, scheduling timeout\n");
         __schedule_osd_timeout(osdc);
@@ -1144,7 +1155,7 @@ static void handle_osds_timeout(struct work_struct *work)
 
     dout("osds timeout\n");
     down_read(&osdc->map_sem);
-    remove_old_osds(osdc, 0);
+    remove_old_osds(osdc);
     up_read(&osdc->map_sem);
 
     schedule_delayed_work(&osdc->osds_timeout_work,
@@ -1862,8 +1873,7 @@ void ceph_osdc_stop(struct ceph_osd_client *osdc)
         ceph_osdmap_destroy(osdc->osdmap);
         osdc->osdmap = NULL;
     }
-    remove_old_osds(osdc, 1);
-    WARN_ON(!RB_EMPTY_ROOT(&osdc->osds));
+    remove_all_osds(osdc);
     mempool_destroy(osdc->req_mempool);
     ceph_msgpool_destroy(&osdc->msgpool_op);
     ceph_msgpool_destroy(&osdc->msgpool_op_reply);
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index e97c3588c3ec..fd863fe76934 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -339,6 +339,7 @@ static int __insert_pg_mapping(struct ceph_pg_mapping *new,
     struct ceph_pg_mapping *pg = NULL;
     int c;
 
+    dout("__insert_pg_mapping %llx %p\n", *(u64 *)&new->pgid, new);
     while (*p) {
         parent = *p;
         pg = rb_entry(parent, struct ceph_pg_mapping, node);
@@ -366,16 +367,33 @@ static struct ceph_pg_mapping *__lookup_pg_mapping(struct rb_root *root,
     while (n) {
         pg = rb_entry(n, struct ceph_pg_mapping, node);
         c = pgid_cmp(pgid, pg->pgid);
-        if (c < 0)
+        if (c < 0) {
             n = n->rb_left;
-        else if (c > 0)
+        } else if (c > 0) {
             n = n->rb_right;
-        else
+        } else {
+            dout("__lookup_pg_mapping %llx got %p\n",
+                 *(u64 *)&pgid, pg);
             return pg;
+        }
     }
     return NULL;
 }
 
+static int __remove_pg_mapping(struct rb_root *root, struct ceph_pg pgid)
+{
+    struct ceph_pg_mapping *pg = __lookup_pg_mapping(root, pgid);
+
+    if (pg) {
+        dout("__remove_pg_mapping %llx %p\n", *(u64 *)&pgid, pg);
+        rb_erase(&pg->node, root);
+        kfree(pg);
+        return 0;
+    }
+    dout("__remove_pg_mapping %llx dne\n", *(u64 *)&pgid);
+    return -ENOENT;
+}
+
 /*
  * rbtree of pg pool info
  */
@@ -711,7 +729,6 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
     void *start = *p;
     int err = -EINVAL;
     u16 version;
-    struct rb_node *rbp;
 
     ceph_decode_16_safe(p, end, version, bad);
     if (version > CEPH_OSDMAP_INC_VERSION) {
@@ -861,7 +878,6 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
     }
 
     /* new_pg_temp */
-    rbp = rb_first(&map->pg_temp);
     ceph_decode_32_safe(p, end, len, bad);
     while (len--) {
         struct ceph_pg_mapping *pg;
@@ -872,18 +888,6 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
         ceph_decode_copy(p, &pgid, sizeof(pgid));
         pglen = ceph_decode_32(p);
 
-        /* remove any? */
-        while (rbp && pgid_cmp(rb_entry(rbp, struct ceph_pg_mapping,
-                                        node)->pgid, pgid) <= 0) {
-            struct ceph_pg_mapping *cur =
-                rb_entry(rbp, struct ceph_pg_mapping, node);
-
-            rbp = rb_next(rbp);
-            dout(" removed pg_temp %llx\n", *(u64 *)&cur->pgid);
-            rb_erase(&cur->node, &map->pg_temp);
-            kfree(cur);
-        }
-
         if (pglen) {
             /* insert */
             ceph_decode_need(p, end, pglen*sizeof(u32), bad);
@@ -903,17 +907,11 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
             }
             dout(" added pg_temp %llx len %d\n", *(u64 *)&pgid,
                  pglen);
+        } else {
+            /* remove */
+            __remove_pg_mapping(&map->pg_temp, pgid);
         }
     }
-    while (rbp) {
-        struct ceph_pg_mapping *cur =
-            rb_entry(rbp, struct ceph_pg_mapping, node);
-
-        rbp = rb_next(rbp);
-        dout(" removed pg_temp %llx\n", *(u64 *)&cur->pgid);
-        rb_erase(&cur->node, &map->pg_temp);
-        kfree(cur);
-    }
 
     /* ignore the rest */
     *p = end;
@@ -1046,10 +1044,25 @@ static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
     struct ceph_pg_mapping *pg;
     struct ceph_pg_pool_info *pool;
     int ruleno;
-    unsigned poolid, ps, pps;
+    unsigned poolid, ps, pps, t;
    int preferred;
 
+    poolid = le32_to_cpu(pgid.pool);
+    ps = le16_to_cpu(pgid.ps);
+    preferred = (s16)le16_to_cpu(pgid.preferred);
+
+    pool = __lookup_pg_pool(&osdmap->pg_pools, poolid);
+    if (!pool)
+        return NULL;
+
     /* pg_temp? */
+    if (preferred >= 0)
+        t = ceph_stable_mod(ps, le32_to_cpu(pool->v.lpg_num),
+                            pool->lpgp_num_mask);
+    else
+        t = ceph_stable_mod(ps, le32_to_cpu(pool->v.pg_num),
+                            pool->pgp_num_mask);
+    pgid.ps = cpu_to_le16(t);
     pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid);
     if (pg) {
         *num = pg->len;
@@ -1057,18 +1070,6 @@ static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
     }
 
     /* crush */
-    poolid = le32_to_cpu(pgid.pool);
-    ps = le16_to_cpu(pgid.ps);
-    preferred = (s16)le16_to_cpu(pgid.preferred);
-
-    /* don't forcefeed bad device ids to crush */
-    if (preferred >= osdmap->max_osd ||
-        preferred >= osdmap->crush->max_devices)
-        preferred = -1;
-
-    pool = __lookup_pg_pool(&osdmap->pg_pools, poolid);
-    if (!pool)
-        return NULL;
     ruleno = crush_find_rule(osdmap->crush, pool->v.crush_ruleset,
                              pool->v.type, pool->v.size);
     if (ruleno < 0) {
@@ -1078,6 +1079,11 @@ static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
         return NULL;
     }
 
+    /* don't forcefeed bad device ids to crush */
+    if (preferred >= osdmap->max_osd ||
+        preferred >= osdmap->crush->max_devices)
+        preferred = -1;
+
     if (preferred >= 0)
         pps = ceph_stable_mod(ps,
                               le32_to_cpu(pool->v.lpgp_num),
diff --git a/net/core/dev.c b/net/core/dev.c
index 17d67b579beb..b10ff0a71855 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1515,6 +1515,14 @@ static inline bool is_skb_forwardable(struct net_device *dev,
  */
 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
 {
+    if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
+        if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
+            atomic_long_inc(&dev->rx_dropped);
+            kfree_skb(skb);
+            return NET_RX_DROP;
+        }
+    }
+
     skb_orphan(skb);
     nf_reset(skb);
 
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index e7ab0c0285b5..3231b468bb72 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -384,8 +384,8 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
         */
        list_for_each_entry(r, &ops->rules_list, list) {
            if (r->action == FR_ACT_GOTO &&
-               r->target == rule->pref) {
-               BUG_ON(rtnl_dereference(r->ctarget) != NULL);
+               r->target == rule->pref &&
+               rtnl_dereference(r->ctarget) == NULL) {
                rcu_assign_pointer(r->ctarget, rule);
                if (--ops->unresolved_rules == 0)
                    break;
diff --git a/net/core/flow.c b/net/core/flow.c
index bf32c33cad3b..555a456efb07 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -30,6 +30,7 @@ struct flow_cache_entry {
         struct hlist_node hlist;
         struct list_head gc_list;
     } u;
+    struct net *net;
     u16 family;
     u8 dir;
     u32 genid;
@@ -172,29 +173,26 @@ static void flow_new_hash_rnd(struct flow_cache *fc,
 
 static u32 flow_hash_code(struct flow_cache *fc,
                           struct flow_cache_percpu *fcp,
-                          const struct flowi *key)
+                          const struct flowi *key,
+                          size_t keysize)
 {
     const u32 *k = (const u32 *) key;
+    const u32 length = keysize * sizeof(flow_compare_t) / sizeof(u32);
 
-    return jhash2(k, (sizeof(*key) / sizeof(u32)), fcp->hash_rnd)
+    return jhash2(k, length, fcp->hash_rnd)
         & (flow_cache_hash_size(fc) - 1);
 }
 
-typedef unsigned long flow_compare_t;
-
 /* I hear what you're saying, use memcmp. But memcmp cannot make
- * important assumptions that we can here, such as alignment and
- * constant size.
+ * important assumptions that we can here, such as alignment.
  */
-static int flow_key_compare(const struct flowi *key1, const struct flowi *key2)
+static int flow_key_compare(const struct flowi *key1, const struct flowi *key2,
+                            size_t keysize)
 {
     const flow_compare_t *k1, *k1_lim, *k2;
-    const int n_elem = sizeof(struct flowi) / sizeof(flow_compare_t);
-
-    BUILD_BUG_ON(sizeof(struct flowi) % sizeof(flow_compare_t));
 
     k1 = (const flow_compare_t *) key1;
-    k1_lim = k1 + n_elem;
+    k1_lim = k1 + keysize;
 
     k2 = (const flow_compare_t *) key2;
 
@@ -215,6 +213,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
     struct flow_cache_entry *fle, *tfle;
     struct hlist_node *entry;
     struct flow_cache_object *flo;
+    size_t keysize;
     unsigned int hash;
 
     local_bh_disable();
@@ -222,6 +221,11 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
 
     fle = NULL;
     flo = NULL;
+
+    keysize = flow_key_size(family);
+    if (!keysize)
+        goto nocache;
+
     /* Packet really early in init? Making flow_cache_init a
      * pre-smp initcall would solve this. --RR */
     if (!fcp->hash_table)
@@ -230,11 +234,12 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
     if (fcp->hash_rnd_recalc)
         flow_new_hash_rnd(fc, fcp);
 
-    hash = flow_hash_code(fc, fcp, key);
+    hash = flow_hash_code(fc, fcp, key, keysize);
     hlist_for_each_entry(tfle, entry, &fcp->hash_table[hash], u.hlist) {
-        if (tfle->family == family &&
+        if (tfle->net == net &&
+            tfle->family == family &&
             tfle->dir == dir &&
-            flow_key_compare(key, &tfle->key) == 0) {
+            flow_key_compare(key, &tfle->key, keysize) == 0) {
             fle = tfle;
             break;
         }
@@ -246,9 +251,10 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
 
         fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
         if (fle) {
+            fle->net = net;
             fle->family = family;
             fle->dir = dir;
-            memcpy(&fle->key, key, sizeof(*key));
+            memcpy(&fle->key, key, keysize * sizeof(flow_compare_t));
             fle->object = NULL;
             hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
             fcp->hash_count++;
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 8fab9b0bb203..1334d7e56f02 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1319,11 +1319,15 @@ static void neigh_proxy_process(unsigned long arg)
1319 1319
1320 if (tdif <= 0) { 1320 if (tdif <= 0) {
1321 struct net_device *dev = skb->dev; 1321 struct net_device *dev = skb->dev;
1322
1322 __skb_unlink(skb, &tbl->proxy_queue); 1323 __skb_unlink(skb, &tbl->proxy_queue);
1323 if (tbl->proxy_redo && netif_running(dev)) 1324 if (tbl->proxy_redo && netif_running(dev)) {
1325 rcu_read_lock();
1324 tbl->proxy_redo(skb); 1326 tbl->proxy_redo(skb);
1325 else 1327 rcu_read_unlock();
1328 } else {
1326 kfree_skb(skb); 1329 kfree_skb(skb);
1330 }
1327 1331
1328 dev_put(dev); 1332 dev_put(dev);
1329 } else if (!sched_next || tdif < sched_next) 1333 } else if (!sched_next || tdif < sched_next)
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index adf84dd8c7b5..52622517e0d8 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -558,13 +558,14 @@ int __netpoll_rx(struct sk_buff *skb)
558 if (skb_shared(skb)) 558 if (skb_shared(skb))
559 goto out; 559 goto out;
560 560
561 iph = (struct iphdr *)skb->data;
562 if (!pskb_may_pull(skb, sizeof(struct iphdr))) 561 if (!pskb_may_pull(skb, sizeof(struct iphdr)))
563 goto out; 562 goto out;
563 iph = (struct iphdr *)skb->data;
564 if (iph->ihl < 5 || iph->version != 4) 564 if (iph->ihl < 5 || iph->version != 4)
565 goto out; 565 goto out;
566 if (!pskb_may_pull(skb, iph->ihl*4)) 566 if (!pskb_may_pull(skb, iph->ihl*4))
567 goto out; 567 goto out;
568 iph = (struct iphdr *)skb->data;
568 if (ip_fast_csum((u8 *)iph, iph->ihl) != 0) 569 if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
569 goto out; 570 goto out;
570 571
@@ -579,6 +580,7 @@ int __netpoll_rx(struct sk_buff *skb)
579 if (pskb_trim_rcsum(skb, len)) 580 if (pskb_trim_rcsum(skb, len))
580 goto out; 581 goto out;
581 582
583 iph = (struct iphdr *)skb->data;
582 if (iph->protocol != IPPROTO_UDP) 584 if (iph->protocol != IPPROTO_UDP)
583 goto out; 585 goto out;
584 586
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 27002dffe7ed..387703f56fce 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -611,8 +611,21 @@ struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
611} 611}
612EXPORT_SYMBOL_GPL(skb_morph); 612EXPORT_SYMBOL_GPL(skb_morph);
613 613
614/* skb frags copy userspace buffers to kernel */ 614/* skb_copy_ubufs - copy userspace skb frags buffers to kernel
615static int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) 615 * @skb: the skb to modify
616 * @gfp_mask: allocation priority
617 *
618 * This must be called on SKBTX_DEV_ZEROCOPY skb.
619 * It will copy all frags into kernel and drop the reference
620 * to userspace pages.
621 *
622 * If this function is called from an interrupt gfp_mask() must be
623 * %GFP_ATOMIC.
624 *
625 * Returns 0 on success or a negative error code on failure
626 * to allocate kernel memory to copy to.
627 */
628int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
616{ 629{
617 int i; 630 int i;
618 int num_frags = skb_shinfo(skb)->nr_frags; 631 int num_frags = skb_shinfo(skb)->nr_frags;
@@ -652,6 +665,8 @@ static int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
652 skb_shinfo(skb)->frags[i - 1].page = head; 665 skb_shinfo(skb)->frags[i - 1].page = head;
653 head = (struct page *)head->private; 666 head = (struct page *)head->private;
654 } 667 }
668
669 skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
655 return 0; 670 return 0;
656} 671}
657 672
@@ -677,7 +692,6 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
677 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) { 692 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
678 if (skb_copy_ubufs(skb, gfp_mask)) 693 if (skb_copy_ubufs(skb, gfp_mask))
679 return NULL; 694 return NULL;
680 skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
681 } 695 }
682 696
683 n = skb + 1; 697 n = skb + 1;
@@ -803,7 +817,6 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
803 n = NULL; 817 n = NULL;
804 goto out; 818 goto out;
805 } 819 }
806 skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
807 } 820 }
808 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 821 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
809 skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i]; 822 skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
@@ -896,7 +909,6 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
896 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) { 909 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
897 if (skb_copy_ubufs(skb, gfp_mask)) 910 if (skb_copy_ubufs(skb, gfp_mask))
898 goto nofrags; 911 goto nofrags;
899 skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
900 } 912 }
901 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 913 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
902 get_page(skb_shinfo(skb)->frags[i].page); 914 get_page(skb_shinfo(skb)->frags[i].page);
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 27997d35ebd3..a2468363978e 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -340,7 +340,7 @@ void ether_setup(struct net_device *dev)
340 dev->addr_len = ETH_ALEN; 340 dev->addr_len = ETH_ALEN;
341 dev->tx_queue_len = 1000; /* Ethernet wants good queues */ 341 dev->tx_queue_len = 1000; /* Ethernet wants good queues */
342 dev->flags = IFF_BROADCAST|IFF_MULTICAST; 342 dev->flags = IFF_BROADCAST|IFF_MULTICAST;
343 dev->priv_flags = IFF_TX_SKB_SHARING; 343 dev->priv_flags |= IFF_TX_SKB_SHARING;
344 344
345 memset(dev->broadcast, 0xFF, ETH_ALEN); 345 memset(dev->broadcast, 0xFF, ETH_ALEN);
346 346
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 1b745d412cf6..dd2b9478ddd1 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -466,8 +466,13 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
466 goto out; 466 goto out;
467 467
468 if (addr->sin_family != AF_INET) { 468 if (addr->sin_family != AF_INET) {
469 /* Compatibility games : accept AF_UNSPEC (mapped to AF_INET)
470 * only if s_addr is INADDR_ANY.
471 */
469 err = -EAFNOSUPPORT; 472 err = -EAFNOSUPPORT;
470 goto out; 473 if (addr->sin_family != AF_UNSPEC ||
474 addr->sin_addr.s_addr != htonl(INADDR_ANY))
475 goto out;
471 } 476 }
472 477
473 chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr); 478 chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr);
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 33e2c35b74b7..80106d89d548 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -142,6 +142,14 @@ const struct fib_prop fib_props[RTN_MAX + 1] = {
142}; 142};
143 143
144/* Release a nexthop info record */ 144/* Release a nexthop info record */
145static void free_fib_info_rcu(struct rcu_head *head)
146{
147 struct fib_info *fi = container_of(head, struct fib_info, rcu);
148
149 if (fi->fib_metrics != (u32 *) dst_default_metrics)
150 kfree(fi->fib_metrics);
151 kfree(fi);
152}
145 153
146void free_fib_info(struct fib_info *fi) 154void free_fib_info(struct fib_info *fi)
147{ 155{
@@ -156,7 +164,7 @@ void free_fib_info(struct fib_info *fi)
156 } endfor_nexthops(fi); 164 } endfor_nexthops(fi);
157 fib_info_cnt--; 165 fib_info_cnt--;
158 release_net(fi->fib_net); 166 release_net(fi->fib_net);
159 kfree_rcu(fi, rcu); 167 call_rcu(&fi->rcu, free_fib_info_rcu);
160} 168}
161 169
162void fib_release_info(struct fib_info *fi) 170void fib_release_info(struct fib_info *fi)
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 283c0a26e03f..d577199eabd5 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -767,7 +767,7 @@ static int igmp_xmarksources(struct ip_mc_list *pmc, int nsrcs, __be32 *srcs)
767 break; 767 break;
768 for (i=0; i<nsrcs; i++) { 768 for (i=0; i<nsrcs; i++) {
769 /* skip inactive filters */ 769 /* skip inactive filters */
770 if (pmc->sfcount[MCAST_INCLUDE] || 770 if (psf->sf_count[MCAST_INCLUDE] ||
771 pmc->sfcount[MCAST_EXCLUDE] != 771 pmc->sfcount[MCAST_EXCLUDE] !=
772 psf->sf_count[MCAST_EXCLUDE]) 772 psf->sf_count[MCAST_EXCLUDE])
773 continue; 773 continue;
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
index 5c9b9d963918..e59aabd0eae4 100644
--- a/net/ipv4/netfilter/ip_queue.c
+++ b/net/ipv4/netfilter/ip_queue.c
@@ -218,6 +218,7 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
218 return skb; 218 return skb;
219 219
220nlmsg_failure: 220nlmsg_failure:
221 kfree_skb(skb);
221 *errp = -EINVAL; 222 *errp = -EINVAL;
222 printk(KERN_ERR "ip_queue: error creating packet message\n"); 223 printk(KERN_ERR "ip_queue: error creating packet message\n");
223 return NULL; 224 return NULL;
@@ -313,7 +314,7 @@ ipq_set_verdict(struct ipq_verdict_msg *vmsg, unsigned int len)
313{ 314{
314 struct nf_queue_entry *entry; 315 struct nf_queue_entry *entry;
315 316
316 if (vmsg->value > NF_MAX_VERDICT) 317 if (vmsg->value > NF_MAX_VERDICT || vmsg->value == NF_STOLEN)
317 return -EINVAL; 318 return -EINVAL;
318 319
319 entry = ipq_find_dequeue_entry(vmsg->id); 320 entry = ipq_find_dequeue_entry(vmsg->id);
@@ -358,12 +359,9 @@ ipq_receive_peer(struct ipq_peer_msg *pmsg,
358 break; 359 break;
359 360
360 case IPQM_VERDICT: 361 case IPQM_VERDICT:
361 if (pmsg->msg.verdict.value > NF_MAX_VERDICT) 362 status = ipq_set_verdict(&pmsg->msg.verdict,
362 status = -EINVAL; 363 len - sizeof(*pmsg));
363 else 364 break;
364 status = ipq_set_verdict(&pmsg->msg.verdict,
365 len - sizeof(*pmsg));
366 break;
367 default: 365 default:
368 status = -EINVAL; 366 status = -EINVAL;
369 } 367 }
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index b14ec7d03b6e..4bfad5da94f4 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -254,6 +254,8 @@ static const struct snmp_mib snmp4_net_list[] = {
254 SNMP_MIB_ITEM("TCPDeferAcceptDrop", LINUX_MIB_TCPDEFERACCEPTDROP), 254 SNMP_MIB_ITEM("TCPDeferAcceptDrop", LINUX_MIB_TCPDEFERACCEPTDROP),
255 SNMP_MIB_ITEM("IPReversePathFilter", LINUX_MIB_IPRPFILTER), 255 SNMP_MIB_ITEM("IPReversePathFilter", LINUX_MIB_IPRPFILTER),
256 SNMP_MIB_ITEM("TCPTimeWaitOverflow", LINUX_MIB_TCPTIMEWAITOVERFLOW), 256 SNMP_MIB_ITEM("TCPTimeWaitOverflow", LINUX_MIB_TCPTIMEWAITOVERFLOW),
257 SNMP_MIB_ITEM("TCPReqQFullDoCookies", LINUX_MIB_TCPREQQFULLDOCOOKIES),
258 SNMP_MIB_ITEM("TCPReqQFullDrop", LINUX_MIB_TCPREQQFULLDROP),
257 SNMP_MIB_SENTINEL 259 SNMP_MIB_SENTINEL
258}; 260};
259 261
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index ea0d2183df4b..21fab3edb92c 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1124,7 +1124,7 @@ static int tcp_is_sackblock_valid(struct tcp_sock *tp, int is_dsack,
1124 return 0; 1124 return 0;
1125 1125
1126 /* ...Then it's D-SACK, and must reside below snd_una completely */ 1126 /* ...Then it's D-SACK, and must reside below snd_una completely */
1127 if (!after(end_seq, tp->snd_una)) 1127 if (after(end_seq, tp->snd_una))
1128 return 0; 1128 return 0;
1129 1129
1130 if (!before(start_seq, tp->undo_marker)) 1130 if (!before(start_seq, tp->undo_marker))
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 1c12b8ec849d..c34f01513945 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -808,20 +808,38 @@ static void tcp_v4_reqsk_destructor(struct request_sock *req)
808 kfree(inet_rsk(req)->opt); 808 kfree(inet_rsk(req)->opt);
809} 809}
810 810
811static void syn_flood_warning(const struct sk_buff *skb) 811/*
812 * Return 1 if a syncookie should be sent
813 */
814int tcp_syn_flood_action(struct sock *sk,
815 const struct sk_buff *skb,
816 const char *proto)
812{ 817{
813 const char *msg; 818 const char *msg = "Dropping request";
819 int want_cookie = 0;
820 struct listen_sock *lopt;
821
822
814 823
815#ifdef CONFIG_SYN_COOKIES 824#ifdef CONFIG_SYN_COOKIES
816 if (sysctl_tcp_syncookies) 825 if (sysctl_tcp_syncookies) {
817 msg = "Sending cookies"; 826 msg = "Sending cookies";
818 else 827 want_cookie = 1;
828 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
829 } else
819#endif 830#endif
820 msg = "Dropping request"; 831 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
821 832
822 pr_info("TCP: Possible SYN flooding on port %d. %s.\n", 833 lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
823 ntohs(tcp_hdr(skb)->dest), msg); 834 if (!lopt->synflood_warned) {
835 lopt->synflood_warned = 1;
836 pr_info("%s: Possible SYN flooding on port %d. %s. "
837 " Check SNMP counters.\n",
838 proto, ntohs(tcp_hdr(skb)->dest), msg);
839 }
840 return want_cookie;
824} 841}
842EXPORT_SYMBOL(tcp_syn_flood_action);
825 843
826/* 844/*
827 * Save and compile IPv4 options into the request_sock if needed. 845 * Save and compile IPv4 options into the request_sock if needed.
@@ -1235,11 +1253,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1235 __be32 saddr = ip_hdr(skb)->saddr; 1253 __be32 saddr = ip_hdr(skb)->saddr;
1236 __be32 daddr = ip_hdr(skb)->daddr; 1254 __be32 daddr = ip_hdr(skb)->daddr;
1237 __u32 isn = TCP_SKB_CB(skb)->when; 1255 __u32 isn = TCP_SKB_CB(skb)->when;
1238#ifdef CONFIG_SYN_COOKIES
1239 int want_cookie = 0; 1256 int want_cookie = 0;
1240#else
1241#define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
1242#endif
1243 1257
1244 /* Never answer to SYNs send to broadcast or multicast */ 1258 /* Never answer to SYNs send to broadcast or multicast */
1245 if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) 1259 if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
@@ -1250,14 +1264,9 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1250 * evidently real one. 1264 * evidently real one.
1251 */ 1265 */
1252 if (inet_csk_reqsk_queue_is_full(sk) && !isn) { 1266 if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
1253 if (net_ratelimit()) 1267 want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
1254 syn_flood_warning(skb); 1268 if (!want_cookie)
1255#ifdef CONFIG_SYN_COOKIES 1269 goto drop;
1256 if (sysctl_tcp_syncookies) {
1257 want_cookie = 1;
1258 } else
1259#endif
1260 goto drop;
1261 } 1270 }
1262 1271
1263 /* Accept backlog is full. If we have already queued enough 1272 /* Accept backlog is full. If we have already queued enough
@@ -1303,9 +1312,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1303 while (l-- > 0) 1312 while (l-- > 0)
1304 *c++ ^= *hash_location++; 1313 *c++ ^= *hash_location++;
1305 1314
1306#ifdef CONFIG_SYN_COOKIES
1307 want_cookie = 0; /* not our kind of cookie */ 1315 want_cookie = 0; /* not our kind of cookie */
1308#endif
1309 tmp_ext.cookie_out_never = 0; /* false */ 1316 tmp_ext.cookie_out_never = 0; /* false */
1310 tmp_ext.cookie_plus = tmp_opt.cookie_plus; 1317 tmp_ext.cookie_plus = tmp_opt.cookie_plus;
1311 } else if (!tp->rx_opt.cookie_in_always) { 1318 } else if (!tp->rx_opt.cookie_in_always) {
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index f012ebd87b43..12368c586068 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -374,8 +374,8 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
374 "%s(): cannot allocate memory for statistics; dev=%s.\n", 374 "%s(): cannot allocate memory for statistics; dev=%s.\n",
375 __func__, dev->name)); 375 __func__, dev->name));
376 neigh_parms_release(&nd_tbl, ndev->nd_parms); 376 neigh_parms_release(&nd_tbl, ndev->nd_parms);
377 ndev->dead = 1; 377 dev_put(dev);
378 in6_dev_finish_destroy(ndev); 378 kfree(ndev);
379 return NULL; 379 return NULL;
380 } 380 }
381 381
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 9ef1831746ef..b46e9f88ce37 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -599,7 +599,7 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
599 return 0; 599 return 0;
600} 600}
601 601
602int datagram_send_ctl(struct net *net, 602int datagram_send_ctl(struct net *net, struct sock *sk,
603 struct msghdr *msg, struct flowi6 *fl6, 603 struct msghdr *msg, struct flowi6 *fl6,
604 struct ipv6_txoptions *opt, 604 struct ipv6_txoptions *opt,
605 int *hlimit, int *tclass, int *dontfrag) 605 int *hlimit, int *tclass, int *dontfrag)
@@ -658,7 +658,8 @@ int datagram_send_ctl(struct net *net,
658 658
659 if (addr_type != IPV6_ADDR_ANY) { 659 if (addr_type != IPV6_ADDR_ANY) {
660 int strict = __ipv6_addr_src_scope(addr_type) <= IPV6_ADDR_SCOPE_LINKLOCAL; 660 int strict = __ipv6_addr_src_scope(addr_type) <= IPV6_ADDR_SCOPE_LINKLOCAL;
661 if (!ipv6_chk_addr(net, &src_info->ipi6_addr, 661 if (!inet_sk(sk)->transparent &&
662 !ipv6_chk_addr(net, &src_info->ipi6_addr,
662 strict ? dev : NULL, 0)) 663 strict ? dev : NULL, 0))
663 err = -EINVAL; 664 err = -EINVAL;
664 else 665 else
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index f3caf1b8d572..543039450193 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -322,8 +322,8 @@ static int fl6_renew(struct ip6_flowlabel *fl, unsigned long linger, unsigned lo
322} 322}
323 323
324static struct ip6_flowlabel * 324static struct ip6_flowlabel *
325fl_create(struct net *net, struct in6_flowlabel_req *freq, char __user *optval, 325fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
326 int optlen, int *err_p) 326 char __user *optval, int optlen, int *err_p)
327{ 327{
328 struct ip6_flowlabel *fl = NULL; 328 struct ip6_flowlabel *fl = NULL;
329 int olen; 329 int olen;
@@ -360,7 +360,7 @@ fl_create(struct net *net, struct in6_flowlabel_req *freq, char __user *optval,
360 msg.msg_control = (void*)(fl->opt+1); 360 msg.msg_control = (void*)(fl->opt+1);
361 memset(&flowi6, 0, sizeof(flowi6)); 361 memset(&flowi6, 0, sizeof(flowi6));
362 362
363 err = datagram_send_ctl(net, &msg, &flowi6, fl->opt, &junk, 363 err = datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt, &junk,
364 &junk, &junk); 364 &junk, &junk);
365 if (err) 365 if (err)
366 goto done; 366 goto done;
@@ -528,7 +528,7 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
528 if (freq.flr_label & ~IPV6_FLOWLABEL_MASK) 528 if (freq.flr_label & ~IPV6_FLOWLABEL_MASK)
529 return -EINVAL; 529 return -EINVAL;
530 530
531 fl = fl_create(net, &freq, optval, optlen, &err); 531 fl = fl_create(net, sk, &freq, optval, optlen, &err);
532 if (fl == NULL) 532 if (fl == NULL)
533 return err; 533 return err;
534 sfl1 = kmalloc(sizeof(*sfl1), GFP_KERNEL); 534 sfl1 = kmalloc(sizeof(*sfl1), GFP_KERNEL);
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 705c82886281..def0538e2413 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -696,8 +696,10 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
696 int err; 696 int err;
697 697
698 err = ip6mr_fib_lookup(net, &fl6, &mrt); 698 err = ip6mr_fib_lookup(net, &fl6, &mrt);
699 if (err < 0) 699 if (err < 0) {
700 kfree_skb(skb);
700 return err; 701 return err;
702 }
701 703
702 read_lock(&mrt_lock); 704 read_lock(&mrt_lock);
703 dev->stats.tx_bytes += skb->len; 705 dev->stats.tx_bytes += skb->len;
@@ -2052,8 +2054,10 @@ int ip6_mr_input(struct sk_buff *skb)
2052 int err; 2054 int err;
2053 2055
2054 err = ip6mr_fib_lookup(net, &fl6, &mrt); 2056 err = ip6mr_fib_lookup(net, &fl6, &mrt);
2055 if (err < 0) 2057 if (err < 0) {
2058 kfree_skb(skb);
2056 return err; 2059 return err;
2060 }
2057 2061
2058 read_lock(&mrt_lock); 2062 read_lock(&mrt_lock);
2059 cache = ip6mr_cache_find(mrt, 2063 cache = ip6mr_cache_find(mrt,
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 9cb191ecaba8..2fbda5fc4cc4 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -475,7 +475,7 @@ sticky_done:
475 msg.msg_controllen = optlen; 475 msg.msg_controllen = optlen;
476 msg.msg_control = (void*)(opt+1); 476 msg.msg_control = (void*)(opt+1);
477 477
478 retv = datagram_send_ctl(net, &msg, &fl6, opt, &junk, &junk, 478 retv = datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk, &junk,
479 &junk); 479 &junk);
480 if (retv) 480 if (retv)
481 goto done; 481 goto done;
@@ -913,7 +913,7 @@ static int ipv6_getsockopt_sticky(struct sock *sk, struct ipv6_txoptions *opt,
913} 913}
914 914
915static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, 915static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
916 char __user *optval, int __user *optlen) 916 char __user *optval, int __user *optlen, unsigned flags)
917{ 917{
918 struct ipv6_pinfo *np = inet6_sk(sk); 918 struct ipv6_pinfo *np = inet6_sk(sk);
919 int len; 919 int len;
@@ -962,7 +962,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
962 962
963 msg.msg_control = optval; 963 msg.msg_control = optval;
964 msg.msg_controllen = len; 964 msg.msg_controllen = len;
965 msg.msg_flags = 0; 965 msg.msg_flags = flags;
966 966
967 lock_sock(sk); 967 lock_sock(sk);
968 skb = np->pktoptions; 968 skb = np->pktoptions;
@@ -1222,7 +1222,7 @@ int ipv6_getsockopt(struct sock *sk, int level, int optname,
1222 if(level != SOL_IPV6) 1222 if(level != SOL_IPV6)
1223 return -ENOPROTOOPT; 1223 return -ENOPROTOOPT;
1224 1224
1225 err = do_ipv6_getsockopt(sk, level, optname, optval, optlen); 1225 err = do_ipv6_getsockopt(sk, level, optname, optval, optlen, 0);
1226#ifdef CONFIG_NETFILTER 1226#ifdef CONFIG_NETFILTER
1227 /* we need to exclude all possible ENOPROTOOPTs except default case */ 1227 /* we need to exclude all possible ENOPROTOOPTs except default case */
1228 if (err == -ENOPROTOOPT && optname != IPV6_2292PKTOPTIONS) { 1228 if (err == -ENOPROTOOPT && optname != IPV6_2292PKTOPTIONS) {
@@ -1264,7 +1264,8 @@ int compat_ipv6_getsockopt(struct sock *sk, int level, int optname,
1264 return compat_mc_getsockopt(sk, level, optname, optval, optlen, 1264 return compat_mc_getsockopt(sk, level, optname, optval, optlen,
1265 ipv6_getsockopt); 1265 ipv6_getsockopt);
1266 1266
1267 err = do_ipv6_getsockopt(sk, level, optname, optval, optlen); 1267 err = do_ipv6_getsockopt(sk, level, optname, optval, optlen,
1268 MSG_CMSG_COMPAT);
1268#ifdef CONFIG_NETFILTER 1269#ifdef CONFIG_NETFILTER
1269 /* we need to exclude all possible ENOPROTOOPTs except default case */ 1270 /* we need to exclude all possible ENOPROTOOPTs except default case */
1270 if (err == -ENOPROTOOPT && optname != IPV6_2292PKTOPTIONS) { 1271 if (err == -ENOPROTOOPT && optname != IPV6_2292PKTOPTIONS) {
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 3e6ebcdb4779..ee7839f4d6e3 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1059,7 +1059,7 @@ static int mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs,
1059 break; 1059 break;
1060 for (i=0; i<nsrcs; i++) { 1060 for (i=0; i<nsrcs; i++) {
1061 /* skip inactive filters */ 1061 /* skip inactive filters */
1062 if (pmc->mca_sfcount[MCAST_INCLUDE] || 1062 if (psf->sf_count[MCAST_INCLUDE] ||
1063 pmc->mca_sfcount[MCAST_EXCLUDE] != 1063 pmc->mca_sfcount[MCAST_EXCLUDE] !=
1064 psf->sf_count[MCAST_EXCLUDE]) 1064 psf->sf_count[MCAST_EXCLUDE])
1065 continue; 1065 continue;
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
index 249394863284..e63c3972a739 100644
--- a/net/ipv6/netfilter/ip6_queue.c
+++ b/net/ipv6/netfilter/ip6_queue.c
@@ -218,6 +218,7 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
218 return skb; 218 return skb;
219 219
220nlmsg_failure: 220nlmsg_failure:
221 kfree_skb(skb);
221 *errp = -EINVAL; 222 *errp = -EINVAL;
222 printk(KERN_ERR "ip6_queue: error creating packet message\n"); 223 printk(KERN_ERR "ip6_queue: error creating packet message\n");
223 return NULL; 224 return NULL;
@@ -313,7 +314,7 @@ ipq_set_verdict(struct ipq_verdict_msg *vmsg, unsigned int len)
313{ 314{
314 struct nf_queue_entry *entry; 315 struct nf_queue_entry *entry;
315 316
316 if (vmsg->value > NF_MAX_VERDICT) 317 if (vmsg->value > NF_MAX_VERDICT || vmsg->value == NF_STOLEN)
317 return -EINVAL; 318 return -EINVAL;
318 319
319 entry = ipq_find_dequeue_entry(vmsg->id); 320 entry = ipq_find_dequeue_entry(vmsg->id);
@@ -358,12 +359,9 @@ ipq_receive_peer(struct ipq_peer_msg *pmsg,
358 break; 359 break;
359 360
360 case IPQM_VERDICT: 361 case IPQM_VERDICT:
361 if (pmsg->msg.verdict.value > NF_MAX_VERDICT) 362 status = ipq_set_verdict(&pmsg->msg.verdict,
362 status = -EINVAL; 363 len - sizeof(*pmsg));
363 else 364 break;
364 status = ipq_set_verdict(&pmsg->msg.verdict,
365 len - sizeof(*pmsg));
366 break;
367 default: 365 default:
368 status = -EINVAL; 366 status = -EINVAL;
369 } 367 }
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 6a79f3081bdb..343852e5c703 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -817,8 +817,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
817 memset(opt, 0, sizeof(struct ipv6_txoptions)); 817 memset(opt, 0, sizeof(struct ipv6_txoptions));
818 opt->tot_len = sizeof(struct ipv6_txoptions); 818 opt->tot_len = sizeof(struct ipv6_txoptions);
819 819
820 err = datagram_send_ctl(sock_net(sk), msg, &fl6, opt, &hlimit, 820 err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
821 &tclass, &dontfrag); 821 &hlimit, &tclass, &dontfrag);
822 if (err < 0) { 822 if (err < 0) {
823 fl6_sock_release(flowlabel); 823 fl6_sock_release(flowlabel);
824 return err; 824 return err;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 9e69eb0ec6dd..fb545edef6ea 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -104,6 +104,9 @@ static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
104 struct inet_peer *peer; 104 struct inet_peer *peer;
105 u32 *p = NULL; 105 u32 *p = NULL;
106 106
107 if (!(rt->dst.flags & DST_HOST))
108 return NULL;
109
107 if (!rt->rt6i_peer) 110 if (!rt->rt6i_peer)
108 rt6_bind_peer(rt, 1); 111 rt6_bind_peer(rt, 1);
109 112
@@ -241,7 +244,9 @@ static inline struct rt6_info *ip6_dst_alloc(struct dst_ops *ops,
241{ 244{
242 struct rt6_info *rt = dst_alloc(ops, dev, 0, 0, flags); 245 struct rt6_info *rt = dst_alloc(ops, dev, 0, 0, flags);
243 246
244 memset(&rt->rt6i_table, 0, sizeof(*rt) - sizeof(struct dst_entry)); 247 if (rt != NULL)
248 memset(&rt->rt6i_table, 0,
249 sizeof(*rt) - sizeof(struct dst_entry));
245 250
246 return rt; 251 return rt;
247} 252}
@@ -252,6 +257,9 @@ static void ip6_dst_destroy(struct dst_entry *dst)
252 struct inet6_dev *idev = rt->rt6i_idev; 257 struct inet6_dev *idev = rt->rt6i_idev;
253 struct inet_peer *peer = rt->rt6i_peer; 258 struct inet_peer *peer = rt->rt6i_peer;
254 259
260 if (!(rt->dst.flags & DST_HOST))
261 dst_destroy_metrics_generic(dst);
262
255 if (idev != NULL) { 263 if (idev != NULL) {
256 rt->rt6i_idev = NULL; 264 rt->rt6i_idev = NULL;
257 in6_dev_put(idev); 265 in6_dev_put(idev);
@@ -723,9 +731,7 @@ static struct rt6_info *rt6_alloc_cow(const struct rt6_info *ort,
723 ipv6_addr_copy(&rt->rt6i_gateway, daddr); 731 ipv6_addr_copy(&rt->rt6i_gateway, daddr);
724 } 732 }
725 733
726 rt->rt6i_dst.plen = 128;
727 rt->rt6i_flags |= RTF_CACHE; 734 rt->rt6i_flags |= RTF_CACHE;
728 rt->dst.flags |= DST_HOST;
729 735
730#ifdef CONFIG_IPV6_SUBTREES 736#ifdef CONFIG_IPV6_SUBTREES
731 if (rt->rt6i_src.plen && saddr) { 737 if (rt->rt6i_src.plen && saddr) {
@@ -775,9 +781,7 @@ static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort,
775 struct rt6_info *rt = ip6_rt_copy(ort, daddr); 781 struct rt6_info *rt = ip6_rt_copy(ort, daddr);
776 782
777 if (rt) { 783 if (rt) {
778 rt->rt6i_dst.plen = 128;
779 rt->rt6i_flags |= RTF_CACHE; 784 rt->rt6i_flags |= RTF_CACHE;
780 rt->dst.flags |= DST_HOST;
781 dst_set_neighbour(&rt->dst, neigh_clone(dst_get_neighbour_raw(&ort->dst))); 785 dst_set_neighbour(&rt->dst, neigh_clone(dst_get_neighbour_raw(&ort->dst)));
782 } 786 }
783 return rt; 787 return rt;
@@ -1078,12 +1082,15 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
1078 neigh = NULL; 1082 neigh = NULL;
1079 } 1083 }
1080 1084
1081 rt->rt6i_idev = idev; 1085 rt->dst.flags |= DST_HOST;
1086 rt->dst.output = ip6_output;
1082 dst_set_neighbour(&rt->dst, neigh); 1087 dst_set_neighbour(&rt->dst, neigh);
1083 atomic_set(&rt->dst.__refcnt, 1); 1088 atomic_set(&rt->dst.__refcnt, 1);
1084 ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
1085 dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255); 1089 dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255);
1086 rt->dst.output = ip6_output; 1090
1091 ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
1092 rt->rt6i_dst.plen = 128;
1093 rt->rt6i_idev = idev;
1087 1094
1088 spin_lock_bh(&icmp6_dst_lock); 1095 spin_lock_bh(&icmp6_dst_lock);
1089 rt->dst.next = icmp6_dst_gc_list; 1096 rt->dst.next = icmp6_dst_gc_list;
@@ -1261,6 +1268,14 @@ int ip6_route_add(struct fib6_config *cfg)
1261 if (rt->rt6i_dst.plen == 128) 1268 if (rt->rt6i_dst.plen == 128)
1262 rt->dst.flags |= DST_HOST; 1269 rt->dst.flags |= DST_HOST;
1263 1270
1271 if (!(rt->dst.flags & DST_HOST) && cfg->fc_mx) {
1272 u32 *metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
1273 if (!metrics) {
1274 err = -ENOMEM;
1275 goto out;
1276 }
1277 dst_init_metrics(&rt->dst, metrics, 0);
1278 }
1264#ifdef CONFIG_IPV6_SUBTREES 1279#ifdef CONFIG_IPV6_SUBTREES
1265 ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len); 1280 ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
1266 rt->rt6i_src.plen = cfg->fc_src_len; 1281 rt->rt6i_src.plen = cfg->fc_src_len;
@@ -1607,9 +1622,6 @@ void rt6_redirect(const struct in6_addr *dest, const struct in6_addr *src,
1607 if (on_link) 1622 if (on_link)
1608 nrt->rt6i_flags &= ~RTF_GATEWAY; 1623 nrt->rt6i_flags &= ~RTF_GATEWAY;
1609 1624
1610 nrt->rt6i_dst.plen = 128;
1611 nrt->dst.flags |= DST_HOST;
1612
1613 ipv6_addr_copy(&nrt->rt6i_gateway, (struct in6_addr*)neigh->primary_key); 1625 ipv6_addr_copy(&nrt->rt6i_gateway, (struct in6_addr*)neigh->primary_key);
1614 dst_set_neighbour(&nrt->dst, neigh_clone(neigh)); 1626 dst_set_neighbour(&nrt->dst, neigh_clone(neigh));
1615 1627
@@ -1754,9 +1766,10 @@ static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort,
1754 if (rt) { 1766 if (rt) {
1755 rt->dst.input = ort->dst.input; 1767 rt->dst.input = ort->dst.input;
1756 rt->dst.output = ort->dst.output; 1768 rt->dst.output = ort->dst.output;
1769 rt->dst.flags |= DST_HOST;
1757 1770
1758 ipv6_addr_copy(&rt->rt6i_dst.addr, dest); 1771 ipv6_addr_copy(&rt->rt6i_dst.addr, dest);
1759 rt->rt6i_dst.plen = ort->rt6i_dst.plen; 1772 rt->rt6i_dst.plen = 128;
1760 dst_copy_metrics(&rt->dst, &ort->dst); 1773 dst_copy_metrics(&rt->dst, &ort->dst);
1761 rt->dst.error = ort->dst.error; 1774 rt->dst.error = ort->dst.error;
1762 rt->rt6i_idev = ort->rt6i_idev; 1775 rt->rt6i_idev = ort->rt6i_idev;
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 07bf1085458f..00b15ac7a702 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -672,6 +672,9 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
672 if (skb->protocol != htons(ETH_P_IPV6)) 672 if (skb->protocol != htons(ETH_P_IPV6))
673 goto tx_error; 673 goto tx_error;
674 674
675 if (tos == 1)
676 tos = ipv6_get_dsfield(iph6);
677
675 /* ISATAP (RFC4214) - must come before 6to4 */ 678 /* ISATAP (RFC4214) - must come before 6to4 */
676 if (dev->priv_flags & IFF_ISATAP) { 679 if (dev->priv_flags & IFF_ISATAP) {
677 struct neighbour *neigh = NULL; 680 struct neighbour *neigh = NULL;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index d1fb63f4aeb7..79cc6469508d 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -531,20 +531,6 @@ static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
531 return tcp_v6_send_synack(sk, req, rvp); 531 return tcp_v6_send_synack(sk, req, rvp);
532} 532}
533 533
534static inline void syn_flood_warning(struct sk_buff *skb)
535{
536#ifdef CONFIG_SYN_COOKIES
537 if (sysctl_tcp_syncookies)
538 printk(KERN_INFO
539 "TCPv6: Possible SYN flooding on port %d. "
540 "Sending cookies.\n", ntohs(tcp_hdr(skb)->dest));
541 else
542#endif
543 printk(KERN_INFO
544 "TCPv6: Possible SYN flooding on port %d. "
545 "Dropping request.\n", ntohs(tcp_hdr(skb)->dest));
546}
547
548static void tcp_v6_reqsk_destructor(struct request_sock *req) 534static void tcp_v6_reqsk_destructor(struct request_sock *req)
549{ 535{
550 kfree_skb(inet6_rsk(req)->pktopts); 536 kfree_skb(inet6_rsk(req)->pktopts);
@@ -1179,11 +1165,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1179 struct tcp_sock *tp = tcp_sk(sk); 1165 struct tcp_sock *tp = tcp_sk(sk);
1180 __u32 isn = TCP_SKB_CB(skb)->when; 1166 __u32 isn = TCP_SKB_CB(skb)->when;
1181 struct dst_entry *dst = NULL; 1167 struct dst_entry *dst = NULL;
1182#ifdef CONFIG_SYN_COOKIES
1183 int want_cookie = 0; 1168 int want_cookie = 0;
1184#else
1185#define want_cookie 0
1186#endif
1187 1169
1188 if (skb->protocol == htons(ETH_P_IP)) 1170 if (skb->protocol == htons(ETH_P_IP))
1189 return tcp_v4_conn_request(sk, skb); 1171 return tcp_v4_conn_request(sk, skb);
@@ -1192,14 +1174,9 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1192 goto drop; 1174 goto drop;
1193 1175
1194 if (inet_csk_reqsk_queue_is_full(sk) && !isn) { 1176 if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
1195 if (net_ratelimit()) 1177 want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6");
1196 syn_flood_warning(skb); 1178 if (!want_cookie)
1197#ifdef CONFIG_SYN_COOKIES 1179 goto drop;
1198 if (sysctl_tcp_syncookies)
1199 want_cookie = 1;
1200 else
1201#endif
1202 goto drop;
1203 } 1180 }
1204 1181
1205 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) 1182 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
@@ -1249,9 +1226,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1249 while (l-- > 0) 1226 while (l-- > 0)
1250 *c++ ^= *hash_location++; 1227 *c++ ^= *hash_location++;
1251 1228
1252#ifdef CONFIG_SYN_COOKIES
1253 want_cookie = 0; /* not our kind of cookie */ 1229 want_cookie = 0; /* not our kind of cookie */
1254#endif
1255 tmp_ext.cookie_out_never = 0; /* false */ 1230 tmp_ext.cookie_out_never = 0; /* false */
1256 tmp_ext.cookie_plus = tmp_opt.cookie_plus; 1231 tmp_ext.cookie_plus = tmp_opt.cookie_plus;
1257 } else if (!tp->rx_opt.cookie_in_always) { 1232 } else if (!tp->rx_opt.cookie_in_always) {
@@ -1408,6 +1383,8 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1408 newtp->af_specific = &tcp_sock_ipv6_mapped_specific; 1383 newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1409#endif 1384#endif
1410 1385
1386 newnp->ipv6_ac_list = NULL;
1387 newnp->ipv6_fl_list = NULL;
1411 newnp->pktoptions = NULL; 1388 newnp->pktoptions = NULL;
1412 newnp->opt = NULL; 1389 newnp->opt = NULL;
1413 newnp->mcast_oif = inet6_iif(skb); 1390 newnp->mcast_oif = inet6_iif(skb);
@@ -1472,6 +1449,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1472 First: no IPv4 options. 1449 First: no IPv4 options.
1473 */ 1450 */
1474 newinet->inet_opt = NULL; 1451 newinet->inet_opt = NULL;
1452 newnp->ipv6_ac_list = NULL;
1475 newnp->ipv6_fl_list = NULL; 1453 newnp->ipv6_fl_list = NULL;
1476 1454
1477 /* Clone RX bits */ 1455 /* Clone RX bits */
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 29213b51c499..bb95e8e1c6f9 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1090,8 +1090,8 @@ do_udp_sendmsg:
1090 memset(opt, 0, sizeof(struct ipv6_txoptions)); 1090 memset(opt, 0, sizeof(struct ipv6_txoptions));
1091 opt->tot_len = sizeof(*opt); 1091 opt->tot_len = sizeof(*opt);
1092 1092
1093 err = datagram_send_ctl(sock_net(sk), msg, &fl6, opt, &hlimit, 1093 err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
1094 &tclass, &dontfrag); 1094 &hlimit, &tclass, &dontfrag);
1095 if (err < 0) { 1095 if (err < 0) {
1096 fl6_sock_release(flowlabel); 1096 fl6_sock_release(flowlabel);
1097 return err; 1097 return err;
diff --git a/net/irda/irsysctl.c b/net/irda/irsysctl.c
index d0b70dadf73b..2615ffc8e785 100644
--- a/net/irda/irsysctl.c
+++ b/net/irda/irsysctl.c
@@ -40,9 +40,9 @@ extern int sysctl_slot_timeout;
40extern int sysctl_fast_poll_increase; 40extern int sysctl_fast_poll_increase;
41extern char sysctl_devname[]; 41extern char sysctl_devname[];
42extern int sysctl_max_baud_rate; 42extern int sysctl_max_baud_rate;
43extern int sysctl_min_tx_turn_time; 43extern unsigned int sysctl_min_tx_turn_time;
44extern int sysctl_max_tx_data_size; 44extern unsigned int sysctl_max_tx_data_size;
45extern int sysctl_max_tx_window; 45extern unsigned int sysctl_max_tx_window;
46extern int sysctl_max_noreply_time; 46extern int sysctl_max_noreply_time;
47extern int sysctl_warn_noreply_time; 47extern int sysctl_warn_noreply_time;
48extern int sysctl_lap_keepalive_time; 48extern int sysctl_lap_keepalive_time;
diff --git a/net/irda/qos.c b/net/irda/qos.c
index 1b51bcf42394..4369f7f41bcb 100644
--- a/net/irda/qos.c
+++ b/net/irda/qos.c
@@ -60,7 +60,7 @@ int sysctl_max_noreply_time = 12;
60 * Default is 10us which means using the unmodified value given by the 60 * Default is 10us which means using the unmodified value given by the
61 * peer except if it's 0 (0 is likely a bug in the other stack). 61 * peer except if it's 0 (0 is likely a bug in the other stack).
62 */ 62 */
63unsigned sysctl_min_tx_turn_time = 10; 63unsigned int sysctl_min_tx_turn_time = 10;
64/* 64/*
65 * Maximum data size to be used in transmission in payload of LAP frame. 65 * Maximum data size to be used in transmission in payload of LAP frame.
66 * There is a bit of confusion in the IrDA spec : 66 * There is a bit of confusion in the IrDA spec :
@@ -75,13 +75,13 @@ unsigned sysctl_min_tx_turn_time = 10;
75 * bytes frames or all negotiated frame sizes, but you can use the sysctl 75 * bytes frames or all negotiated frame sizes, but you can use the sysctl
76 * to play with this value anyway. 76 * to play with this value anyway.
77 * Jean II */ 77 * Jean II */
78unsigned sysctl_max_tx_data_size = 2042; 78unsigned int sysctl_max_tx_data_size = 2042;
79/* 79/*
80 * Maximum transmit window, i.e. number of LAP frames between turn-around. 80 * Maximum transmit window, i.e. number of LAP frames between turn-around.
81 * This allow to override what the peer told us. Some peers are buggy and 81 * This allow to override what the peer told us. Some peers are buggy and
82 * don't always support what they tell us. 82 * don't always support what they tell us.
83 * Jean II */ 83 * Jean II */
84unsigned sysctl_max_tx_window = 7; 84unsigned int sysctl_max_tx_window = 7;
85 85
86static int irlap_param_baud_rate(void *instance, irda_param_t *param, int get); 86static int irlap_param_baud_rate(void *instance, irda_param_t *param, int get);
87static int irlap_param_link_disconnect(void *instance, irda_param_t *parm, 87static int irlap_param_link_disconnect(void *instance, irda_param_t *parm,
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 866f269183cf..acb44230b251 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -1012,7 +1012,6 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
1012 cancel_work_sync(&local->reconfig_filter); 1012 cancel_work_sync(&local->reconfig_filter);
1013 1013
1014 ieee80211_clear_tx_pending(local); 1014 ieee80211_clear_tx_pending(local);
1015 sta_info_stop(local);
1016 rate_control_deinitialize(local); 1015 rate_control_deinitialize(local);
1017 1016
1018 if (skb_queue_len(&local->skb_queue) || 1017 if (skb_queue_len(&local->skb_queue) ||
@@ -1024,6 +1023,7 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
1024 1023
1025 destroy_workqueue(local->workqueue); 1024 destroy_workqueue(local->workqueue);
1026 wiphy_unregister(local->hw.wiphy); 1025 wiphy_unregister(local->hw.wiphy);
1026 sta_info_stop(local);
1027 ieee80211_wep_free(local); 1027 ieee80211_wep_free(local);
1028 ieee80211_led_exit(local); 1028 ieee80211_led_exit(local);
1029 kfree(local->int_scan_req); 1029 kfree(local->int_scan_req);
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 3db78b696c5c..21070e9bc8d0 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -665,7 +665,7 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
665 BUG_ON(!sdata->bss); 665 BUG_ON(!sdata->bss);
666 666
667 atomic_dec(&sdata->bss->num_sta_ps); 667 atomic_dec(&sdata->bss->num_sta_ps);
668 __sta_info_clear_tim_bit(sdata->bss, sta); 668 sta_info_clear_tim_bit(sta);
669 } 669 }
670 670
671 local->num_sta--; 671 local->num_sta--;
diff --git a/net/netfilter/nf_conntrack_pptp.c b/net/netfilter/nf_conntrack_pptp.c
index 2fd4565144de..31d56b23b9e9 100644
--- a/net/netfilter/nf_conntrack_pptp.c
+++ b/net/netfilter/nf_conntrack_pptp.c
@@ -364,6 +364,7 @@ pptp_inbound_pkt(struct sk_buff *skb,
364 break; 364 break;
365 365
366 case PPTP_WAN_ERROR_NOTIFY: 366 case PPTP_WAN_ERROR_NOTIFY:
367 case PPTP_SET_LINK_INFO:
367 case PPTP_ECHO_REQUEST: 368 case PPTP_ECHO_REQUEST:
368 case PPTP_ECHO_REPLY: 369 case PPTP_ECHO_REPLY:
369 /* I don't have to explain these ;) */ 370 /* I don't have to explain these ;) */
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 37bf94394be0..8235b86b4e87 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -409,7 +409,7 @@ static void tcp_options(const struct sk_buff *skb,
409 if (opsize < 2) /* "silly options" */ 409 if (opsize < 2) /* "silly options" */
410 return; 410 return;
411 if (opsize > length) 411 if (opsize > length)
412 break; /* don't parse partial options */ 412 return; /* don't parse partial options */
413 413
414 if (opcode == TCPOPT_SACK_PERM 414 if (opcode == TCPOPT_SACK_PERM
415 && opsize == TCPOLEN_SACK_PERM) 415 && opsize == TCPOLEN_SACK_PERM)
@@ -447,7 +447,7 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
447 BUG_ON(ptr == NULL); 447 BUG_ON(ptr == NULL);
448 448
449 /* Fast path for timestamp-only option */ 449 /* Fast path for timestamp-only option */
450 if (length == TCPOLEN_TSTAMP_ALIGNED*4 450 if (length == TCPOLEN_TSTAMP_ALIGNED
451 && *(__be32 *)ptr == htonl((TCPOPT_NOP << 24) 451 && *(__be32 *)ptr == htonl((TCPOPT_NOP << 24)
452 | (TCPOPT_NOP << 16) 452 | (TCPOPT_NOP << 16)
453 | (TCPOPT_TIMESTAMP << 8) 453 | (TCPOPT_TIMESTAMP << 8)
@@ -469,7 +469,7 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
469 if (opsize < 2) /* "silly options" */ 469 if (opsize < 2) /* "silly options" */
470 return; 470 return;
471 if (opsize > length) 471 if (opsize > length)
472 break; /* don't parse partial options */ 472 return; /* don't parse partial options */
473 473
474 if (opcode == TCPOPT_SACK 474 if (opcode == TCPOPT_SACK
475 && opsize >= (TCPOLEN_SACK_BASE 475 && opsize >= (TCPOLEN_SACK_BASE
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 00bd475eab4b..a80b0cb03f17 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -646,8 +646,8 @@ verdicthdr_get(const struct nlattr * const nfqa[])
646 return NULL; 646 return NULL;
647 647
648 vhdr = nla_data(nfqa[NFQA_VERDICT_HDR]); 648 vhdr = nla_data(nfqa[NFQA_VERDICT_HDR]);
649 verdict = ntohl(vhdr->verdict); 649 verdict = ntohl(vhdr->verdict) & NF_VERDICT_MASK;
650 if ((verdict & NF_VERDICT_MASK) > NF_MAX_VERDICT) 650 if (verdict > NF_MAX_VERDICT || verdict == NF_STOLEN)
651 return NULL; 651 return NULL;
652 return vhdr; 652 return vhdr;
653} 653}
diff --git a/net/netfilter/xt_rateest.c b/net/netfilter/xt_rateest.c
index 76a083184d8e..ed0db15ab00e 100644
--- a/net/netfilter/xt_rateest.c
+++ b/net/netfilter/xt_rateest.c
@@ -78,7 +78,7 @@ static int xt_rateest_mt_checkentry(const struct xt_mtchk_param *par)
78{ 78{
79 struct xt_rateest_match_info *info = par->matchinfo; 79 struct xt_rateest_match_info *info = par->matchinfo;
80 struct xt_rateest *est1, *est2; 80 struct xt_rateest *est1, *est2;
81 int ret = false; 81 int ret = -EINVAL;
82 82
83 if (hweight32(info->flags & (XT_RATEEST_MATCH_ABS | 83 if (hweight32(info->flags & (XT_RATEEST_MATCH_ABS |
84 XT_RATEEST_MATCH_REL)) != 1) 84 XT_RATEEST_MATCH_REL)) != 1)
@@ -101,13 +101,12 @@ static int xt_rateest_mt_checkentry(const struct xt_mtchk_param *par)
101 if (!est1) 101 if (!est1)
102 goto err1; 102 goto err1;
103 103
104 est2 = NULL;
104 if (info->flags & XT_RATEEST_MATCH_REL) { 105 if (info->flags & XT_RATEEST_MATCH_REL) {
105 est2 = xt_rateest_lookup(info->name2); 106 est2 = xt_rateest_lookup(info->name2);
106 if (!est2) 107 if (!est2)
107 goto err2; 108 goto err2;
108 } else 109 }
109 est2 = NULL;
110
111 110
112 info->est1 = est1; 111 info->est1 = est1;
113 info->est2 = est2; 112 info->est2 = est2;
@@ -116,7 +115,7 @@ static int xt_rateest_mt_checkentry(const struct xt_mtchk_param *par)
116err2: 115err2:
117 xt_rateest_put(est1); 116 xt_rateest_put(est1);
118err1: 117err1:
119 return -EINVAL; 118 return ret;
120} 119}
121 120
122static void xt_rateest_mt_destroy(const struct xt_mtdtor_param *par) 121static void xt_rateest_mt_destroy(const struct xt_mtdtor_param *par)
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index c698cec0a445..fabb4fafa281 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -961,7 +961,10 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
961 return 0; 961 return 0;
962 962
963drop_n_acct: 963drop_n_acct:
964 po->stats.tp_drops = atomic_inc_return(&sk->sk_drops); 964 spin_lock(&sk->sk_receive_queue.lock);
965 po->stats.tp_drops++;
966 atomic_inc(&sk->sk_drops);
967 spin_unlock(&sk->sk_receive_queue.lock);
965 968
966drop_n_restore: 969drop_n_restore:
967 if (skb_head != skb->data && skb_shared(skb)) { 970 if (skb_head != skb->data && skb_shared(skb)) {
diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c
index 8b77edbab272..4e1de171866c 100644
--- a/net/rds/iw_rdma.c
+++ b/net/rds/iw_rdma.c
@@ -84,7 +84,8 @@ static int rds_iw_map_fastreg(struct rds_iw_mr_pool *pool,
84static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr); 84static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
85static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool, 85static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
86 struct list_head *unmap_list, 86 struct list_head *unmap_list,
87 struct list_head *kill_list); 87 struct list_head *kill_list,
88 int *unpinned);
88static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr); 89static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
89 90
90static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwdev, struct rdma_cm_id **cm_id) 91static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwdev, struct rdma_cm_id **cm_id)
@@ -499,7 +500,7 @@ static int rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all)
499 LIST_HEAD(unmap_list); 500 LIST_HEAD(unmap_list);
500 LIST_HEAD(kill_list); 501 LIST_HEAD(kill_list);
501 unsigned long flags; 502 unsigned long flags;
502 unsigned int nfreed = 0, ncleaned = 0, free_goal; 503 unsigned int nfreed = 0, ncleaned = 0, unpinned = 0, free_goal;
503 int ret = 0; 504 int ret = 0;
504 505
505 rds_iw_stats_inc(s_iw_rdma_mr_pool_flush); 506 rds_iw_stats_inc(s_iw_rdma_mr_pool_flush);
@@ -524,7 +525,8 @@ static int rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all)
524 * will be destroyed by the unmap function. 525 * will be destroyed by the unmap function.
525 */ 526 */
526 if (!list_empty(&unmap_list)) { 527 if (!list_empty(&unmap_list)) {
527 ncleaned = rds_iw_unmap_fastreg_list(pool, &unmap_list, &kill_list); 528 ncleaned = rds_iw_unmap_fastreg_list(pool, &unmap_list,
529 &kill_list, &unpinned);
528 /* If we've been asked to destroy all MRs, move those 530 /* If we've been asked to destroy all MRs, move those
529 * that were simply cleaned to the kill list */ 531 * that were simply cleaned to the kill list */
530 if (free_all) 532 if (free_all)
@@ -548,6 +550,7 @@ static int rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all)
548 spin_unlock_irqrestore(&pool->list_lock, flags); 550 spin_unlock_irqrestore(&pool->list_lock, flags);
549 } 551 }
550 552
553 atomic_sub(unpinned, &pool->free_pinned);
551 atomic_sub(ncleaned, &pool->dirty_count); 554 atomic_sub(ncleaned, &pool->dirty_count);
552 atomic_sub(nfreed, &pool->item_count); 555 atomic_sub(nfreed, &pool->item_count);
553 556
@@ -828,7 +831,8 @@ static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool,
828 831
829static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool, 832static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
830 struct list_head *unmap_list, 833 struct list_head *unmap_list,
831 struct list_head *kill_list) 834 struct list_head *kill_list,
835 int *unpinned)
832{ 836{
833 struct rds_iw_mapping *mapping, *next; 837 struct rds_iw_mapping *mapping, *next;
834 unsigned int ncleaned = 0; 838 unsigned int ncleaned = 0;
@@ -855,6 +859,7 @@ static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
855 859
856 spin_lock_irqsave(&pool->list_lock, flags); 860 spin_lock_irqsave(&pool->list_lock, flags);
857 list_for_each_entry_safe(mapping, next, unmap_list, m_list) { 861 list_for_each_entry_safe(mapping, next, unmap_list, m_list) {
862 *unpinned += mapping->m_sg.len;
858 list_move(&mapping->m_list, &laundered); 863 list_move(&mapping->m_list, &laundered);
859 ncleaned++; 864 ncleaned++;
860 } 865 }
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 102fc212cd64..e051398fdf6b 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -196,8 +196,7 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
196 196
197 skb2->skb_iif = skb->dev->ifindex; 197 skb2->skb_iif = skb->dev->ifindex;
198 skb2->dev = dev; 198 skb2->dev = dev;
199 dev_queue_xmit(skb2); 199 err = dev_queue_xmit(skb2);
200 err = 0;
201 200
202out: 201out:
203 if (err) { 202 if (err) {
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index be4505ee67a9..b01427924f81 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -425,7 +425,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
425 struct rsvp_filter *f, **fp; 425 struct rsvp_filter *f, **fp;
426 struct rsvp_session *s, **sp; 426 struct rsvp_session *s, **sp;
427 struct tc_rsvp_pinfo *pinfo = NULL; 427 struct tc_rsvp_pinfo *pinfo = NULL;
428 struct nlattr *opt = tca[TCA_OPTIONS-1]; 428 struct nlattr *opt = tca[TCA_OPTIONS];
429 struct nlattr *tb[TCA_RSVP_MAX + 1]; 429 struct nlattr *tb[TCA_RSVP_MAX + 1];
430 struct tcf_exts e; 430 struct tcf_exts e;
431 unsigned int h1, h2; 431 unsigned int h1, h2;
@@ -439,7 +439,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
439 if (err < 0) 439 if (err < 0)
440 return err; 440 return err;
441 441
442 err = tcf_exts_validate(tp, tb, tca[TCA_RATE-1], &e, &rsvp_ext_map); 442 err = tcf_exts_validate(tp, tb, tca[TCA_RATE], &e, &rsvp_ext_map);
443 if (err < 0) 443 if (err < 0)
444 return err; 444 return err;
445 445
@@ -449,8 +449,8 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
449 449
450 if (f->handle != handle && handle) 450 if (f->handle != handle && handle)
451 goto errout2; 451 goto errout2;
452 if (tb[TCA_RSVP_CLASSID-1]) { 452 if (tb[TCA_RSVP_CLASSID]) {
453 f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID-1]); 453 f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]);
454 tcf_bind_filter(tp, &f->res, base); 454 tcf_bind_filter(tp, &f->res, base);
455 } 455 }
456 456
@@ -462,7 +462,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
462 err = -EINVAL; 462 err = -EINVAL;
463 if (handle) 463 if (handle)
464 goto errout2; 464 goto errout2;
465 if (tb[TCA_RSVP_DST-1] == NULL) 465 if (tb[TCA_RSVP_DST] == NULL)
466 goto errout2; 466 goto errout2;
467 467
468 err = -ENOBUFS; 468 err = -ENOBUFS;
@@ -471,19 +471,19 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
471 goto errout2; 471 goto errout2;
472 472
473 h2 = 16; 473 h2 = 16;
474 if (tb[TCA_RSVP_SRC-1]) { 474 if (tb[TCA_RSVP_SRC]) {
475 memcpy(f->src, nla_data(tb[TCA_RSVP_SRC-1]), sizeof(f->src)); 475 memcpy(f->src, nla_data(tb[TCA_RSVP_SRC]), sizeof(f->src));
476 h2 = hash_src(f->src); 476 h2 = hash_src(f->src);
477 } 477 }
478 if (tb[TCA_RSVP_PINFO-1]) { 478 if (tb[TCA_RSVP_PINFO]) {
479 pinfo = nla_data(tb[TCA_RSVP_PINFO-1]); 479 pinfo = nla_data(tb[TCA_RSVP_PINFO]);
480 f->spi = pinfo->spi; 480 f->spi = pinfo->spi;
481 f->tunnelhdr = pinfo->tunnelhdr; 481 f->tunnelhdr = pinfo->tunnelhdr;
482 } 482 }
483 if (tb[TCA_RSVP_CLASSID-1]) 483 if (tb[TCA_RSVP_CLASSID])
484 f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID-1]); 484 f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]);
485 485
486 dst = nla_data(tb[TCA_RSVP_DST-1]); 486 dst = nla_data(tb[TCA_RSVP_DST]);
487 h1 = hash_dst(dst, pinfo ? pinfo->protocol : 0, pinfo ? pinfo->tunnelid : 0); 487 h1 = hash_dst(dst, pinfo ? pinfo->protocol : 0, pinfo ? pinfo->tunnelid : 0);
488 488
489 err = -ENOMEM; 489 err = -ENOMEM;
@@ -642,8 +642,7 @@ nla_put_failure:
642 return -1; 642 return -1;
643} 643}
644 644
645static struct tcf_proto_ops RSVP_OPS = { 645static struct tcf_proto_ops RSVP_OPS __read_mostly = {
646 .next = NULL,
647 .kind = RSVP_ID, 646 .kind = RSVP_ID,
648 .classify = rsvp_classify, 647 .classify = rsvp_classify,
649 .init = rsvp_init, 648 .init = rsvp_init,
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 167c880cf8da..76388b083f28 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -1689,6 +1689,11 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
1689 case SCTP_CMD_PURGE_ASCONF_QUEUE: 1689 case SCTP_CMD_PURGE_ASCONF_QUEUE:
1690 sctp_asconf_queue_teardown(asoc); 1690 sctp_asconf_queue_teardown(asoc);
1691 break; 1691 break;
1692
1693 case SCTP_CMD_SET_ASOC:
1694 asoc = cmd->obj.asoc;
1695 break;
1696
1692 default: 1697 default:
1693 pr_warn("Impossible command: %u, %p\n", 1698 pr_warn("Impossible command: %u, %p\n",
1694 cmd->verb, cmd->obj.ptr); 1699 cmd->verb, cmd->obj.ptr);
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 49b847b00f99..a0f31e6c1c63 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -2047,6 +2047,12 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(const struct sctp_endpoint *ep,
2047 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc)); 2047 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));
2048 sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL()); 2048 sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
2049 2049
 2050 /* Restore association pointer to provide SCTP command interpreter
2051 * with a valid context in case it needs to manipulate
2052 * the queues */
2053 sctp_add_cmd_sf(commands, SCTP_CMD_SET_ASOC,
2054 SCTP_ASOC((struct sctp_association *)asoc));
2055
2050 return retval; 2056 return retval;
2051 2057
2052nomem: 2058nomem:
diff --git a/net/socket.c b/net/socket.c
index 24a77400b65e..ffe92ca32f2a 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1965,8 +1965,9 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
1965 * used_address->name_len is initialized to UINT_MAX so that the first 1965 * used_address->name_len is initialized to UINT_MAX so that the first
1966 * destination address never matches. 1966 * destination address never matches.
1967 */ 1967 */
1968 if (used_address && used_address->name_len == msg_sys->msg_namelen && 1968 if (used_address && msg_sys->msg_name &&
1969 !memcmp(&used_address->name, msg->msg_name, 1969 used_address->name_len == msg_sys->msg_namelen &&
1970 !memcmp(&used_address->name, msg_sys->msg_name,
1970 used_address->name_len)) { 1971 used_address->name_len)) {
1971 err = sock_sendmsg_nosec(sock, msg_sys, total_len); 1972 err = sock_sendmsg_nosec(sock, msg_sys, total_len);
1972 goto out_freectl; 1973 goto out_freectl;
@@ -1978,8 +1979,9 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
1978 */ 1979 */
1979 if (used_address && err >= 0) { 1980 if (used_address && err >= 0) {
1980 used_address->name_len = msg_sys->msg_namelen; 1981 used_address->name_len = msg_sys->msg_namelen;
1981 memcpy(&used_address->name, msg->msg_name, 1982 if (msg_sys->msg_name)
1982 used_address->name_len); 1983 memcpy(&used_address->name, msg_sys->msg_name,
1984 used_address->name_len);
1983 } 1985 }
1984 1986
1985out_freectl: 1987out_freectl:
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 645437cfc464..c14865172da7 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -616,6 +616,9 @@ int wiphy_register(struct wiphy *wiphy)
616 if (res) 616 if (res)
617 goto out_rm_dev; 617 goto out_rm_dev;
618 618
619 rtnl_lock();
620 rdev->wiphy.registered = true;
621 rtnl_unlock();
619 return 0; 622 return 0;
620 623
621out_rm_dev: 624out_rm_dev:
@@ -647,6 +650,10 @@ void wiphy_unregister(struct wiphy *wiphy)
647{ 650{
648 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 651 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
649 652
653 rtnl_lock();
654 rdev->wiphy.registered = false;
655 rtnl_unlock();
656
650 rfkill_unregister(rdev->rfkill); 657 rfkill_unregister(rdev->rfkill);
651 658
652 /* protect the device list */ 659 /* protect the device list */
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index e83e7fee3bc0..ea40d540a990 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -4113,9 +4113,12 @@ static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev,
4113 if (len % sizeof(u32)) 4113 if (len % sizeof(u32))
4114 return -EINVAL; 4114 return -EINVAL;
4115 4115
4116 if (settings->n_akm_suites > NL80211_MAX_NR_AKM_SUITES)
4117 return -EINVAL;
4118
4116 memcpy(settings->akm_suites, data, len); 4119 memcpy(settings->akm_suites, data, len);
4117 4120
4118 for (i = 0; i < settings->n_ciphers_pairwise; i++) 4121 for (i = 0; i < settings->n_akm_suites; i++)
4119 if (!nl80211_valid_akm_suite(settings->akm_suites[i])) 4122 if (!nl80211_valid_akm_suite(settings->akm_suites[i]))
4120 return -EINVAL; 4123 return -EINVAL;
4121 } 4124 }
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 02751dbc5a97..68a471ba193f 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -852,6 +852,7 @@ static void handle_channel(struct wiphy *wiphy,
852 return; 852 return;
853 } 853 }
854 854
855 chan->beacon_found = false;
855 chan->flags = flags | bw_flags | map_regdom_flags(reg_rule->flags); 856 chan->flags = flags | bw_flags | map_regdom_flags(reg_rule->flags);
856 chan->max_antenna_gain = min(chan->orig_mag, 857 chan->max_antenna_gain = min(chan->orig_mag,
857 (int) MBI_TO_DBI(power_rule->max_antenna_gain)); 858 (int) MBI_TO_DBI(power_rule->max_antenna_gain));
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index b7b6ff8be553..dec0fa28372e 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -118,6 +118,8 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev)
118 i++, j++) 118 i++, j++)
119 request->channels[i] = 119 request->channels[i] =
120 &wdev->wiphy->bands[band]->channels[j]; 120 &wdev->wiphy->bands[band]->channels[j];
121 request->rates[band] =
122 (1 << wdev->wiphy->bands[band]->n_bitrates) - 1;
121 } 123 }
122 } 124 }
123 request->n_channels = n_channels; 125 request->n_channels = n_channels;
diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
index c6e4ca6a7d2e..ff574597a854 100644
--- a/net/wireless/sysfs.c
+++ b/net/wireless/sysfs.c
@@ -93,7 +93,8 @@ static int wiphy_suspend(struct device *dev, pm_message_t state)
93 93
94 if (rdev->ops->suspend) { 94 if (rdev->ops->suspend) {
95 rtnl_lock(); 95 rtnl_lock();
96 ret = rdev->ops->suspend(&rdev->wiphy, rdev->wowlan); 96 if (rdev->wiphy.registered)
97 ret = rdev->ops->suspend(&rdev->wiphy, rdev->wowlan);
97 rtnl_unlock(); 98 rtnl_unlock();
98 } 99 }
99 100
@@ -112,7 +113,8 @@ static int wiphy_resume(struct device *dev)
112 113
113 if (rdev->ops->resume) { 114 if (rdev->ops->resume) {
114 rtnl_lock(); 115 rtnl_lock();
115 ret = rdev->ops->resume(&rdev->wiphy); 116 if (rdev->wiphy.registered)
117 ret = rdev->ops->resume(&rdev->wiphy);
116 rtnl_unlock(); 118 rtnl_unlock();
117 } 119 }
118 120
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index a026b0ef2443..54a0dc2e2f8d 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -212,6 +212,11 @@ resume:
212 /* only the first xfrm gets the encap type */ 212 /* only the first xfrm gets the encap type */
213 encap_type = 0; 213 encap_type = 0;
214 214
215 if (async && x->repl->check(x, skb, seq)) {
216 XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
217 goto drop_unlock;
218 }
219
215 x->repl->advance(x, seq); 220 x->repl->advance(x, seq);
216 221
217 x->curlft.bytes += skb->len; 222 x->curlft.bytes += skb->len;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 94fdcc7f1030..552df27dcf53 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1349,14 +1349,16 @@ static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
1349 BUG(); 1349 BUG();
1350 } 1350 }
1351 xdst = dst_alloc(dst_ops, NULL, 0, 0, 0); 1351 xdst = dst_alloc(dst_ops, NULL, 0, 0, 0);
1352 memset(&xdst->u.rt6.rt6i_table, 0, sizeof(*xdst) - sizeof(struct dst_entry));
1353 xfrm_policy_put_afinfo(afinfo);
1354 1352
1355 if (likely(xdst)) 1353 if (likely(xdst)) {
1354 memset(&xdst->u.rt6.rt6i_table, 0,
1355 sizeof(*xdst) - sizeof(struct dst_entry));
1356 xdst->flo.ops = &xfrm_bundle_fc_ops; 1356 xdst->flo.ops = &xfrm_bundle_fc_ops;
1357 else 1357 } else
1358 xdst = ERR_PTR(-ENOBUFS); 1358 xdst = ERR_PTR(-ENOBUFS);
1359 1359
1360 xfrm_policy_put_afinfo(afinfo);
1361
1360 return xdst; 1362 return xdst;
1361} 1363}
1362 1364