aboutsummaryrefslogtreecommitdiffstats
path: root/net/l2tp/l2tp_core.c
diff options
context:
space:
mode:
author    James Chapman <jchapman@katalix.com>  2012-04-29 17:48:46 -0400
committer David S. Miller <davem@davemloft.net>  2012-05-01 09:30:54 -0400
commit    5de7aee5413cdfe6f96289a84a5ad22b1314e873 (patch)
tree      f1c6cf02dde574fd0c23598132b9352512cb03a5  /net/l2tp/l2tp_core.c
parent    80bcb4238dd858d8ae460b62aac2f4165db58c3c (diff)
l2tp: fix locking of 64-bit counters for smp
L2TP uses 64-bit counters but since these are not updated atomically, we need to make them safe for smp. This patch addresses that. Signed-off-by: James Chapman <jchapman@katalix.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/l2tp/l2tp_core.c')
-rw-r--r--  net/l2tp/l2tp_core.c  75
1 file changed, 56 insertions(+), 19 deletions(-)
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 0ca9bc39150f..f1bfae3e1ba6 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -330,8 +330,10 @@ static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *sk
330 struct sk_buff *skbp; 330 struct sk_buff *skbp;
331 struct sk_buff *tmp; 331 struct sk_buff *tmp;
332 u32 ns = L2TP_SKB_CB(skb)->ns; 332 u32 ns = L2TP_SKB_CB(skb)->ns;
333 struct l2tp_stats *sstats;
333 334
334 spin_lock_bh(&session->reorder_q.lock); 335 spin_lock_bh(&session->reorder_q.lock);
336 sstats = &session->stats;
335 skb_queue_walk_safe(&session->reorder_q, skbp, tmp) { 337 skb_queue_walk_safe(&session->reorder_q, skbp, tmp) {
336 if (L2TP_SKB_CB(skbp)->ns > ns) { 338 if (L2TP_SKB_CB(skbp)->ns > ns) {
337 __skb_queue_before(&session->reorder_q, skbp, skb); 339 __skb_queue_before(&session->reorder_q, skbp, skb);
@@ -339,7 +341,9 @@ static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *sk
339 "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n", 341 "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n",
340 session->name, ns, L2TP_SKB_CB(skbp)->ns, 342 session->name, ns, L2TP_SKB_CB(skbp)->ns,
341 skb_queue_len(&session->reorder_q)); 343 skb_queue_len(&session->reorder_q));
342 session->stats.rx_oos_packets++; 344 u64_stats_update_begin(&sstats->syncp);
345 sstats->rx_oos_packets++;
346 u64_stats_update_end(&sstats->syncp);
343 goto out; 347 goto out;
344 } 348 }
345 } 349 }
@@ -356,16 +360,23 @@ static void l2tp_recv_dequeue_skb(struct l2tp_session *session, struct sk_buff *
356{ 360{
357 struct l2tp_tunnel *tunnel = session->tunnel; 361 struct l2tp_tunnel *tunnel = session->tunnel;
358 int length = L2TP_SKB_CB(skb)->length; 362 int length = L2TP_SKB_CB(skb)->length;
363 struct l2tp_stats *tstats, *sstats;
359 364
360 /* We're about to requeue the skb, so return resources 365 /* We're about to requeue the skb, so return resources
361 * to its current owner (a socket receive buffer). 366 * to its current owner (a socket receive buffer).
362 */ 367 */
363 skb_orphan(skb); 368 skb_orphan(skb);
364 369
365 tunnel->stats.rx_packets++; 370 tstats = &tunnel->stats;
366 tunnel->stats.rx_bytes += length; 371 u64_stats_update_begin(&tstats->syncp);
367 session->stats.rx_packets++; 372 sstats = &session->stats;
368 session->stats.rx_bytes += length; 373 u64_stats_update_begin(&sstats->syncp);
374 tstats->rx_packets++;
375 tstats->rx_bytes += length;
376 sstats->rx_packets++;
377 sstats->rx_bytes += length;
378 u64_stats_update_end(&tstats->syncp);
379 u64_stats_update_end(&sstats->syncp);
369 380
370 if (L2TP_SKB_CB(skb)->has_seq) { 381 if (L2TP_SKB_CB(skb)->has_seq) {
371 /* Bump our Nr */ 382 /* Bump our Nr */
@@ -396,6 +407,7 @@ static void l2tp_recv_dequeue(struct l2tp_session *session)
396{ 407{
397 struct sk_buff *skb; 408 struct sk_buff *skb;
398 struct sk_buff *tmp; 409 struct sk_buff *tmp;
410 struct l2tp_stats *sstats;
399 411
400 /* If the pkt at the head of the queue has the nr that we 412 /* If the pkt at the head of the queue has the nr that we
401 * expect to send up next, dequeue it and any other 413 * expect to send up next, dequeue it and any other
@@ -403,10 +415,13 @@ static void l2tp_recv_dequeue(struct l2tp_session *session)
403 */ 415 */
404start: 416start:
405 spin_lock_bh(&session->reorder_q.lock); 417 spin_lock_bh(&session->reorder_q.lock);
418 sstats = &session->stats;
406 skb_queue_walk_safe(&session->reorder_q, skb, tmp) { 419 skb_queue_walk_safe(&session->reorder_q, skb, tmp) {
407 if (time_after(jiffies, L2TP_SKB_CB(skb)->expires)) { 420 if (time_after(jiffies, L2TP_SKB_CB(skb)->expires)) {
408 session->stats.rx_seq_discards++; 421 u64_stats_update_begin(&sstats->syncp);
409 session->stats.rx_errors++; 422 sstats->rx_seq_discards++;
423 sstats->rx_errors++;
424 u64_stats_update_end(&sstats->syncp);
410 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG, 425 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
411 "%s: oos pkt %u len %d discarded (too old), " 426 "%s: oos pkt %u len %d discarded (too old), "
412 "waiting for %u, reorder_q_len=%d\n", 427 "waiting for %u, reorder_q_len=%d\n",
@@ -558,6 +573,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
558 struct l2tp_tunnel *tunnel = session->tunnel; 573 struct l2tp_tunnel *tunnel = session->tunnel;
559 int offset; 574 int offset;
560 u32 ns, nr; 575 u32 ns, nr;
576 struct l2tp_stats *sstats = &session->stats;
561 577
562 /* The ref count is increased since we now hold a pointer to 578 /* The ref count is increased since we now hold a pointer to
563 * the session. Take care to decrement the refcnt when exiting 579 * the session. Take care to decrement the refcnt when exiting
@@ -573,7 +589,9 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
573 PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO, 589 PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO,
574 "%s: cookie mismatch (%u/%u). Discarding.\n", 590 "%s: cookie mismatch (%u/%u). Discarding.\n",
575 tunnel->name, tunnel->tunnel_id, session->session_id); 591 tunnel->name, tunnel->tunnel_id, session->session_id);
576 session->stats.rx_cookie_discards++; 592 u64_stats_update_begin(&sstats->syncp);
593 sstats->rx_cookie_discards++;
594 u64_stats_update_end(&sstats->syncp);
577 goto discard; 595 goto discard;
578 } 596 }
579 ptr += session->peer_cookie_len; 597 ptr += session->peer_cookie_len;
@@ -642,7 +660,9 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
642 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_WARNING, 660 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_WARNING,
643 "%s: recv data has no seq numbers when required. " 661 "%s: recv data has no seq numbers when required. "
644 "Discarding\n", session->name); 662 "Discarding\n", session->name);
645 session->stats.rx_seq_discards++; 663 u64_stats_update_begin(&sstats->syncp);
664 sstats->rx_seq_discards++;
665 u64_stats_update_end(&sstats->syncp);
646 goto discard; 666 goto discard;
647 } 667 }
648 668
@@ -661,7 +681,9 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
661 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_WARNING, 681 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_WARNING,
662 "%s: recv data has no seq numbers when required. " 682 "%s: recv data has no seq numbers when required. "
663 "Discarding\n", session->name); 683 "Discarding\n", session->name);
664 session->stats.rx_seq_discards++; 684 u64_stats_update_begin(&sstats->syncp);
685 sstats->rx_seq_discards++;
686 u64_stats_update_end(&sstats->syncp);
665 goto discard; 687 goto discard;
666 } 688 }
667 } 689 }
@@ -715,7 +737,9 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
715 * packets 737 * packets
716 */ 738 */
717 if (L2TP_SKB_CB(skb)->ns != session->nr) { 739 if (L2TP_SKB_CB(skb)->ns != session->nr) {
718 session->stats.rx_seq_discards++; 740 u64_stats_update_begin(&sstats->syncp);
741 sstats->rx_seq_discards++;
742 u64_stats_update_end(&sstats->syncp);
719 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG, 743 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
720 "%s: oos pkt %u len %d discarded, " 744 "%s: oos pkt %u len %d discarded, "
721 "waiting for %u, reorder_q_len=%d\n", 745 "waiting for %u, reorder_q_len=%d\n",
@@ -742,7 +766,9 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
742 return; 766 return;
743 767
744discard: 768discard:
745 session->stats.rx_errors++; 769 u64_stats_update_begin(&sstats->syncp);
770 sstats->rx_errors++;
771 u64_stats_update_end(&sstats->syncp);
746 kfree_skb(skb); 772 kfree_skb(skb);
747 773
748 if (session->deref) 774 if (session->deref)
@@ -768,6 +794,7 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
768 int offset; 794 int offset;
769 u16 version; 795 u16 version;
770 int length; 796 int length;
797 struct l2tp_stats *tstats;
771 798
772 if (tunnel->sock && l2tp_verify_udp_checksum(tunnel->sock, skb)) 799 if (tunnel->sock && l2tp_verify_udp_checksum(tunnel->sock, skb))
773 goto discard_bad_csum; 800 goto discard_bad_csum;
@@ -860,7 +887,10 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
860discard_bad_csum: 887discard_bad_csum:
861 LIMIT_NETDEBUG("%s: UDP: bad checksum\n", tunnel->name); 888 LIMIT_NETDEBUG("%s: UDP: bad checksum\n", tunnel->name);
862 UDP_INC_STATS_USER(tunnel->l2tp_net, UDP_MIB_INERRORS, 0); 889 UDP_INC_STATS_USER(tunnel->l2tp_net, UDP_MIB_INERRORS, 0);
863 tunnel->stats.rx_errors++; 890 tstats = &tunnel->stats;
891 u64_stats_update_begin(&tstats->syncp);
892 tstats->rx_errors++;
893 u64_stats_update_end(&tstats->syncp);
864 kfree_skb(skb); 894 kfree_skb(skb);
865 895
866 return 0; 896 return 0;
@@ -986,6 +1016,7 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
986 struct l2tp_tunnel *tunnel = session->tunnel; 1016 struct l2tp_tunnel *tunnel = session->tunnel;
987 unsigned int len = skb->len; 1017 unsigned int len = skb->len;
988 int error; 1018 int error;
1019 struct l2tp_stats *tstats, *sstats;
989 1020
990 /* Debug */ 1021 /* Debug */
991 if (session->send_seq) 1022 if (session->send_seq)
@@ -1022,15 +1053,21 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
1022 error = ip_queue_xmit(skb, fl); 1053 error = ip_queue_xmit(skb, fl);
1023 1054
1024 /* Update stats */ 1055 /* Update stats */
1056 tstats = &tunnel->stats;
1057 u64_stats_update_begin(&tstats->syncp);
1058 sstats = &session->stats;
1059 u64_stats_update_begin(&sstats->syncp);
1025 if (error >= 0) { 1060 if (error >= 0) {
1026 tunnel->stats.tx_packets++; 1061 tstats->tx_packets++;
1027 tunnel->stats.tx_bytes += len; 1062 tstats->tx_bytes += len;
1028 session->stats.tx_packets++; 1063 sstats->tx_packets++;
1029 session->stats.tx_bytes += len; 1064 sstats->tx_bytes += len;
1030 } else { 1065 } else {
1031 tunnel->stats.tx_errors++; 1066 tstats->tx_errors++;
1032 session->stats.tx_errors++; 1067 sstats->tx_errors++;
1033 } 1068 }
1069 u64_stats_update_end(&tstats->syncp);
1070 u64_stats_update_end(&sstats->syncp);
1034 1071
1035 return 0; 1072 return 0;
1036} 1073}