diff options
author | Marek Lindner <lindner_marek@yahoo.de> | 2011-01-25 16:52:11 -0500 |
---|---|---|
committer | Marek Lindner <lindner_marek@yahoo.de> | 2011-03-05 06:50:09 -0500 |
commit | f3e0008f01b275bd08bd416cfcaa7021dd6bc277 (patch) | |
tree | 47c1787dce5d3bda821a4ccb015d54b6b9cadc82 /net/batman-adv/routing.c | |
parent | 0ede9f41b217d8982ab426e3c8c1b692a280a16f (diff) |
batman-adv: make broadcast seqno operations atomic
Batman-adv could receive several payload broadcasts at the same time
that would trigger access to the broadcast seqno sliding window to
determine whether this is a new broadcast or not. If these incoming
broadcasts access the sliding window simultaneously, it could be
left in an inconsistent state. Therefore it is necessary to make
sure this access is atomic.
Reported-by: Linus Lüssing <linus.luessing@web.de>
Signed-off-by: Marek Lindner <lindner_marek@yahoo.de>
Diffstat (limited to 'net/batman-adv/routing.c')
-rw-r--r-- | net/batman-adv/routing.c | 56 |
1 file changed, 34 insertions, 22 deletions
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c index 29a689ac5693..ce68815f3eb3 100644 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c | |||
@@ -1473,81 +1473,93 @@ int recv_ucast_frag_packet(struct sk_buff *skb, struct batman_if *recv_if) | |||
1473 | int recv_bcast_packet(struct sk_buff *skb, struct batman_if *recv_if) | 1473 | int recv_bcast_packet(struct sk_buff *skb, struct batman_if *recv_if) |
1474 | { | 1474 | { |
1475 | struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); | 1475 | struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); |
1476 | struct orig_node *orig_node; | 1476 | struct orig_node *orig_node = NULL; |
1477 | struct bcast_packet *bcast_packet; | 1477 | struct bcast_packet *bcast_packet; |
1478 | struct ethhdr *ethhdr; | 1478 | struct ethhdr *ethhdr; |
1479 | int hdr_size = sizeof(struct bcast_packet); | 1479 | int hdr_size = sizeof(struct bcast_packet); |
1480 | int ret = NET_RX_DROP; | ||
1480 | int32_t seq_diff; | 1481 | int32_t seq_diff; |
1481 | 1482 | ||
1482 | /* drop packet if it has not necessary minimum size */ | 1483 | /* drop packet if it has not necessary minimum size */ |
1483 | if (unlikely(!pskb_may_pull(skb, hdr_size))) | 1484 | if (unlikely(!pskb_may_pull(skb, hdr_size))) |
1484 | return NET_RX_DROP; | 1485 | goto out; |
1485 | 1486 | ||
1486 | ethhdr = (struct ethhdr *)skb_mac_header(skb); | 1487 | ethhdr = (struct ethhdr *)skb_mac_header(skb); |
1487 | 1488 | ||
1488 | /* packet with broadcast indication but unicast recipient */ | 1489 | /* packet with broadcast indication but unicast recipient */ |
1489 | if (!is_broadcast_ether_addr(ethhdr->h_dest)) | 1490 | if (!is_broadcast_ether_addr(ethhdr->h_dest)) |
1490 | return NET_RX_DROP; | 1491 | goto out; |
1491 | 1492 | ||
1492 | /* packet with broadcast sender address */ | 1493 | /* packet with broadcast sender address */ |
1493 | if (is_broadcast_ether_addr(ethhdr->h_source)) | 1494 | if (is_broadcast_ether_addr(ethhdr->h_source)) |
1494 | return NET_RX_DROP; | 1495 | goto out; |
1495 | 1496 | ||
1496 | /* ignore broadcasts sent by myself */ | 1497 | /* ignore broadcasts sent by myself */ |
1497 | if (is_my_mac(ethhdr->h_source)) | 1498 | if (is_my_mac(ethhdr->h_source)) |
1498 | return NET_RX_DROP; | 1499 | goto out; |
1499 | 1500 | ||
1500 | bcast_packet = (struct bcast_packet *)skb->data; | 1501 | bcast_packet = (struct bcast_packet *)skb->data; |
1501 | 1502 | ||
1502 | /* ignore broadcasts originated by myself */ | 1503 | /* ignore broadcasts originated by myself */ |
1503 | if (is_my_mac(bcast_packet->orig)) | 1504 | if (is_my_mac(bcast_packet->orig)) |
1504 | return NET_RX_DROP; | 1505 | goto out; |
1505 | 1506 | ||
1506 | if (bcast_packet->ttl < 2) | 1507 | if (bcast_packet->ttl < 2) |
1507 | return NET_RX_DROP; | 1508 | goto out; |
1508 | 1509 | ||
1509 | spin_lock_bh(&bat_priv->orig_hash_lock); | 1510 | spin_lock_bh(&bat_priv->orig_hash_lock); |
1510 | rcu_read_lock(); | 1511 | rcu_read_lock(); |
1511 | orig_node = ((struct orig_node *) | 1512 | orig_node = ((struct orig_node *) |
1512 | hash_find(bat_priv->orig_hash, compare_orig, choose_orig, | 1513 | hash_find(bat_priv->orig_hash, compare_orig, choose_orig, |
1513 | bcast_packet->orig)); | 1514 | bcast_packet->orig)); |
1515 | |||
1516 | if (!orig_node) | ||
1517 | goto rcu_unlock; | ||
1518 | |||
1519 | kref_get(&orig_node->refcount); | ||
1514 | rcu_read_unlock(); | 1520 | rcu_read_unlock(); |
1515 | 1521 | ||
1516 | if (!orig_node) { | 1522 | spin_lock_bh(&orig_node->bcast_seqno_lock); |
1517 | spin_unlock_bh(&bat_priv->orig_hash_lock); | ||
1518 | return NET_RX_DROP; | ||
1519 | } | ||
1520 | 1523 | ||
1521 | /* check whether the packet is a duplicate */ | 1524 | /* check whether the packet is a duplicate */ |
1522 | if (get_bit_status(orig_node->bcast_bits, | 1525 | if (get_bit_status(orig_node->bcast_bits, orig_node->last_bcast_seqno, |
1523 | orig_node->last_bcast_seqno, | 1526 | ntohl(bcast_packet->seqno))) |
1524 | ntohl(bcast_packet->seqno))) { | 1527 | goto spin_unlock; |
1525 | spin_unlock_bh(&bat_priv->orig_hash_lock); | ||
1526 | return NET_RX_DROP; | ||
1527 | } | ||
1528 | 1528 | ||
1529 | seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno; | 1529 | seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno; |
1530 | 1530 | ||
1531 | /* check whether the packet is old and the host just restarted. */ | 1531 | /* check whether the packet is old and the host just restarted. */ |
1532 | if (window_protected(bat_priv, seq_diff, | 1532 | if (window_protected(bat_priv, seq_diff, |
1533 | &orig_node->bcast_seqno_reset)) { | 1533 | &orig_node->bcast_seqno_reset)) |
1534 | spin_unlock_bh(&bat_priv->orig_hash_lock); | 1534 | goto spin_unlock; |
1535 | return NET_RX_DROP; | ||
1536 | } | ||
1537 | 1535 | ||
1538 | /* mark broadcast in flood history, update window position | 1536 | /* mark broadcast in flood history, update window position |
1539 | * if required. */ | 1537 | * if required. */ |
1540 | if (bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1)) | 1538 | if (bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1)) |
1541 | orig_node->last_bcast_seqno = ntohl(bcast_packet->seqno); | 1539 | orig_node->last_bcast_seqno = ntohl(bcast_packet->seqno); |
1542 | 1540 | ||
1541 | spin_unlock_bh(&orig_node->bcast_seqno_lock); | ||
1543 | spin_unlock_bh(&bat_priv->orig_hash_lock); | 1542 | spin_unlock_bh(&bat_priv->orig_hash_lock); |
1543 | |||
1544 | /* rebroadcast packet */ | 1544 | /* rebroadcast packet */ |
1545 | add_bcast_packet_to_list(bat_priv, skb); | 1545 | add_bcast_packet_to_list(bat_priv, skb); |
1546 | 1546 | ||
1547 | /* broadcast for me */ | 1547 | /* broadcast for me */ |
1548 | interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size); | 1548 | interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size); |
1549 | ret = NET_RX_SUCCESS; | ||
1550 | goto out; | ||
1549 | 1551 | ||
1550 | return NET_RX_SUCCESS; | 1552 | rcu_unlock: |
1553 | rcu_read_unlock(); | ||
1554 | spin_unlock_bh(&bat_priv->orig_hash_lock); | ||
1555 | goto out; | ||
1556 | spin_unlock: | ||
1557 | spin_unlock_bh(&orig_node->bcast_seqno_lock); | ||
1558 | spin_unlock_bh(&bat_priv->orig_hash_lock); | ||
1559 | out: | ||
1560 | if (orig_node) | ||
1561 | kref_put(&orig_node->refcount, orig_node_free_ref); | ||
1562 | return ret; | ||
1551 | } | 1563 | } |
1552 | 1564 | ||
1553 | int recv_vis_packet(struct sk_buff *skb, struct batman_if *recv_if) | 1565 | int recv_vis_packet(struct sk_buff *skb, struct batman_if *recv_if) |