diff options
author | Sven Eckelmann <sven@narfation.org> | 2012-07-15 16:26:51 -0400 |
---|---|---|
committer | Antonio Quartulli <ordex@autistici.org> | 2012-08-23 08:20:13 -0400 |
commit | 807736f6e00714fdeb443b31061d1c27fa903296 (patch) | |
tree | a070c2e9316365424e4d08e2fa50e5a28729670d | |
parent | 624463079e0af455a2d70d2a59b9e2f6b5827aea (diff) |
batman-adv: Split batadv_priv in sub-structures for features
The structure batadv_priv grows every time a new feature is introduced. It gets
hard to find the parts of the struct that belong to a specific feature. This
is made even harder by the fact that not every feature uses a prefix in the
member name.
The variables for bridge loop avoidance, gateway handling, translation table
and visualization server are moved into separate structs that are included in
the bat_priv main struct.
Signed-off-by: Sven Eckelmann <sven@narfation.org>
Signed-off-by: Antonio Quartulli <ordex@autistici.org>
-rw-r--r-- | net/batman-adv/bat_iv_ogm.c | 4 | ||||
-rw-r--r-- | net/batman-adv/bridge_loop_avoidance.c | 115 | ||||
-rw-r--r-- | net/batman-adv/gateway_client.c | 32 | ||||
-rw-r--r-- | net/batman-adv/hard-interface.c | 5 | ||||
-rw-r--r-- | net/batman-adv/main.c | 24 | ||||
-rw-r--r-- | net/batman-adv/routing.c | 6 | ||||
-rw-r--r-- | net/batman-adv/soft-interface.c | 17 | ||||
-rw-r--r-- | net/batman-adv/translation-table.c | 208 | ||||
-rw-r--r-- | net/batman-adv/types.h | 113 | ||||
-rw-r--r-- | net/batman-adv/vis.c | 130 |
10 files changed, 343 insertions, 311 deletions
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index e877af8bdd1e..eb507c901696 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c | |||
@@ -603,8 +603,8 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface) | |||
603 | htonl((uint32_t)atomic_read(&hard_iface->seqno)); | 603 | htonl((uint32_t)atomic_read(&hard_iface->seqno)); |
604 | atomic_inc(&hard_iface->seqno); | 604 | atomic_inc(&hard_iface->seqno); |
605 | 605 | ||
606 | batadv_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn); | 606 | batadv_ogm_packet->ttvn = atomic_read(&bat_priv->tt.vn); |
607 | batadv_ogm_packet->tt_crc = htons(bat_priv->tt_crc); | 607 | batadv_ogm_packet->tt_crc = htons(bat_priv->tt.local_crc); |
608 | if (tt_num_changes >= 0) | 608 | if (tt_num_changes >= 0) |
609 | batadv_ogm_packet->tt_num_changes = tt_num_changes; | 609 | batadv_ogm_packet->tt_num_changes = tt_num_changes; |
610 | 610 | ||
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c index c9732acd59ff..ad18017da4e7 100644 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c | |||
@@ -133,7 +133,7 @@ static void batadv_claim_free_ref(struct batadv_claim *claim) | |||
133 | static struct batadv_claim *batadv_claim_hash_find(struct batadv_priv *bat_priv, | 133 | static struct batadv_claim *batadv_claim_hash_find(struct batadv_priv *bat_priv, |
134 | struct batadv_claim *data) | 134 | struct batadv_claim *data) |
135 | { | 135 | { |
136 | struct batadv_hashtable *hash = bat_priv->claim_hash; | 136 | struct batadv_hashtable *hash = bat_priv->bla.claim_hash; |
137 | struct hlist_head *head; | 137 | struct hlist_head *head; |
138 | struct hlist_node *node; | 138 | struct hlist_node *node; |
139 | struct batadv_claim *claim; | 139 | struct batadv_claim *claim; |
@@ -174,7 +174,7 @@ static struct batadv_backbone_gw * | |||
174 | batadv_backbone_hash_find(struct batadv_priv *bat_priv, | 174 | batadv_backbone_hash_find(struct batadv_priv *bat_priv, |
175 | uint8_t *addr, short vid) | 175 | uint8_t *addr, short vid) |
176 | { | 176 | { |
177 | struct batadv_hashtable *hash = bat_priv->backbone_hash; | 177 | struct batadv_hashtable *hash = bat_priv->bla.backbone_hash; |
178 | struct hlist_head *head; | 178 | struct hlist_head *head; |
179 | struct hlist_node *node; | 179 | struct hlist_node *node; |
180 | struct batadv_backbone_gw search_entry, *backbone_gw; | 180 | struct batadv_backbone_gw search_entry, *backbone_gw; |
@@ -218,7 +218,7 @@ batadv_bla_del_backbone_claims(struct batadv_backbone_gw *backbone_gw) | |||
218 | int i; | 218 | int i; |
219 | spinlock_t *list_lock; /* protects write access to the hash lists */ | 219 | spinlock_t *list_lock; /* protects write access to the hash lists */ |
220 | 220 | ||
221 | hash = backbone_gw->bat_priv->claim_hash; | 221 | hash = backbone_gw->bat_priv->bla.claim_hash; |
222 | if (!hash) | 222 | if (!hash) |
223 | return; | 223 | return; |
224 | 224 | ||
@@ -265,7 +265,7 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac, | |||
265 | if (!primary_if) | 265 | if (!primary_if) |
266 | return; | 266 | return; |
267 | 267 | ||
268 | memcpy(&local_claim_dest, &bat_priv->claim_dest, | 268 | memcpy(&local_claim_dest, &bat_priv->bla.claim_dest, |
269 | sizeof(local_claim_dest)); | 269 | sizeof(local_claim_dest)); |
270 | local_claim_dest.type = claimtype; | 270 | local_claim_dest.type = claimtype; |
271 | 271 | ||
@@ -391,7 +391,7 @@ batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, uint8_t *orig, | |||
391 | /* one for the hash, one for returning */ | 391 | /* one for the hash, one for returning */ |
392 | atomic_set(&entry->refcount, 2); | 392 | atomic_set(&entry->refcount, 2); |
393 | 393 | ||
394 | hash_added = batadv_hash_add(bat_priv->backbone_hash, | 394 | hash_added = batadv_hash_add(bat_priv->bla.backbone_hash, |
395 | batadv_compare_backbone_gw, | 395 | batadv_compare_backbone_gw, |
396 | batadv_choose_backbone_gw, entry, | 396 | batadv_choose_backbone_gw, entry, |
397 | &entry->hash_entry); | 397 | &entry->hash_entry); |
@@ -458,7 +458,7 @@ static void batadv_bla_answer_request(struct batadv_priv *bat_priv, | |||
458 | if (!backbone_gw) | 458 | if (!backbone_gw) |
459 | return; | 459 | return; |
460 | 460 | ||
461 | hash = bat_priv->claim_hash; | 461 | hash = bat_priv->bla.claim_hash; |
462 | for (i = 0; i < hash->size; i++) { | 462 | for (i = 0; i < hash->size; i++) { |
463 | head = &hash->table[i]; | 463 | head = &hash->table[i]; |
464 | 464 | ||
@@ -499,7 +499,7 @@ static void batadv_bla_send_request(struct batadv_backbone_gw *backbone_gw) | |||
499 | 499 | ||
500 | /* no local broadcasts should be sent or received, for now. */ | 500 | /* no local broadcasts should be sent or received, for now. */ |
501 | if (!atomic_read(&backbone_gw->request_sent)) { | 501 | if (!atomic_read(&backbone_gw->request_sent)) { |
502 | atomic_inc(&backbone_gw->bat_priv->bla_num_requests); | 502 | atomic_inc(&backbone_gw->bat_priv->bla.num_requests); |
503 | atomic_set(&backbone_gw->request_sent, 1); | 503 | atomic_set(&backbone_gw->request_sent, 1); |
504 | } | 504 | } |
505 | } | 505 | } |
@@ -559,7 +559,7 @@ static void batadv_bla_add_claim(struct batadv_priv *bat_priv, | |||
559 | batadv_dbg(BATADV_DBG_BLA, bat_priv, | 559 | batadv_dbg(BATADV_DBG_BLA, bat_priv, |
560 | "bla_add_claim(): adding new entry %pM, vid %d to hash ...\n", | 560 | "bla_add_claim(): adding new entry %pM, vid %d to hash ...\n", |
561 | mac, vid); | 561 | mac, vid); |
562 | hash_added = batadv_hash_add(bat_priv->claim_hash, | 562 | hash_added = batadv_hash_add(bat_priv->bla.claim_hash, |
563 | batadv_compare_claim, | 563 | batadv_compare_claim, |
564 | batadv_choose_claim, claim, | 564 | batadv_choose_claim, claim, |
565 | &claim->hash_entry); | 565 | &claim->hash_entry); |
@@ -612,7 +612,7 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv, | |||
612 | batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_del_claim(): %pM, vid %d\n", | 612 | batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_del_claim(): %pM, vid %d\n", |
613 | mac, vid); | 613 | mac, vid); |
614 | 614 | ||
615 | batadv_hash_remove(bat_priv->claim_hash, batadv_compare_claim, | 615 | batadv_hash_remove(bat_priv->bla.claim_hash, batadv_compare_claim, |
616 | batadv_choose_claim, claim); | 616 | batadv_choose_claim, claim); |
617 | batadv_claim_free_ref(claim); /* reference from the hash is gone */ | 617 | batadv_claim_free_ref(claim); /* reference from the hash is gone */ |
618 | 618 | ||
@@ -659,7 +659,7 @@ static int batadv_handle_announce(struct batadv_priv *bat_priv, | |||
659 | * we can allow traffic again. | 659 | * we can allow traffic again. |
660 | */ | 660 | */ |
661 | if (atomic_read(&backbone_gw->request_sent)) { | 661 | if (atomic_read(&backbone_gw->request_sent)) { |
662 | atomic_dec(&backbone_gw->bat_priv->bla_num_requests); | 662 | atomic_dec(&backbone_gw->bat_priv->bla.num_requests); |
663 | atomic_set(&backbone_gw->request_sent, 0); | 663 | atomic_set(&backbone_gw->request_sent, 0); |
664 | } | 664 | } |
665 | } | 665 | } |
@@ -774,7 +774,7 @@ static int batadv_check_claim_group(struct batadv_priv *bat_priv, | |||
774 | struct batadv_bla_claim_dst *bla_dst, *bla_dst_own; | 774 | struct batadv_bla_claim_dst *bla_dst, *bla_dst_own; |
775 | 775 | ||
776 | bla_dst = (struct batadv_bla_claim_dst *)hw_dst; | 776 | bla_dst = (struct batadv_bla_claim_dst *)hw_dst; |
777 | bla_dst_own = &bat_priv->claim_dest; | 777 | bla_dst_own = &bat_priv->bla.claim_dest; |
778 | 778 | ||
779 | /* check if it is a claim packet in general */ | 779 | /* check if it is a claim packet in general */ |
780 | if (memcmp(bla_dst->magic, bla_dst_own->magic, | 780 | if (memcmp(bla_dst->magic, bla_dst_own->magic, |
@@ -947,7 +947,7 @@ static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now) | |||
947 | spinlock_t *list_lock; /* protects write access to the hash lists */ | 947 | spinlock_t *list_lock; /* protects write access to the hash lists */ |
948 | int i; | 948 | int i; |
949 | 949 | ||
950 | hash = bat_priv->backbone_hash; | 950 | hash = bat_priv->bla.backbone_hash; |
951 | if (!hash) | 951 | if (!hash) |
952 | return; | 952 | return; |
953 | 953 | ||
@@ -971,7 +971,7 @@ static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now) | |||
971 | purge_now: | 971 | purge_now: |
972 | /* don't wait for the pending request anymore */ | 972 | /* don't wait for the pending request anymore */ |
973 | if (atomic_read(&backbone_gw->request_sent)) | 973 | if (atomic_read(&backbone_gw->request_sent)) |
974 | atomic_dec(&bat_priv->bla_num_requests); | 974 | atomic_dec(&bat_priv->bla.num_requests); |
975 | 975 | ||
976 | batadv_bla_del_backbone_claims(backbone_gw); | 976 | batadv_bla_del_backbone_claims(backbone_gw); |
977 | 977 | ||
@@ -1001,7 +1001,7 @@ static void batadv_bla_purge_claims(struct batadv_priv *bat_priv, | |||
1001 | struct batadv_hashtable *hash; | 1001 | struct batadv_hashtable *hash; |
1002 | int i; | 1002 | int i; |
1003 | 1003 | ||
1004 | hash = bat_priv->claim_hash; | 1004 | hash = bat_priv->bla.claim_hash; |
1005 | if (!hash) | 1005 | if (!hash) |
1006 | return; | 1006 | return; |
1007 | 1007 | ||
@@ -1048,11 +1048,12 @@ void batadv_bla_update_orig_address(struct batadv_priv *bat_priv, | |||
1048 | struct hlist_node *node; | 1048 | struct hlist_node *node; |
1049 | struct hlist_head *head; | 1049 | struct hlist_head *head; |
1050 | struct batadv_hashtable *hash; | 1050 | struct batadv_hashtable *hash; |
1051 | __be16 group; | ||
1051 | int i; | 1052 | int i; |
1052 | 1053 | ||
1053 | /* reset bridge loop avoidance group id */ | 1054 | /* reset bridge loop avoidance group id */ |
1054 | bat_priv->claim_dest.group = | 1055 | group = htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN)); |
1055 | htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN)); | 1056 | bat_priv->bla.claim_dest.group = group; |
1056 | 1057 | ||
1057 | if (!oldif) { | 1058 | if (!oldif) { |
1058 | batadv_bla_purge_claims(bat_priv, NULL, 1); | 1059 | batadv_bla_purge_claims(bat_priv, NULL, 1); |
@@ -1060,7 +1061,7 @@ void batadv_bla_update_orig_address(struct batadv_priv *bat_priv, | |||
1060 | return; | 1061 | return; |
1061 | } | 1062 | } |
1062 | 1063 | ||
1063 | hash = bat_priv->backbone_hash; | 1064 | hash = bat_priv->bla.backbone_hash; |
1064 | if (!hash) | 1065 | if (!hash) |
1065 | return; | 1066 | return; |
1066 | 1067 | ||
@@ -1090,8 +1091,8 @@ void batadv_bla_update_orig_address(struct batadv_priv *bat_priv, | |||
1090 | /* (re)start the timer */ | 1091 | /* (re)start the timer */ |
1091 | static void batadv_bla_start_timer(struct batadv_priv *bat_priv) | 1092 | static void batadv_bla_start_timer(struct batadv_priv *bat_priv) |
1092 | { | 1093 | { |
1093 | INIT_DELAYED_WORK(&bat_priv->bla_work, batadv_bla_periodic_work); | 1094 | INIT_DELAYED_WORK(&bat_priv->bla.work, batadv_bla_periodic_work); |
1094 | queue_delayed_work(batadv_event_workqueue, &bat_priv->bla_work, | 1095 | queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work, |
1095 | msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH)); | 1096 | msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH)); |
1096 | } | 1097 | } |
1097 | 1098 | ||
@@ -1104,6 +1105,7 @@ static void batadv_bla_periodic_work(struct work_struct *work) | |||
1104 | struct delayed_work *delayed_work = | 1105 | struct delayed_work *delayed_work = |
1105 | container_of(work, struct delayed_work, work); | 1106 | container_of(work, struct delayed_work, work); |
1106 | struct batadv_priv *bat_priv; | 1107 | struct batadv_priv *bat_priv; |
1108 | struct batadv_priv_bla *priv_bla; | ||
1107 | struct hlist_node *node; | 1109 | struct hlist_node *node; |
1108 | struct hlist_head *head; | 1110 | struct hlist_head *head; |
1109 | struct batadv_backbone_gw *backbone_gw; | 1111 | struct batadv_backbone_gw *backbone_gw; |
@@ -1111,7 +1113,8 @@ static void batadv_bla_periodic_work(struct work_struct *work) | |||
1111 | struct batadv_hard_iface *primary_if; | 1113 | struct batadv_hard_iface *primary_if; |
1112 | int i; | 1114 | int i; |
1113 | 1115 | ||
1114 | bat_priv = container_of(delayed_work, struct batadv_priv, bla_work); | 1116 | priv_bla = container_of(delayed_work, struct batadv_priv_bla, work); |
1117 | bat_priv = container_of(priv_bla, struct batadv_priv, bla); | ||
1115 | primary_if = batadv_primary_if_get_selected(bat_priv); | 1118 | primary_if = batadv_primary_if_get_selected(bat_priv); |
1116 | if (!primary_if) | 1119 | if (!primary_if) |
1117 | goto out; | 1120 | goto out; |
@@ -1122,7 +1125,7 @@ static void batadv_bla_periodic_work(struct work_struct *work) | |||
1122 | if (!atomic_read(&bat_priv->bridge_loop_avoidance)) | 1125 | if (!atomic_read(&bat_priv->bridge_loop_avoidance)) |
1123 | goto out; | 1126 | goto out; |
1124 | 1127 | ||
1125 | hash = bat_priv->backbone_hash; | 1128 | hash = bat_priv->bla.backbone_hash; |
1126 | if (!hash) | 1129 | if (!hash) |
1127 | goto out; | 1130 | goto out; |
1128 | 1131 | ||
@@ -1162,40 +1165,41 @@ int batadv_bla_init(struct batadv_priv *bat_priv) | |||
1162 | int i; | 1165 | int i; |
1163 | uint8_t claim_dest[ETH_ALEN] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00}; | 1166 | uint8_t claim_dest[ETH_ALEN] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00}; |
1164 | struct batadv_hard_iface *primary_if; | 1167 | struct batadv_hard_iface *primary_if; |
1168 | uint16_t crc; | ||
1169 | unsigned long entrytime; | ||
1165 | 1170 | ||
1166 | batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hash registering\n"); | 1171 | batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hash registering\n"); |
1167 | 1172 | ||
1168 | /* setting claim destination address */ | 1173 | /* setting claim destination address */ |
1169 | memcpy(&bat_priv->claim_dest.magic, claim_dest, 3); | 1174 | memcpy(&bat_priv->bla.claim_dest.magic, claim_dest, 3); |
1170 | bat_priv->claim_dest.type = 0; | 1175 | bat_priv->bla.claim_dest.type = 0; |
1171 | primary_if = batadv_primary_if_get_selected(bat_priv); | 1176 | primary_if = batadv_primary_if_get_selected(bat_priv); |
1172 | if (primary_if) { | 1177 | if (primary_if) { |
1173 | bat_priv->claim_dest.group = | 1178 | crc = crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN); |
1174 | htons(crc16(0, primary_if->net_dev->dev_addr, | 1179 | bat_priv->bla.claim_dest.group = htons(crc); |
1175 | ETH_ALEN)); | ||
1176 | batadv_hardif_free_ref(primary_if); | 1180 | batadv_hardif_free_ref(primary_if); |
1177 | } else { | 1181 | } else { |
1178 | bat_priv->claim_dest.group = 0; /* will be set later */ | 1182 | bat_priv->bla.claim_dest.group = 0; /* will be set later */ |
1179 | } | 1183 | } |
1180 | 1184 | ||
1181 | /* initialize the duplicate list */ | 1185 | /* initialize the duplicate list */ |
1186 | entrytime = jiffies - msecs_to_jiffies(BATADV_DUPLIST_TIMEOUT); | ||
1182 | for (i = 0; i < BATADV_DUPLIST_SIZE; i++) | 1187 | for (i = 0; i < BATADV_DUPLIST_SIZE; i++) |
1183 | bat_priv->bcast_duplist[i].entrytime = | 1188 | bat_priv->bla.bcast_duplist[i].entrytime = entrytime; |
1184 | jiffies - msecs_to_jiffies(BATADV_DUPLIST_TIMEOUT); | 1189 | bat_priv->bla.bcast_duplist_curr = 0; |
1185 | bat_priv->bcast_duplist_curr = 0; | ||
1186 | 1190 | ||
1187 | if (bat_priv->claim_hash) | 1191 | if (bat_priv->bla.claim_hash) |
1188 | return 0; | 1192 | return 0; |
1189 | 1193 | ||
1190 | bat_priv->claim_hash = batadv_hash_new(128); | 1194 | bat_priv->bla.claim_hash = batadv_hash_new(128); |
1191 | bat_priv->backbone_hash = batadv_hash_new(32); | 1195 | bat_priv->bla.backbone_hash = batadv_hash_new(32); |
1192 | 1196 | ||
1193 | if (!bat_priv->claim_hash || !bat_priv->backbone_hash) | 1197 | if (!bat_priv->bla.claim_hash || !bat_priv->bla.backbone_hash) |
1194 | return -ENOMEM; | 1198 | return -ENOMEM; |
1195 | 1199 | ||
1196 | batadv_hash_set_lock_class(bat_priv->claim_hash, | 1200 | batadv_hash_set_lock_class(bat_priv->bla.claim_hash, |
1197 | &batadv_claim_hash_lock_class_key); | 1201 | &batadv_claim_hash_lock_class_key); |
1198 | batadv_hash_set_lock_class(bat_priv->backbone_hash, | 1202 | batadv_hash_set_lock_class(bat_priv->bla.backbone_hash, |
1199 | &batadv_backbone_hash_lock_class_key); | 1203 | &batadv_backbone_hash_lock_class_key); |
1200 | 1204 | ||
1201 | batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hashes initialized\n"); | 1205 | batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hashes initialized\n"); |
@@ -1236,8 +1240,9 @@ int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv, | |||
1236 | crc = crc16(0, content, length); | 1240 | crc = crc16(0, content, length); |
1237 | 1241 | ||
1238 | for (i = 0; i < BATADV_DUPLIST_SIZE; i++) { | 1242 | for (i = 0; i < BATADV_DUPLIST_SIZE; i++) { |
1239 | curr = (bat_priv->bcast_duplist_curr + i) % BATADV_DUPLIST_SIZE; | 1243 | curr = (bat_priv->bla.bcast_duplist_curr + i); |
1240 | entry = &bat_priv->bcast_duplist[curr]; | 1244 | curr %= BATADV_DUPLIST_SIZE; |
1245 | entry = &bat_priv->bla.bcast_duplist[curr]; | ||
1241 | 1246 | ||
1242 | /* we can stop searching if the entry is too old ; | 1247 | /* we can stop searching if the entry is too old ; |
1243 | * later entries will be even older | 1248 | * later entries will be even older |
@@ -1258,13 +1263,13 @@ int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv, | |||
1258 | return 1; | 1263 | return 1; |
1259 | } | 1264 | } |
1260 | /* not found, add a new entry (overwrite the oldest entry) */ | 1265 | /* not found, add a new entry (overwrite the oldest entry) */ |
1261 | curr = (bat_priv->bcast_duplist_curr + BATADV_DUPLIST_SIZE - 1); | 1266 | curr = (bat_priv->bla.bcast_duplist_curr + BATADV_DUPLIST_SIZE - 1); |
1262 | curr %= BATADV_DUPLIST_SIZE; | 1267 | curr %= BATADV_DUPLIST_SIZE; |
1263 | entry = &bat_priv->bcast_duplist[curr]; | 1268 | entry = &bat_priv->bla.bcast_duplist[curr]; |
1264 | entry->crc = crc; | 1269 | entry->crc = crc; |
1265 | entry->entrytime = jiffies; | 1270 | entry->entrytime = jiffies; |
1266 | memcpy(entry->orig, bcast_packet->orig, ETH_ALEN); | 1271 | memcpy(entry->orig, bcast_packet->orig, ETH_ALEN); |
1267 | bat_priv->bcast_duplist_curr = curr; | 1272 | bat_priv->bla.bcast_duplist_curr = curr; |
1268 | 1273 | ||
1269 | /* allow it, its the first occurence. */ | 1274 | /* allow it, its the first occurence. */ |
1270 | return 0; | 1275 | return 0; |
@@ -1281,7 +1286,7 @@ int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv, | |||
1281 | */ | 1286 | */ |
1282 | int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig) | 1287 | int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig) |
1283 | { | 1288 | { |
1284 | struct batadv_hashtable *hash = bat_priv->backbone_hash; | 1289 | struct batadv_hashtable *hash = bat_priv->bla.backbone_hash; |
1285 | struct hlist_head *head; | 1290 | struct hlist_head *head; |
1286 | struct hlist_node *node; | 1291 | struct hlist_node *node; |
1287 | struct batadv_backbone_gw *backbone_gw; | 1292 | struct batadv_backbone_gw *backbone_gw; |
@@ -1361,18 +1366,18 @@ void batadv_bla_free(struct batadv_priv *bat_priv) | |||
1361 | { | 1366 | { |
1362 | struct batadv_hard_iface *primary_if; | 1367 | struct batadv_hard_iface *primary_if; |
1363 | 1368 | ||
1364 | cancel_delayed_work_sync(&bat_priv->bla_work); | 1369 | cancel_delayed_work_sync(&bat_priv->bla.work); |
1365 | primary_if = batadv_primary_if_get_selected(bat_priv); | 1370 | primary_if = batadv_primary_if_get_selected(bat_priv); |
1366 | 1371 | ||
1367 | if (bat_priv->claim_hash) { | 1372 | if (bat_priv->bla.claim_hash) { |
1368 | batadv_bla_purge_claims(bat_priv, primary_if, 1); | 1373 | batadv_bla_purge_claims(bat_priv, primary_if, 1); |
1369 | batadv_hash_destroy(bat_priv->claim_hash); | 1374 | batadv_hash_destroy(bat_priv->bla.claim_hash); |
1370 | bat_priv->claim_hash = NULL; | 1375 | bat_priv->bla.claim_hash = NULL; |
1371 | } | 1376 | } |
1372 | if (bat_priv->backbone_hash) { | 1377 | if (bat_priv->bla.backbone_hash) { |
1373 | batadv_bla_purge_backbone_gw(bat_priv, 1); | 1378 | batadv_bla_purge_backbone_gw(bat_priv, 1); |
1374 | batadv_hash_destroy(bat_priv->backbone_hash); | 1379 | batadv_hash_destroy(bat_priv->bla.backbone_hash); |
1375 | bat_priv->backbone_hash = NULL; | 1380 | bat_priv->bla.backbone_hash = NULL; |
1376 | } | 1381 | } |
1377 | if (primary_if) | 1382 | if (primary_if) |
1378 | batadv_hardif_free_ref(primary_if); | 1383 | batadv_hardif_free_ref(primary_if); |
@@ -1411,7 +1416,7 @@ int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid, | |||
1411 | goto allow; | 1416 | goto allow; |
1412 | 1417 | ||
1413 | 1418 | ||
1414 | if (unlikely(atomic_read(&bat_priv->bla_num_requests))) | 1419 | if (unlikely(atomic_read(&bat_priv->bla.num_requests))) |
1415 | /* don't allow broadcasts while requests are in flight */ | 1420 | /* don't allow broadcasts while requests are in flight */ |
1416 | if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) | 1421 | if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) |
1417 | goto handled; | 1422 | goto handled; |
@@ -1510,7 +1515,7 @@ int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid) | |||
1510 | 1515 | ||
1511 | ethhdr = (struct ethhdr *)skb_mac_header(skb); | 1516 | ethhdr = (struct ethhdr *)skb_mac_header(skb); |
1512 | 1517 | ||
1513 | if (unlikely(atomic_read(&bat_priv->bla_num_requests))) | 1518 | if (unlikely(atomic_read(&bat_priv->bla.num_requests))) |
1514 | /* don't allow broadcasts while requests are in flight */ | 1519 | /* don't allow broadcasts while requests are in flight */ |
1515 | if (is_multicast_ether_addr(ethhdr->h_dest)) | 1520 | if (is_multicast_ether_addr(ethhdr->h_dest)) |
1516 | goto handled; | 1521 | goto handled; |
@@ -1566,7 +1571,7 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset) | |||
1566 | { | 1571 | { |
1567 | struct net_device *net_dev = (struct net_device *)seq->private; | 1572 | struct net_device *net_dev = (struct net_device *)seq->private; |
1568 | struct batadv_priv *bat_priv = netdev_priv(net_dev); | 1573 | struct batadv_priv *bat_priv = netdev_priv(net_dev); |
1569 | struct batadv_hashtable *hash = bat_priv->claim_hash; | 1574 | struct batadv_hashtable *hash = bat_priv->bla.claim_hash; |
1570 | struct batadv_claim *claim; | 1575 | struct batadv_claim *claim; |
1571 | struct batadv_hard_iface *primary_if; | 1576 | struct batadv_hard_iface *primary_if; |
1572 | struct hlist_node *node; | 1577 | struct hlist_node *node; |
@@ -1595,7 +1600,7 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset) | |||
1595 | seq_printf(seq, | 1600 | seq_printf(seq, |
1596 | "Claims announced for the mesh %s (orig %pM, group id %04x)\n", | 1601 | "Claims announced for the mesh %s (orig %pM, group id %04x)\n", |
1597 | net_dev->name, primary_addr, | 1602 | net_dev->name, primary_addr, |
1598 | ntohs(bat_priv->claim_dest.group)); | 1603 | ntohs(bat_priv->bla.claim_dest.group)); |
1599 | seq_printf(seq, " %-17s %-5s %-17s [o] (%-4s)\n", | 1604 | seq_printf(seq, " %-17s %-5s %-17s [o] (%-4s)\n", |
1600 | "Client", "VID", "Originator", "CRC"); | 1605 | "Client", "VID", "Originator", "CRC"); |
1601 | for (i = 0; i < hash->size; i++) { | 1606 | for (i = 0; i < hash->size; i++) { |
@@ -1623,7 +1628,7 @@ int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset) | |||
1623 | { | 1628 | { |
1624 | struct net_device *net_dev = (struct net_device *)seq->private; | 1629 | struct net_device *net_dev = (struct net_device *)seq->private; |
1625 | struct batadv_priv *bat_priv = netdev_priv(net_dev); | 1630 | struct batadv_priv *bat_priv = netdev_priv(net_dev); |
1626 | struct batadv_hashtable *hash = bat_priv->backbone_hash; | 1631 | struct batadv_hashtable *hash = bat_priv->bla.backbone_hash; |
1627 | struct batadv_backbone_gw *backbone_gw; | 1632 | struct batadv_backbone_gw *backbone_gw; |
1628 | struct batadv_hard_iface *primary_if; | 1633 | struct batadv_hard_iface *primary_if; |
1629 | struct hlist_node *node; | 1634 | struct hlist_node *node; |
@@ -1653,7 +1658,7 @@ int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset) | |||
1653 | seq_printf(seq, | 1658 | seq_printf(seq, |
1654 | "Backbones announced for the mesh %s (orig %pM, group id %04x)\n", | 1659 | "Backbones announced for the mesh %s (orig %pM, group id %04x)\n", |
1655 | net_dev->name, primary_addr, | 1660 | net_dev->name, primary_addr, |
1656 | ntohs(bat_priv->claim_dest.group)); | 1661 | ntohs(bat_priv->bla.claim_dest.group)); |
1657 | seq_printf(seq, " %-17s %-5s %-9s (%-4s)\n", | 1662 | seq_printf(seq, " %-17s %-5s %-9s (%-4s)\n", |
1658 | "Originator", "VID", "last seen", "CRC"); | 1663 | "Originator", "VID", "last seen", "CRC"); |
1659 | for (i = 0; i < hash->size; i++) { | 1664 | for (i = 0; i < hash->size; i++) { |
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c index fc866f2e4528..eef7cc739397 100644 --- a/net/batman-adv/gateway_client.c +++ b/net/batman-adv/gateway_client.c | |||
@@ -48,7 +48,7 @@ batadv_gw_get_selected_gw_node(struct batadv_priv *bat_priv) | |||
48 | struct batadv_gw_node *gw_node; | 48 | struct batadv_gw_node *gw_node; |
49 | 49 | ||
50 | rcu_read_lock(); | 50 | rcu_read_lock(); |
51 | gw_node = rcu_dereference(bat_priv->curr_gw); | 51 | gw_node = rcu_dereference(bat_priv->gw.curr_gw); |
52 | if (!gw_node) | 52 | if (!gw_node) |
53 | goto out; | 53 | goto out; |
54 | 54 | ||
@@ -91,23 +91,23 @@ static void batadv_gw_select(struct batadv_priv *bat_priv, | |||
91 | { | 91 | { |
92 | struct batadv_gw_node *curr_gw_node; | 92 | struct batadv_gw_node *curr_gw_node; |
93 | 93 | ||
94 | spin_lock_bh(&bat_priv->gw_list_lock); | 94 | spin_lock_bh(&bat_priv->gw.list_lock); |
95 | 95 | ||
96 | if (new_gw_node && !atomic_inc_not_zero(&new_gw_node->refcount)) | 96 | if (new_gw_node && !atomic_inc_not_zero(&new_gw_node->refcount)) |
97 | new_gw_node = NULL; | 97 | new_gw_node = NULL; |
98 | 98 | ||
99 | curr_gw_node = rcu_dereference_protected(bat_priv->curr_gw, 1); | 99 | curr_gw_node = rcu_dereference_protected(bat_priv->gw.curr_gw, 1); |
100 | rcu_assign_pointer(bat_priv->curr_gw, new_gw_node); | 100 | rcu_assign_pointer(bat_priv->gw.curr_gw, new_gw_node); |
101 | 101 | ||
102 | if (curr_gw_node) | 102 | if (curr_gw_node) |
103 | batadv_gw_node_free_ref(curr_gw_node); | 103 | batadv_gw_node_free_ref(curr_gw_node); |
104 | 104 | ||
105 | spin_unlock_bh(&bat_priv->gw_list_lock); | 105 | spin_unlock_bh(&bat_priv->gw.list_lock); |
106 | } | 106 | } |
107 | 107 | ||
108 | void batadv_gw_deselect(struct batadv_priv *bat_priv) | 108 | void batadv_gw_deselect(struct batadv_priv *bat_priv) |
109 | { | 109 | { |
110 | atomic_set(&bat_priv->gw_reselect, 1); | 110 | atomic_set(&bat_priv->gw.reselect, 1); |
111 | } | 111 | } |
112 | 112 | ||
113 | static struct batadv_gw_node * | 113 | static struct batadv_gw_node * |
@@ -122,7 +122,7 @@ batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv) | |||
122 | struct batadv_orig_node *orig_node; | 122 | struct batadv_orig_node *orig_node; |
123 | 123 | ||
124 | rcu_read_lock(); | 124 | rcu_read_lock(); |
125 | hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) { | 125 | hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw.list, list) { |
126 | if (gw_node->deleted) | 126 | if (gw_node->deleted) |
127 | continue; | 127 | continue; |
128 | 128 | ||
@@ -202,7 +202,7 @@ void batadv_gw_election(struct batadv_priv *bat_priv) | |||
202 | 202 | ||
203 | curr_gw = batadv_gw_get_selected_gw_node(bat_priv); | 203 | curr_gw = batadv_gw_get_selected_gw_node(bat_priv); |
204 | 204 | ||
205 | if (!batadv_atomic_dec_not_zero(&bat_priv->gw_reselect) && curr_gw) | 205 | if (!batadv_atomic_dec_not_zero(&bat_priv->gw.reselect) && curr_gw) |
206 | goto out; | 206 | goto out; |
207 | 207 | ||
208 | next_gw = batadv_gw_get_best_gw_node(bat_priv); | 208 | next_gw = batadv_gw_get_best_gw_node(bat_priv); |
@@ -321,9 +321,9 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv, | |||
321 | gw_node->orig_node = orig_node; | 321 | gw_node->orig_node = orig_node; |
322 | atomic_set(&gw_node->refcount, 1); | 322 | atomic_set(&gw_node->refcount, 1); |
323 | 323 | ||
324 | spin_lock_bh(&bat_priv->gw_list_lock); | 324 | spin_lock_bh(&bat_priv->gw.list_lock); |
325 | hlist_add_head_rcu(&gw_node->list, &bat_priv->gw_list); | 325 | hlist_add_head_rcu(&gw_node->list, &bat_priv->gw.list); |
326 | spin_unlock_bh(&bat_priv->gw_list_lock); | 326 | spin_unlock_bh(&bat_priv->gw.list_lock); |
327 | 327 | ||
328 | batadv_gw_bandwidth_to_kbit(new_gwflags, &down, &up); | 328 | batadv_gw_bandwidth_to_kbit(new_gwflags, &down, &up); |
329 | batadv_dbg(BATADV_DBG_BATMAN, bat_priv, | 329 | batadv_dbg(BATADV_DBG_BATMAN, bat_priv, |
@@ -350,7 +350,7 @@ void batadv_gw_node_update(struct batadv_priv *bat_priv, | |||
350 | curr_gw = batadv_gw_get_selected_gw_node(bat_priv); | 350 | curr_gw = batadv_gw_get_selected_gw_node(bat_priv); |
351 | 351 | ||
352 | rcu_read_lock(); | 352 | rcu_read_lock(); |
353 | hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) { | 353 | hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw.list, list) { |
354 | if (gw_node->orig_node != orig_node) | 354 | if (gw_node->orig_node != orig_node) |
355 | continue; | 355 | continue; |
356 | 356 | ||
@@ -404,10 +404,10 @@ void batadv_gw_node_purge(struct batadv_priv *bat_priv) | |||
404 | 404 | ||
405 | curr_gw = batadv_gw_get_selected_gw_node(bat_priv); | 405 | curr_gw = batadv_gw_get_selected_gw_node(bat_priv); |
406 | 406 | ||
407 | spin_lock_bh(&bat_priv->gw_list_lock); | 407 | spin_lock_bh(&bat_priv->gw.list_lock); |
408 | 408 | ||
409 | hlist_for_each_entry_safe(gw_node, node, node_tmp, | 409 | hlist_for_each_entry_safe(gw_node, node, node_tmp, |
410 | &bat_priv->gw_list, list) { | 410 | &bat_priv->gw.list, list) { |
411 | if (((!gw_node->deleted) || | 411 | if (((!gw_node->deleted) || |
412 | (time_before(jiffies, gw_node->deleted + timeout))) && | 412 | (time_before(jiffies, gw_node->deleted + timeout))) && |
413 | atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE) | 413 | atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE) |
@@ -420,7 +420,7 @@ void batadv_gw_node_purge(struct batadv_priv *bat_priv) | |||
420 | batadv_gw_node_free_ref(gw_node); | 420 | batadv_gw_node_free_ref(gw_node); |
421 | } | 421 | } |
422 | 422 | ||
423 | spin_unlock_bh(&bat_priv->gw_list_lock); | 423 | spin_unlock_bh(&bat_priv->gw.list_lock); |
424 | 424 | ||
425 | /* gw_deselect() needs to acquire the gw_list_lock */ | 425 | /* gw_deselect() needs to acquire the gw_list_lock */ |
426 | if (do_deselect) | 426 | if (do_deselect) |
@@ -496,7 +496,7 @@ int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset) | |||
496 | primary_if->net_dev->dev_addr, net_dev->name); | 496 | primary_if->net_dev->dev_addr, net_dev->name); |
497 | 497 | ||
498 | rcu_read_lock(); | 498 | rcu_read_lock(); |
499 | hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) { | 499 | hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw.list, list) { |
500 | if (gw_node->deleted) | 500 | if (gw_node->deleted) |
501 | continue; | 501 | continue; |
502 | 502 | ||
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c index 2c5a247a8f12..d112fd6750b0 100644 --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c | |||
@@ -103,13 +103,14 @@ static void batadv_primary_if_update_addr(struct batadv_priv *bat_priv, | |||
103 | { | 103 | { |
104 | struct batadv_vis_packet *vis_packet; | 104 | struct batadv_vis_packet *vis_packet; |
105 | struct batadv_hard_iface *primary_if; | 105 | struct batadv_hard_iface *primary_if; |
106 | struct sk_buff *skb; | ||
106 | 107 | ||
107 | primary_if = batadv_primary_if_get_selected(bat_priv); | 108 | primary_if = batadv_primary_if_get_selected(bat_priv); |
108 | if (!primary_if) | 109 | if (!primary_if) |
109 | goto out; | 110 | goto out; |
110 | 111 | ||
111 | vis_packet = (struct batadv_vis_packet *) | 112 | skb = bat_priv->vis.my_info->skb_packet; |
112 | bat_priv->my_vis_info->skb_packet->data; | 113 | vis_packet = (struct batadv_vis_packet *)skb->data; |
113 | memcpy(vis_packet->vis_orig, primary_if->net_dev->dev_addr, ETH_ALEN); | 114 | memcpy(vis_packet->vis_orig, primary_if->net_dev->dev_addr, ETH_ALEN); |
114 | memcpy(vis_packet->sender_orig, | 115 | memcpy(vis_packet->sender_orig, |
115 | primary_if->net_dev->dev_addr, ETH_ALEN); | 116 | primary_if->net_dev->dev_addr, ETH_ALEN); |
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c index 2a1f24328b81..b4aa470bc4a6 100644 --- a/net/batman-adv/main.c +++ b/net/batman-adv/main.c | |||
@@ -94,20 +94,20 @@ int batadv_mesh_init(struct net_device *soft_iface) | |||
94 | 94 | ||
95 | spin_lock_init(&bat_priv->forw_bat_list_lock); | 95 | spin_lock_init(&bat_priv->forw_bat_list_lock); |
96 | spin_lock_init(&bat_priv->forw_bcast_list_lock); | 96 | spin_lock_init(&bat_priv->forw_bcast_list_lock); |
97 | spin_lock_init(&bat_priv->tt_changes_list_lock); | 97 | spin_lock_init(&bat_priv->tt.changes_list_lock); |
98 | spin_lock_init(&bat_priv->tt_req_list_lock); | 98 | spin_lock_init(&bat_priv->tt.req_list_lock); |
99 | spin_lock_init(&bat_priv->tt_roam_list_lock); | 99 | spin_lock_init(&bat_priv->tt.roam_list_lock); |
100 | spin_lock_init(&bat_priv->tt_buff_lock); | 100 | spin_lock_init(&bat_priv->tt.last_changeset_lock); |
101 | spin_lock_init(&bat_priv->gw_list_lock); | 101 | spin_lock_init(&bat_priv->gw.list_lock); |
102 | spin_lock_init(&bat_priv->vis_hash_lock); | 102 | spin_lock_init(&bat_priv->vis.hash_lock); |
103 | spin_lock_init(&bat_priv->vis_list_lock); | 103 | spin_lock_init(&bat_priv->vis.list_lock); |
104 | 104 | ||
105 | INIT_HLIST_HEAD(&bat_priv->forw_bat_list); | 105 | INIT_HLIST_HEAD(&bat_priv->forw_bat_list); |
106 | INIT_HLIST_HEAD(&bat_priv->forw_bcast_list); | 106 | INIT_HLIST_HEAD(&bat_priv->forw_bcast_list); |
107 | INIT_HLIST_HEAD(&bat_priv->gw_list); | 107 | INIT_HLIST_HEAD(&bat_priv->gw.list); |
108 | INIT_LIST_HEAD(&bat_priv->tt_changes_list); | 108 | INIT_LIST_HEAD(&bat_priv->tt.changes_list); |
109 | INIT_LIST_HEAD(&bat_priv->tt_req_list); | 109 | INIT_LIST_HEAD(&bat_priv->tt.req_list); |
110 | INIT_LIST_HEAD(&bat_priv->tt_roam_list); | 110 | INIT_LIST_HEAD(&bat_priv->tt.roam_list); |
111 | 111 | ||
112 | ret = batadv_originator_init(bat_priv); | 112 | ret = batadv_originator_init(bat_priv); |
113 | if (ret < 0) | 113 | if (ret < 0) |
@@ -128,7 +128,7 @@ int batadv_mesh_init(struct net_device *soft_iface) | |||
128 | if (ret < 0) | 128 | if (ret < 0) |
129 | goto err; | 129 | goto err; |
130 | 130 | ||
131 | atomic_set(&bat_priv->gw_reselect, 0); | 131 | atomic_set(&bat_priv->gw.reselect, 0); |
132 | atomic_set(&bat_priv->mesh_state, BATADV_MESH_ACTIVE); | 132 | atomic_set(&bat_priv->mesh_state, BATADV_MESH_ACTIVE); |
133 | 133 | ||
134 | return 0; | 134 | return 0; |
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c index bc2b88bbea1f..d5edee7ecfa8 100644 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c | |||
@@ -721,7 +721,7 @@ int batadv_recv_roam_adv(struct sk_buff *skb, struct batadv_hard_iface *recv_if) | |||
721 | * been incremented yet. This flag will make me check all the incoming | 721 | * been incremented yet. This flag will make me check all the incoming |
722 | * packets for the correct destination. | 722 | * packets for the correct destination. |
723 | */ | 723 | */ |
724 | bat_priv->tt_poss_change = true; | 724 | bat_priv->tt.poss_change = true; |
725 | 725 | ||
726 | batadv_orig_node_free_ref(orig_node); | 726 | batadv_orig_node_free_ref(orig_node); |
727 | out: | 727 | out: |
@@ -947,8 +947,8 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv, | |||
947 | unicast_packet = (struct batadv_unicast_packet *)skb->data; | 947 | unicast_packet = (struct batadv_unicast_packet *)skb->data; |
948 | 948 | ||
949 | if (batadv_is_my_mac(unicast_packet->dest)) { | 949 | if (batadv_is_my_mac(unicast_packet->dest)) { |
950 | tt_poss_change = bat_priv->tt_poss_change; | 950 | tt_poss_change = bat_priv->tt.poss_change; |
951 | curr_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn); | 951 | curr_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn); |
952 | } else { | 952 | } else { |
953 | orig_node = batadv_orig_hash_find(bat_priv, | 953 | orig_node = batadv_orig_hash_find(bat_priv, |
954 | unicast_packet->dest); | 954 | unicast_packet->dest); |
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index 2b3842b99096..6d44625c0f5e 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c | |||
@@ -420,14 +420,15 @@ struct net_device *batadv_softif_create(const char *name) | |||
420 | 420 | ||
421 | atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE); | 421 | atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE); |
422 | atomic_set(&bat_priv->bcast_seqno, 1); | 422 | atomic_set(&bat_priv->bcast_seqno, 1); |
423 | atomic_set(&bat_priv->ttvn, 0); | 423 | atomic_set(&bat_priv->tt.vn, 0); |
424 | atomic_set(&bat_priv->tt_local_changes, 0); | 424 | atomic_set(&bat_priv->tt.local_changes, 0); |
425 | atomic_set(&bat_priv->tt_ogm_append_cnt, 0); | 425 | atomic_set(&bat_priv->tt.ogm_append_cnt, 0); |
426 | atomic_set(&bat_priv->bla_num_requests, 0); | 426 | #ifdef CONFIG_BATMAN_ADV_BLA |
427 | 427 | atomic_set(&bat_priv->bla.num_requests, 0); | |
428 | bat_priv->tt_buff = NULL; | 428 | #endif |
429 | bat_priv->tt_buff_len = 0; | 429 | bat_priv->tt.last_changeset = NULL; |
430 | bat_priv->tt_poss_change = false; | 430 | bat_priv->tt.last_changeset_len = 0; |
431 | bat_priv->tt.poss_change = false; | ||
431 | 432 | ||
432 | bat_priv->primary_if = NULL; | 433 | bat_priv->primary_if = NULL; |
433 | bat_priv->num_ifaces = 0; | 434 | bat_priv->num_ifaces = 0; |
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index eb8490e504e2..b01049a7a912 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c | |||
@@ -46,8 +46,8 @@ static int batadv_compare_tt(const struct hlist_node *node, const void *data2) | |||
46 | 46 | ||
47 | static void batadv_tt_start_timer(struct batadv_priv *bat_priv) | 47 | static void batadv_tt_start_timer(struct batadv_priv *bat_priv) |
48 | { | 48 | { |
49 | INIT_DELAYED_WORK(&bat_priv->tt_work, batadv_tt_purge); | 49 | INIT_DELAYED_WORK(&bat_priv->tt.work, batadv_tt_purge); |
50 | queue_delayed_work(batadv_event_workqueue, &bat_priv->tt_work, | 50 | queue_delayed_work(batadv_event_workqueue, &bat_priv->tt.work, |
51 | msecs_to_jiffies(5000)); | 51 | msecs_to_jiffies(5000)); |
52 | } | 52 | } |
53 | 53 | ||
@@ -88,7 +88,7 @@ batadv_tt_local_hash_find(struct batadv_priv *bat_priv, const void *data) | |||
88 | struct batadv_tt_common_entry *tt_common_entry; | 88 | struct batadv_tt_common_entry *tt_common_entry; |
89 | struct batadv_tt_local_entry *tt_local_entry = NULL; | 89 | struct batadv_tt_local_entry *tt_local_entry = NULL; |
90 | 90 | ||
91 | tt_common_entry = batadv_tt_hash_find(bat_priv->tt_local_hash, data); | 91 | tt_common_entry = batadv_tt_hash_find(bat_priv->tt.local_hash, data); |
92 | if (tt_common_entry) | 92 | if (tt_common_entry) |
93 | tt_local_entry = container_of(tt_common_entry, | 93 | tt_local_entry = container_of(tt_common_entry, |
94 | struct batadv_tt_local_entry, | 94 | struct batadv_tt_local_entry, |
@@ -102,7 +102,7 @@ batadv_tt_global_hash_find(struct batadv_priv *bat_priv, const void *data) | |||
102 | struct batadv_tt_common_entry *tt_common_entry; | 102 | struct batadv_tt_common_entry *tt_common_entry; |
103 | struct batadv_tt_global_entry *tt_global_entry = NULL; | 103 | struct batadv_tt_global_entry *tt_global_entry = NULL; |
104 | 104 | ||
105 | tt_common_entry = batadv_tt_hash_find(bat_priv->tt_global_hash, data); | 105 | tt_common_entry = batadv_tt_hash_find(bat_priv->tt.global_hash, data); |
106 | if (tt_common_entry) | 106 | if (tt_common_entry) |
107 | tt_global_entry = container_of(tt_common_entry, | 107 | tt_global_entry = container_of(tt_common_entry, |
108 | struct batadv_tt_global_entry, | 108 | struct batadv_tt_global_entry, |
@@ -177,8 +177,8 @@ static void batadv_tt_local_event(struct batadv_priv *bat_priv, | |||
177 | del_op_requested = flags & BATADV_TT_CLIENT_DEL; | 177 | del_op_requested = flags & BATADV_TT_CLIENT_DEL; |
178 | 178 | ||
179 | /* check for ADD+DEL or DEL+ADD events */ | 179 | /* check for ADD+DEL or DEL+ADD events */ |
180 | spin_lock_bh(&bat_priv->tt_changes_list_lock); | 180 | spin_lock_bh(&bat_priv->tt.changes_list_lock); |
181 | list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list, | 181 | list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list, |
182 | list) { | 182 | list) { |
183 | if (!batadv_compare_eth(entry->change.addr, addr)) | 183 | if (!batadv_compare_eth(entry->change.addr, addr)) |
184 | continue; | 184 | continue; |
@@ -205,15 +205,15 @@ del: | |||
205 | } | 205 | } |
206 | 206 | ||
207 | /* track the change in the OGMinterval list */ | 207 | /* track the change in the OGMinterval list */ |
208 | list_add_tail(&tt_change_node->list, &bat_priv->tt_changes_list); | 208 | list_add_tail(&tt_change_node->list, &bat_priv->tt.changes_list); |
209 | 209 | ||
210 | unlock: | 210 | unlock: |
211 | spin_unlock_bh(&bat_priv->tt_changes_list_lock); | 211 | spin_unlock_bh(&bat_priv->tt.changes_list_lock); |
212 | 212 | ||
213 | if (event_removed) | 213 | if (event_removed) |
214 | atomic_dec(&bat_priv->tt_local_changes); | 214 | atomic_dec(&bat_priv->tt.local_changes); |
215 | else | 215 | else |
216 | atomic_inc(&bat_priv->tt_local_changes); | 216 | atomic_inc(&bat_priv->tt.local_changes); |
217 | } | 217 | } |
218 | 218 | ||
219 | int batadv_tt_len(int changes_num) | 219 | int batadv_tt_len(int changes_num) |
@@ -223,12 +223,12 @@ int batadv_tt_len(int changes_num) | |||
223 | 223 | ||
224 | static int batadv_tt_local_init(struct batadv_priv *bat_priv) | 224 | static int batadv_tt_local_init(struct batadv_priv *bat_priv) |
225 | { | 225 | { |
226 | if (bat_priv->tt_local_hash) | 226 | if (bat_priv->tt.local_hash) |
227 | return 0; | 227 | return 0; |
228 | 228 | ||
229 | bat_priv->tt_local_hash = batadv_hash_new(1024); | 229 | bat_priv->tt.local_hash = batadv_hash_new(1024); |
230 | 230 | ||
231 | if (!bat_priv->tt_local_hash) | 231 | if (!bat_priv->tt.local_hash) |
232 | return -ENOMEM; | 232 | return -ENOMEM; |
233 | 233 | ||
234 | return 0; | 234 | return 0; |
@@ -260,7 +260,7 @@ void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr, | |||
260 | 260 | ||
261 | batadv_dbg(BATADV_DBG_TT, bat_priv, | 261 | batadv_dbg(BATADV_DBG_TT, bat_priv, |
262 | "Creating new local tt entry: %pM (ttvn: %d)\n", addr, | 262 | "Creating new local tt entry: %pM (ttvn: %d)\n", addr, |
263 | (uint8_t)atomic_read(&bat_priv->ttvn)); | 263 | (uint8_t)atomic_read(&bat_priv->tt.vn)); |
264 | 264 | ||
265 | memcpy(tt_local_entry->common.addr, addr, ETH_ALEN); | 265 | memcpy(tt_local_entry->common.addr, addr, ETH_ALEN); |
266 | tt_local_entry->common.flags = BATADV_NO_FLAGS; | 266 | tt_local_entry->common.flags = BATADV_NO_FLAGS; |
@@ -279,7 +279,7 @@ void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr, | |||
279 | */ | 279 | */ |
280 | tt_local_entry->common.flags |= BATADV_TT_CLIENT_NEW; | 280 | tt_local_entry->common.flags |= BATADV_TT_CLIENT_NEW; |
281 | 281 | ||
282 | hash_added = batadv_hash_add(bat_priv->tt_local_hash, batadv_compare_tt, | 282 | hash_added = batadv_hash_add(bat_priv->tt.local_hash, batadv_compare_tt, |
283 | batadv_choose_orig, | 283 | batadv_choose_orig, |
284 | &tt_local_entry->common, | 284 | &tt_local_entry->common, |
285 | &tt_local_entry->common.hash_entry); | 285 | &tt_local_entry->common.hash_entry); |
@@ -350,7 +350,7 @@ static void batadv_tt_prepare_packet_buff(struct batadv_priv *bat_priv, | |||
350 | primary_if = batadv_primary_if_get_selected(bat_priv); | 350 | primary_if = batadv_primary_if_get_selected(bat_priv); |
351 | 351 | ||
352 | req_len = min_packet_len; | 352 | req_len = min_packet_len; |
353 | req_len += batadv_tt_len(atomic_read(&bat_priv->tt_local_changes)); | 353 | req_len += batadv_tt_len(atomic_read(&bat_priv->tt.local_changes)); |
354 | 354 | ||
355 | /* if we have too many changes for one packet don't send any | 355 | /* if we have too many changes for one packet don't send any |
356 | * and wait for the tt table request which will be fragmented | 356 | * and wait for the tt table request which will be fragmented |
@@ -383,10 +383,10 @@ static int batadv_tt_changes_fill_buff(struct batadv_priv *bat_priv, | |||
383 | if (new_len > 0) | 383 | if (new_len > 0) |
384 | tot_changes = new_len / batadv_tt_len(1); | 384 | tot_changes = new_len / batadv_tt_len(1); |
385 | 385 | ||
386 | spin_lock_bh(&bat_priv->tt_changes_list_lock); | 386 | spin_lock_bh(&bat_priv->tt.changes_list_lock); |
387 | atomic_set(&bat_priv->tt_local_changes, 0); | 387 | atomic_set(&bat_priv->tt.local_changes, 0); |
388 | 388 | ||
389 | list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list, | 389 | list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list, |
390 | list) { | 390 | list) { |
391 | if (count < tot_changes) { | 391 | if (count < tot_changes) { |
392 | memcpy(tt_buff + batadv_tt_len(count), | 392 | memcpy(tt_buff + batadv_tt_len(count), |
@@ -396,25 +396,25 @@ static int batadv_tt_changes_fill_buff(struct batadv_priv *bat_priv, | |||
396 | list_del(&entry->list); | 396 | list_del(&entry->list); |
397 | kfree(entry); | 397 | kfree(entry); |
398 | } | 398 | } |
399 | spin_unlock_bh(&bat_priv->tt_changes_list_lock); | 399 | spin_unlock_bh(&bat_priv->tt.changes_list_lock); |
400 | 400 | ||
401 | /* Keep the buffer for possible tt_request */ | 401 | /* Keep the buffer for possible tt_request */ |
402 | spin_lock_bh(&bat_priv->tt_buff_lock); | 402 | spin_lock_bh(&bat_priv->tt.last_changeset_lock); |
403 | kfree(bat_priv->tt_buff); | 403 | kfree(bat_priv->tt.last_changeset); |
404 | bat_priv->tt_buff_len = 0; | 404 | bat_priv->tt.last_changeset_len = 0; |
405 | bat_priv->tt_buff = NULL; | 405 | bat_priv->tt.last_changeset = NULL; |
406 | /* check whether this new OGM has no changes due to size problems */ | 406 | /* check whether this new OGM has no changes due to size problems */ |
407 | if (new_len > 0) { | 407 | if (new_len > 0) { |
408 | /* if kmalloc() fails we will reply with the full table | 408 | /* if kmalloc() fails we will reply with the full table |
409 | * instead of providing the diff | 409 | * instead of providing the diff |
410 | */ | 410 | */ |
411 | bat_priv->tt_buff = kmalloc(new_len, GFP_ATOMIC); | 411 | bat_priv->tt.last_changeset = kmalloc(new_len, GFP_ATOMIC); |
412 | if (bat_priv->tt_buff) { | 412 | if (bat_priv->tt.last_changeset) { |
413 | memcpy(bat_priv->tt_buff, tt_buff, new_len); | 413 | memcpy(bat_priv->tt.last_changeset, tt_buff, new_len); |
414 | bat_priv->tt_buff_len = new_len; | 414 | bat_priv->tt.last_changeset_len = new_len; |
415 | } | 415 | } |
416 | } | 416 | } |
417 | spin_unlock_bh(&bat_priv->tt_buff_lock); | 417 | spin_unlock_bh(&bat_priv->tt.last_changeset_lock); |
418 | 418 | ||
419 | return count; | 419 | return count; |
420 | } | 420 | } |
@@ -423,7 +423,7 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset) | |||
423 | { | 423 | { |
424 | struct net_device *net_dev = (struct net_device *)seq->private; | 424 | struct net_device *net_dev = (struct net_device *)seq->private; |
425 | struct batadv_priv *bat_priv = netdev_priv(net_dev); | 425 | struct batadv_priv *bat_priv = netdev_priv(net_dev); |
426 | struct batadv_hashtable *hash = bat_priv->tt_local_hash; | 426 | struct batadv_hashtable *hash = bat_priv->tt.local_hash; |
427 | struct batadv_tt_common_entry *tt_common_entry; | 427 | struct batadv_tt_common_entry *tt_common_entry; |
428 | struct batadv_hard_iface *primary_if; | 428 | struct batadv_hard_iface *primary_if; |
429 | struct hlist_node *node; | 429 | struct hlist_node *node; |
@@ -448,7 +448,7 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset) | |||
448 | 448 | ||
449 | seq_printf(seq, | 449 | seq_printf(seq, |
450 | "Locally retrieved addresses (from %s) announced via TT (TTVN: %u):\n", | 450 | "Locally retrieved addresses (from %s) announced via TT (TTVN: %u):\n", |
451 | net_dev->name, (uint8_t)atomic_read(&bat_priv->ttvn)); | 451 | net_dev->name, (uint8_t)atomic_read(&bat_priv->tt.vn)); |
452 | 452 | ||
453 | for (i = 0; i < hash->size; i++) { | 453 | for (i = 0; i < hash->size; i++) { |
454 | head = &hash->table[i]; | 454 | head = &hash->table[i]; |
@@ -546,7 +546,7 @@ static void batadv_tt_local_purge_list(struct batadv_priv *bat_priv, | |||
546 | 546 | ||
547 | static void batadv_tt_local_purge(struct batadv_priv *bat_priv) | 547 | static void batadv_tt_local_purge(struct batadv_priv *bat_priv) |
548 | { | 548 | { |
549 | struct batadv_hashtable *hash = bat_priv->tt_local_hash; | 549 | struct batadv_hashtable *hash = bat_priv->tt.local_hash; |
550 | struct hlist_head *head; | 550 | struct hlist_head *head; |
551 | spinlock_t *list_lock; /* protects write access to the hash lists */ | 551 | spinlock_t *list_lock; /* protects write access to the hash lists */ |
552 | uint32_t i; | 552 | uint32_t i; |
@@ -572,10 +572,10 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv) | |||
572 | struct hlist_head *head; | 572 | struct hlist_head *head; |
573 | uint32_t i; | 573 | uint32_t i; |
574 | 574 | ||
575 | if (!bat_priv->tt_local_hash) | 575 | if (!bat_priv->tt.local_hash) |
576 | return; | 576 | return; |
577 | 577 | ||
578 | hash = bat_priv->tt_local_hash; | 578 | hash = bat_priv->tt.local_hash; |
579 | 579 | ||
580 | for (i = 0; i < hash->size; i++) { | 580 | for (i = 0; i < hash->size; i++) { |
581 | head = &hash->table[i]; | 581 | head = &hash->table[i]; |
@@ -595,17 +595,17 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv) | |||
595 | 595 | ||
596 | batadv_hash_destroy(hash); | 596 | batadv_hash_destroy(hash); |
597 | 597 | ||
598 | bat_priv->tt_local_hash = NULL; | 598 | bat_priv->tt.local_hash = NULL; |
599 | } | 599 | } |
600 | 600 | ||
601 | static int batadv_tt_global_init(struct batadv_priv *bat_priv) | 601 | static int batadv_tt_global_init(struct batadv_priv *bat_priv) |
602 | { | 602 | { |
603 | if (bat_priv->tt_global_hash) | 603 | if (bat_priv->tt.global_hash) |
604 | return 0; | 604 | return 0; |
605 | 605 | ||
606 | bat_priv->tt_global_hash = batadv_hash_new(1024); | 606 | bat_priv->tt.global_hash = batadv_hash_new(1024); |
607 | 607 | ||
608 | if (!bat_priv->tt_global_hash) | 608 | if (!bat_priv->tt.global_hash) |
609 | return -ENOMEM; | 609 | return -ENOMEM; |
610 | 610 | ||
611 | return 0; | 611 | return 0; |
@@ -615,16 +615,16 @@ static void batadv_tt_changes_list_free(struct batadv_priv *bat_priv) | |||
615 | { | 615 | { |
616 | struct batadv_tt_change_node *entry, *safe; | 616 | struct batadv_tt_change_node *entry, *safe; |
617 | 617 | ||
618 | spin_lock_bh(&bat_priv->tt_changes_list_lock); | 618 | spin_lock_bh(&bat_priv->tt.changes_list_lock); |
619 | 619 | ||
620 | list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list, | 620 | list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list, |
621 | list) { | 621 | list) { |
622 | list_del(&entry->list); | 622 | list_del(&entry->list); |
623 | kfree(entry); | 623 | kfree(entry); |
624 | } | 624 | } |
625 | 625 | ||
626 | atomic_set(&bat_priv->tt_local_changes, 0); | 626 | atomic_set(&bat_priv->tt.local_changes, 0); |
627 | spin_unlock_bh(&bat_priv->tt_changes_list_lock); | 627 | spin_unlock_bh(&bat_priv->tt.changes_list_lock); |
628 | } | 628 | } |
629 | 629 | ||
630 | /* retrieves the orig_tt_list_entry belonging to orig_node from the | 630 | /* retrieves the orig_tt_list_entry belonging to orig_node from the |
@@ -733,7 +733,7 @@ int batadv_tt_global_add(struct batadv_priv *bat_priv, | |||
733 | INIT_HLIST_HEAD(&tt_global_entry->orig_list); | 733 | INIT_HLIST_HEAD(&tt_global_entry->orig_list); |
734 | spin_lock_init(&tt_global_entry->list_lock); | 734 | spin_lock_init(&tt_global_entry->list_lock); |
735 | 735 | ||
736 | hash_added = batadv_hash_add(bat_priv->tt_global_hash, | 736 | hash_added = batadv_hash_add(bat_priv->tt.global_hash, |
737 | batadv_compare_tt, | 737 | batadv_compare_tt, |
738 | batadv_choose_orig, common, | 738 | batadv_choose_orig, common, |
739 | &common->hash_entry); | 739 | &common->hash_entry); |
@@ -812,7 +812,7 @@ int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset) | |||
812 | { | 812 | { |
813 | struct net_device *net_dev = (struct net_device *)seq->private; | 813 | struct net_device *net_dev = (struct net_device *)seq->private; |
814 | struct batadv_priv *bat_priv = netdev_priv(net_dev); | 814 | struct batadv_priv *bat_priv = netdev_priv(net_dev); |
815 | struct batadv_hashtable *hash = bat_priv->tt_global_hash; | 815 | struct batadv_hashtable *hash = bat_priv->tt.global_hash; |
816 | struct batadv_tt_common_entry *tt_common_entry; | 816 | struct batadv_tt_common_entry *tt_common_entry; |
817 | struct batadv_tt_global_entry *tt_global; | 817 | struct batadv_tt_global_entry *tt_global; |
818 | struct batadv_hard_iface *primary_if; | 818 | struct batadv_hard_iface *primary_if; |
@@ -913,7 +913,7 @@ batadv_tt_global_del_struct(struct batadv_priv *bat_priv, | |||
913 | "Deleting global tt entry %pM: %s\n", | 913 | "Deleting global tt entry %pM: %s\n", |
914 | tt_global_entry->common.addr, message); | 914 | tt_global_entry->common.addr, message); |
915 | 915 | ||
916 | batadv_hash_remove(bat_priv->tt_global_hash, batadv_compare_tt, | 916 | batadv_hash_remove(bat_priv->tt.global_hash, batadv_compare_tt, |
917 | batadv_choose_orig, tt_global_entry->common.addr); | 917 | batadv_choose_orig, tt_global_entry->common.addr); |
918 | batadv_tt_global_entry_free_ref(tt_global_entry); | 918 | batadv_tt_global_entry_free_ref(tt_global_entry); |
919 | 919 | ||
@@ -1024,7 +1024,7 @@ void batadv_tt_global_del_orig(struct batadv_priv *bat_priv, | |||
1024 | struct batadv_tt_global_entry *tt_global; | 1024 | struct batadv_tt_global_entry *tt_global; |
1025 | struct batadv_tt_common_entry *tt_common_entry; | 1025 | struct batadv_tt_common_entry *tt_common_entry; |
1026 | uint32_t i; | 1026 | uint32_t i; |
1027 | struct batadv_hashtable *hash = bat_priv->tt_global_hash; | 1027 | struct batadv_hashtable *hash = bat_priv->tt.global_hash; |
1028 | struct hlist_node *node, *safe; | 1028 | struct hlist_node *node, *safe; |
1029 | struct hlist_head *head; | 1029 | struct hlist_head *head; |
1030 | spinlock_t *list_lock; /* protects write access to the hash lists */ | 1030 | spinlock_t *list_lock; /* protects write access to the hash lists */ |
@@ -1088,7 +1088,7 @@ static void batadv_tt_global_roam_purge_list(struct batadv_priv *bat_priv, | |||
1088 | 1088 | ||
1089 | static void batadv_tt_global_roam_purge(struct batadv_priv *bat_priv) | 1089 | static void batadv_tt_global_roam_purge(struct batadv_priv *bat_priv) |
1090 | { | 1090 | { |
1091 | struct batadv_hashtable *hash = bat_priv->tt_global_hash; | 1091 | struct batadv_hashtable *hash = bat_priv->tt.global_hash; |
1092 | struct hlist_head *head; | 1092 | struct hlist_head *head; |
1093 | spinlock_t *list_lock; /* protects write access to the hash lists */ | 1093 | spinlock_t *list_lock; /* protects write access to the hash lists */ |
1094 | uint32_t i; | 1094 | uint32_t i; |
@@ -1114,10 +1114,10 @@ static void batadv_tt_global_table_free(struct batadv_priv *bat_priv) | |||
1114 | struct hlist_head *head; | 1114 | struct hlist_head *head; |
1115 | uint32_t i; | 1115 | uint32_t i; |
1116 | 1116 | ||
1117 | if (!bat_priv->tt_global_hash) | 1117 | if (!bat_priv->tt.global_hash) |
1118 | return; | 1118 | return; |
1119 | 1119 | ||
1120 | hash = bat_priv->tt_global_hash; | 1120 | hash = bat_priv->tt.global_hash; |
1121 | 1121 | ||
1122 | for (i = 0; i < hash->size; i++) { | 1122 | for (i = 0; i < hash->size; i++) { |
1123 | head = &hash->table[i]; | 1123 | head = &hash->table[i]; |
@@ -1137,7 +1137,7 @@ static void batadv_tt_global_table_free(struct batadv_priv *bat_priv) | |||
1137 | 1137 | ||
1138 | batadv_hash_destroy(hash); | 1138 | batadv_hash_destroy(hash); |
1139 | 1139 | ||
1140 | bat_priv->tt_global_hash = NULL; | 1140 | bat_priv->tt.global_hash = NULL; |
1141 | } | 1141 | } |
1142 | 1142 | ||
1143 | static bool | 1143 | static bool |
@@ -1216,7 +1216,7 @@ static uint16_t batadv_tt_global_crc(struct batadv_priv *bat_priv, | |||
1216 | struct batadv_orig_node *orig_node) | 1216 | struct batadv_orig_node *orig_node) |
1217 | { | 1217 | { |
1218 | uint16_t total = 0, total_one; | 1218 | uint16_t total = 0, total_one; |
1219 | struct batadv_hashtable *hash = bat_priv->tt_global_hash; | 1219 | struct batadv_hashtable *hash = bat_priv->tt.global_hash; |
1220 | struct batadv_tt_common_entry *tt_common; | 1220 | struct batadv_tt_common_entry *tt_common; |
1221 | struct batadv_tt_global_entry *tt_global; | 1221 | struct batadv_tt_global_entry *tt_global; |
1222 | struct hlist_node *node; | 1222 | struct hlist_node *node; |
@@ -1263,7 +1263,7 @@ static uint16_t batadv_tt_global_crc(struct batadv_priv *bat_priv, | |||
1263 | static uint16_t batadv_tt_local_crc(struct batadv_priv *bat_priv) | 1263 | static uint16_t batadv_tt_local_crc(struct batadv_priv *bat_priv) |
1264 | { | 1264 | { |
1265 | uint16_t total = 0, total_one; | 1265 | uint16_t total = 0, total_one; |
1266 | struct batadv_hashtable *hash = bat_priv->tt_local_hash; | 1266 | struct batadv_hashtable *hash = bat_priv->tt.local_hash; |
1267 | struct batadv_tt_common_entry *tt_common; | 1267 | struct batadv_tt_common_entry *tt_common; |
1268 | struct hlist_node *node; | 1268 | struct hlist_node *node; |
1269 | struct hlist_head *head; | 1269 | struct hlist_head *head; |
@@ -1296,14 +1296,14 @@ static void batadv_tt_req_list_free(struct batadv_priv *bat_priv) | |||
1296 | { | 1296 | { |
1297 | struct batadv_tt_req_node *node, *safe; | 1297 | struct batadv_tt_req_node *node, *safe; |
1298 | 1298 | ||
1299 | spin_lock_bh(&bat_priv->tt_req_list_lock); | 1299 | spin_lock_bh(&bat_priv->tt.req_list_lock); |
1300 | 1300 | ||
1301 | list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) { | 1301 | list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) { |
1302 | list_del(&node->list); | 1302 | list_del(&node->list); |
1303 | kfree(node); | 1303 | kfree(node); |
1304 | } | 1304 | } |
1305 | 1305 | ||
1306 | spin_unlock_bh(&bat_priv->tt_req_list_lock); | 1306 | spin_unlock_bh(&bat_priv->tt.req_list_lock); |
1307 | } | 1307 | } |
1308 | 1308 | ||
1309 | static void batadv_tt_save_orig_buffer(struct batadv_priv *bat_priv, | 1309 | static void batadv_tt_save_orig_buffer(struct batadv_priv *bat_priv, |
@@ -1333,15 +1333,15 @@ static void batadv_tt_req_purge(struct batadv_priv *bat_priv) | |||
1333 | { | 1333 | { |
1334 | struct batadv_tt_req_node *node, *safe; | 1334 | struct batadv_tt_req_node *node, *safe; |
1335 | 1335 | ||
1336 | spin_lock_bh(&bat_priv->tt_req_list_lock); | 1336 | spin_lock_bh(&bat_priv->tt.req_list_lock); |
1337 | list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) { | 1337 | list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) { |
1338 | if (batadv_has_timed_out(node->issued_at, | 1338 | if (batadv_has_timed_out(node->issued_at, |
1339 | BATADV_TT_REQUEST_TIMEOUT)) { | 1339 | BATADV_TT_REQUEST_TIMEOUT)) { |
1340 | list_del(&node->list); | 1340 | list_del(&node->list); |
1341 | kfree(node); | 1341 | kfree(node); |
1342 | } | 1342 | } |
1343 | } | 1343 | } |
1344 | spin_unlock_bh(&bat_priv->tt_req_list_lock); | 1344 | spin_unlock_bh(&bat_priv->tt.req_list_lock); |
1345 | } | 1345 | } |
1346 | 1346 | ||
1347 | /* returns the pointer to the new tt_req_node struct if no request | 1347 | /* returns the pointer to the new tt_req_node struct if no request |
@@ -1353,8 +1353,8 @@ batadv_new_tt_req_node(struct batadv_priv *bat_priv, | |||
1353 | { | 1353 | { |
1354 | struct batadv_tt_req_node *tt_req_node_tmp, *tt_req_node = NULL; | 1354 | struct batadv_tt_req_node *tt_req_node_tmp, *tt_req_node = NULL; |
1355 | 1355 | ||
1356 | spin_lock_bh(&bat_priv->tt_req_list_lock); | 1356 | spin_lock_bh(&bat_priv->tt.req_list_lock); |
1357 | list_for_each_entry(tt_req_node_tmp, &bat_priv->tt_req_list, list) { | 1357 | list_for_each_entry(tt_req_node_tmp, &bat_priv->tt.req_list, list) { |
1358 | if (batadv_compare_eth(tt_req_node_tmp, orig_node) && | 1358 | if (batadv_compare_eth(tt_req_node_tmp, orig_node) && |
1359 | !batadv_has_timed_out(tt_req_node_tmp->issued_at, | 1359 | !batadv_has_timed_out(tt_req_node_tmp->issued_at, |
1360 | BATADV_TT_REQUEST_TIMEOUT)) | 1360 | BATADV_TT_REQUEST_TIMEOUT)) |
@@ -1368,9 +1368,9 @@ batadv_new_tt_req_node(struct batadv_priv *bat_priv, | |||
1368 | memcpy(tt_req_node->addr, orig_node->orig, ETH_ALEN); | 1368 | memcpy(tt_req_node->addr, orig_node->orig, ETH_ALEN); |
1369 | tt_req_node->issued_at = jiffies; | 1369 | tt_req_node->issued_at = jiffies; |
1370 | 1370 | ||
1371 | list_add(&tt_req_node->list, &bat_priv->tt_req_list); | 1371 | list_add(&tt_req_node->list, &bat_priv->tt.req_list); |
1372 | unlock: | 1372 | unlock: |
1373 | spin_unlock_bh(&bat_priv->tt_req_list_lock); | 1373 | spin_unlock_bh(&bat_priv->tt.req_list_lock); |
1374 | return tt_req_node; | 1374 | return tt_req_node; |
1375 | } | 1375 | } |
1376 | 1376 | ||
@@ -1536,9 +1536,9 @@ out: | |||
1536 | if (ret) | 1536 | if (ret) |
1537 | kfree_skb(skb); | 1537 | kfree_skb(skb); |
1538 | if (ret && tt_req_node) { | 1538 | if (ret && tt_req_node) { |
1539 | spin_lock_bh(&bat_priv->tt_req_list_lock); | 1539 | spin_lock_bh(&bat_priv->tt.req_list_lock); |
1540 | list_del(&tt_req_node->list); | 1540 | list_del(&tt_req_node->list); |
1541 | spin_unlock_bh(&bat_priv->tt_req_list_lock); | 1541 | spin_unlock_bh(&bat_priv->tt.req_list_lock); |
1542 | kfree(tt_req_node); | 1542 | kfree(tt_req_node); |
1543 | } | 1543 | } |
1544 | return ret; | 1544 | return ret; |
@@ -1629,7 +1629,7 @@ batadv_send_other_tt_response(struct batadv_priv *bat_priv, | |||
1629 | ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn); | 1629 | ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn); |
1630 | 1630 | ||
1631 | skb = batadv_tt_response_fill_table(tt_len, ttvn, | 1631 | skb = batadv_tt_response_fill_table(tt_len, ttvn, |
1632 | bat_priv->tt_global_hash, | 1632 | bat_priv->tt.global_hash, |
1633 | primary_if, | 1633 | primary_if, |
1634 | batadv_tt_global_valid, | 1634 | batadv_tt_global_valid, |
1635 | req_dst_orig_node); | 1635 | req_dst_orig_node); |
@@ -1700,7 +1700,7 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv, | |||
1700 | (tt_request->flags & BATADV_TT_FULL_TABLE ? 'F' : '.')); | 1700 | (tt_request->flags & BATADV_TT_FULL_TABLE ? 'F' : '.')); |
1701 | 1701 | ||
1702 | 1702 | ||
1703 | my_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn); | 1703 | my_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn); |
1704 | req_ttvn = tt_request->ttvn; | 1704 | req_ttvn = tt_request->ttvn; |
1705 | 1705 | ||
1706 | orig_node = batadv_orig_hash_find(bat_priv, tt_request->src); | 1706 | orig_node = batadv_orig_hash_find(bat_priv, tt_request->src); |
@@ -1719,7 +1719,7 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv, | |||
1719 | * is too big send the whole local translation table | 1719 | * is too big send the whole local translation table |
1720 | */ | 1720 | */ |
1721 | if (tt_request->flags & BATADV_TT_FULL_TABLE || my_ttvn != req_ttvn || | 1721 | if (tt_request->flags & BATADV_TT_FULL_TABLE || my_ttvn != req_ttvn || |
1722 | !bat_priv->tt_buff) | 1722 | !bat_priv->tt.last_changeset) |
1723 | full_table = true; | 1723 | full_table = true; |
1724 | else | 1724 | else |
1725 | full_table = false; | 1725 | full_table = false; |
@@ -1728,8 +1728,8 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv, | |||
1728 | * I'll send only one packet with as much TT entries as I can | 1728 | * I'll send only one packet with as much TT entries as I can |
1729 | */ | 1729 | */ |
1730 | if (!full_table) { | 1730 | if (!full_table) { |
1731 | spin_lock_bh(&bat_priv->tt_buff_lock); | 1731 | spin_lock_bh(&bat_priv->tt.last_changeset_lock); |
1732 | tt_len = bat_priv->tt_buff_len; | 1732 | tt_len = bat_priv->tt.last_changeset_len; |
1733 | tt_tot = tt_len / sizeof(struct batadv_tt_change); | 1733 | tt_tot = tt_len / sizeof(struct batadv_tt_change); |
1734 | 1734 | ||
1735 | len = sizeof(*tt_response) + tt_len; | 1735 | len = sizeof(*tt_response) + tt_len; |
@@ -1744,16 +1744,16 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv, | |||
1744 | tt_response->tt_data = htons(tt_tot); | 1744 | tt_response->tt_data = htons(tt_tot); |
1745 | 1745 | ||
1746 | tt_buff = skb->data + sizeof(*tt_response); | 1746 | tt_buff = skb->data + sizeof(*tt_response); |
1747 | memcpy(tt_buff, bat_priv->tt_buff, | 1747 | memcpy(tt_buff, bat_priv->tt.last_changeset, |
1748 | bat_priv->tt_buff_len); | 1748 | bat_priv->tt.last_changeset_len); |
1749 | spin_unlock_bh(&bat_priv->tt_buff_lock); | 1749 | spin_unlock_bh(&bat_priv->tt.last_changeset_lock); |
1750 | } else { | 1750 | } else { |
1751 | tt_len = (uint16_t)atomic_read(&bat_priv->num_local_tt); | 1751 | tt_len = (uint16_t)atomic_read(&bat_priv->tt.local_entry_num); |
1752 | tt_len *= sizeof(struct batadv_tt_change); | 1752 | tt_len *= sizeof(struct batadv_tt_change); |
1753 | ttvn = (uint8_t)atomic_read(&bat_priv->ttvn); | 1753 | ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn); |
1754 | 1754 | ||
1755 | skb = batadv_tt_response_fill_table(tt_len, ttvn, | 1755 | skb = batadv_tt_response_fill_table(tt_len, ttvn, |
1756 | bat_priv->tt_local_hash, | 1756 | bat_priv->tt.local_hash, |
1757 | primary_if, | 1757 | primary_if, |
1758 | batadv_tt_local_valid_entry, | 1758 | batadv_tt_local_valid_entry, |
1759 | NULL); | 1759 | NULL); |
@@ -1785,7 +1785,7 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv, | |||
1785 | goto out; | 1785 | goto out; |
1786 | 1786 | ||
1787 | unlock: | 1787 | unlock: |
1788 | spin_unlock_bh(&bat_priv->tt_buff_lock); | 1788 | spin_unlock_bh(&bat_priv->tt.last_changeset_lock); |
1789 | out: | 1789 | out: |
1790 | if (orig_node) | 1790 | if (orig_node) |
1791 | batadv_orig_node_free_ref(orig_node); | 1791 | batadv_orig_node_free_ref(orig_node); |
@@ -1938,14 +1938,14 @@ void batadv_handle_tt_response(struct batadv_priv *bat_priv, | |||
1938 | } | 1938 | } |
1939 | 1939 | ||
1940 | /* Delete the tt_req_node from pending tt_requests list */ | 1940 | /* Delete the tt_req_node from pending tt_requests list */ |
1941 | spin_lock_bh(&bat_priv->tt_req_list_lock); | 1941 | spin_lock_bh(&bat_priv->tt.req_list_lock); |
1942 | list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) { | 1942 | list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) { |
1943 | if (!batadv_compare_eth(node->addr, tt_response->src)) | 1943 | if (!batadv_compare_eth(node->addr, tt_response->src)) |
1944 | continue; | 1944 | continue; |
1945 | list_del(&node->list); | 1945 | list_del(&node->list); |
1946 | kfree(node); | 1946 | kfree(node); |
1947 | } | 1947 | } |
1948 | spin_unlock_bh(&bat_priv->tt_req_list_lock); | 1948 | spin_unlock_bh(&bat_priv->tt.req_list_lock); |
1949 | 1949 | ||
1950 | /* Recalculate the CRC for this orig_node and store it */ | 1950 | /* Recalculate the CRC for this orig_node and store it */ |
1951 | orig_node->tt_crc = batadv_tt_global_crc(bat_priv, orig_node); | 1951 | orig_node->tt_crc = batadv_tt_global_crc(bat_priv, orig_node); |
@@ -1979,22 +1979,22 @@ static void batadv_tt_roam_list_free(struct batadv_priv *bat_priv) | |||
1979 | { | 1979 | { |
1980 | struct batadv_tt_roam_node *node, *safe; | 1980 | struct batadv_tt_roam_node *node, *safe; |
1981 | 1981 | ||
1982 | spin_lock_bh(&bat_priv->tt_roam_list_lock); | 1982 | spin_lock_bh(&bat_priv->tt.roam_list_lock); |
1983 | 1983 | ||
1984 | list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) { | 1984 | list_for_each_entry_safe(node, safe, &bat_priv->tt.roam_list, list) { |
1985 | list_del(&node->list); | 1985 | list_del(&node->list); |
1986 | kfree(node); | 1986 | kfree(node); |
1987 | } | 1987 | } |
1988 | 1988 | ||
1989 | spin_unlock_bh(&bat_priv->tt_roam_list_lock); | 1989 | spin_unlock_bh(&bat_priv->tt.roam_list_lock); |
1990 | } | 1990 | } |
1991 | 1991 | ||
1992 | static void batadv_tt_roam_purge(struct batadv_priv *bat_priv) | 1992 | static void batadv_tt_roam_purge(struct batadv_priv *bat_priv) |
1993 | { | 1993 | { |
1994 | struct batadv_tt_roam_node *node, *safe; | 1994 | struct batadv_tt_roam_node *node, *safe; |
1995 | 1995 | ||
1996 | spin_lock_bh(&bat_priv->tt_roam_list_lock); | 1996 | spin_lock_bh(&bat_priv->tt.roam_list_lock); |
1997 | list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) { | 1997 | list_for_each_entry_safe(node, safe, &bat_priv->tt.roam_list, list) { |
1998 | if (!batadv_has_timed_out(node->first_time, | 1998 | if (!batadv_has_timed_out(node->first_time, |
1999 | BATADV_ROAMING_MAX_TIME)) | 1999 | BATADV_ROAMING_MAX_TIME)) |
2000 | continue; | 2000 | continue; |
@@ -2002,7 +2002,7 @@ static void batadv_tt_roam_purge(struct batadv_priv *bat_priv) | |||
2002 | list_del(&node->list); | 2002 | list_del(&node->list); |
2003 | kfree(node); | 2003 | kfree(node); |
2004 | } | 2004 | } |
2005 | spin_unlock_bh(&bat_priv->tt_roam_list_lock); | 2005 | spin_unlock_bh(&bat_priv->tt.roam_list_lock); |
2006 | } | 2006 | } |
2007 | 2007 | ||
2008 | /* This function checks whether the client already reached the | 2008 | /* This function checks whether the client already reached the |
@@ -2017,11 +2017,11 @@ static bool batadv_tt_check_roam_count(struct batadv_priv *bat_priv, | |||
2017 | struct batadv_tt_roam_node *tt_roam_node; | 2017 | struct batadv_tt_roam_node *tt_roam_node; |
2018 | bool ret = false; | 2018 | bool ret = false; |
2019 | 2019 | ||
2020 | spin_lock_bh(&bat_priv->tt_roam_list_lock); | 2020 | spin_lock_bh(&bat_priv->tt.roam_list_lock); |
2021 | /* The new tt_req will be issued only if I'm not waiting for a | 2021 | /* The new tt_req will be issued only if I'm not waiting for a |
2022 | * reply from the same orig_node yet | 2022 | * reply from the same orig_node yet |
2023 | */ | 2023 | */ |
2024 | list_for_each_entry(tt_roam_node, &bat_priv->tt_roam_list, list) { | 2024 | list_for_each_entry(tt_roam_node, &bat_priv->tt.roam_list, list) { |
2025 | if (!batadv_compare_eth(tt_roam_node->addr, client)) | 2025 | if (!batadv_compare_eth(tt_roam_node->addr, client)) |
2026 | continue; | 2026 | continue; |
2027 | 2027 | ||
@@ -2046,12 +2046,12 @@ static bool batadv_tt_check_roam_count(struct batadv_priv *bat_priv, | |||
2046 | BATADV_ROAMING_MAX_COUNT - 1); | 2046 | BATADV_ROAMING_MAX_COUNT - 1); |
2047 | memcpy(tt_roam_node->addr, client, ETH_ALEN); | 2047 | memcpy(tt_roam_node->addr, client, ETH_ALEN); |
2048 | 2048 | ||
2049 | list_add(&tt_roam_node->list, &bat_priv->tt_roam_list); | 2049 | list_add(&tt_roam_node->list, &bat_priv->tt.roam_list); |
2050 | ret = true; | 2050 | ret = true; |
2051 | } | 2051 | } |
2052 | 2052 | ||
2053 | unlock: | 2053 | unlock: |
2054 | spin_unlock_bh(&bat_priv->tt_roam_list_lock); | 2054 | spin_unlock_bh(&bat_priv->tt.roam_list_lock); |
2055 | return ret; | 2055 | return ret; |
2056 | } | 2056 | } |
2057 | 2057 | ||
@@ -2115,10 +2115,12 @@ out: | |||
2115 | static void batadv_tt_purge(struct work_struct *work) | 2115 | static void batadv_tt_purge(struct work_struct *work) |
2116 | { | 2116 | { |
2117 | struct delayed_work *delayed_work; | 2117 | struct delayed_work *delayed_work; |
2118 | struct batadv_priv_tt *priv_tt; | ||
2118 | struct batadv_priv *bat_priv; | 2119 | struct batadv_priv *bat_priv; |
2119 | 2120 | ||
2120 | delayed_work = container_of(work, struct delayed_work, work); | 2121 | delayed_work = container_of(work, struct delayed_work, work); |
2121 | bat_priv = container_of(delayed_work, struct batadv_priv, tt_work); | 2122 | priv_tt = container_of(delayed_work, struct batadv_priv_tt, work); |
2123 | bat_priv = container_of(priv_tt, struct batadv_priv, tt); | ||
2122 | 2124 | ||
2123 | batadv_tt_local_purge(bat_priv); | 2125 | batadv_tt_local_purge(bat_priv); |
2124 | batadv_tt_global_roam_purge(bat_priv); | 2126 | batadv_tt_global_roam_purge(bat_priv); |
@@ -2130,7 +2132,7 @@ static void batadv_tt_purge(struct work_struct *work) | |||
2130 | 2132 | ||
2131 | void batadv_tt_free(struct batadv_priv *bat_priv) | 2133 | void batadv_tt_free(struct batadv_priv *bat_priv) |
2132 | { | 2134 | { |
2133 | cancel_delayed_work_sync(&bat_priv->tt_work); | 2135 | cancel_delayed_work_sync(&bat_priv->tt.work); |
2134 | 2136 | ||
2135 | batadv_tt_local_table_free(bat_priv); | 2137 | batadv_tt_local_table_free(bat_priv); |
2136 | batadv_tt_global_table_free(bat_priv); | 2138 | batadv_tt_global_table_free(bat_priv); |
@@ -2138,7 +2140,7 @@ void batadv_tt_free(struct batadv_priv *bat_priv) | |||
2138 | batadv_tt_changes_list_free(bat_priv); | 2140 | batadv_tt_changes_list_free(bat_priv); |
2139 | batadv_tt_roam_list_free(bat_priv); | 2141 | batadv_tt_roam_list_free(bat_priv); |
2140 | 2142 | ||
2141 | kfree(bat_priv->tt_buff); | 2143 | kfree(bat_priv->tt.last_changeset); |
2142 | } | 2144 | } |
2143 | 2145 | ||
2144 | /* This function will enable or disable the specified flags for all the entries | 2146 | /* This function will enable or disable the specified flags for all the entries |
@@ -2182,7 +2184,7 @@ out: | |||
2182 | /* Purge out all the tt local entries marked with BATADV_TT_CLIENT_PENDING */ | 2184 | /* Purge out all the tt local entries marked with BATADV_TT_CLIENT_PENDING */ |
2183 | static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv) | 2185 | static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv) |
2184 | { | 2186 | { |
2185 | struct batadv_hashtable *hash = bat_priv->tt_local_hash; | 2187 | struct batadv_hashtable *hash = bat_priv->tt.local_hash; |
2186 | struct batadv_tt_common_entry *tt_common; | 2188 | struct batadv_tt_common_entry *tt_common; |
2187 | struct batadv_tt_local_entry *tt_local; | 2189 | struct batadv_tt_local_entry *tt_local; |
2188 | struct hlist_node *node, *node_tmp; | 2190 | struct hlist_node *node, *node_tmp; |
@@ -2207,7 +2209,7 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv) | |||
2207 | "Deleting local tt entry (%pM): pending\n", | 2209 | "Deleting local tt entry (%pM): pending\n", |
2208 | tt_common->addr); | 2210 | tt_common->addr); |
2209 | 2211 | ||
2210 | atomic_dec(&bat_priv->num_local_tt); | 2212 | atomic_dec(&bat_priv->tt.local_entry_num); |
2211 | hlist_del_rcu(node); | 2213 | hlist_del_rcu(node); |
2212 | tt_local = container_of(tt_common, | 2214 | tt_local = container_of(tt_common, |
2213 | struct batadv_tt_local_entry, | 2215 | struct batadv_tt_local_entry, |
@@ -2225,26 +2227,26 @@ static int batadv_tt_commit_changes(struct batadv_priv *bat_priv, | |||
2225 | { | 2227 | { |
2226 | uint16_t changed_num = 0; | 2228 | uint16_t changed_num = 0; |
2227 | 2229 | ||
2228 | if (atomic_read(&bat_priv->tt_local_changes) < 1) | 2230 | if (atomic_read(&bat_priv->tt.local_changes) < 1) |
2229 | return -ENOENT; | 2231 | return -ENOENT; |
2230 | 2232 | ||
2231 | changed_num = batadv_tt_set_flags(bat_priv->tt_local_hash, | 2233 | changed_num = batadv_tt_set_flags(bat_priv->tt.local_hash, |
2232 | BATADV_TT_CLIENT_NEW, false); | 2234 | BATADV_TT_CLIENT_NEW, false); |
2233 | 2235 | ||
2234 | /* all reset entries have to be counted as local entries */ | 2236 | /* all reset entries have to be counted as local entries */ |
2235 | atomic_add(changed_num, &bat_priv->num_local_tt); | 2237 | atomic_add(changed_num, &bat_priv->tt.local_entry_num); |
2236 | batadv_tt_local_purge_pending_clients(bat_priv); | 2238 | batadv_tt_local_purge_pending_clients(bat_priv); |
2237 | bat_priv->tt_crc = batadv_tt_local_crc(bat_priv); | 2239 | bat_priv->tt.local_crc = batadv_tt_local_crc(bat_priv); |
2238 | 2240 | ||
2239 | /* Increment the TTVN only once per OGM interval */ | 2241 | /* Increment the TTVN only once per OGM interval */ |
2240 | atomic_inc(&bat_priv->ttvn); | 2242 | atomic_inc(&bat_priv->tt.vn); |
2241 | batadv_dbg(BATADV_DBG_TT, bat_priv, | 2243 | batadv_dbg(BATADV_DBG_TT, bat_priv, |
2242 | "Local changes committed, updating to ttvn %u\n", | 2244 | "Local changes committed, updating to ttvn %u\n", |
2243 | (uint8_t)atomic_read(&bat_priv->ttvn)); | 2245 | (uint8_t)atomic_read(&bat_priv->tt.vn)); |
2244 | bat_priv->tt_poss_change = false; | 2246 | bat_priv->tt.poss_change = false; |
2245 | 2247 | ||
2246 | /* reset the sending counter */ | 2248 | /* reset the sending counter */ |
2247 | atomic_set(&bat_priv->tt_ogm_append_cnt, BATADV_TT_OGM_APPEND_MAX); | 2249 | atomic_set(&bat_priv->tt.ogm_append_cnt, BATADV_TT_OGM_APPEND_MAX); |
2248 | 2250 | ||
2249 | return batadv_tt_changes_fill_buff(bat_priv, packet_buff, | 2251 | return batadv_tt_changes_fill_buff(bat_priv, packet_buff, |
2250 | packet_buff_len, packet_min_len); | 2252 | packet_buff_len, packet_min_len); |
@@ -2264,7 +2266,7 @@ int batadv_tt_append_diff(struct batadv_priv *bat_priv, | |||
2264 | 2266 | ||
2265 | /* if the changes have been sent often enough */ | 2267 | /* if the changes have been sent often enough */ |
2266 | if ((tt_num_changes < 0) && | 2268 | if ((tt_num_changes < 0) && |
2267 | (!batadv_atomic_dec_not_zero(&bat_priv->tt_ogm_append_cnt))) { | 2269 | (!batadv_atomic_dec_not_zero(&bat_priv->tt.ogm_append_cnt))) { |
2268 | batadv_tt_realloc_packet_buff(packet_buff, packet_buff_len, | 2270 | batadv_tt_realloc_packet_buff(packet_buff, packet_buff_len, |
2269 | packet_min_len, packet_min_len); | 2271 | packet_min_len, packet_min_len); |
2270 | tt_num_changes = 0; | 2272 | tt_num_changes = 0; |
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h index 664ef8148555..97c4978dee69 100644 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h | |||
@@ -165,6 +165,67 @@ enum batadv_counters { | |||
165 | BATADV_CNT_NUM, | 165 | BATADV_CNT_NUM, |
166 | }; | 166 | }; |
167 | 167 | ||
168 | /** | ||
169 | * struct batadv_priv_tt - per mesh interface translation table data | ||
170 | * @vn: translation table version number | ||
171 | * @local_changes: changes registered in an originator interval | ||
172 | * @poss_change: Detect an ongoing roaming phase. If true, then this node | ||
173 | * received a roaming_adv and has to inspect every packet directed to it to | ||
174 | * check whether it still is the true destination or not. This flag will be | ||
175 | * reset to false as soon as the this node's ttvn is increased | ||
176 | * @changes_list: tracks tt local changes within an originator interval | ||
177 | * @req_list: list of pending tt_requests | ||
178 | * @local_crc: Checksum of the local table, recomputed before sending a new OGM | ||
179 | */ | ||
180 | struct batadv_priv_tt { | ||
181 | atomic_t vn; | ||
182 | atomic_t ogm_append_cnt; | ||
183 | atomic_t local_changes; | ||
184 | bool poss_change; | ||
185 | struct list_head changes_list; | ||
186 | struct batadv_hashtable *local_hash; | ||
187 | struct batadv_hashtable *global_hash; | ||
188 | struct list_head req_list; | ||
189 | struct list_head roam_list; | ||
190 | spinlock_t changes_list_lock; /* protects changes */ | ||
191 | spinlock_t req_list_lock; /* protects req_list */ | ||
192 | spinlock_t roam_list_lock; /* protects roam_list */ | ||
193 | atomic_t local_entry_num; | ||
194 | uint16_t local_crc; | ||
195 | unsigned char *last_changeset; | ||
196 | int16_t last_changeset_len; | ||
197 | spinlock_t last_changeset_lock; /* protects last_changeset */ | ||
198 | struct delayed_work work; | ||
199 | }; | ||
200 | |||
201 | #ifdef CONFIG_BATMAN_ADV_BLA | ||
202 | struct batadv_priv_bla { | ||
203 | atomic_t num_requests; /* number of bla requests in flight */ | ||
204 | struct batadv_hashtable *claim_hash; | ||
205 | struct batadv_hashtable *backbone_hash; | ||
206 | struct batadv_bcast_duplist_entry bcast_duplist[BATADV_DUPLIST_SIZE]; | ||
207 | int bcast_duplist_curr; | ||
208 | struct batadv_bla_claim_dst claim_dest; | ||
209 | struct delayed_work work; | ||
210 | }; | ||
211 | #endif | ||
212 | |||
213 | struct batadv_priv_gw { | ||
214 | struct hlist_head list; | ||
215 | spinlock_t list_lock; /* protects gw_list and curr_gw */ | ||
216 | struct batadv_gw_node __rcu *curr_gw; /* rcu protected pointer */ | ||
217 | atomic_t reselect; | ||
218 | }; | ||
219 | |||
220 | struct batadv_priv_vis { | ||
221 | struct list_head send_list; | ||
222 | struct batadv_hashtable *hash; | ||
223 | spinlock_t hash_lock; /* protects hash */ | ||
224 | spinlock_t list_lock; /* protects info::recv_list */ | ||
225 | struct delayed_work work; | ||
226 | struct batadv_vis_info *my_info; | ||
227 | }; | ||
228 | |||
168 | struct batadv_priv { | 229 | struct batadv_priv { |
169 | atomic_t mesh_state; | 230 | atomic_t mesh_state; |
170 | struct net_device_stats stats; | 231 | struct net_device_stats stats; |
@@ -184,64 +245,24 @@ struct batadv_priv { | |||
184 | atomic_t bcast_seqno; | 245 | atomic_t bcast_seqno; |
185 | atomic_t bcast_queue_left; | 246 | atomic_t bcast_queue_left; |
186 | atomic_t batman_queue_left; | 247 | atomic_t batman_queue_left; |
187 | atomic_t ttvn; /* translation table version number */ | ||
188 | atomic_t tt_ogm_append_cnt; | ||
189 | atomic_t tt_local_changes; /* changes registered in a OGM interval */ | ||
190 | atomic_t bla_num_requests; /* number of bla requests in flight */ | ||
191 | /* The tt_poss_change flag is used to detect an ongoing roaming phase. | ||
192 | * If true, then I received a Roaming_adv and I have to inspect every | ||
193 | * packet directed to me to check whether I am still the true | ||
194 | * destination or not. This flag will be reset to false as soon as I | ||
195 | * increase my TTVN | ||
196 | */ | ||
197 | bool tt_poss_change; | ||
198 | char num_ifaces; | 248 | char num_ifaces; |
199 | struct batadv_debug_log *debug_log; | 249 | struct batadv_debug_log *debug_log; |
200 | struct kobject *mesh_obj; | 250 | struct kobject *mesh_obj; |
201 | struct dentry *debug_dir; | 251 | struct dentry *debug_dir; |
202 | struct hlist_head forw_bat_list; | 252 | struct hlist_head forw_bat_list; |
203 | struct hlist_head forw_bcast_list; | 253 | struct hlist_head forw_bcast_list; |
204 | struct hlist_head gw_list; | ||
205 | struct list_head tt_changes_list; /* tracks changes in a OGM int */ | ||
206 | struct list_head vis_send_list; | ||
207 | struct batadv_hashtable *orig_hash; | 254 | struct batadv_hashtable *orig_hash; |
208 | struct batadv_hashtable *tt_local_hash; | ||
209 | struct batadv_hashtable *tt_global_hash; | ||
210 | #ifdef CONFIG_BATMAN_ADV_BLA | ||
211 | struct batadv_hashtable *claim_hash; | ||
212 | struct batadv_hashtable *backbone_hash; | ||
213 | #endif | ||
214 | struct list_head tt_req_list; /* list of pending tt_requests */ | ||
215 | struct list_head tt_roam_list; | ||
216 | struct batadv_hashtable *vis_hash; | ||
217 | #ifdef CONFIG_BATMAN_ADV_BLA | ||
218 | struct batadv_bcast_duplist_entry bcast_duplist[BATADV_DUPLIST_SIZE]; | ||
219 | int bcast_duplist_curr; | ||
220 | struct batadv_bla_claim_dst claim_dest; | ||
221 | #endif | ||
222 | spinlock_t forw_bat_list_lock; /* protects forw_bat_list */ | 255 | spinlock_t forw_bat_list_lock; /* protects forw_bat_list */ |
223 | spinlock_t forw_bcast_list_lock; /* protects */ | 256 | spinlock_t forw_bcast_list_lock; /* protects */ |
224 | spinlock_t tt_changes_list_lock; /* protects tt_changes */ | ||
225 | spinlock_t tt_req_list_lock; /* protects tt_req_list */ | ||
226 | spinlock_t tt_roam_list_lock; /* protects tt_roam_list */ | ||
227 | spinlock_t gw_list_lock; /* protects gw_list and curr_gw */ | ||
228 | spinlock_t vis_hash_lock; /* protects vis_hash */ | ||
229 | spinlock_t vis_list_lock; /* protects vis_info::recv_list */ | ||
230 | atomic_t num_local_tt; | ||
231 | /* Checksum of the local table, recomputed before sending a new OGM */ | ||
232 | uint16_t tt_crc; | ||
233 | unsigned char *tt_buff; | ||
234 | int16_t tt_buff_len; | ||
235 | spinlock_t tt_buff_lock; /* protects tt_buff */ | ||
236 | struct delayed_work tt_work; | ||
237 | struct delayed_work orig_work; | 257 | struct delayed_work orig_work; |
238 | struct delayed_work vis_work; | ||
239 | struct delayed_work bla_work; | ||
240 | struct batadv_gw_node __rcu *curr_gw; /* rcu protected pointer */ | ||
241 | atomic_t gw_reselect; | ||
242 | struct batadv_hard_iface __rcu *primary_if; /* rcu protected pointer */ | 258 | struct batadv_hard_iface __rcu *primary_if; /* rcu protected pointer */ |
243 | struct batadv_vis_info *my_vis_info; | ||
244 | struct batadv_algo_ops *bat_algo_ops; | 259 | struct batadv_algo_ops *bat_algo_ops; |
260 | #ifdef CONFIG_BATMAN_ADV_BLA | ||
261 | struct batadv_priv_bla bla; | ||
262 | #endif | ||
263 | struct batadv_priv_gw gw; | ||
264 | struct batadv_priv_tt tt; | ||
265 | struct batadv_priv_vis vis; | ||
245 | }; | 266 | }; |
246 | 267 | ||
247 | struct batadv_socket_client { | 268 | struct batadv_socket_client { |
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c index 2a2ea0681469..4608c1b22d44 100644 --- a/net/batman-adv/vis.c +++ b/net/batman-adv/vis.c | |||
@@ -41,13 +41,13 @@ static void batadv_free_info(struct kref *ref) | |||
41 | bat_priv = info->bat_priv; | 41 | bat_priv = info->bat_priv; |
42 | 42 | ||
43 | list_del_init(&info->send_list); | 43 | list_del_init(&info->send_list); |
44 | spin_lock_bh(&bat_priv->vis_list_lock); | 44 | spin_lock_bh(&bat_priv->vis.list_lock); |
45 | list_for_each_entry_safe(entry, tmp, &info->recv_list, list) { | 45 | list_for_each_entry_safe(entry, tmp, &info->recv_list, list) { |
46 | list_del(&entry->list); | 46 | list_del(&entry->list); |
47 | kfree(entry); | 47 | kfree(entry); |
48 | } | 48 | } |
49 | 49 | ||
50 | spin_unlock_bh(&bat_priv->vis_list_lock); | 50 | spin_unlock_bh(&bat_priv->vis.list_lock); |
51 | kfree_skb(info->skb_packet); | 51 | kfree_skb(info->skb_packet); |
52 | kfree(info); | 52 | kfree(info); |
53 | } | 53 | } |
@@ -94,7 +94,7 @@ static uint32_t batadv_vis_info_choose(const void *data, uint32_t size) | |||
94 | static struct batadv_vis_info * | 94 | static struct batadv_vis_info * |
95 | batadv_vis_hash_find(struct batadv_priv *bat_priv, const void *data) | 95 | batadv_vis_hash_find(struct batadv_priv *bat_priv, const void *data) |
96 | { | 96 | { |
97 | struct batadv_hashtable *hash = bat_priv->vis_hash; | 97 | struct batadv_hashtable *hash = bat_priv->vis.hash; |
98 | struct hlist_head *head; | 98 | struct hlist_head *head; |
99 | struct hlist_node *node; | 99 | struct hlist_node *node; |
100 | struct batadv_vis_info *vis_info, *vis_info_tmp = NULL; | 100 | struct batadv_vis_info *vis_info, *vis_info_tmp = NULL; |
@@ -252,7 +252,7 @@ int batadv_vis_seq_print_text(struct seq_file *seq, void *offset) | |||
252 | struct hlist_head *head; | 252 | struct hlist_head *head; |
253 | struct net_device *net_dev = (struct net_device *)seq->private; | 253 | struct net_device *net_dev = (struct net_device *)seq->private; |
254 | struct batadv_priv *bat_priv = netdev_priv(net_dev); | 254 | struct batadv_priv *bat_priv = netdev_priv(net_dev); |
255 | struct batadv_hashtable *hash = bat_priv->vis_hash; | 255 | struct batadv_hashtable *hash = bat_priv->vis.hash; |
256 | uint32_t i; | 256 | uint32_t i; |
257 | int ret = 0; | 257 | int ret = 0; |
258 | int vis_server = atomic_read(&bat_priv->vis_mode); | 258 | int vis_server = atomic_read(&bat_priv->vis_mode); |
@@ -264,12 +264,12 @@ int batadv_vis_seq_print_text(struct seq_file *seq, void *offset) | |||
264 | if (vis_server == BATADV_VIS_TYPE_CLIENT_UPDATE) | 264 | if (vis_server == BATADV_VIS_TYPE_CLIENT_UPDATE) |
265 | goto out; | 265 | goto out; |
266 | 266 | ||
267 | spin_lock_bh(&bat_priv->vis_hash_lock); | 267 | spin_lock_bh(&bat_priv->vis.hash_lock); |
268 | for (i = 0; i < hash->size; i++) { | 268 | for (i = 0; i < hash->size; i++) { |
269 | head = &hash->table[i]; | 269 | head = &hash->table[i]; |
270 | batadv_vis_seq_print_text_bucket(seq, head); | 270 | batadv_vis_seq_print_text_bucket(seq, head); |
271 | } | 271 | } |
272 | spin_unlock_bh(&bat_priv->vis_hash_lock); | 272 | spin_unlock_bh(&bat_priv->vis.hash_lock); |
273 | 273 | ||
274 | out: | 274 | out: |
275 | if (primary_if) | 275 | if (primary_if) |
@@ -285,7 +285,7 @@ static void batadv_send_list_add(struct batadv_priv *bat_priv, | |||
285 | { | 285 | { |
286 | if (list_empty(&info->send_list)) { | 286 | if (list_empty(&info->send_list)) { |
287 | kref_get(&info->refcount); | 287 | kref_get(&info->refcount); |
288 | list_add_tail(&info->send_list, &bat_priv->vis_send_list); | 288 | list_add_tail(&info->send_list, &bat_priv->vis.send_list); |
289 | } | 289 | } |
290 | } | 290 | } |
291 | 291 | ||
@@ -311,9 +311,9 @@ static void batadv_recv_list_add(struct batadv_priv *bat_priv, | |||
311 | return; | 311 | return; |
312 | 312 | ||
313 | memcpy(entry->mac, mac, ETH_ALEN); | 313 | memcpy(entry->mac, mac, ETH_ALEN); |
314 | spin_lock_bh(&bat_priv->vis_list_lock); | 314 | spin_lock_bh(&bat_priv->vis.list_lock); |
315 | list_add_tail(&entry->list, recv_list); | 315 | list_add_tail(&entry->list, recv_list); |
316 | spin_unlock_bh(&bat_priv->vis_list_lock); | 316 | spin_unlock_bh(&bat_priv->vis.list_lock); |
317 | } | 317 | } |
318 | 318 | ||
319 | /* returns 1 if this mac is in the recv_list */ | 319 | /* returns 1 if this mac is in the recv_list */ |
@@ -323,14 +323,14 @@ static int batadv_recv_list_is_in(struct batadv_priv *bat_priv, | |||
323 | { | 323 | { |
324 | const struct batadv_recvlist_node *entry; | 324 | const struct batadv_recvlist_node *entry; |
325 | 325 | ||
326 | spin_lock_bh(&bat_priv->vis_list_lock); | 326 | spin_lock_bh(&bat_priv->vis.list_lock); |
327 | list_for_each_entry(entry, recv_list, list) { | 327 | list_for_each_entry(entry, recv_list, list) { |
328 | if (batadv_compare_eth(entry->mac, mac)) { | 328 | if (batadv_compare_eth(entry->mac, mac)) { |
329 | spin_unlock_bh(&bat_priv->vis_list_lock); | 329 | spin_unlock_bh(&bat_priv->vis.list_lock); |
330 | return 1; | 330 | return 1; |
331 | } | 331 | } |
332 | } | 332 | } |
333 | spin_unlock_bh(&bat_priv->vis_list_lock); | 333 | spin_unlock_bh(&bat_priv->vis.list_lock); |
334 | return 0; | 334 | return 0; |
335 | } | 335 | } |
336 | 336 | ||
@@ -354,7 +354,7 @@ batadv_add_packet(struct batadv_priv *bat_priv, | |||
354 | 354 | ||
355 | *is_new = 0; | 355 | *is_new = 0; |
356 | /* sanity check */ | 356 | /* sanity check */ |
357 | if (!bat_priv->vis_hash) | 357 | if (!bat_priv->vis.hash) |
358 | return NULL; | 358 | return NULL; |
359 | 359 | ||
360 | /* see if the packet is already in vis_hash */ | 360 | /* see if the packet is already in vis_hash */ |
@@ -385,7 +385,7 @@ batadv_add_packet(struct batadv_priv *bat_priv, | |||
385 | } | 385 | } |
386 | } | 386 | } |
387 | /* remove old entry */ | 387 | /* remove old entry */ |
388 | batadv_hash_remove(bat_priv->vis_hash, batadv_vis_info_cmp, | 388 | batadv_hash_remove(bat_priv->vis.hash, batadv_vis_info_cmp, |
389 | batadv_vis_info_choose, old_info); | 389 | batadv_vis_info_choose, old_info); |
390 | batadv_send_list_del(old_info); | 390 | batadv_send_list_del(old_info); |
391 | kref_put(&old_info->refcount, batadv_free_info); | 391 | kref_put(&old_info->refcount, batadv_free_info); |
@@ -426,7 +426,7 @@ batadv_add_packet(struct batadv_priv *bat_priv, | |||
426 | batadv_recv_list_add(bat_priv, &info->recv_list, packet->sender_orig); | 426 | batadv_recv_list_add(bat_priv, &info->recv_list, packet->sender_orig); |
427 | 427 | ||
428 | /* try to add it */ | 428 | /* try to add it */ |
429 | hash_added = batadv_hash_add(bat_priv->vis_hash, batadv_vis_info_cmp, | 429 | hash_added = batadv_hash_add(bat_priv->vis.hash, batadv_vis_info_cmp, |
430 | batadv_vis_info_choose, info, | 430 | batadv_vis_info_choose, info, |
431 | &info->hash_entry); | 431 | &info->hash_entry); |
432 | if (hash_added != 0) { | 432 | if (hash_added != 0) { |
@@ -449,7 +449,7 @@ void batadv_receive_server_sync_packet(struct batadv_priv *bat_priv, | |||
449 | 449 | ||
450 | make_broadcast = (vis_server == BATADV_VIS_TYPE_SERVER_SYNC); | 450 | make_broadcast = (vis_server == BATADV_VIS_TYPE_SERVER_SYNC); |
451 | 451 | ||
452 | spin_lock_bh(&bat_priv->vis_hash_lock); | 452 | spin_lock_bh(&bat_priv->vis.hash_lock); |
453 | info = batadv_add_packet(bat_priv, vis_packet, vis_info_len, | 453 | info = batadv_add_packet(bat_priv, vis_packet, vis_info_len, |
454 | &is_new, make_broadcast); | 454 | &is_new, make_broadcast); |
455 | if (!info) | 455 | if (!info) |
@@ -461,7 +461,7 @@ void batadv_receive_server_sync_packet(struct batadv_priv *bat_priv, | |||
461 | if (vis_server == BATADV_VIS_TYPE_SERVER_SYNC && is_new) | 461 | if (vis_server == BATADV_VIS_TYPE_SERVER_SYNC && is_new) |
462 | batadv_send_list_add(bat_priv, info); | 462 | batadv_send_list_add(bat_priv, info); |
463 | end: | 463 | end: |
464 | spin_unlock_bh(&bat_priv->vis_hash_lock); | 464 | spin_unlock_bh(&bat_priv->vis.hash_lock); |
465 | } | 465 | } |
466 | 466 | ||
467 | /* handle an incoming client update packet and schedule forward if needed. */ | 467 | /* handle an incoming client update packet and schedule forward if needed. */ |
@@ -484,7 +484,7 @@ void batadv_receive_client_update_packet(struct batadv_priv *bat_priv, | |||
484 | batadv_is_my_mac(vis_packet->target_orig)) | 484 | batadv_is_my_mac(vis_packet->target_orig)) |
485 | are_target = 1; | 485 | are_target = 1; |
486 | 486 | ||
487 | spin_lock_bh(&bat_priv->vis_hash_lock); | 487 | spin_lock_bh(&bat_priv->vis.hash_lock); |
488 | info = batadv_add_packet(bat_priv, vis_packet, vis_info_len, | 488 | info = batadv_add_packet(bat_priv, vis_packet, vis_info_len, |
489 | &is_new, are_target); | 489 | &is_new, are_target); |
490 | 490 | ||
@@ -505,7 +505,7 @@ void batadv_receive_client_update_packet(struct batadv_priv *bat_priv, | |||
505 | } | 505 | } |
506 | 506 | ||
507 | end: | 507 | end: |
508 | spin_unlock_bh(&bat_priv->vis_hash_lock); | 508 | spin_unlock_bh(&bat_priv->vis.hash_lock); |
509 | } | 509 | } |
510 | 510 | ||
511 | /* Walk the originators and find the VIS server with the best tq. Set the packet | 511 | /* Walk the originators and find the VIS server with the best tq. Set the packet |
@@ -574,7 +574,7 @@ static int batadv_generate_vis_packet(struct batadv_priv *bat_priv) | |||
574 | struct hlist_head *head; | 574 | struct hlist_head *head; |
575 | struct batadv_orig_node *orig_node; | 575 | struct batadv_orig_node *orig_node; |
576 | struct batadv_neigh_node *router; | 576 | struct batadv_neigh_node *router; |
577 | struct batadv_vis_info *info = bat_priv->my_vis_info; | 577 | struct batadv_vis_info *info = bat_priv->vis.my_info; |
578 | struct batadv_vis_packet *packet; | 578 | struct batadv_vis_packet *packet; |
579 | struct batadv_vis_info_entry *entry; | 579 | struct batadv_vis_info_entry *entry; |
580 | struct batadv_tt_common_entry *tt_common_entry; | 580 | struct batadv_tt_common_entry *tt_common_entry; |
@@ -636,7 +636,7 @@ next: | |||
636 | rcu_read_unlock(); | 636 | rcu_read_unlock(); |
637 | } | 637 | } |
638 | 638 | ||
639 | hash = bat_priv->tt_local_hash; | 639 | hash = bat_priv->tt.local_hash; |
640 | 640 | ||
641 | for (i = 0; i < hash->size; i++) { | 641 | for (i = 0; i < hash->size; i++) { |
642 | head = &hash->table[i]; | 642 | head = &hash->table[i]; |
@@ -671,7 +671,7 @@ unlock: | |||
671 | static void batadv_purge_vis_packets(struct batadv_priv *bat_priv) | 671 | static void batadv_purge_vis_packets(struct batadv_priv *bat_priv) |
672 | { | 672 | { |
673 | uint32_t i; | 673 | uint32_t i; |
674 | struct batadv_hashtable *hash = bat_priv->vis_hash; | 674 | struct batadv_hashtable *hash = bat_priv->vis.hash; |
675 | struct hlist_node *node, *node_tmp; | 675 | struct hlist_node *node, *node_tmp; |
676 | struct hlist_head *head; | 676 | struct hlist_head *head; |
677 | struct batadv_vis_info *info; | 677 | struct batadv_vis_info *info; |
@@ -682,7 +682,7 @@ static void batadv_purge_vis_packets(struct batadv_priv *bat_priv) | |||
682 | hlist_for_each_entry_safe(info, node, node_tmp, | 682 | hlist_for_each_entry_safe(info, node, node_tmp, |
683 | head, hash_entry) { | 683 | head, hash_entry) { |
684 | /* never purge own data. */ | 684 | /* never purge own data. */ |
685 | if (info == bat_priv->my_vis_info) | 685 | if (info == bat_priv->vis.my_info) |
686 | continue; | 686 | continue; |
687 | 687 | ||
688 | if (batadv_has_timed_out(info->first_seen, | 688 | if (batadv_has_timed_out(info->first_seen, |
@@ -817,31 +817,33 @@ static void batadv_send_vis_packets(struct work_struct *work) | |||
817 | struct delayed_work *delayed_work = | 817 | struct delayed_work *delayed_work = |
818 | container_of(work, struct delayed_work, work); | 818 | container_of(work, struct delayed_work, work); |
819 | struct batadv_priv *bat_priv; | 819 | struct batadv_priv *bat_priv; |
820 | struct batadv_priv_vis *priv_vis; | ||
820 | struct batadv_vis_info *info; | 821 | struct batadv_vis_info *info; |
821 | 822 | ||
822 | bat_priv = container_of(delayed_work, struct batadv_priv, vis_work); | 823 | priv_vis = container_of(delayed_work, struct batadv_priv_vis, work); |
823 | spin_lock_bh(&bat_priv->vis_hash_lock); | 824 | bat_priv = container_of(priv_vis, struct batadv_priv, vis); |
825 | spin_lock_bh(&bat_priv->vis.hash_lock); | ||
824 | batadv_purge_vis_packets(bat_priv); | 826 | batadv_purge_vis_packets(bat_priv); |
825 | 827 | ||
826 | if (batadv_generate_vis_packet(bat_priv) == 0) { | 828 | if (batadv_generate_vis_packet(bat_priv) == 0) { |
827 | /* schedule if generation was successful */ | 829 | /* schedule if generation was successful */ |
828 | batadv_send_list_add(bat_priv, bat_priv->my_vis_info); | 830 | batadv_send_list_add(bat_priv, bat_priv->vis.my_info); |
829 | } | 831 | } |
830 | 832 | ||
831 | while (!list_empty(&bat_priv->vis_send_list)) { | 833 | while (!list_empty(&bat_priv->vis.send_list)) { |
832 | info = list_first_entry(&bat_priv->vis_send_list, | 834 | info = list_first_entry(&bat_priv->vis.send_list, |
833 | typeof(*info), send_list); | 835 | typeof(*info), send_list); |
834 | 836 | ||
835 | kref_get(&info->refcount); | 837 | kref_get(&info->refcount); |
836 | spin_unlock_bh(&bat_priv->vis_hash_lock); | 838 | spin_unlock_bh(&bat_priv->vis.hash_lock); |
837 | 839 | ||
838 | batadv_send_vis_packet(bat_priv, info); | 840 | batadv_send_vis_packet(bat_priv, info); |
839 | 841 | ||
840 | spin_lock_bh(&bat_priv->vis_hash_lock); | 842 | spin_lock_bh(&bat_priv->vis.hash_lock); |
841 | batadv_send_list_del(info); | 843 | batadv_send_list_del(info); |
842 | kref_put(&info->refcount, batadv_free_info); | 844 | kref_put(&info->refcount, batadv_free_info); |
843 | } | 845 | } |
844 | spin_unlock_bh(&bat_priv->vis_hash_lock); | 846 | spin_unlock_bh(&bat_priv->vis.hash_lock); |
845 | batadv_start_vis_timer(bat_priv); | 847 | batadv_start_vis_timer(bat_priv); |
846 | } | 848 | } |
847 | 849 | ||
@@ -856,37 +858,37 @@ int batadv_vis_init(struct batadv_priv *bat_priv) | |||
856 | unsigned long first_seen; | 858 | unsigned long first_seen; |
857 | struct sk_buff *tmp_skb; | 859 | struct sk_buff *tmp_skb; |
858 | 860 | ||
859 | if (bat_priv->vis_hash) | 861 | if (bat_priv->vis.hash) |
860 | return 0; | 862 | return 0; |
861 | 863 | ||
862 | spin_lock_bh(&bat_priv->vis_hash_lock); | 864 | spin_lock_bh(&bat_priv->vis.hash_lock); |
863 | 865 | ||
864 | bat_priv->vis_hash = batadv_hash_new(256); | 866 | bat_priv->vis.hash = batadv_hash_new(256); |
865 | if (!bat_priv->vis_hash) { | 867 | if (!bat_priv->vis.hash) { |
866 | pr_err("Can't initialize vis_hash\n"); | 868 | pr_err("Can't initialize vis_hash\n"); |
867 | goto err; | 869 | goto err; |
868 | } | 870 | } |
869 | 871 | ||
870 | bat_priv->my_vis_info = kmalloc(BATADV_MAX_VIS_PACKET_SIZE, GFP_ATOMIC); | 872 | bat_priv->vis.my_info = kmalloc(BATADV_MAX_VIS_PACKET_SIZE, GFP_ATOMIC); |
871 | if (!bat_priv->my_vis_info) | 873 | if (!bat_priv->vis.my_info) |
872 | goto err; | 874 | goto err; |
873 | 875 | ||
874 | len = sizeof(*packet) + BATADV_MAX_VIS_PACKET_SIZE + ETH_HLEN; | 876 | len = sizeof(*packet) + BATADV_MAX_VIS_PACKET_SIZE + ETH_HLEN; |
875 | bat_priv->my_vis_info->skb_packet = dev_alloc_skb(len); | 877 | bat_priv->vis.my_info->skb_packet = dev_alloc_skb(len); |
876 | if (!bat_priv->my_vis_info->skb_packet) | 878 | if (!bat_priv->vis.my_info->skb_packet) |
877 | goto free_info; | 879 | goto free_info; |
878 | 880 | ||
879 | skb_reserve(bat_priv->my_vis_info->skb_packet, ETH_HLEN); | 881 | skb_reserve(bat_priv->vis.my_info->skb_packet, ETH_HLEN); |
880 | tmp_skb = bat_priv->my_vis_info->skb_packet; | 882 | tmp_skb = bat_priv->vis.my_info->skb_packet; |
881 | packet = (struct batadv_vis_packet *)skb_put(tmp_skb, sizeof(*packet)); | 883 | packet = (struct batadv_vis_packet *)skb_put(tmp_skb, sizeof(*packet)); |
882 | 884 | ||
883 | /* prefill the vis info */ | 885 | /* prefill the vis info */ |
884 | first_seen = jiffies - msecs_to_jiffies(BATADV_VIS_INTERVAL); | 886 | first_seen = jiffies - msecs_to_jiffies(BATADV_VIS_INTERVAL); |
885 | bat_priv->my_vis_info->first_seen = first_seen; | 887 | bat_priv->vis.my_info->first_seen = first_seen; |
886 | INIT_LIST_HEAD(&bat_priv->my_vis_info->recv_list); | 888 | INIT_LIST_HEAD(&bat_priv->vis.my_info->recv_list); |
887 | INIT_LIST_HEAD(&bat_priv->my_vis_info->send_list); | 889 | INIT_LIST_HEAD(&bat_priv->vis.my_info->send_list); |
888 | kref_init(&bat_priv->my_vis_info->refcount); | 890 | kref_init(&bat_priv->vis.my_info->refcount); |
889 | bat_priv->my_vis_info->bat_priv = bat_priv; | 891 | bat_priv->vis.my_info->bat_priv = bat_priv; |
890 | packet->header.version = BATADV_COMPAT_VERSION; | 892 | packet->header.version = BATADV_COMPAT_VERSION; |
891 | packet->header.packet_type = BATADV_VIS; | 893 | packet->header.packet_type = BATADV_VIS; |
892 | packet->header.ttl = BATADV_TTL; | 894 | packet->header.ttl = BATADV_TTL; |
@@ -894,28 +896,28 @@ int batadv_vis_init(struct batadv_priv *bat_priv) | |||
894 | packet->reserved = 0; | 896 | packet->reserved = 0; |
895 | packet->entries = 0; | 897 | packet->entries = 0; |
896 | 898 | ||
897 | INIT_LIST_HEAD(&bat_priv->vis_send_list); | 899 | INIT_LIST_HEAD(&bat_priv->vis.send_list); |
898 | 900 | ||
899 | hash_added = batadv_hash_add(bat_priv->vis_hash, batadv_vis_info_cmp, | 901 | hash_added = batadv_hash_add(bat_priv->vis.hash, batadv_vis_info_cmp, |
900 | batadv_vis_info_choose, | 902 | batadv_vis_info_choose, |
901 | bat_priv->my_vis_info, | 903 | bat_priv->vis.my_info, |
902 | &bat_priv->my_vis_info->hash_entry); | 904 | &bat_priv->vis.my_info->hash_entry); |
903 | if (hash_added != 0) { | 905 | if (hash_added != 0) { |
904 | pr_err("Can't add own vis packet into hash\n"); | 906 | pr_err("Can't add own vis packet into hash\n"); |
905 | /* not in hash, need to remove it manually. */ | 907 | /* not in hash, need to remove it manually. */ |
906 | kref_put(&bat_priv->my_vis_info->refcount, batadv_free_info); | 908 | kref_put(&bat_priv->vis.my_info->refcount, batadv_free_info); |
907 | goto err; | 909 | goto err; |
908 | } | 910 | } |
909 | 911 | ||
910 | spin_unlock_bh(&bat_priv->vis_hash_lock); | 912 | spin_unlock_bh(&bat_priv->vis.hash_lock); |
911 | batadv_start_vis_timer(bat_priv); | 913 | batadv_start_vis_timer(bat_priv); |
912 | return 0; | 914 | return 0; |
913 | 915 | ||
914 | free_info: | 916 | free_info: |
915 | kfree(bat_priv->my_vis_info); | 917 | kfree(bat_priv->vis.my_info); |
916 | bat_priv->my_vis_info = NULL; | 918 | bat_priv->vis.my_info = NULL; |
917 | err: | 919 | err: |
918 | spin_unlock_bh(&bat_priv->vis_hash_lock); | 920 | spin_unlock_bh(&bat_priv->vis.hash_lock); |
919 | batadv_vis_quit(bat_priv); | 921 | batadv_vis_quit(bat_priv); |
920 | return -ENOMEM; | 922 | return -ENOMEM; |
921 | } | 923 | } |
@@ -933,23 +935,23 @@ static void batadv_free_info_ref(struct hlist_node *node, void *arg) | |||
933 | /* shutdown vis-server */ | 935 | /* shutdown vis-server */ |
934 | void batadv_vis_quit(struct batadv_priv *bat_priv) | 936 | void batadv_vis_quit(struct batadv_priv *bat_priv) |
935 | { | 937 | { |
936 | if (!bat_priv->vis_hash) | 938 | if (!bat_priv->vis.hash) |
937 | return; | 939 | return; |
938 | 940 | ||
939 | cancel_delayed_work_sync(&bat_priv->vis_work); | 941 | cancel_delayed_work_sync(&bat_priv->vis.work); |
940 | 942 | ||
941 | spin_lock_bh(&bat_priv->vis_hash_lock); | 943 | spin_lock_bh(&bat_priv->vis.hash_lock); |
942 | /* properly remove, kill timers ... */ | 944 | /* properly remove, kill timers ... */ |
943 | batadv_hash_delete(bat_priv->vis_hash, batadv_free_info_ref, NULL); | 945 | batadv_hash_delete(bat_priv->vis.hash, batadv_free_info_ref, NULL); |
944 | bat_priv->vis_hash = NULL; | 946 | bat_priv->vis.hash = NULL; |
945 | bat_priv->my_vis_info = NULL; | 947 | bat_priv->vis.my_info = NULL; |
946 | spin_unlock_bh(&bat_priv->vis_hash_lock); | 948 | spin_unlock_bh(&bat_priv->vis.hash_lock); |
947 | } | 949 | } |
948 | 950 | ||
949 | /* schedule packets for (re)transmission */ | 951 | /* schedule packets for (re)transmission */ |
950 | static void batadv_start_vis_timer(struct batadv_priv *bat_priv) | 952 | static void batadv_start_vis_timer(struct batadv_priv *bat_priv) |
951 | { | 953 | { |
952 | INIT_DELAYED_WORK(&bat_priv->vis_work, batadv_send_vis_packets); | 954 | INIT_DELAYED_WORK(&bat_priv->vis.work, batadv_send_vis_packets); |
953 | queue_delayed_work(batadv_event_workqueue, &bat_priv->vis_work, | 955 | queue_delayed_work(batadv_event_workqueue, &bat_priv->vis.work, |
954 | msecs_to_jiffies(BATADV_VIS_INTERVAL)); | 956 | msecs_to_jiffies(BATADV_VIS_INTERVAL)); |
955 | } | 957 | } |