author    Antonio Quartulli <ordex@autistici.org>    2011-04-27 08:28:07 -0400
committer Sven Eckelmann <sven@narfation.org>        2011-06-20 05:37:30 -0400
commit    7683fdc1e88644ee8108a1f33faba80545f0024d (patch)
tree      6a06c51fc2344e1f88e31591f978f3944cfe230e /net
parent    cc47f66e6b9ec7e7d465f74739a6fc9844593894 (diff)
batman-adv: protect the local and the global trans-tables with rcu
The local and the global translation tables are now lock-free and RCU-protected.

Signed-off-by: Antonio Quartulli <ordex@autistici.org>
Acked-by: Simon Wunderlich <siwu@hrz.tu-chemnitz.de>
Signed-off-by: Sven Eckelmann <sven@narfation.org>
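A minimal sketch of the reference-counting scheme the patch applies to the table entries (names are illustrative, not the exact batman-adv structures): a lookup takes a reference under rcu_read_lock() with atomic_inc_not_zero(), the caller later drops it with a *_free_ref() helper, and the memory is reclaimed through kfree_rcu() once the last reference is gone.

/* Illustrative only: hash lookup and release under RCU. */
struct entry {
	atomic_t refcount;
	struct rcu_head rcu;
	struct hlist_node hash_entry;
};

static struct entry *entry_find(struct hlist_head *head)
{
	struct entry *e;
	struct hlist_node *node;

	rcu_read_lock();
	hlist_for_each_entry_rcu(e, node, head, hash_entry) {
		if (!atomic_inc_not_zero(&e->refcount))
			continue; /* entry is already being freed, skip it */
		rcu_read_unlock();
		return e;
	}
	rcu_read_unlock();
	return NULL;
}

static void entry_free_ref(struct entry *e)
{
	if (atomic_dec_and_test(&e->refcount))
		kfree_rcu(e, rcu);
}

New entries are inserted with their refcount set to 2, one reference owned by the hash table and one by the creating caller, which is why tt_local_add() and tt_global_add() below use atomic_set(&...->refcount, 2).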
Diffstat (limited to 'net')
-rw-r--r--  net/batman-adv/main.c                  2
-rw-r--r--  net/batman-adv/routing.c               2
-rw-r--r--  net/batman-adv/translation-table.c   255
-rw-r--r--  net/batman-adv/types.h                 6
-rw-r--r--  net/batman-adv/vis.c                  13
5 files changed, 150 insertions(+), 128 deletions(-)
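The companion change on the write side: instead of one table-wide spinlock (tt_lhash_lock / tt_ghash_lock), writers now take only the per-bucket list lock of batman-adv's hashtable_t while readers walk the lists under RCU. A rough sketch of the purge-style write path, reusing the entry/entry_free_ref names from the sketch above (entry_expired() is a hypothetical predicate standing in for the timeout checks in the diff):

/* Illustrative only: per-bucket locking for writers, RCU unlink + deferred free. */
static void table_purge(struct hashtable_t *hash)
{
	struct entry *e;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* protects write access to one bucket */
	int i;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(e, node, node_tmp, head, hash_entry) {
			if (!entry_expired(e)) /* hypothetical timeout check */
				continue;
			hlist_del_rcu(node);
			entry_free_ref(e); /* drop the table's reference */
		}
		spin_unlock_bh(list_lock);
	}
}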
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 3318ee27fe23..c2b06b71d574 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -84,8 +84,6 @@ int mesh_init(struct net_device *soft_iface)
 
 	spin_lock_init(&bat_priv->forw_bat_list_lock);
 	spin_lock_init(&bat_priv->forw_bcast_list_lock);
-	spin_lock_init(&bat_priv->tt_lhash_lock);
-	spin_lock_init(&bat_priv->tt_ghash_lock);
 	spin_lock_init(&bat_priv->tt_changes_list_lock);
 	spin_lock_init(&bat_priv->tt_req_list_lock);
 	spin_lock_init(&bat_priv->tt_roam_list_lock);
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 05d50ca3c4db..0ce090c9fe86 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -90,9 +90,7 @@ static void update_transtable(struct bat_priv *bat_priv,
 		/* Even if we received the crc into the OGM, we prefer
 		 * to recompute it to spot any possible inconsistency
 		 * in the global table */
-		spin_lock_bh(&bat_priv->tt_ghash_lock);
 		orig_node->tt_crc = tt_global_crc(bat_priv, orig_node);
-		spin_unlock_bh(&bat_priv->tt_ghash_lock);
 		/* Roaming phase is over: tables are in sync again. I can
 		 * unset the flag */
 		orig_node->tt_poss_change = false;
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index d516d8591cfc..5f1fcd573633 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -80,6 +80,9 @@ static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv,
 		if (!compare_eth(tt_local_entry, data))
 			continue;
 
+		if (!atomic_inc_not_zero(&tt_local_entry->refcount))
+			continue;
+
 		tt_local_entry_tmp = tt_local_entry;
 		break;
 	}
@@ -109,6 +112,9 @@ static struct tt_global_entry *tt_global_hash_find(struct bat_priv *bat_priv,
 		if (!compare_eth(tt_global_entry, data))
 			continue;
 
+		if (!atomic_inc_not_zero(&tt_global_entry->refcount))
+			continue;
+
 		tt_global_entry_tmp = tt_global_entry;
 		break;
 	}
@@ -125,8 +131,20 @@ static bool is_out_of_time(unsigned long starting_time, unsigned long timeout)
 	return time_after(jiffies, deadline);
 }
 
+static void tt_local_entry_free_ref(struct tt_local_entry *tt_local_entry)
+{
+	if (atomic_dec_and_test(&tt_local_entry->refcount))
+		kfree_rcu(tt_local_entry, rcu);
+}
+
+static void tt_global_entry_free_ref(struct tt_global_entry *tt_global_entry)
+{
+	if (atomic_dec_and_test(&tt_global_entry->refcount))
+		kfree_rcu(tt_global_entry, rcu);
+}
+
 static void tt_local_event(struct bat_priv *bat_priv, uint8_t op,
-			   const uint8_t *addr, uint8_t roaming)
+			   const uint8_t *addr, bool roaming)
 {
 	struct tt_change_node *tt_change_node;
 
@@ -171,21 +189,19 @@ static int tt_local_init(struct bat_priv *bat_priv)
 void tt_local_add(struct net_device *soft_iface, const uint8_t *addr)
 {
 	struct bat_priv *bat_priv = netdev_priv(soft_iface);
-	struct tt_local_entry *tt_local_entry;
-	struct tt_global_entry *tt_global_entry;
-	uint8_t roam_addr[ETH_ALEN];
+	struct tt_local_entry *tt_local_entry = NULL;
+	struct tt_global_entry *tt_global_entry = NULL;
 
-	spin_lock_bh(&bat_priv->tt_lhash_lock);
 	tt_local_entry = tt_local_hash_find(bat_priv, addr);
 
 	if (tt_local_entry) {
 		tt_local_entry->last_seen = jiffies;
-		goto unlock;
+		goto out;
 	}
 
 	tt_local_entry = kmalloc(sizeof(*tt_local_entry), GFP_ATOMIC);
 	if (!tt_local_entry)
-		goto unlock;
+		goto out;
 
 	tt_local_event(bat_priv, NO_FLAGS, addr, false);
 
@@ -195,6 +211,7 @@ void tt_local_add(struct net_device *soft_iface, const uint8_t *addr)
 
 	memcpy(tt_local_entry->addr, addr, ETH_ALEN);
 	tt_local_entry->last_seen = jiffies;
+	atomic_set(&tt_local_entry->refcount, 2);
 
 	/* the batman interface mac address should never be purged */
 	if (compare_eth(addr, soft_iface->dev_addr))
@@ -204,30 +221,26 @@ void tt_local_add(struct net_device *soft_iface, const uint8_t *addr)
 
 	hash_add(bat_priv->tt_local_hash, compare_ltt, choose_orig,
 		 tt_local_entry, &tt_local_entry->hash_entry);
+
 	atomic_inc(&bat_priv->num_local_tt);
-	spin_unlock_bh(&bat_priv->tt_lhash_lock);
 
 	/* remove address from global hash if present */
-	spin_lock_bh(&bat_priv->tt_ghash_lock);
-
 	tt_global_entry = tt_global_hash_find(bat_priv, addr);
 
 	/* Check whether it is a roaming! */
 	if (tt_global_entry) {
-		memcpy(roam_addr, tt_global_entry->addr, ETH_ALEN);
 		/* This node is probably going to update its tt table */
 		tt_global_entry->orig_node->tt_poss_change = true;
 		_tt_global_del(bat_priv, tt_global_entry,
 			       "local tt received");
-		spin_unlock_bh(&bat_priv->tt_ghash_lock);
 		send_roam_adv(bat_priv, tt_global_entry->addr,
 			      tt_global_entry->orig_node);
-	} else
-		spin_unlock_bh(&bat_priv->tt_ghash_lock);
-
-	return;
-unlock:
-	spin_unlock_bh(&bat_priv->tt_lhash_lock);
+	}
+out:
+	if (tt_local_entry)
+		tt_local_entry_free_ref(tt_local_entry);
+	if (tt_global_entry)
+		tt_global_entry_free_ref(tt_global_entry);
 }
 
 int tt_changes_fill_buffer(struct bat_priv *bat_priv,
@@ -309,8 +322,6 @@ int tt_local_seq_print_text(struct seq_file *seq, void *offset)
309 "announced via TT (TTVN: %u):\n", 322 "announced via TT (TTVN: %u):\n",
310 net_dev->name, (uint8_t)atomic_read(&bat_priv->ttvn)); 323 net_dev->name, (uint8_t)atomic_read(&bat_priv->ttvn));
311 324
312 spin_lock_bh(&bat_priv->tt_lhash_lock);
313
314 buf_size = 1; 325 buf_size = 1;
315 /* Estimate length for: " * xx:xx:xx:xx:xx:xx\n" */ 326 /* Estimate length for: " * xx:xx:xx:xx:xx:xx\n" */
316 for (i = 0; i < hash->size; i++) { 327 for (i = 0; i < hash->size; i++) {
@@ -324,7 +335,6 @@ int tt_local_seq_print_text(struct seq_file *seq, void *offset)
 
 	buff = kmalloc(buf_size, GFP_ATOMIC);
 	if (!buff) {
-		spin_unlock_bh(&bat_priv->tt_lhash_lock);
 		ret = -ENOMEM;
 		goto out;
 	}
@@ -344,8 +354,6 @@ int tt_local_seq_print_text(struct seq_file *seq, void *offset)
 		rcu_read_unlock();
 	}
 
-	spin_unlock_bh(&bat_priv->tt_lhash_lock);
-
 	seq_printf(seq, "%s", buff);
 	kfree(buff);
 out:
@@ -354,15 +362,6 @@ out:
 	return ret;
 }
 
-static void tt_local_entry_free(struct hlist_node *node, void *arg)
-{
-	struct bat_priv *bat_priv = arg;
-	void *data = container_of(node, struct tt_local_entry, hash_entry);
-
-	kfree(data);
-	atomic_dec(&bat_priv->num_local_tt);
-}
-
 static void tt_local_del(struct bat_priv *bat_priv,
 			 struct tt_local_entry *tt_local_entry,
 			 const char *message)
@@ -375,23 +374,24 @@ static void tt_local_del(struct bat_priv *bat_priv,
 	hash_remove(bat_priv->tt_local_hash, compare_ltt, choose_orig,
 		    tt_local_entry->addr);
 
-	tt_local_entry_free(&tt_local_entry->hash_entry, bat_priv);
+	tt_local_entry_free_ref(tt_local_entry);
 }
 
 void tt_local_remove(struct bat_priv *bat_priv, const uint8_t *addr,
 		     const char *message, bool roaming)
 {
-	struct tt_local_entry *tt_local_entry;
+	struct tt_local_entry *tt_local_entry = NULL;
 
-	spin_lock_bh(&bat_priv->tt_lhash_lock);
 	tt_local_entry = tt_local_hash_find(bat_priv, addr);
 
-	if (tt_local_entry) {
-		tt_local_event(bat_priv, TT_CHANGE_DEL, tt_local_entry->addr,
-			       roaming);
-		tt_local_del(bat_priv, tt_local_entry, message);
-	}
-	spin_unlock_bh(&bat_priv->tt_lhash_lock);
+	if (!tt_local_entry)
+		goto out;
+
+	tt_local_event(bat_priv, TT_CHANGE_DEL, tt_local_entry->addr, roaming);
+	tt_local_del(bat_priv, tt_local_entry, message);
+out:
+	if (tt_local_entry)
+		tt_local_entry_free_ref(tt_local_entry);
 }
 
 static void tt_local_purge(struct bat_priv *bat_priv)
@@ -400,40 +400,45 @@ static void tt_local_purge(struct bat_priv *bat_priv)
 	struct tt_local_entry *tt_local_entry;
 	struct hlist_node *node, *node_tmp;
 	struct hlist_head *head;
+	spinlock_t *list_lock; /* protects write access to the hash lists */
 	int i;
 
-	spin_lock_bh(&bat_priv->tt_lhash_lock);
-
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
+		list_lock = &hash->list_locks[i];
 
+		spin_lock_bh(list_lock);
 		hlist_for_each_entry_safe(tt_local_entry, node, node_tmp,
 					  head, hash_entry) {
 			if (tt_local_entry->never_purge)
 				continue;
 
 			if (!is_out_of_time(tt_local_entry->last_seen,
 					    TT_LOCAL_TIMEOUT * 1000))
 				continue;
 
 			tt_local_event(bat_priv, TT_CHANGE_DEL,
 				       tt_local_entry->addr, false);
-			tt_local_del(bat_priv, tt_local_entry,
-				     "address timed out");
+			atomic_dec(&bat_priv->num_local_tt);
+			bat_dbg(DBG_TT, bat_priv, "Deleting local "
+				"tt entry (%pM): timed out\n",
+				tt_local_entry->addr);
+			hlist_del_rcu(node);
+			tt_local_entry_free_ref(tt_local_entry);
 		}
+		spin_unlock_bh(list_lock);
 	}
 
-	spin_unlock_bh(&bat_priv->tt_lhash_lock);
 }
 
 static void tt_local_table_free(struct bat_priv *bat_priv)
 {
 	struct hashtable_t *hash;
-	int i;
 	spinlock_t *list_lock; /* protects write access to the hash lists */
-	struct hlist_head *head;
-	struct hlist_node *node, *node_tmp;
 	struct tt_local_entry *tt_local_entry;
+	struct hlist_node *node, *node_tmp;
+	struct hlist_head *head;
+	int i;
 
 	if (!bat_priv->tt_local_hash)
 		return;
@@ -448,7 +453,7 @@ static void tt_local_table_free(struct bat_priv *bat_priv)
 		hlist_for_each_entry_safe(tt_local_entry, node, node_tmp,
 					  head, hash_entry) {
 			hlist_del_rcu(node);
-			kfree(tt_local_entry);
+			tt_local_entry_free_ref(tt_local_entry);
 		}
 		spin_unlock_bh(list_lock);
 	}
@@ -492,10 +497,9 @@ int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
 		  const unsigned char *tt_addr, uint8_t ttvn, bool roaming)
 {
 	struct tt_global_entry *tt_global_entry;
-	struct tt_local_entry *tt_local_entry;
 	struct orig_node *orig_node_tmp;
+	int ret = 0;
 
-	spin_lock_bh(&bat_priv->tt_ghash_lock);
 	tt_global_entry = tt_global_hash_find(bat_priv, tt_addr);
 
 	if (!tt_global_entry) {
@@ -503,7 +507,8 @@ int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
 			kmalloc(sizeof(*tt_global_entry),
 				GFP_ATOMIC);
 		if (!tt_global_entry)
-			goto unlock;
+			goto out;
+
 		memcpy(tt_global_entry->addr, tt_addr, ETH_ALEN);
 		/* Assign the new orig_node */
 		atomic_inc(&orig_node->refcount);
@@ -511,10 +516,12 @@ int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
511 tt_global_entry->ttvn = ttvn; 516 tt_global_entry->ttvn = ttvn;
512 tt_global_entry->flags = NO_FLAGS; 517 tt_global_entry->flags = NO_FLAGS;
513 tt_global_entry->roam_at = 0; 518 tt_global_entry->roam_at = 0;
514 atomic_inc(&orig_node->tt_size); 519 atomic_set(&tt_global_entry->refcount, 2);
520
515 hash_add(bat_priv->tt_global_hash, compare_gtt, 521 hash_add(bat_priv->tt_global_hash, compare_gtt,
516 choose_orig, tt_global_entry, 522 choose_orig, tt_global_entry,
517 &tt_global_entry->hash_entry); 523 &tt_global_entry->hash_entry);
524 atomic_inc(&orig_node->tt_size);
518 } else { 525 } else {
519 if (tt_global_entry->orig_node != orig_node) { 526 if (tt_global_entry->orig_node != orig_node) {
520 atomic_dec(&tt_global_entry->orig_node->tt_size); 527 atomic_dec(&tt_global_entry->orig_node->tt_size);
@@ -529,25 +536,18 @@ int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
529 tt_global_entry->roam_at = 0; 536 tt_global_entry->roam_at = 0;
530 } 537 }
531 538
532 spin_unlock_bh(&bat_priv->tt_ghash_lock);
533
534 bat_dbg(DBG_TT, bat_priv, 539 bat_dbg(DBG_TT, bat_priv,
535 "Creating new global tt entry: %pM (via %pM)\n", 540 "Creating new global tt entry: %pM (via %pM)\n",
536 tt_global_entry->addr, orig_node->orig); 541 tt_global_entry->addr, orig_node->orig);
537 542
538 /* remove address from local hash if present */ 543 /* remove address from local hash if present */
539 spin_lock_bh(&bat_priv->tt_lhash_lock); 544 tt_local_remove(bat_priv, tt_global_entry->addr,
540 tt_local_entry = tt_local_hash_find(bat_priv, tt_addr); 545 "global tt received", roaming);
541 546 ret = 1;
542 if (tt_local_entry) 547out:
543 tt_local_remove(bat_priv, tt_global_entry->addr, 548 if (tt_global_entry)
544 "global tt received", roaming); 549 tt_global_entry_free_ref(tt_global_entry);
545 550 return ret;
546 spin_unlock_bh(&bat_priv->tt_lhash_lock);
547 return 1;
548unlock:
549 spin_unlock_bh(&bat_priv->tt_ghash_lock);
550 return 0;
551} 551}
552 552
553int tt_global_seq_print_text(struct seq_file *seq, void *offset) 553int tt_global_seq_print_text(struct seq_file *seq, void *offset)
@@ -584,8 +584,6 @@ int tt_global_seq_print_text(struct seq_file *seq, void *offset)
 	seq_printf(seq, " %-13s %s %-15s %s\n",
 		   "Client", "(TTVN)", "Originator", "(Curr TTVN)");
 
-	spin_lock_bh(&bat_priv->tt_ghash_lock);
-
 	buf_size = 1;
 	/* Estimate length for: " * xx:xx:xx:xx:xx:xx (ttvn) via
 	 * xx:xx:xx:xx:xx:xx (cur_ttvn)\n"*/
@@ -600,10 +598,10 @@ int tt_global_seq_print_text(struct seq_file *seq, void *offset)
 
 	buff = kmalloc(buf_size, GFP_ATOMIC);
 	if (!buff) {
-		spin_unlock_bh(&bat_priv->tt_ghash_lock);
 		ret = -ENOMEM;
 		goto out;
 	}
+
 	buff[0] = '\0';
 	pos = 0;
 
@@ -625,8 +623,6 @@ int tt_global_seq_print_text(struct seq_file *seq, void *offset)
 		rcu_read_unlock();
 	}
 
-	spin_unlock_bh(&bat_priv->tt_ghash_lock);
-
 	seq_printf(seq, "%s", buff);
 	kfree(buff);
 out:
@@ -640,7 +636,7 @@ static void _tt_global_del(struct bat_priv *bat_priv,
 			   const char *message)
 {
 	if (!tt_global_entry)
-		return;
+		goto out;
 
 	bat_dbg(DBG_TT, bat_priv,
 		"Deleting global tt entry %pM (via %pM): %s\n",
@@ -648,31 +644,35 @@ static void _tt_global_del(struct bat_priv *bat_priv,
 		message);
 
 	atomic_dec(&tt_global_entry->orig_node->tt_size);
+
 	hash_remove(bat_priv->tt_global_hash, compare_gtt, choose_orig,
 		    tt_global_entry->addr);
-	kfree(tt_global_entry);
+out:
+	if (tt_global_entry)
+		tt_global_entry_free_ref(tt_global_entry);
 }
 
 void tt_global_del(struct bat_priv *bat_priv,
 		   struct orig_node *orig_node, const unsigned char *addr,
 		   const char *message, bool roaming)
 {
-	struct tt_global_entry *tt_global_entry;
+	struct tt_global_entry *tt_global_entry = NULL;
 
-	spin_lock_bh(&bat_priv->tt_ghash_lock);
 	tt_global_entry = tt_global_hash_find(bat_priv, addr);
+	if (!tt_global_entry)
+		goto out;
 
-	if (tt_global_entry && tt_global_entry->orig_node == orig_node) {
+	if (tt_global_entry->orig_node == orig_node) {
 		if (roaming) {
 			tt_global_entry->flags |= TT_CLIENT_ROAM;
 			tt_global_entry->roam_at = jiffies;
 			goto out;
 		}
-		atomic_dec(&orig_node->tt_size);
 		_tt_global_del(bat_priv, tt_global_entry, message);
 	}
 out:
-	spin_unlock_bh(&bat_priv->tt_ghash_lock);
+	if (tt_global_entry)
+		tt_global_entry_free_ref(tt_global_entry);
 }
 
 void tt_global_del_orig(struct bat_priv *bat_priv,
@@ -683,30 +683,28 @@ void tt_global_del_orig(struct bat_priv *bat_priv,
 	struct hashtable_t *hash = bat_priv->tt_global_hash;
 	struct hlist_node *node, *safe;
 	struct hlist_head *head;
+	spinlock_t *list_lock; /* protects write access to the hash lists */
 
-	if (!bat_priv->tt_global_hash)
-		return;
-
-	spin_lock_bh(&bat_priv->tt_ghash_lock);
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
+		list_lock = &hash->list_locks[i];
 
+		spin_lock_bh(list_lock);
 		hlist_for_each_entry_safe(tt_global_entry, node, safe,
 					  head, hash_entry) {
-			if (tt_global_entry->orig_node == orig_node)
-				_tt_global_del(bat_priv, tt_global_entry,
-					       message);
+			if (tt_global_entry->orig_node == orig_node) {
+				bat_dbg(DBG_TT, bat_priv,
+					"Deleting global tt entry %pM "
+					"(via %pM): originator time out\n",
+					tt_global_entry->addr,
+					tt_global_entry->orig_node->orig);
+				hlist_del_rcu(node);
+				tt_global_entry_free_ref(tt_global_entry);
+			}
 		}
+		spin_unlock_bh(list_lock);
 	}
 	atomic_set(&orig_node->tt_size, 0);
-
-	spin_unlock_bh(&bat_priv->tt_ghash_lock);
-}
-
-static void tt_global_entry_free(struct hlist_node *node, void *arg)
-{
-	void *data = container_of(node, struct tt_global_entry, hash_entry);
-	kfree(data);
 }
 
 static void tt_global_roam_purge(struct bat_priv *bat_priv)
@@ -715,13 +713,14 @@ static void tt_global_roam_purge(struct bat_priv *bat_priv)
 	struct tt_global_entry *tt_global_entry;
 	struct hlist_node *node, *node_tmp;
 	struct hlist_head *head;
+	spinlock_t *list_lock; /* protects write access to the hash lists */
 	int i;
 
-	spin_lock_bh(&bat_priv->tt_ghash_lock);
-
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
+		list_lock = &hash->list_locks[i];
 
+		spin_lock_bh(list_lock);
 		hlist_for_each_entry_safe(tt_global_entry, node, node_tmp,
 					  head, hash_entry) {
 			if (!(tt_global_entry->flags & TT_CLIENT_ROAM))
@@ -730,20 +729,47 @@ static void tt_global_roam_purge(struct bat_priv *bat_priv)
 					 TT_CLIENT_ROAM_TIMEOUT * 1000))
 				continue;
 
-			_tt_global_del(bat_priv, tt_global_entry,
-				       "Roaming timeout");
+			bat_dbg(DBG_TT, bat_priv, "Deleting global "
+				"tt entry (%pM): Roaming timeout\n",
+				tt_global_entry->addr);
+			atomic_dec(&tt_global_entry->orig_node->tt_size);
+			hlist_del_rcu(node);
+			tt_global_entry_free_ref(tt_global_entry);
 		}
+		spin_unlock_bh(list_lock);
 	}
 
-	spin_unlock_bh(&bat_priv->tt_ghash_lock);
 }
 
 static void tt_global_table_free(struct bat_priv *bat_priv)
 {
+	struct hashtable_t *hash;
+	spinlock_t *list_lock; /* protects write access to the hash lists */
+	struct tt_global_entry *tt_global_entry;
+	struct hlist_node *node, *node_tmp;
+	struct hlist_head *head;
+	int i;
+
 	if (!bat_priv->tt_global_hash)
 		return;
 
-	hash_delete(bat_priv->tt_global_hash, tt_global_entry_free, NULL);
+	hash = bat_priv->tt_global_hash;
+
+	for (i = 0; i < hash->size; i++) {
+		head = &hash->table[i];
+		list_lock = &hash->list_locks[i];
+
+		spin_lock_bh(list_lock);
+		hlist_for_each_entry_safe(tt_global_entry, node, node_tmp,
+					  head, hash_entry) {
+			hlist_del_rcu(node);
+			tt_global_entry_free_ref(tt_global_entry);
+		}
+		spin_unlock_bh(list_lock);
+	}
+
+	hash_destroy(hash);
+
 	bat_priv->tt_global_hash = NULL;
 }
 
@@ -753,19 +779,19 @@ struct orig_node *transtable_search(struct bat_priv *bat_priv,
 	struct tt_global_entry *tt_global_entry;
 	struct orig_node *orig_node = NULL;
 
-	spin_lock_bh(&bat_priv->tt_ghash_lock);
 	tt_global_entry = tt_global_hash_find(bat_priv, addr);
 
 	if (!tt_global_entry)
 		goto out;
 
 	if (!atomic_inc_not_zero(&tt_global_entry->orig_node->refcount))
-		goto out;
+		goto free_tt;
 
 	orig_node = tt_global_entry->orig_node;
 
+free_tt:
+	tt_global_entry_free_ref(tt_global_entry);
 out:
-	spin_unlock_bh(&bat_priv->tt_ghash_lock);
 	return orig_node;
 }
 
@@ -828,7 +854,6 @@ uint16_t tt_local_crc(struct bat_priv *bat_priv)
828 tt_local_entry->addr[j]); 854 tt_local_entry->addr[j]);
829 total ^= total_one; 855 total ^= total_one;
830 } 856 }
831
832 rcu_read_unlock(); 857 rcu_read_unlock();
833 } 858 }
834 859
@@ -1371,15 +1396,17 @@ void tt_update_changes(struct bat_priv *bat_priv, struct orig_node *orig_node,
 
 bool is_my_client(struct bat_priv *bat_priv, const uint8_t *addr)
 {
-	struct tt_local_entry *tt_local_entry;
+	struct tt_local_entry *tt_local_entry = NULL;
+	bool ret = false;
 
-	spin_lock_bh(&bat_priv->tt_lhash_lock);
 	tt_local_entry = tt_local_hash_find(bat_priv, addr);
-	spin_unlock_bh(&bat_priv->tt_lhash_lock);
-
+	if (!tt_local_entry)
+		goto out;
+	ret = true;
+out:
 	if (tt_local_entry)
-		return true;
-	return false;
+		tt_local_entry_free_ref(tt_local_entry);
+	return ret;
 }
 
 void handle_tt_response(struct bat_priv *bat_priv,
@@ -1416,9 +1443,7 @@ void handle_tt_response(struct bat_priv *bat_priv,
 	spin_unlock_bh(&bat_priv->tt_req_list_lock);
 
 	/* Recalculate the CRC for this orig_node and store it */
-	spin_lock_bh(&bat_priv->tt_ghash_lock);
 	orig_node->tt_crc = tt_global_crc(bat_priv, orig_node);
-	spin_unlock_bh(&bat_priv->tt_ghash_lock);
 	/* Roaming phase is over: tables are in sync again. I can
 	 * unset the flag */
 	orig_node->tt_poss_change = false;
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 9c84fa9f0968..11e8569a8ca7 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -184,8 +184,6 @@ struct bat_priv {
 	spinlock_t forw_bat_list_lock; /* protects forw_bat_list */
 	spinlock_t forw_bcast_list_lock; /* protects */
 	spinlock_t tt_changes_list_lock; /* protects tt_changes */
-	spinlock_t tt_lhash_lock; /* protects tt_local_hash */
-	spinlock_t tt_ghash_lock; /* protects tt_global_hash */
 	spinlock_t tt_req_list_lock; /* protects tt_req_list */
 	spinlock_t tt_roam_list_lock; /* protects tt_roam_list */
 	spinlock_t gw_list_lock; /* protects gw_list and curr_gw */
@@ -226,6 +224,8 @@ struct tt_local_entry {
 	uint8_t addr[ETH_ALEN];
 	unsigned long last_seen;
 	char never_purge;
+	atomic_t refcount;
+	struct rcu_head rcu;
 	struct hlist_node hash_entry;
 };
 
@@ -235,6 +235,8 @@ struct tt_global_entry {
 	uint8_t ttvn;
 	uint8_t flags; /* only TT_GLOBAL_ROAM is used */
 	unsigned long roam_at; /* time at which TT_GLOBAL_ROAM was set */
+	atomic_t refcount;
+	struct rcu_head rcu;
 	struct hlist_node hash_entry; /* entry in the global table */
 };
 
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c
index 355c6e590b0c..8a1b98589d76 100644
--- a/net/batman-adv/vis.c
+++ b/net/batman-adv/vis.c
@@ -665,11 +665,12 @@ next:
 
 	hash = bat_priv->tt_local_hash;
 
-	spin_lock_bh(&bat_priv->tt_lhash_lock);
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
 
-		hlist_for_each_entry(tt_local_entry, node, head, hash_entry) {
+		rcu_read_lock();
+		hlist_for_each_entry_rcu(tt_local_entry, node, head,
+					 hash_entry) {
 			entry = (struct vis_info_entry *)
 				skb_put(info->skb_packet,
 					sizeof(*entry));
@@ -678,14 +679,12 @@ next:
 			entry->quality = 0; /* 0 means TT */
 			packet->entries++;
 
-			if (vis_packet_full(info)) {
-				spin_unlock_bh(&bat_priv->tt_lhash_lock);
-				return 0;
-			}
+			if (vis_packet_full(info))
+				goto unlock;
 		}
+		rcu_read_unlock();
 	}
 
-	spin_unlock_bh(&bat_priv->tt_lhash_lock);
 	return 0;
 
 unlock: