about summary refs log tree commit diff stats
path: root/net/batman-adv/translation-table.c
diff options
context:
space:
mode:
Diffstat (limited to 'net/batman-adv/translation-table.c')
-rw-r--r--  net/batman-adv/translation-table.c  205
1 file changed, 140 insertions, 65 deletions
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 7fb6726ccbdd..8d15b48d1692 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -30,12 +30,85 @@ static void _hna_global_del_orig(struct bat_priv *bat_priv,
30 struct hna_global_entry *hna_global_entry, 30 struct hna_global_entry *hna_global_entry,
31 char *message); 31 char *message);
32 32
33/* returns 1 if they are the same mac addr */
34static int compare_lhna(struct hlist_node *node, void *data2)
35{
36 void *data1 = container_of(node, struct hna_local_entry, hash_entry);
37
38 return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
39}
40
41/* returns 1 if they are the same mac addr */
42static int compare_ghna(struct hlist_node *node, void *data2)
43{
44 void *data1 = container_of(node, struct hna_global_entry, hash_entry);
45
46 return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
47}
48
33static void hna_local_start_timer(struct bat_priv *bat_priv) 49static void hna_local_start_timer(struct bat_priv *bat_priv)
34{ 50{
35 INIT_DELAYED_WORK(&bat_priv->hna_work, hna_local_purge); 51 INIT_DELAYED_WORK(&bat_priv->hna_work, hna_local_purge);
36 queue_delayed_work(bat_event_workqueue, &bat_priv->hna_work, 10 * HZ); 52 queue_delayed_work(bat_event_workqueue, &bat_priv->hna_work, 10 * HZ);
37} 53}
38 54
55static struct hna_local_entry *hna_local_hash_find(struct bat_priv *bat_priv,
56 void *data)
57{
58 struct hashtable_t *hash = bat_priv->hna_local_hash;
59 struct hlist_head *head;
60 struct hlist_node *node;
61 struct hna_local_entry *hna_local_entry, *hna_local_entry_tmp = NULL;
62 int index;
63
64 if (!hash)
65 return NULL;
66
67 index = choose_orig(data, hash->size);
68 head = &hash->table[index];
69
70 rcu_read_lock();
71 hlist_for_each_entry_rcu(hna_local_entry, node, head, hash_entry) {
72 if (!compare_eth(hna_local_entry, data))
73 continue;
74
75 hna_local_entry_tmp = hna_local_entry;
76 break;
77 }
78 rcu_read_unlock();
79
80 return hna_local_entry_tmp;
81}
82
83static struct hna_global_entry *hna_global_hash_find(struct bat_priv *bat_priv,
84 void *data)
85{
86 struct hashtable_t *hash = bat_priv->hna_global_hash;
87 struct hlist_head *head;
88 struct hlist_node *node;
89 struct hna_global_entry *hna_global_entry;
90 struct hna_global_entry *hna_global_entry_tmp = NULL;
91 int index;
92
93 if (!hash)
94 return NULL;
95
96 index = choose_orig(data, hash->size);
97 head = &hash->table[index];
98
99 rcu_read_lock();
100 hlist_for_each_entry_rcu(hna_global_entry, node, head, hash_entry) {
101 if (!compare_eth(hna_global_entry, data))
102 continue;
103
104 hna_global_entry_tmp = hna_global_entry;
105 break;
106 }
107 rcu_read_unlock();
108
109 return hna_global_entry_tmp;
110}
111
39int hna_local_init(struct bat_priv *bat_priv) 112int hna_local_init(struct bat_priv *bat_priv)
40{ 113{
41 if (bat_priv->hna_local_hash) 114 if (bat_priv->hna_local_hash)
@@ -60,10 +133,7 @@ void hna_local_add(struct net_device *soft_iface, uint8_t *addr)
60 int required_bytes; 133 int required_bytes;
61 134
62 spin_lock_bh(&bat_priv->hna_lhash_lock); 135 spin_lock_bh(&bat_priv->hna_lhash_lock);
63 hna_local_entry = 136 hna_local_entry = hna_local_hash_find(bat_priv, addr);
64 ((struct hna_local_entry *)hash_find(bat_priv->hna_local_hash,
65 compare_orig, choose_orig,
66 addr));
67 spin_unlock_bh(&bat_priv->hna_lhash_lock); 137 spin_unlock_bh(&bat_priv->hna_lhash_lock);
68 138
69 if (hna_local_entry) { 139 if (hna_local_entry) {
@@ -99,15 +169,15 @@ void hna_local_add(struct net_device *soft_iface, uint8_t *addr)
99 hna_local_entry->last_seen = jiffies; 169 hna_local_entry->last_seen = jiffies;
100 170
101 /* the batman interface mac address should never be purged */ 171 /* the batman interface mac address should never be purged */
102 if (compare_orig(addr, soft_iface->dev_addr)) 172 if (compare_eth(addr, soft_iface->dev_addr))
103 hna_local_entry->never_purge = 1; 173 hna_local_entry->never_purge = 1;
104 else 174 else
105 hna_local_entry->never_purge = 0; 175 hna_local_entry->never_purge = 0;
106 176
107 spin_lock_bh(&bat_priv->hna_lhash_lock); 177 spin_lock_bh(&bat_priv->hna_lhash_lock);
108 178
109 hash_add(bat_priv->hna_local_hash, compare_orig, choose_orig, 179 hash_add(bat_priv->hna_local_hash, compare_lhna, choose_orig,
110 hna_local_entry); 180 hna_local_entry, &hna_local_entry->hash_entry);
111 bat_priv->num_local_hna++; 181 bat_priv->num_local_hna++;
112 atomic_set(&bat_priv->hna_local_changed, 1); 182 atomic_set(&bat_priv->hna_local_changed, 1);
113 183
@@ -116,9 +186,7 @@ void hna_local_add(struct net_device *soft_iface, uint8_t *addr)
116 /* remove address from global hash if present */ 186 /* remove address from global hash if present */
117 spin_lock_bh(&bat_priv->hna_ghash_lock); 187 spin_lock_bh(&bat_priv->hna_ghash_lock);
118 188
119 hna_global_entry = ((struct hna_global_entry *) 189 hna_global_entry = hna_global_hash_find(bat_priv, addr);
120 hash_find(bat_priv->hna_global_hash,
121 compare_orig, choose_orig, addr));
122 190
123 if (hna_global_entry) 191 if (hna_global_entry)
124 _hna_global_del_orig(bat_priv, hna_global_entry, 192 _hna_global_del_orig(bat_priv, hna_global_entry,
@@ -132,28 +200,27 @@ int hna_local_fill_buffer(struct bat_priv *bat_priv,
132{ 200{
133 struct hashtable_t *hash = bat_priv->hna_local_hash; 201 struct hashtable_t *hash = bat_priv->hna_local_hash;
134 struct hna_local_entry *hna_local_entry; 202 struct hna_local_entry *hna_local_entry;
135 struct element_t *bucket; 203 struct hlist_node *node;
136 int i;
137 struct hlist_node *walk;
138 struct hlist_head *head; 204 struct hlist_head *head;
139 int count = 0; 205 int i, count = 0;
140 206
141 spin_lock_bh(&bat_priv->hna_lhash_lock); 207 spin_lock_bh(&bat_priv->hna_lhash_lock);
142 208
143 for (i = 0; i < hash->size; i++) { 209 for (i = 0; i < hash->size; i++) {
144 head = &hash->table[i]; 210 head = &hash->table[i];
145 211
146 hlist_for_each_entry(bucket, walk, head, hlist) { 212 rcu_read_lock();
147 213 hlist_for_each_entry_rcu(hna_local_entry, node,
214 head, hash_entry) {
148 if (buff_len < (count + 1) * ETH_ALEN) 215 if (buff_len < (count + 1) * ETH_ALEN)
149 break; 216 break;
150 217
151 hna_local_entry = bucket->data;
152 memcpy(buff + (count * ETH_ALEN), hna_local_entry->addr, 218 memcpy(buff + (count * ETH_ALEN), hna_local_entry->addr,
153 ETH_ALEN); 219 ETH_ALEN);
154 220
155 count++; 221 count++;
156 } 222 }
223 rcu_read_unlock();
157 } 224 }
158 225
159 /* if we did not get all new local hnas see you next time ;-) */ 226 /* if we did not get all new local hnas see you next time ;-) */
@@ -170,12 +237,11 @@ int hna_local_seq_print_text(struct seq_file *seq, void *offset)
170 struct bat_priv *bat_priv = netdev_priv(net_dev); 237 struct bat_priv *bat_priv = netdev_priv(net_dev);
171 struct hashtable_t *hash = bat_priv->hna_local_hash; 238 struct hashtable_t *hash = bat_priv->hna_local_hash;
172 struct hna_local_entry *hna_local_entry; 239 struct hna_local_entry *hna_local_entry;
173 int i; 240 struct hlist_node *node;
174 struct hlist_node *walk;
175 struct hlist_head *head; 241 struct hlist_head *head;
176 struct element_t *bucket;
177 size_t buf_size, pos; 242 size_t buf_size, pos;
178 char *buff; 243 char *buff;
244 int i;
179 245
180 if (!bat_priv->primary_if) { 246 if (!bat_priv->primary_if) {
181 return seq_printf(seq, "BATMAN mesh %s disabled - " 247 return seq_printf(seq, "BATMAN mesh %s disabled - "
@@ -194,8 +260,10 @@ int hna_local_seq_print_text(struct seq_file *seq, void *offset)
194 for (i = 0; i < hash->size; i++) { 260 for (i = 0; i < hash->size; i++) {
195 head = &hash->table[i]; 261 head = &hash->table[i];
196 262
197 hlist_for_each(walk, head) 263 rcu_read_lock();
264 __hlist_for_each_rcu(node, head)
198 buf_size += 21; 265 buf_size += 21;
266 rcu_read_unlock();
199 } 267 }
200 268
201 buff = kmalloc(buf_size, GFP_ATOMIC); 269 buff = kmalloc(buf_size, GFP_ATOMIC);
@@ -203,18 +271,20 @@ int hna_local_seq_print_text(struct seq_file *seq, void *offset)
203 spin_unlock_bh(&bat_priv->hna_lhash_lock); 271 spin_unlock_bh(&bat_priv->hna_lhash_lock);
204 return -ENOMEM; 272 return -ENOMEM;
205 } 273 }
274
206 buff[0] = '\0'; 275 buff[0] = '\0';
207 pos = 0; 276 pos = 0;
208 277
209 for (i = 0; i < hash->size; i++) { 278 for (i = 0; i < hash->size; i++) {
210 head = &hash->table[i]; 279 head = &hash->table[i];
211 280
212 hlist_for_each_entry(bucket, walk, head, hlist) { 281 rcu_read_lock();
213 hna_local_entry = bucket->data; 282 hlist_for_each_entry_rcu(hna_local_entry, node,
214 283 head, hash_entry) {
215 pos += snprintf(buff + pos, 22, " * %pM\n", 284 pos += snprintf(buff + pos, 22, " * %pM\n",
216 hna_local_entry->addr); 285 hna_local_entry->addr);
217 } 286 }
287 rcu_read_unlock();
218 } 288 }
219 289
220 spin_unlock_bh(&bat_priv->hna_lhash_lock); 290 spin_unlock_bh(&bat_priv->hna_lhash_lock);
@@ -224,9 +294,10 @@ int hna_local_seq_print_text(struct seq_file *seq, void *offset)
224 return 0; 294 return 0;
225} 295}
226 296
227static void _hna_local_del(void *data, void *arg) 297static void _hna_local_del(struct hlist_node *node, void *arg)
228{ 298{
229 struct bat_priv *bat_priv = (struct bat_priv *)arg; 299 struct bat_priv *bat_priv = (struct bat_priv *)arg;
300 void *data = container_of(node, struct hna_local_entry, hash_entry);
230 301
231 kfree(data); 302 kfree(data);
232 bat_priv->num_local_hna--; 303 bat_priv->num_local_hna--;
@@ -240,9 +311,9 @@ static void hna_local_del(struct bat_priv *bat_priv,
240 bat_dbg(DBG_ROUTES, bat_priv, "Deleting local hna entry (%pM): %s\n", 311 bat_dbg(DBG_ROUTES, bat_priv, "Deleting local hna entry (%pM): %s\n",
241 hna_local_entry->addr, message); 312 hna_local_entry->addr, message);
242 313
243 hash_remove(bat_priv->hna_local_hash, compare_orig, choose_orig, 314 hash_remove(bat_priv->hna_local_hash, compare_lhna, choose_orig,
244 hna_local_entry->addr); 315 hna_local_entry->addr);
245 _hna_local_del(hna_local_entry, bat_priv); 316 _hna_local_del(&hna_local_entry->hash_entry, bat_priv);
246} 317}
247 318
248void hna_local_remove(struct bat_priv *bat_priv, 319void hna_local_remove(struct bat_priv *bat_priv,
@@ -252,9 +323,7 @@ void hna_local_remove(struct bat_priv *bat_priv,
252 323
253 spin_lock_bh(&bat_priv->hna_lhash_lock); 324 spin_lock_bh(&bat_priv->hna_lhash_lock);
254 325
255 hna_local_entry = (struct hna_local_entry *) 326 hna_local_entry = hna_local_hash_find(bat_priv, addr);
256 hash_find(bat_priv->hna_local_hash, compare_orig, choose_orig,
257 addr);
258 327
259 if (hna_local_entry) 328 if (hna_local_entry)
260 hna_local_del(bat_priv, hna_local_entry, message); 329 hna_local_del(bat_priv, hna_local_entry, message);
@@ -270,27 +339,29 @@ static void hna_local_purge(struct work_struct *work)
270 container_of(delayed_work, struct bat_priv, hna_work); 339 container_of(delayed_work, struct bat_priv, hna_work);
271 struct hashtable_t *hash = bat_priv->hna_local_hash; 340 struct hashtable_t *hash = bat_priv->hna_local_hash;
272 struct hna_local_entry *hna_local_entry; 341 struct hna_local_entry *hna_local_entry;
273 int i; 342 struct hlist_node *node, *node_tmp;
274 struct hlist_node *walk, *safe;
275 struct hlist_head *head; 343 struct hlist_head *head;
276 struct element_t *bucket;
277 unsigned long timeout; 344 unsigned long timeout;
345 int i;
278 346
279 spin_lock_bh(&bat_priv->hna_lhash_lock); 347 spin_lock_bh(&bat_priv->hna_lhash_lock);
280 348
281 for (i = 0; i < hash->size; i++) { 349 for (i = 0; i < hash->size; i++) {
282 head = &hash->table[i]; 350 head = &hash->table[i];
283 351
284 hlist_for_each_entry_safe(bucket, walk, safe, head, hlist) { 352 hlist_for_each_entry_safe(hna_local_entry, node, node_tmp,
285 hna_local_entry = bucket->data; 353 head, hash_entry) {
354 if (hna_local_entry->never_purge)
355 continue;
286 356
287 timeout = hna_local_entry->last_seen; 357 timeout = hna_local_entry->last_seen;
288 timeout += LOCAL_HNA_TIMEOUT * HZ; 358 timeout += LOCAL_HNA_TIMEOUT * HZ;
289 359
290 if ((!hna_local_entry->never_purge) && 360 if (time_before(jiffies, timeout))
291 time_after(jiffies, timeout)) 361 continue;
292 hna_local_del(bat_priv, hna_local_entry, 362
293 "address timed out"); 363 hna_local_del(bat_priv, hna_local_entry,
364 "address timed out");
294 } 365 }
295 } 366 }
296 367
@@ -334,9 +405,7 @@ void hna_global_add_orig(struct bat_priv *bat_priv,
334 spin_lock_bh(&bat_priv->hna_ghash_lock); 405 spin_lock_bh(&bat_priv->hna_ghash_lock);
335 406
336 hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN); 407 hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN);
337 hna_global_entry = (struct hna_global_entry *) 408 hna_global_entry = hna_global_hash_find(bat_priv, hna_ptr);
338 hash_find(bat_priv->hna_global_hash, compare_orig,
339 choose_orig, hna_ptr);
340 409
341 if (!hna_global_entry) { 410 if (!hna_global_entry) {
342 spin_unlock_bh(&bat_priv->hna_ghash_lock); 411 spin_unlock_bh(&bat_priv->hna_ghash_lock);
@@ -356,8 +425,9 @@ void hna_global_add_orig(struct bat_priv *bat_priv,
356 hna_global_entry->addr, orig_node->orig); 425 hna_global_entry->addr, orig_node->orig);
357 426
358 spin_lock_bh(&bat_priv->hna_ghash_lock); 427 spin_lock_bh(&bat_priv->hna_ghash_lock);
359 hash_add(bat_priv->hna_global_hash, compare_orig, 428 hash_add(bat_priv->hna_global_hash, compare_ghna,
360 choose_orig, hna_global_entry); 429 choose_orig, hna_global_entry,
430 &hna_global_entry->hash_entry);
361 431
362 } 432 }
363 433
@@ -368,9 +438,7 @@ void hna_global_add_orig(struct bat_priv *bat_priv,
368 spin_lock_bh(&bat_priv->hna_lhash_lock); 438 spin_lock_bh(&bat_priv->hna_lhash_lock);
369 439
370 hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN); 440 hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN);
371 hna_local_entry = (struct hna_local_entry *) 441 hna_local_entry = hna_local_hash_find(bat_priv, hna_ptr);
372 hash_find(bat_priv->hna_local_hash, compare_orig,
373 choose_orig, hna_ptr);
374 442
375 if (hna_local_entry) 443 if (hna_local_entry)
376 hna_local_del(bat_priv, hna_local_entry, 444 hna_local_del(bat_priv, hna_local_entry,
@@ -400,12 +468,11 @@ int hna_global_seq_print_text(struct seq_file *seq, void *offset)
400 struct bat_priv *bat_priv = netdev_priv(net_dev); 468 struct bat_priv *bat_priv = netdev_priv(net_dev);
401 struct hashtable_t *hash = bat_priv->hna_global_hash; 469 struct hashtable_t *hash = bat_priv->hna_global_hash;
402 struct hna_global_entry *hna_global_entry; 470 struct hna_global_entry *hna_global_entry;
403 int i; 471 struct hlist_node *node;
404 struct hlist_node *walk;
405 struct hlist_head *head; 472 struct hlist_head *head;
406 struct element_t *bucket;
407 size_t buf_size, pos; 473 size_t buf_size, pos;
408 char *buff; 474 char *buff;
475 int i;
409 476
410 if (!bat_priv->primary_if) { 477 if (!bat_priv->primary_if) {
411 return seq_printf(seq, "BATMAN mesh %s disabled - " 478 return seq_printf(seq, "BATMAN mesh %s disabled - "
@@ -423,8 +490,10 @@ int hna_global_seq_print_text(struct seq_file *seq, void *offset)
423 for (i = 0; i < hash->size; i++) { 490 for (i = 0; i < hash->size; i++) {
424 head = &hash->table[i]; 491 head = &hash->table[i];
425 492
426 hlist_for_each(walk, head) 493 rcu_read_lock();
494 __hlist_for_each_rcu(node, head)
427 buf_size += 43; 495 buf_size += 43;
496 rcu_read_unlock();
428 } 497 }
429 498
430 buff = kmalloc(buf_size, GFP_ATOMIC); 499 buff = kmalloc(buf_size, GFP_ATOMIC);
@@ -438,14 +507,15 @@ int hna_global_seq_print_text(struct seq_file *seq, void *offset)
438 for (i = 0; i < hash->size; i++) { 507 for (i = 0; i < hash->size; i++) {
439 head = &hash->table[i]; 508 head = &hash->table[i];
440 509
441 hlist_for_each_entry(bucket, walk, head, hlist) { 510 rcu_read_lock();
442 hna_global_entry = bucket->data; 511 hlist_for_each_entry_rcu(hna_global_entry, node,
443 512 head, hash_entry) {
444 pos += snprintf(buff + pos, 44, 513 pos += snprintf(buff + pos, 44,
445 " * %pM via %pM\n", 514 " * %pM via %pM\n",
446 hna_global_entry->addr, 515 hna_global_entry->addr,
447 hna_global_entry->orig_node->orig); 516 hna_global_entry->orig_node->orig);
448 } 517 }
518 rcu_read_unlock();
449 } 519 }
450 520
451 spin_unlock_bh(&bat_priv->hna_ghash_lock); 521 spin_unlock_bh(&bat_priv->hna_ghash_lock);
@@ -464,7 +534,7 @@ static void _hna_global_del_orig(struct bat_priv *bat_priv,
464 hna_global_entry->addr, hna_global_entry->orig_node->orig, 534 hna_global_entry->addr, hna_global_entry->orig_node->orig,
465 message); 535 message);
466 536
467 hash_remove(bat_priv->hna_global_hash, compare_orig, choose_orig, 537 hash_remove(bat_priv->hna_global_hash, compare_ghna, choose_orig,
468 hna_global_entry->addr); 538 hna_global_entry->addr);
469 kfree(hna_global_entry); 539 kfree(hna_global_entry);
470} 540}
@@ -483,9 +553,7 @@ void hna_global_del_orig(struct bat_priv *bat_priv,
483 553
484 while ((hna_buff_count + 1) * ETH_ALEN <= orig_node->hna_buff_len) { 554 while ((hna_buff_count + 1) * ETH_ALEN <= orig_node->hna_buff_len) {
485 hna_ptr = orig_node->hna_buff + (hna_buff_count * ETH_ALEN); 555 hna_ptr = orig_node->hna_buff + (hna_buff_count * ETH_ALEN);
486 hna_global_entry = (struct hna_global_entry *) 556 hna_global_entry = hna_global_hash_find(bat_priv, hna_ptr);
487 hash_find(bat_priv->hna_global_hash, compare_orig,
488 choose_orig, hna_ptr);
489 557
490 if ((hna_global_entry) && 558 if ((hna_global_entry) &&
491 (hna_global_entry->orig_node == orig_node)) 559 (hna_global_entry->orig_node == orig_node))
@@ -502,8 +570,10 @@ void hna_global_del_orig(struct bat_priv *bat_priv,
502 orig_node->hna_buff = NULL; 570 orig_node->hna_buff = NULL;
503} 571}
504 572
505static void hna_global_del(void *data, void *arg) 573static void hna_global_del(struct hlist_node *node, void *arg)
506{ 574{
575 void *data = container_of(node, struct hna_global_entry, hash_entry);
576
507 kfree(data); 577 kfree(data);
508} 578}
509 579
@@ -519,15 +589,20 @@ void hna_global_free(struct bat_priv *bat_priv)
519struct orig_node *transtable_search(struct bat_priv *bat_priv, uint8_t *addr) 589struct orig_node *transtable_search(struct bat_priv *bat_priv, uint8_t *addr)
520{ 590{
521 struct hna_global_entry *hna_global_entry; 591 struct hna_global_entry *hna_global_entry;
592 struct orig_node *orig_node = NULL;
522 593
523 spin_lock_bh(&bat_priv->hna_ghash_lock); 594 spin_lock_bh(&bat_priv->hna_ghash_lock);
524 hna_global_entry = (struct hna_global_entry *) 595 hna_global_entry = hna_global_hash_find(bat_priv, addr);
525 hash_find(bat_priv->hna_global_hash,
526 compare_orig, choose_orig, addr);
527 spin_unlock_bh(&bat_priv->hna_ghash_lock);
528 596
529 if (!hna_global_entry) 597 if (!hna_global_entry)
530 return NULL; 598 goto out;
531 599
532 return hna_global_entry->orig_node; 600 if (!atomic_inc_not_zero(&hna_global_entry->orig_node->refcount))
601 goto out;
602
603 orig_node = hna_global_entry->orig_node;
604
605out:
606 spin_unlock_bh(&bat_priv->hna_ghash_lock);
607 return orig_node;
533} 608}