path: root/net/batman-adv/translation-table.c
author	Marek Lindner <lindner_marek@yahoo.de>	2011-02-18 07:28:09 -0500
committer	Marek Lindner <lindner_marek@yahoo.de>	2011-03-05 06:52:00 -0500
commit	7aadf889e897155c45cda230d2a6701ad1fbff61 (patch)
tree	4a31df411c29844afe25ccde17d2ff9e618241c1 /net/batman-adv/translation-table.c
parent	39901e716275da4e831b40f9e45a1b61d6a776dc (diff)
batman-adv: remove extra layer between hash and hash element - hash bucket
Signed-off-by: Marek Lindner <lindner_marek@yahoo.de>
Diffstat (limited to 'net/batman-adv/translation-table.c')
-rw-r--r--	net/batman-adv/translation-table.c	208
1 file changed, 133 insertions(+), 75 deletions(-)
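The change in rough outline: each hash slot used to hold a small wrapper bucket (struct element_t) whose data pointer led to the real entry; after this patch the hlist_node is embedded in the translation-table entry itself and the new per-type compare callbacks recover the entry via container_of(). A minimal sketch follows, with struct layouts trimmed for illustration only (the complete definitions live in hash.h and types.h, which this diffstat does not cover); compare_lhna() is quoted from the patch itself.

struct element_t {			/* pre-patch bucket layer, removed by this series */
	void *data;			/* pointed at hna_local_entry / hna_global_entry */
	struct hlist_node hlist;	/* linkage into the hash slot */
};

struct hna_local_entry {		/* post-patch: linkage embedded in the entry (fields trimmed) */
	uint8_t addr[ETH_ALEN];		/* MAC address, also the hash key */
	unsigned long last_seen;
	char never_purge;
	struct hlist_node hash_entry;	/* replaces the separate element_t bucket */
};

/* compare callbacks now take the hlist_node and use container_of(): */
static int compare_lhna(struct hlist_node *node, void *data2)
{
	void *data1 = container_of(node, struct hna_local_entry, hash_entry);

	return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
}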
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 77d0ee0d125..cd8a58396d2 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -30,12 +30,85 @@ static void _hna_global_del_orig(struct bat_priv *bat_priv,
 				 struct hna_global_entry *hna_global_entry,
 				 char *message);
 
+/* returns 1 if they are the same mac addr */
+static int compare_lhna(struct hlist_node *node, void *data2)
+{
+	void *data1 = container_of(node, struct hna_local_entry, hash_entry);
+
+	return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
+}
+
+/* returns 1 if they are the same mac addr */
+static int compare_ghna(struct hlist_node *node, void *data2)
+{
+	void *data1 = container_of(node, struct hna_global_entry, hash_entry);
+
+	return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
+}
+
 static void hna_local_start_timer(struct bat_priv *bat_priv)
 {
 	INIT_DELAYED_WORK(&bat_priv->hna_work, hna_local_purge);
 	queue_delayed_work(bat_event_workqueue, &bat_priv->hna_work, 10 * HZ);
 }
 
+static struct hna_local_entry *hna_local_hash_find(struct bat_priv *bat_priv,
+						   void *data)
+{
+	struct hashtable_t *hash = bat_priv->hna_local_hash;
+	struct hlist_head *head;
+	struct hlist_node *node;
+	struct hna_local_entry *hna_local_entry, *hna_local_entry_tmp = NULL;
+	int index;
+
+	if (!hash)
+		return NULL;
+
+	index = choose_orig(data, hash->size);
+	head = &hash->table[index];
+
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(hna_local_entry, node, head, hash_entry) {
+		if (!compare_eth(hna_local_entry, data))
+			continue;
+
+		hna_local_entry_tmp = hna_local_entry;
+		break;
+	}
+	rcu_read_unlock();
+
+	return hna_local_entry_tmp;
+}
+
+static struct hna_global_entry *hna_global_hash_find(struct bat_priv *bat_priv,
+						      void *data)
+{
+	struct hashtable_t *hash = bat_priv->hna_global_hash;
+	struct hlist_head *head;
+	struct hlist_node *node;
+	struct hna_global_entry *hna_global_entry;
+	struct hna_global_entry *hna_global_entry_tmp = NULL;
+	int index;
+
+	if (!hash)
+		return NULL;
+
+	index = choose_orig(data, hash->size);
+	head = &hash->table[index];
+
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(hna_global_entry, node, head, hash_entry) {
+		if (!compare_eth(hna_global_entry, data))
+			continue;
+
+		hna_global_entry_tmp = hna_global_entry;
+		break;
+	}
+	rcu_read_unlock();
+
+	return hna_global_entry_tmp;
+}
+
 int hna_local_init(struct bat_priv *bat_priv)
 {
 	if (bat_priv->hna_local_hash)
@@ -60,12 +133,7 @@ void hna_local_add(struct net_device *soft_iface, uint8_t *addr)
 	int required_bytes;
 
 	spin_lock_bh(&bat_priv->hna_lhash_lock);
-	rcu_read_lock();
-	hna_local_entry =
-		((struct hna_local_entry *)hash_find(bat_priv->hna_local_hash,
-						     compare_orig, choose_orig,
-						     addr));
-	rcu_read_unlock();
+	hna_local_entry = hna_local_hash_find(bat_priv, addr);
 	spin_unlock_bh(&bat_priv->hna_lhash_lock);
 
 	if (hna_local_entry) {
@@ -108,8 +176,8 @@ void hna_local_add(struct net_device *soft_iface, uint8_t *addr)
 
 	spin_lock_bh(&bat_priv->hna_lhash_lock);
 
-	hash_add(bat_priv->hna_local_hash, compare_orig, choose_orig,
-		 hna_local_entry);
+	hash_add(bat_priv->hna_local_hash, compare_lhna, choose_orig,
+		 hna_local_entry, &hna_local_entry->hash_entry);
 	bat_priv->num_local_hna++;
 	atomic_set(&bat_priv->hna_local_changed, 1);
 
@@ -118,11 +186,7 @@ void hna_local_add(struct net_device *soft_iface, uint8_t *addr)
 	/* remove address from global hash if present */
 	spin_lock_bh(&bat_priv->hna_ghash_lock);
 
-	rcu_read_lock();
-	hna_global_entry = ((struct hna_global_entry *)
-				hash_find(bat_priv->hna_global_hash,
-					  compare_orig, choose_orig, addr));
-	rcu_read_unlock();
+	hna_global_entry = hna_global_hash_find(bat_priv, addr);
 
 	if (hna_global_entry)
 		_hna_global_del_orig(bat_priv, hna_global_entry,
@@ -136,28 +200,27 @@ int hna_local_fill_buffer(struct bat_priv *bat_priv,
 {
 	struct hashtable_t *hash = bat_priv->hna_local_hash;
 	struct hna_local_entry *hna_local_entry;
-	struct element_t *bucket;
-	int i;
-	struct hlist_node *walk;
+	struct hlist_node *node;
 	struct hlist_head *head;
-	int count = 0;
+	int i, count = 0;
 
 	spin_lock_bh(&bat_priv->hna_lhash_lock);
 
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
 
-		hlist_for_each_entry(bucket, walk, head, hlist) {
-
+		rcu_read_lock();
+		hlist_for_each_entry_rcu(hna_local_entry, node,
+					 head, hash_entry) {
 			if (buff_len < (count + 1) * ETH_ALEN)
 				break;
 
-			hna_local_entry = bucket->data;
 			memcpy(buff + (count * ETH_ALEN), hna_local_entry->addr,
 			       ETH_ALEN);
 
 			count++;
 		}
+		rcu_read_unlock();
 	}
 
 	/* if we did not get all new local hnas see you next time ;-) */
@@ -174,12 +237,11 @@ int hna_local_seq_print_text(struct seq_file *seq, void *offset)
 	struct bat_priv *bat_priv = netdev_priv(net_dev);
 	struct hashtable_t *hash = bat_priv->hna_local_hash;
 	struct hna_local_entry *hna_local_entry;
-	int i;
-	struct hlist_node *walk;
+	struct hlist_node *node;
 	struct hlist_head *head;
-	struct element_t *bucket;
 	size_t buf_size, pos;
 	char *buff;
+	int i;
 
 	if (!bat_priv->primary_if) {
 		return seq_printf(seq, "BATMAN mesh %s disabled - "
@@ -198,8 +260,10 @@ int hna_local_seq_print_text(struct seq_file *seq, void *offset)
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
 
-		hlist_for_each(walk, head)
+		rcu_read_lock();
+		__hlist_for_each_rcu(node, head)
 			buf_size += 21;
+		rcu_read_unlock();
 	}
 
 	buff = kmalloc(buf_size, GFP_ATOMIC);
@@ -207,18 +271,20 @@ int hna_local_seq_print_text(struct seq_file *seq, void *offset)
 		spin_unlock_bh(&bat_priv->hna_lhash_lock);
 		return -ENOMEM;
 	}
+
 	buff[0] = '\0';
 	pos = 0;
 
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
 
-		hlist_for_each_entry(bucket, walk, head, hlist) {
-			hna_local_entry = bucket->data;
-
+		rcu_read_lock();
+		hlist_for_each_entry_rcu(hna_local_entry, node,
+					 head, hash_entry) {
 			pos += snprintf(buff + pos, 22, " * %pM\n",
 					hna_local_entry->addr);
 		}
+		rcu_read_unlock();
 	}
 
 	spin_unlock_bh(&bat_priv->hna_lhash_lock);
@@ -228,9 +294,10 @@ int hna_local_seq_print_text(struct seq_file *seq, void *offset)
 	return 0;
 }
 
-static void _hna_local_del(void *data, void *arg)
+static void _hna_local_del(struct hlist_node *node, void *arg)
 {
 	struct bat_priv *bat_priv = (struct bat_priv *)arg;
+	void *data = container_of(node, struct hna_local_entry, hash_entry);
 
 	kfree(data);
 	bat_priv->num_local_hna--;
@@ -244,9 +311,9 @@ static void hna_local_del(struct bat_priv *bat_priv,
 	bat_dbg(DBG_ROUTES, bat_priv, "Deleting local hna entry (%pM): %s\n",
 		hna_local_entry->addr, message);
 
-	hash_remove(bat_priv->hna_local_hash, compare_orig, choose_orig,
+	hash_remove(bat_priv->hna_local_hash, compare_lhna, choose_orig,
 		    hna_local_entry->addr);
-	_hna_local_del(hna_local_entry, bat_priv);
+	_hna_local_del(&hna_local_entry->hash_entry, bat_priv);
 }
 
 void hna_local_remove(struct bat_priv *bat_priv,
@@ -256,11 +323,7 @@ void hna_local_remove(struct bat_priv *bat_priv,
 
 	spin_lock_bh(&bat_priv->hna_lhash_lock);
 
-	rcu_read_lock();
-	hna_local_entry = (struct hna_local_entry *)
-		hash_find(bat_priv->hna_local_hash, compare_orig, choose_orig,
-			  addr);
-	rcu_read_unlock();
+	hna_local_entry = hna_local_hash_find(bat_priv, addr);
 
 	if (hna_local_entry)
 		hna_local_del(bat_priv, hna_local_entry, message);
@@ -276,27 +339,29 @@ static void hna_local_purge(struct work_struct *work)
 		container_of(delayed_work, struct bat_priv, hna_work);
 	struct hashtable_t *hash = bat_priv->hna_local_hash;
 	struct hna_local_entry *hna_local_entry;
-	int i;
-	struct hlist_node *walk, *safe;
+	struct hlist_node *node, *node_tmp;
 	struct hlist_head *head;
-	struct element_t *bucket;
 	unsigned long timeout;
+	int i;
 
 	spin_lock_bh(&bat_priv->hna_lhash_lock);
 
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
 
-		hlist_for_each_entry_safe(bucket, walk, safe, head, hlist) {
-			hna_local_entry = bucket->data;
+		hlist_for_each_entry_safe(hna_local_entry, node, node_tmp,
+					  head, hash_entry) {
+			if (hna_local_entry->never_purge)
+				continue;
 
 			timeout = hna_local_entry->last_seen;
 			timeout += LOCAL_HNA_TIMEOUT * HZ;
 
-			if ((!hna_local_entry->never_purge) &&
-			    time_after(jiffies, timeout))
-				hna_local_del(bat_priv, hna_local_entry,
-					      "address timed out");
+			if (time_before(jiffies, timeout))
+				continue;
+
+			hna_local_del(bat_priv, hna_local_entry,
+				      "address timed out");
 		}
 	}
 
@@ -340,11 +405,7 @@ void hna_global_add_orig(struct bat_priv *bat_priv,
 		spin_lock_bh(&bat_priv->hna_ghash_lock);
 
 		hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN);
-		rcu_read_lock();
-		hna_global_entry = (struct hna_global_entry *)
-			hash_find(bat_priv->hna_global_hash, compare_orig,
-				  choose_orig, hna_ptr);
-		rcu_read_unlock();
+		hna_global_entry = hna_global_hash_find(bat_priv, hna_ptr);
 
 		if (!hna_global_entry) {
 			spin_unlock_bh(&bat_priv->hna_ghash_lock);
@@ -364,8 +425,9 @@ void hna_global_add_orig(struct bat_priv *bat_priv,
 				hna_global_entry->addr, orig_node->orig);
 
 			spin_lock_bh(&bat_priv->hna_ghash_lock);
-			hash_add(bat_priv->hna_global_hash, compare_orig,
-				 choose_orig, hna_global_entry);
+			hash_add(bat_priv->hna_global_hash, compare_ghna,
+				 choose_orig, hna_global_entry,
+				 &hna_global_entry->hash_entry);
 
 		}
 
@@ -376,11 +438,7 @@ void hna_global_add_orig(struct bat_priv *bat_priv,
 		spin_lock_bh(&bat_priv->hna_lhash_lock);
 
 		hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN);
-		rcu_read_lock();
-		hna_local_entry = (struct hna_local_entry *)
-			hash_find(bat_priv->hna_local_hash, compare_orig,
-				  choose_orig, hna_ptr);
-		rcu_read_unlock();
+		hna_local_entry = hna_local_hash_find(bat_priv, hna_ptr);
 
 		if (hna_local_entry)
 			hna_local_del(bat_priv, hna_local_entry,
@@ -410,12 +468,11 @@ int hna_global_seq_print_text(struct seq_file *seq, void *offset)
 	struct bat_priv *bat_priv = netdev_priv(net_dev);
 	struct hashtable_t *hash = bat_priv->hna_global_hash;
 	struct hna_global_entry *hna_global_entry;
-	int i;
-	struct hlist_node *walk;
+	struct hlist_node *node;
 	struct hlist_head *head;
-	struct element_t *bucket;
 	size_t buf_size, pos;
 	char *buff;
+	int i;
 
 	if (!bat_priv->primary_if) {
 		return seq_printf(seq, "BATMAN mesh %s disabled - "
@@ -433,8 +490,10 @@ int hna_global_seq_print_text(struct seq_file *seq, void *offset)
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
 
-		hlist_for_each(walk, head)
+		rcu_read_lock();
+		__hlist_for_each_rcu(node, head)
 			buf_size += 43;
+		rcu_read_unlock();
 	}
 
 	buff = kmalloc(buf_size, GFP_ATOMIC);
@@ -448,14 +507,15 @@ int hna_global_seq_print_text(struct seq_file *seq, void *offset)
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
 
-		hlist_for_each_entry(bucket, walk, head, hlist) {
-			hna_global_entry = bucket->data;
-
+		rcu_read_lock();
+		hlist_for_each_entry_rcu(hna_global_entry, node,
+					 head, hash_entry) {
 			pos += snprintf(buff + pos, 44,
 					" * %pM via %pM\n",
 					hna_global_entry->addr,
 					hna_global_entry->orig_node->orig);
 		}
+		rcu_read_unlock();
 	}
 
 	spin_unlock_bh(&bat_priv->hna_ghash_lock);
@@ -474,7 +534,7 @@ static void _hna_global_del_orig(struct bat_priv *bat_priv,
 		hna_global_entry->addr, hna_global_entry->orig_node->orig,
 		message);
 
-	hash_remove(bat_priv->hna_global_hash, compare_orig, choose_orig,
+	hash_remove(bat_priv->hna_global_hash, compare_ghna, choose_orig,
 		    hna_global_entry->addr);
 	kfree(hna_global_entry);
 }
@@ -493,11 +553,7 @@ void hna_global_del_orig(struct bat_priv *bat_priv,
 
 	while ((hna_buff_count + 1) * ETH_ALEN <= orig_node->hna_buff_len) {
 		hna_ptr = orig_node->hna_buff + (hna_buff_count * ETH_ALEN);
-		rcu_read_lock();
-		hna_global_entry = (struct hna_global_entry *)
-			hash_find(bat_priv->hna_global_hash, compare_orig,
-				  choose_orig, hna_ptr);
-		rcu_read_unlock();
+		hna_global_entry = hna_global_hash_find(bat_priv, hna_ptr);
 
 		if ((hna_global_entry) &&
 		    (hna_global_entry->orig_node == orig_node))
@@ -514,8 +570,10 @@ void hna_global_del_orig(struct bat_priv *bat_priv,
 	orig_node->hna_buff = NULL;
 }
 
-static void hna_global_del(void *data, void *arg)
+static void hna_global_del(struct hlist_node *node, void *arg)
 {
+	void *data = container_of(node, struct hna_global_entry, hash_entry);
+
 	kfree(data);
 }
 
@@ -533,11 +591,11 @@ struct orig_node *transtable_search(struct bat_priv *bat_priv, uint8_t *addr)
 	struct hna_global_entry *hna_global_entry;
 
 	spin_lock_bh(&bat_priv->hna_ghash_lock);
-	rcu_read_lock();
-	hna_global_entry = (struct hna_global_entry *)
-				hash_find(bat_priv->hna_global_hash,
-					  compare_orig, choose_orig, addr);
-	rcu_read_unlock();
+	hna_global_entry = hna_global_hash_find(bat_priv, addr);
+
+	if (hna_global_entry)
+		kref_get(&hna_global_entry->orig_node->refcount);
+
 	spin_unlock_bh(&bat_priv->hna_ghash_lock);
 
 	if (!hna_global_entry)