aboutsummaryrefslogtreecommitdiffstats
path: root/net/batman-adv/translation-table.c
diff options
context:
space:
mode:
authorAntonio Quartulli <ordex@autistici.org>2011-04-27 08:27:44 -0400
committerSven Eckelmann <sven@narfation.org>2011-06-20 05:37:24 -0400
commita73105b8d4c765d9ebfb664d0a66802127d8e4c7 (patch)
treeb4b11a4050109d8f042c7ac87a5a6d6d91b5d1d2 /net/batman-adv/translation-table.c
parent3b27ffb00fbe9d9189715ea13ce8712e2f0cb0c5 (diff)
batman-adv: improved client announcement mechanism
The client announcement mechanism informs every mesh node in the network of any connected non-mesh client, in order to find the path towards that client from any given point in the mesh. The old implementation was based on the simple idea of appending a data buffer to each OGM containing all the client MAC addresses the node is serving. All other nodes can populate their global translation tables (table which links client MAC addresses to node addresses) using this MAC address buffer and linking it to the node's address contained in the OGM. A node that wants to contact a client has to lookup the node the client is connected to and its address in the global translation table. It is easy to understand that this implementation suffers from several issues: - big overhead (each and every OGM contains the entire list of connected clients) - high latencies for client route updates due to long OGM trip time and OGM losses The new implementation addresses these issues by appending client changes (new client joined or a client left) to the OGM instead of filling it with all the client addresses each time. In this way nodes can modify their global tables by means of "updates", thus reducing the overhead within the OGMs. To keep the entire network in sync each node maintains a translation table version number (ttvn) and a translation table checksum. These values are spread with the OGM to allow all the network participants to determine whether or not they need to update their translation table information. When a translation table lookup is performed in order to send a packet to a client attached to another node, the destination's ttvn is added to the payload packet. Forwarding nodes can compare the packet's ttvn with their destination's ttvn (this node could have fresher information than the source) and re-route the packet if necessary. This greatly reduces the packet loss of clients roaming from one AP to the next. 
Signed-off-by: Antonio Quartulli <ordex@autistici.org> Signed-off-by: Marek Lindner <lindner_marek@yahoo.de> Signed-off-by: Sven Eckelmann <sven@narfation.org>
Diffstat (limited to 'net/batman-adv/translation-table.c')
-rw-r--r--net/batman-adv/translation-table.c1126
1 files changed, 943 insertions, 183 deletions
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 561f76968d5e..597cd1a43058 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -23,13 +23,17 @@
23#include "translation-table.h" 23#include "translation-table.h"
24#include "soft-interface.h" 24#include "soft-interface.h"
25#include "hard-interface.h" 25#include "hard-interface.h"
26#include "send.h"
26#include "hash.h" 27#include "hash.h"
27#include "originator.h" 28#include "originator.h"
29#include "routing.h"
28 30
29static void tt_local_purge(struct work_struct *work); 31#include <linux/crc16.h>
30static void _tt_global_del_orig(struct bat_priv *bat_priv, 32
31 struct tt_global_entry *tt_global_entry, 33static void _tt_global_del(struct bat_priv *bat_priv,
32 const char *message); 34 struct tt_global_entry *tt_global_entry,
35 const char *message);
36static void tt_purge(struct work_struct *work);
33 37
34/* returns 1 if they are the same mac addr */ 38/* returns 1 if they are the same mac addr */
35static int compare_ltt(const struct hlist_node *node, const void *data2) 39static int compare_ltt(const struct hlist_node *node, const void *data2)
@@ -49,10 +53,11 @@ static int compare_gtt(const struct hlist_node *node, const void *data2)
49 return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0); 53 return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
50} 54}
51 55
52static void tt_local_start_timer(struct bat_priv *bat_priv) 56static void tt_start_timer(struct bat_priv *bat_priv)
53{ 57{
54 INIT_DELAYED_WORK(&bat_priv->tt_work, tt_local_purge); 58 INIT_DELAYED_WORK(&bat_priv->tt_work, tt_purge);
55 queue_delayed_work(bat_event_workqueue, &bat_priv->tt_work, 10 * HZ); 59 queue_delayed_work(bat_event_workqueue, &bat_priv->tt_work,
60 msecs_to_jiffies(5000));
56} 61}
57 62
58static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv, 63static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv,
@@ -112,7 +117,42 @@ static struct tt_global_entry *tt_global_hash_find(struct bat_priv *bat_priv,
112 return tt_global_entry_tmp; 117 return tt_global_entry_tmp;
113} 118}
114 119
115int tt_local_init(struct bat_priv *bat_priv) 120static bool is_out_of_time(unsigned long starting_time, unsigned long timeout)
121{
122 unsigned long deadline;
123 deadline = starting_time + msecs_to_jiffies(timeout);
124
125 return time_after(jiffies, deadline);
126}
127
128static void tt_local_event(struct bat_priv *bat_priv, uint8_t op,
129 const uint8_t *addr)
130{
131 struct tt_change_node *tt_change_node;
132
133 tt_change_node = kmalloc(sizeof(*tt_change_node), GFP_ATOMIC);
134
135 if (!tt_change_node)
136 return;
137
138 tt_change_node->change.flags = op;
139 memcpy(tt_change_node->change.addr, addr, ETH_ALEN);
140
141 spin_lock_bh(&bat_priv->tt_changes_list_lock);
142 /* track the change in the OGMinterval list */
143 list_add_tail(&tt_change_node->list, &bat_priv->tt_changes_list);
144 atomic_inc(&bat_priv->tt_local_changes);
145 spin_unlock_bh(&bat_priv->tt_changes_list_lock);
146
147 atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
148}
149
150int tt_len(int changes_num)
151{
152 return changes_num * sizeof(struct tt_change);
153}
154
155static int tt_local_init(struct bat_priv *bat_priv)
116{ 156{
117 if (bat_priv->tt_local_hash) 157 if (bat_priv->tt_local_hash)
118 return 1; 158 return 1;
@@ -122,9 +162,6 @@ int tt_local_init(struct bat_priv *bat_priv)
122 if (!bat_priv->tt_local_hash) 162 if (!bat_priv->tt_local_hash)
123 return 0; 163 return 0;
124 164
125 atomic_set(&bat_priv->tt_local_changed, 0);
126 tt_local_start_timer(bat_priv);
127
128 return 1; 165 return 1;
129} 166}
130 167
@@ -133,40 +170,24 @@ void tt_local_add(struct net_device *soft_iface, const uint8_t *addr)
133 struct bat_priv *bat_priv = netdev_priv(soft_iface); 170 struct bat_priv *bat_priv = netdev_priv(soft_iface);
134 struct tt_local_entry *tt_local_entry; 171 struct tt_local_entry *tt_local_entry;
135 struct tt_global_entry *tt_global_entry; 172 struct tt_global_entry *tt_global_entry;
136 int required_bytes;
137 173
138 spin_lock_bh(&bat_priv->tt_lhash_lock); 174 spin_lock_bh(&bat_priv->tt_lhash_lock);
139 tt_local_entry = tt_local_hash_find(bat_priv, addr); 175 tt_local_entry = tt_local_hash_find(bat_priv, addr);
140 spin_unlock_bh(&bat_priv->tt_lhash_lock);
141 176
142 if (tt_local_entry) { 177 if (tt_local_entry) {
143 tt_local_entry->last_seen = jiffies; 178 tt_local_entry->last_seen = jiffies;
144 return; 179 goto unlock;
145 }
146
147 /* only announce as many hosts as possible in the batman-packet and
148 space in batman_packet->num_tt That also should give a limit to
149 MAC-flooding. */
150 required_bytes = (bat_priv->num_local_tt + 1) * ETH_ALEN;
151 required_bytes += BAT_PACKET_LEN;
152
153 if ((required_bytes > ETH_DATA_LEN) ||
154 (atomic_read(&bat_priv->aggregated_ogms) &&
155 required_bytes > MAX_AGGREGATION_BYTES) ||
156 (bat_priv->num_local_tt + 1 > 255)) {
157 bat_dbg(DBG_ROUTES, bat_priv,
158 "Can't add new local tt entry (%pM): "
159 "number of local tt entries exceeds packet size\n",
160 addr);
161 return;
162 } 180 }
163 181
164 bat_dbg(DBG_ROUTES, bat_priv,
165 "Creating new local tt entry: %pM\n", addr);
166
167 tt_local_entry = kmalloc(sizeof(*tt_local_entry), GFP_ATOMIC); 182 tt_local_entry = kmalloc(sizeof(*tt_local_entry), GFP_ATOMIC);
168 if (!tt_local_entry) 183 if (!tt_local_entry)
169 return; 184 goto unlock;
185
186 tt_local_event(bat_priv, NO_FLAGS, addr);
187
188 bat_dbg(DBG_TT, bat_priv,
189 "Creating new local tt entry: %pM (ttvn: %d)\n", addr,
190 (uint8_t)atomic_read(&bat_priv->ttvn));
170 191
171 memcpy(tt_local_entry->addr, addr, ETH_ALEN); 192 memcpy(tt_local_entry->addr, addr, ETH_ALEN);
172 tt_local_entry->last_seen = jiffies; 193 tt_local_entry->last_seen = jiffies;
@@ -177,13 +198,9 @@ void tt_local_add(struct net_device *soft_iface, const uint8_t *addr)
177 else 198 else
178 tt_local_entry->never_purge = 0; 199 tt_local_entry->never_purge = 0;
179 200
180 spin_lock_bh(&bat_priv->tt_lhash_lock);
181
182 hash_add(bat_priv->tt_local_hash, compare_ltt, choose_orig, 201 hash_add(bat_priv->tt_local_hash, compare_ltt, choose_orig,
183 tt_local_entry, &tt_local_entry->hash_entry); 202 tt_local_entry, &tt_local_entry->hash_entry);
184 bat_priv->num_local_tt++; 203 atomic_inc(&bat_priv->num_local_tt);
185 atomic_set(&bat_priv->tt_local_changed, 1);
186
187 spin_unlock_bh(&bat_priv->tt_lhash_lock); 204 spin_unlock_bh(&bat_priv->tt_lhash_lock);
188 205
189 /* remove address from global hash if present */ 206 /* remove address from global hash if present */
@@ -192,46 +209,60 @@ void tt_local_add(struct net_device *soft_iface, const uint8_t *addr)
192 tt_global_entry = tt_global_hash_find(bat_priv, addr); 209 tt_global_entry = tt_global_hash_find(bat_priv, addr);
193 210
194 if (tt_global_entry) 211 if (tt_global_entry)
195 _tt_global_del_orig(bat_priv, tt_global_entry, 212 _tt_global_del(bat_priv, tt_global_entry,
196 "local tt received"); 213 "local tt received");
197 214
198 spin_unlock_bh(&bat_priv->tt_ghash_lock); 215 spin_unlock_bh(&bat_priv->tt_ghash_lock);
216 return;
217unlock:
218 spin_unlock_bh(&bat_priv->tt_lhash_lock);
199} 219}
200 220
201int tt_local_fill_buffer(struct bat_priv *bat_priv, 221int tt_changes_fill_buffer(struct bat_priv *bat_priv,
202 unsigned char *buff, int buff_len) 222 unsigned char *buff, int buff_len)
203{ 223{
204 struct hashtable_t *hash = bat_priv->tt_local_hash; 224 int count = 0, tot_changes = 0;
205 struct tt_local_entry *tt_local_entry; 225 struct tt_change_node *entry, *safe;
206 struct hlist_node *node;
207 struct hlist_head *head;
208 int i, count = 0;
209 226
210 spin_lock_bh(&bat_priv->tt_lhash_lock); 227 if (buff_len > 0)
228 tot_changes = buff_len / tt_len(1);
211 229
212 for (i = 0; i < hash->size; i++) { 230 spin_lock_bh(&bat_priv->tt_changes_list_lock);
213 head = &hash->table[i]; 231 atomic_set(&bat_priv->tt_local_changes, 0);
214
215 rcu_read_lock();
216 hlist_for_each_entry_rcu(tt_local_entry, node,
217 head, hash_entry) {
218 if (buff_len < (count + 1) * ETH_ALEN)
219 break;
220
221 memcpy(buff + (count * ETH_ALEN), tt_local_entry->addr,
222 ETH_ALEN);
223 232
233 list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
234 list) {
235 if (count < tot_changes) {
236 memcpy(buff + tt_len(count),
237 &entry->change, sizeof(struct tt_change));
224 count++; 238 count++;
225 } 239 }
226 rcu_read_unlock(); 240 list_del(&entry->list);
241 kfree(entry);
227 } 242 }
243 spin_unlock_bh(&bat_priv->tt_changes_list_lock);
244
245 /* Keep the buffer for possible tt_request */
246 spin_lock_bh(&bat_priv->tt_buff_lock);
247 kfree(bat_priv->tt_buff);
248 bat_priv->tt_buff_len = 0;
249 bat_priv->tt_buff = NULL;
250 /* We check whether this new OGM has no changes due to size
251 * problems */
252 if (buff_len > 0) {
253 /**
254 * if kmalloc() fails we will reply with the full table
255 * instead of providing the diff
256 */
257 bat_priv->tt_buff = kmalloc(buff_len, GFP_ATOMIC);
258 if (bat_priv->tt_buff) {
259 memcpy(bat_priv->tt_buff, buff, buff_len);
260 bat_priv->tt_buff_len = buff_len;
261 }
262 }
263 spin_unlock_bh(&bat_priv->tt_buff_lock);
228 264
229 /* if we did not get all new local tts see you next time ;-) */ 265 return tot_changes;
230 if (count == bat_priv->num_local_tt)
231 atomic_set(&bat_priv->tt_local_changed, 0);
232
233 spin_unlock_bh(&bat_priv->tt_lhash_lock);
234 return count;
235} 266}
236 267
237int tt_local_seq_print_text(struct seq_file *seq, void *offset) 268int tt_local_seq_print_text(struct seq_file *seq, void *offset)
@@ -263,8 +294,8 @@ int tt_local_seq_print_text(struct seq_file *seq, void *offset)
263 } 294 }
264 295
265 seq_printf(seq, "Locally retrieved addresses (from %s) " 296 seq_printf(seq, "Locally retrieved addresses (from %s) "
266 "announced via TT:\n", 297 "announced via TT (TTVN: %u):\n",
267 net_dev->name); 298 net_dev->name, (uint8_t)atomic_read(&bat_priv->ttvn));
268 299
269 spin_lock_bh(&bat_priv->tt_lhash_lock); 300 spin_lock_bh(&bat_priv->tt_lhash_lock);
270 301
@@ -311,54 +342,51 @@ out:
311 return ret; 342 return ret;
312} 343}
313 344
314static void _tt_local_del(struct hlist_node *node, void *arg) 345static void tt_local_entry_free(struct hlist_node *node, void *arg)
315{ 346{
316 struct bat_priv *bat_priv = arg; 347 struct bat_priv *bat_priv = arg;
317 void *data = container_of(node, struct tt_local_entry, hash_entry); 348 void *data = container_of(node, struct tt_local_entry, hash_entry);
318 349
319 kfree(data); 350 kfree(data);
320 bat_priv->num_local_tt--; 351 atomic_dec(&bat_priv->num_local_tt);
321 atomic_set(&bat_priv->tt_local_changed, 1);
322} 352}
323 353
324static void tt_local_del(struct bat_priv *bat_priv, 354static void tt_local_del(struct bat_priv *bat_priv,
325 struct tt_local_entry *tt_local_entry, 355 struct tt_local_entry *tt_local_entry,
326 const char *message) 356 const char *message)
327{ 357{
328 bat_dbg(DBG_ROUTES, bat_priv, "Deleting local tt entry (%pM): %s\n", 358 bat_dbg(DBG_TT, bat_priv, "Deleting local tt entry (%pM): %s\n",
329 tt_local_entry->addr, message); 359 tt_local_entry->addr, message);
330 360
361 atomic_dec(&bat_priv->num_local_tt);
362
331 hash_remove(bat_priv->tt_local_hash, compare_ltt, choose_orig, 363 hash_remove(bat_priv->tt_local_hash, compare_ltt, choose_orig,
332 tt_local_entry->addr); 364 tt_local_entry->addr);
333 _tt_local_del(&tt_local_entry->hash_entry, bat_priv); 365
366 tt_local_entry_free(&tt_local_entry->hash_entry, bat_priv);
334} 367}
335 368
336void tt_local_remove(struct bat_priv *bat_priv, 369void tt_local_remove(struct bat_priv *bat_priv, const uint8_t *addr,
337 const uint8_t *addr, const char *message) 370 const char *message)
338{ 371{
339 struct tt_local_entry *tt_local_entry; 372 struct tt_local_entry *tt_local_entry;
340 373
341 spin_lock_bh(&bat_priv->tt_lhash_lock); 374 spin_lock_bh(&bat_priv->tt_lhash_lock);
342
343 tt_local_entry = tt_local_hash_find(bat_priv, addr); 375 tt_local_entry = tt_local_hash_find(bat_priv, addr);
344 376
345 if (tt_local_entry) 377 if (tt_local_entry) {
378 tt_local_event(bat_priv, TT_CHANGE_DEL, tt_local_entry->addr);
346 tt_local_del(bat_priv, tt_local_entry, message); 379 tt_local_del(bat_priv, tt_local_entry, message);
347 380 }
348 spin_unlock_bh(&bat_priv->tt_lhash_lock); 381 spin_unlock_bh(&bat_priv->tt_lhash_lock);
349} 382}
350 383
351static void tt_local_purge(struct work_struct *work) 384static void tt_local_purge(struct bat_priv *bat_priv)
352{ 385{
353 struct delayed_work *delayed_work =
354 container_of(work, struct delayed_work, work);
355 struct bat_priv *bat_priv =
356 container_of(delayed_work, struct bat_priv, tt_work);
357 struct hashtable_t *hash = bat_priv->tt_local_hash; 386 struct hashtable_t *hash = bat_priv->tt_local_hash;
358 struct tt_local_entry *tt_local_entry; 387 struct tt_local_entry *tt_local_entry;
359 struct hlist_node *node, *node_tmp; 388 struct hlist_node *node, *node_tmp;
360 struct hlist_head *head; 389 struct hlist_head *head;
361 unsigned long timeout;
362 int i; 390 int i;
363 391
364 spin_lock_bh(&bat_priv->tt_lhash_lock); 392 spin_lock_bh(&bat_priv->tt_lhash_lock);
@@ -371,32 +399,53 @@ static void tt_local_purge(struct work_struct *work)
371 if (tt_local_entry->never_purge) 399 if (tt_local_entry->never_purge)
372 continue; 400 continue;
373 401
374 timeout = tt_local_entry->last_seen; 402 if (!is_out_of_time(tt_local_entry->last_seen,
375 timeout += TT_LOCAL_TIMEOUT * HZ; 403 TT_LOCAL_TIMEOUT * 1000))
376
377 if (time_before(jiffies, timeout))
378 continue; 404 continue;
379 405
406 tt_local_event(bat_priv, TT_CHANGE_DEL,
407 tt_local_entry->addr);
380 tt_local_del(bat_priv, tt_local_entry, 408 tt_local_del(bat_priv, tt_local_entry,
381 "address timed out"); 409 "address timed out");
382 } 410 }
383 } 411 }
384 412
385 spin_unlock_bh(&bat_priv->tt_lhash_lock); 413 spin_unlock_bh(&bat_priv->tt_lhash_lock);
386 tt_local_start_timer(bat_priv);
387} 414}
388 415
389void tt_local_free(struct bat_priv *bat_priv) 416static void tt_local_table_free(struct bat_priv *bat_priv)
390{ 417{
418 struct hashtable_t *hash;
419 int i;
420 spinlock_t *list_lock; /* protects write access to the hash lists */
421 struct hlist_head *head;
422 struct hlist_node *node, *node_tmp;
423 struct tt_local_entry *tt_local_entry;
424
391 if (!bat_priv->tt_local_hash) 425 if (!bat_priv->tt_local_hash)
392 return; 426 return;
393 427
394 cancel_delayed_work_sync(&bat_priv->tt_work); 428 hash = bat_priv->tt_local_hash;
395 hash_delete(bat_priv->tt_local_hash, _tt_local_del, bat_priv); 429
430 for (i = 0; i < hash->size; i++) {
431 head = &hash->table[i];
432 list_lock = &hash->list_locks[i];
433
434 spin_lock_bh(list_lock);
435 hlist_for_each_entry_safe(tt_local_entry, node, node_tmp,
436 head, hash_entry) {
437 hlist_del_rcu(node);
438 kfree(tt_local_entry);
439 }
440 spin_unlock_bh(list_lock);
441 }
442
443 hash_destroy(hash);
444
396 bat_priv->tt_local_hash = NULL; 445 bat_priv->tt_local_hash = NULL;
397} 446}
398 447
399int tt_global_init(struct bat_priv *bat_priv) 448static int tt_global_init(struct bat_priv *bat_priv)
400{ 449{
401 if (bat_priv->tt_global_hash) 450 if (bat_priv->tt_global_hash)
402 return 1; 451 return 1;
@@ -409,73 +458,78 @@ int tt_global_init(struct bat_priv *bat_priv)
409 return 1; 458 return 1;
410} 459}
411 460
412void tt_global_add_orig(struct bat_priv *bat_priv, 461static void tt_changes_list_free(struct bat_priv *bat_priv)
413 struct orig_node *orig_node,
414 const unsigned char *tt_buff, int tt_buff_len)
415{ 462{
416 struct tt_global_entry *tt_global_entry; 463 struct tt_change_node *entry, *safe;
417 struct tt_local_entry *tt_local_entry;
418 int tt_buff_count = 0;
419 const unsigned char *tt_ptr;
420
421 while ((tt_buff_count + 1) * ETH_ALEN <= tt_buff_len) {
422 spin_lock_bh(&bat_priv->tt_ghash_lock);
423
424 tt_ptr = tt_buff + (tt_buff_count * ETH_ALEN);
425 tt_global_entry = tt_global_hash_find(bat_priv, tt_ptr);
426 464
427 if (!tt_global_entry) { 465 spin_lock_bh(&bat_priv->tt_changes_list_lock);
428 spin_unlock_bh(&bat_priv->tt_ghash_lock);
429 466
430 tt_global_entry = kmalloc(sizeof(*tt_global_entry), 467 list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
431 GFP_ATOMIC); 468 list) {
432 469 list_del(&entry->list);
433 if (!tt_global_entry) 470 kfree(entry);
434 break; 471 }
435
436 memcpy(tt_global_entry->addr, tt_ptr, ETH_ALEN);
437
438 bat_dbg(DBG_ROUTES, bat_priv,
439 "Creating new global tt entry: "
440 "%pM (via %pM)\n",
441 tt_global_entry->addr, orig_node->orig);
442 472
443 spin_lock_bh(&bat_priv->tt_ghash_lock); 473 atomic_set(&bat_priv->tt_local_changes, 0);
444 hash_add(bat_priv->tt_global_hash, compare_gtt, 474 spin_unlock_bh(&bat_priv->tt_changes_list_lock);
445 choose_orig, tt_global_entry, 475}
446 &tt_global_entry->hash_entry);
447 476
448 } 477/* caller must hold orig_node refcount */
478int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
479 const unsigned char *tt_addr, uint8_t ttvn)
480{
481 struct tt_global_entry *tt_global_entry;
482 struct tt_local_entry *tt_local_entry;
483 struct orig_node *orig_node_tmp;
449 484
485 spin_lock_bh(&bat_priv->tt_ghash_lock);
486 tt_global_entry = tt_global_hash_find(bat_priv, tt_addr);
487
488 if (!tt_global_entry) {
489 tt_global_entry =
490 kmalloc(sizeof(*tt_global_entry),
491 GFP_ATOMIC);
492 if (!tt_global_entry)
493 goto unlock;
494 memcpy(tt_global_entry->addr, tt_addr, ETH_ALEN);
495 /* Assign the new orig_node */
496 atomic_inc(&orig_node->refcount);
450 tt_global_entry->orig_node = orig_node; 497 tt_global_entry->orig_node = orig_node;
451 spin_unlock_bh(&bat_priv->tt_ghash_lock); 498 tt_global_entry->ttvn = ttvn;
452 499 atomic_inc(&orig_node->tt_size);
453 /* remove address from local hash if present */ 500 hash_add(bat_priv->tt_global_hash, compare_gtt,
454 spin_lock_bh(&bat_priv->tt_lhash_lock); 501 choose_orig, tt_global_entry,
455 502 &tt_global_entry->hash_entry);
456 tt_ptr = tt_buff + (tt_buff_count * ETH_ALEN); 503 } else {
457 tt_local_entry = tt_local_hash_find(bat_priv, tt_ptr); 504 if (tt_global_entry->orig_node != orig_node) {
458 505 atomic_dec(&tt_global_entry->orig_node->tt_size);
459 if (tt_local_entry) 506 orig_node_tmp = tt_global_entry->orig_node;
460 tt_local_del(bat_priv, tt_local_entry, 507 atomic_inc(&orig_node->refcount);
461 "global tt received"); 508 tt_global_entry->orig_node = orig_node;
509 tt_global_entry->ttvn = ttvn;
510 orig_node_free_ref(orig_node_tmp);
511 atomic_inc(&orig_node->tt_size);
512 }
513 }
462 514
463 spin_unlock_bh(&bat_priv->tt_lhash_lock); 515 spin_unlock_bh(&bat_priv->tt_ghash_lock);
464 516
465 tt_buff_count++; 517 bat_dbg(DBG_TT, bat_priv,
466 } 518 "Creating new global tt entry: %pM (via %pM)\n",
519 tt_global_entry->addr, orig_node->orig);
467 520
468 /* initialize, and overwrite if malloc succeeds */ 521 /* remove address from local hash if present */
469 orig_node->tt_buff = NULL; 522 spin_lock_bh(&bat_priv->tt_lhash_lock);
470 orig_node->tt_buff_len = 0; 523 tt_local_entry = tt_local_hash_find(bat_priv, tt_addr);
471 524
472 if (tt_buff_len > 0) { 525 if (tt_local_entry)
473 orig_node->tt_buff = kmalloc(tt_buff_len, GFP_ATOMIC); 526 tt_local_del(bat_priv, tt_local_entry,
474 if (orig_node->tt_buff) { 527 "global tt received");
475 memcpy(orig_node->tt_buff, tt_buff, tt_buff_len); 528 spin_unlock_bh(&bat_priv->tt_lhash_lock);
476 orig_node->tt_buff_len = tt_buff_len; 529 return 1;
477 } 530unlock:
478 } 531 spin_unlock_bh(&bat_priv->tt_ghash_lock);
532 return 0;
479} 533}
480 534
481int tt_global_seq_print_text(struct seq_file *seq, void *offset) 535int tt_global_seq_print_text(struct seq_file *seq, void *offset)
@@ -509,17 +563,20 @@ int tt_global_seq_print_text(struct seq_file *seq, void *offset)
509 seq_printf(seq, 563 seq_printf(seq,
510 "Globally announced TT entries received via the mesh %s\n", 564 "Globally announced TT entries received via the mesh %s\n",
511 net_dev->name); 565 net_dev->name);
566 seq_printf(seq, " %-13s %s %-15s %s\n",
567 "Client", "(TTVN)", "Originator", "(Curr TTVN)");
512 568
513 spin_lock_bh(&bat_priv->tt_ghash_lock); 569 spin_lock_bh(&bat_priv->tt_ghash_lock);
514 570
515 buf_size = 1; 571 buf_size = 1;
516 /* Estimate length for: " * xx:xx:xx:xx:xx:xx via xx:xx:xx:xx:xx:xx\n"*/ 572 /* Estimate length for: " * xx:xx:xx:xx:xx:xx (ttvn) via
573 * xx:xx:xx:xx:xx:xx (cur_ttvn)\n"*/
517 for (i = 0; i < hash->size; i++) { 574 for (i = 0; i < hash->size; i++) {
518 head = &hash->table[i]; 575 head = &hash->table[i];
519 576
520 rcu_read_lock(); 577 rcu_read_lock();
521 __hlist_for_each_rcu(node, head) 578 __hlist_for_each_rcu(node, head)
522 buf_size += 43; 579 buf_size += 59;
523 rcu_read_unlock(); 580 rcu_read_unlock();
524 } 581 }
525 582
@@ -538,10 +595,14 @@ int tt_global_seq_print_text(struct seq_file *seq, void *offset)
538 rcu_read_lock(); 595 rcu_read_lock();
539 hlist_for_each_entry_rcu(tt_global_entry, node, 596 hlist_for_each_entry_rcu(tt_global_entry, node,
540 head, hash_entry) { 597 head, hash_entry) {
541 pos += snprintf(buff + pos, 44, 598 pos += snprintf(buff + pos, 61,
542 " * %pM via %pM\n", 599 " * %pM (%3u) via %pM (%3u)\n",
543 tt_global_entry->addr, 600 tt_global_entry->addr,
544 tt_global_entry->orig_node->orig); 601 tt_global_entry->ttvn,
602 tt_global_entry->orig_node->orig,
603 (uint8_t) atomic_read(
604 &tt_global_entry->orig_node->
605 last_ttvn));
545 } 606 }
546 rcu_read_unlock(); 607 rcu_read_unlock();
547 } 608 }
@@ -556,64 +617,80 @@ out:
556 return ret; 617 return ret;
557} 618}
558 619
559static void _tt_global_del_orig(struct bat_priv *bat_priv, 620static void _tt_global_del(struct bat_priv *bat_priv,
560 struct tt_global_entry *tt_global_entry, 621 struct tt_global_entry *tt_global_entry,
561 const char *message) 622 const char *message)
562{ 623{
563 bat_dbg(DBG_ROUTES, bat_priv, 624 if (!tt_global_entry)
625 return;
626
627 bat_dbg(DBG_TT, bat_priv,
564 "Deleting global tt entry %pM (via %pM): %s\n", 628 "Deleting global tt entry %pM (via %pM): %s\n",
565 tt_global_entry->addr, tt_global_entry->orig_node->orig, 629 tt_global_entry->addr, tt_global_entry->orig_node->orig,
566 message); 630 message);
567 631
632 atomic_dec(&tt_global_entry->orig_node->tt_size);
568 hash_remove(bat_priv->tt_global_hash, compare_gtt, choose_orig, 633 hash_remove(bat_priv->tt_global_hash, compare_gtt, choose_orig,
569 tt_global_entry->addr); 634 tt_global_entry->addr);
570 kfree(tt_global_entry); 635 kfree(tt_global_entry);
571} 636}
572 637
638void tt_global_del(struct bat_priv *bat_priv,
639 struct orig_node *orig_node, const unsigned char *addr,
640 const char *message)
641{
642 struct tt_global_entry *tt_global_entry;
643
644 spin_lock_bh(&bat_priv->tt_ghash_lock);
645 tt_global_entry = tt_global_hash_find(bat_priv, addr);
646
647 if (tt_global_entry && tt_global_entry->orig_node == orig_node) {
648 atomic_dec(&orig_node->tt_size);
649 _tt_global_del(bat_priv, tt_global_entry, message);
650 }
651 spin_unlock_bh(&bat_priv->tt_ghash_lock);
652}
653
573void tt_global_del_orig(struct bat_priv *bat_priv, 654void tt_global_del_orig(struct bat_priv *bat_priv,
574 struct orig_node *orig_node, const char *message) 655 struct orig_node *orig_node, const char *message)
575{ 656{
576 struct tt_global_entry *tt_global_entry; 657 struct tt_global_entry *tt_global_entry;
577 int tt_buff_count = 0; 658 int i;
578 unsigned char *tt_ptr; 659 struct hashtable_t *hash = bat_priv->tt_global_hash;
660 struct hlist_node *node, *safe;
661 struct hlist_head *head;
579 662
580 if (orig_node->tt_buff_len == 0) 663 if (!bat_priv->tt_global_hash)
581 return; 664 return;
582 665
583 spin_lock_bh(&bat_priv->tt_ghash_lock); 666 spin_lock_bh(&bat_priv->tt_ghash_lock);
667 for (i = 0; i < hash->size; i++) {
668 head = &hash->table[i];
584 669
585 while ((tt_buff_count + 1) * ETH_ALEN <= orig_node->tt_buff_len) { 670 hlist_for_each_entry_safe(tt_global_entry, node, safe,
586 tt_ptr = orig_node->tt_buff + (tt_buff_count * ETH_ALEN); 671 head, hash_entry) {
587 tt_global_entry = tt_global_hash_find(bat_priv, tt_ptr); 672 if (tt_global_entry->orig_node == orig_node)
588 673 _tt_global_del(bat_priv, tt_global_entry,
589 if ((tt_global_entry) && 674 message);
590 (tt_global_entry->orig_node == orig_node)) 675 }
591 _tt_global_del_orig(bat_priv, tt_global_entry,
592 message);
593
594 tt_buff_count++;
595 } 676 }
677 atomic_set(&orig_node->tt_size, 0);
596 678
597 spin_unlock_bh(&bat_priv->tt_ghash_lock); 679 spin_unlock_bh(&bat_priv->tt_ghash_lock);
598
599 orig_node->tt_buff_len = 0;
600 kfree(orig_node->tt_buff);
601 orig_node->tt_buff = NULL;
602} 680}
603 681
604static void tt_global_del(struct hlist_node *node, void *arg) 682static void tt_global_entry_free(struct hlist_node *node, void *arg)
605{ 683{
606 void *data = container_of(node, struct tt_global_entry, hash_entry); 684 void *data = container_of(node, struct tt_global_entry, hash_entry);
607
608 kfree(data); 685 kfree(data);
609} 686}
610 687
611void tt_global_free(struct bat_priv *bat_priv) 688static void tt_global_table_free(struct bat_priv *bat_priv)
612{ 689{
613 if (!bat_priv->tt_global_hash) 690 if (!bat_priv->tt_global_hash)
614 return; 691 return;
615 692
616 hash_delete(bat_priv->tt_global_hash, tt_global_del, NULL); 693 hash_delete(bat_priv->tt_global_hash, tt_global_entry_free, NULL);
617 bat_priv->tt_global_hash = NULL; 694 bat_priv->tt_global_hash = NULL;
618} 695}
619 696
@@ -638,3 +715,686 @@ out:
638 spin_unlock_bh(&bat_priv->tt_ghash_lock); 715 spin_unlock_bh(&bat_priv->tt_ghash_lock);
639 return orig_node; 716 return orig_node;
640} 717}
718
719/* Calculates the checksum of the local table of a given orig_node */
720uint16_t tt_global_crc(struct bat_priv *bat_priv, struct orig_node *orig_node)
721{
722 uint16_t total = 0, total_one;
723 struct hashtable_t *hash = bat_priv->tt_global_hash;
724 struct tt_global_entry *tt_global_entry;
725 struct hlist_node *node;
726 struct hlist_head *head;
727 int i, j;
728
729 for (i = 0; i < hash->size; i++) {
730 head = &hash->table[i];
731
732 rcu_read_lock();
733 hlist_for_each_entry_rcu(tt_global_entry, node,
734 head, hash_entry) {
735 if (compare_eth(tt_global_entry->orig_node,
736 orig_node)) {
737 total_one = 0;
738 for (j = 0; j < ETH_ALEN; j++)
739 total_one = crc16_byte(total_one,
740 tt_global_entry->addr[j]);
741 total ^= total_one;
742 }
743 }
744 rcu_read_unlock();
745 }
746
747 return total;
748}
749
750/* Calculates the checksum of the local table */
751uint16_t tt_local_crc(struct bat_priv *bat_priv)
752{
753 uint16_t total = 0, total_one;
754 struct hashtable_t *hash = bat_priv->tt_local_hash;
755 struct tt_local_entry *tt_local_entry;
756 struct hlist_node *node;
757 struct hlist_head *head;
758 int i, j;
759
760 for (i = 0; i < hash->size; i++) {
761 head = &hash->table[i];
762
763 rcu_read_lock();
764 hlist_for_each_entry_rcu(tt_local_entry, node,
765 head, hash_entry) {
766 total_one = 0;
767 for (j = 0; j < ETH_ALEN; j++)
768 total_one = crc16_byte(total_one,
769 tt_local_entry->addr[j]);
770 total ^= total_one;
771 }
772
773 rcu_read_unlock();
774 }
775
776 return total;
777}
778
779static void tt_req_list_free(struct bat_priv *bat_priv)
780{
781 struct tt_req_node *node, *safe;
782
783 spin_lock_bh(&bat_priv->tt_req_list_lock);
784
785 list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
786 list_del(&node->list);
787 kfree(node);
788 }
789
790 spin_unlock_bh(&bat_priv->tt_req_list_lock);
791}
792
793void tt_save_orig_buffer(struct bat_priv *bat_priv, struct orig_node *orig_node,
794 const unsigned char *tt_buff, uint8_t tt_num_changes)
795{
796 uint16_t tt_buff_len = tt_len(tt_num_changes);
797
798 /* Replace the old buffer only if I received something in the
799 * last OGM (the OGM could carry no changes) */
800 spin_lock_bh(&orig_node->tt_buff_lock);
801 if (tt_buff_len > 0) {
802 kfree(orig_node->tt_buff);
803 orig_node->tt_buff_len = 0;
804 orig_node->tt_buff = kmalloc(tt_buff_len, GFP_ATOMIC);
805 if (orig_node->tt_buff) {
806 memcpy(orig_node->tt_buff, tt_buff, tt_buff_len);
807 orig_node->tt_buff_len = tt_buff_len;
808 }
809 }
810 spin_unlock_bh(&orig_node->tt_buff_lock);
811}
812
813static void tt_req_purge(struct bat_priv *bat_priv)
814{
815 struct tt_req_node *node, *safe;
816
817 spin_lock_bh(&bat_priv->tt_req_list_lock);
818 list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
819 if (is_out_of_time(node->issued_at,
820 TT_REQUEST_TIMEOUT * 1000)) {
821 list_del(&node->list);
822 kfree(node);
823 }
824 }
825 spin_unlock_bh(&bat_priv->tt_req_list_lock);
826}
827
828/* returns the pointer to the new tt_req_node struct if no request
829 * has already been issued for this orig_node, NULL otherwise */
830static struct tt_req_node *new_tt_req_node(struct bat_priv *bat_priv,
831 struct orig_node *orig_node)
832{
833 struct tt_req_node *tt_req_node_tmp, *tt_req_node = NULL;
834
835 spin_lock_bh(&bat_priv->tt_req_list_lock);
836 list_for_each_entry(tt_req_node_tmp, &bat_priv->tt_req_list, list) {
837 if (compare_eth(tt_req_node_tmp, orig_node) &&
838 !is_out_of_time(tt_req_node_tmp->issued_at,
839 TT_REQUEST_TIMEOUT * 1000))
840 goto unlock;
841 }
842
843 tt_req_node = kmalloc(sizeof(*tt_req_node), GFP_ATOMIC);
844 if (!tt_req_node)
845 goto unlock;
846
847 memcpy(tt_req_node->addr, orig_node->orig, ETH_ALEN);
848 tt_req_node->issued_at = jiffies;
849
850 list_add(&tt_req_node->list, &bat_priv->tt_req_list);
851unlock:
852 spin_unlock_bh(&bat_priv->tt_req_list_lock);
853 return tt_req_node;
854}
855
856static int tt_global_valid_entry(const void *entry_ptr, const void *data_ptr)
857{
858 const struct tt_global_entry *tt_global_entry = entry_ptr;
859 const struct orig_node *orig_node = data_ptr;
860
861 return (tt_global_entry->orig_node == orig_node);
862}
863
/* Builds a TT_RESPONSE skb carrying a full translation table dump.
 *
 * tt_len:     requested payload size in bytes (clamped to the MTU)
 * ttvn:       translation table version number to advertise
 * hash:       the translation table hash to dump (local or global)
 * primary_if: used only to read the soft-iface MTU
 * valid_cb:   optional filter; when set, an entry is included only if it
 *             returns true (used to select one orig_node's entries)
 * cb_data:    opaque argument forwarded to valid_cb
 *
 * Only ttvn and tt_data are filled in the header; the caller completes
 * the remaining fields. Returns NULL on allocation failure */
static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
					      struct hashtable_t *hash,
					      struct hard_iface *primary_if,
					      int (*valid_cb)(const void *,
							      const void *),
					      void *cb_data)
{
	struct tt_local_entry *tt_local_entry;
	struct tt_query_packet *tt_response;
	struct tt_change *tt_change;
	struct hlist_node *node;
	struct hlist_head *head;
	struct sk_buff *skb = NULL;
	uint16_t tt_tot, tt_count;
	ssize_t tt_query_size = sizeof(struct tt_query_packet);
	int i;

	/* Fragmentation is not implemented: clamp the dump to what fits
	 * into a single packet, rounded down to whole tt_change entries */
	if (tt_query_size + tt_len > primary_if->soft_iface->mtu) {
		tt_len = primary_if->soft_iface->mtu - tt_query_size;
		tt_len -= tt_len % sizeof(struct tt_change);
	}
	tt_tot = tt_len / sizeof(struct tt_change);

	skb = dev_alloc_skb(tt_query_size + tt_len + ETH_HLEN);
	if (!skb)
		goto out;

	skb_reserve(skb, ETH_HLEN);
	tt_response = (struct tt_query_packet *)skb_put(skb,
						tt_query_size + tt_len);
	tt_response->ttvn = ttvn;
	/* entry count travels in network byte order */
	tt_response->tt_data = htons(tt_tot);

	tt_change = (struct tt_change *)(skb->data + tt_query_size);
	tt_count = 0;

	rcu_read_lock();
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		hlist_for_each_entry_rcu(tt_local_entry, node,
					 head, hash_entry) {
			if (tt_count == tt_tot)
				break;

			if ((valid_cb) && (!valid_cb(tt_local_entry, cb_data)))
				continue;

			memcpy(tt_change->addr, tt_local_entry->addr, ETH_ALEN);
			tt_change->flags = NO_FLAGS;

			tt_count++;
			tt_change++;
		}
	}
	rcu_read_unlock();

	/* NOTE(review): if valid_cb filtered entries out, tt_count can end
	 * up below tt_tot while the packet still advertises tt_tot entries
	 * over the full allocated length — confirm receivers tolerate the
	 * trailing uninitialized entries */
out:
	return skb;
}
924
/* Sends a unicast TT_REQUEST to dst_orig_node asking for its translation
 * table: the deltas since ttvn, or the complete table when full_table is
 * true. tt_crc is the table checksum we currently hold for the
 * destination; the responder uses it to validate the request.
 * Returns 0 on success, 1 otherwise. At most one request per destination
 * is kept pending (enforced by new_tt_req_node()) */
int send_tt_request(struct bat_priv *bat_priv, struct orig_node *dst_orig_node,
		    uint8_t ttvn, uint16_t tt_crc, bool full_table)
{
	struct sk_buff *skb = NULL;
	struct tt_query_packet *tt_request;
	struct neigh_node *neigh_node = NULL;
	struct hard_iface *primary_if;
	struct tt_req_node *tt_req_node = NULL;
	int ret = 1;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* The new tt_req will be issued only if I'm not waiting for a
	 * reply from the same orig_node yet */
	tt_req_node = new_tt_req_node(bat_priv, dst_orig_node);
	if (!tt_req_node)
		goto out;

	skb = dev_alloc_skb(sizeof(struct tt_query_packet) + ETH_HLEN);
	if (!skb)
		goto out;

	skb_reserve(skb, ETH_HLEN);

	tt_request = (struct tt_query_packet *)skb_put(skb,
			sizeof(struct tt_query_packet));

	tt_request->packet_type = BAT_TT_QUERY;
	tt_request->version = COMPAT_VERSION;
	memcpy(tt_request->src, primary_if->net_dev->dev_addr, ETH_ALEN);
	memcpy(tt_request->dst, dst_orig_node->orig, ETH_ALEN);
	tt_request->ttl = TTL;
	tt_request->ttvn = ttvn;
	/* NOTE(review): tt_crc is stored without htons(); the responder
	 * compares it against its host-order crc, so this only works
	 * between hosts of equal endianness — confirm */
	tt_request->tt_data = tt_crc;
	tt_request->flags = TT_REQUEST;

	if (full_table)
		tt_request->flags |= TT_FULL_TABLE;

	/* route the request through the current best next hop */
	neigh_node = orig_node_get_router(dst_orig_node);
	if (!neigh_node)
		goto out;

	bat_dbg(DBG_TT, bat_priv, "Sending TT_REQUEST to %pM via %pM "
		"[%c]\n", dst_orig_node->orig, neigh_node->addr,
		(full_table ? 'F' : '.'));

	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = 0;

out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (primary_if)
		hardif_free_ref(primary_if);
	if (ret)
		kfree_skb(skb);
	/* on failure, drop the pending-request marker so a new request
	 * for this destination can be issued later */
	if (ret && tt_req_node) {
		spin_lock_bh(&bat_priv->tt_req_list_lock);
		list_del(&tt_req_node->list);
		spin_unlock_bh(&bat_priv->tt_req_list_lock);
		kfree(tt_req_node);
	}
	return ret;
}
992
/* Answers a TT_REQUEST on behalf of another node (tt_request->dst is not
 * one of our interfaces). This is only possible when our global table
 * information for the real destination matches what the requester asked
 * for (same ttvn and same crc).
 * Returns true when a response was sent, false otherwise (the caller
 * then keeps routing the request towards its real destination) */
static bool send_other_tt_response(struct bat_priv *bat_priv,
				   struct tt_query_packet *tt_request)
{
	struct orig_node *req_dst_orig_node = NULL, *res_dst_orig_node = NULL;
	struct neigh_node *neigh_node = NULL;
	struct hard_iface *primary_if = NULL;
	uint8_t orig_ttvn, req_ttvn, ttvn;
	int ret = false;
	unsigned char *tt_buff;
	bool full_table;
	uint16_t tt_len, tt_tot;
	struct sk_buff *skb = NULL;
	struct tt_query_packet *tt_response;

	bat_dbg(DBG_TT, bat_priv,
		"Received TT_REQUEST from %pM for "
		"ttvn: %u (%pM) [%c]\n", tt_request->src,
		tt_request->ttvn, tt_request->dst,
		(tt_request->flags & TT_FULL_TABLE ? 'F' : '.'));

	/* Let's get the orig node of the REAL destination */
	req_dst_orig_node = get_orig_node(bat_priv, tt_request->dst);
	if (!req_dst_orig_node)
		goto out;

	/* ...and the orig node of the requester, to route the reply back */
	res_dst_orig_node = get_orig_node(bat_priv, tt_request->src);
	if (!res_dst_orig_node)
		goto out;

	neigh_node = orig_node_get_router(res_dst_orig_node);
	if (!neigh_node)
		goto out;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	orig_ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
	req_ttvn = tt_request->ttvn;

	/* I have not the requested data */
	if (orig_ttvn != req_ttvn ||
	    tt_request->tt_data != req_dst_orig_node->tt_crc)
		goto out;

	/* If it has explicitly been requested the full table */
	if (tt_request->flags & TT_FULL_TABLE ||
	    !req_dst_orig_node->tt_buff)
		full_table = true;
	else
		full_table = false;

	/* In this version, fragmentation is not implemented, then
	 * I'll send only one packet with as much TT entries as I can */
	if (!full_table) {
		/* delta reply: replay the changes buffered from the
		 * destination's last OGM */
		spin_lock_bh(&req_dst_orig_node->tt_buff_lock);
		tt_len = req_dst_orig_node->tt_buff_len;
		tt_tot = tt_len / sizeof(struct tt_change);

		skb = dev_alloc_skb(sizeof(struct tt_query_packet) +
				    tt_len + ETH_HLEN);
		if (!skb)
			goto unlock;

		skb_reserve(skb, ETH_HLEN);
		tt_response = (struct tt_query_packet *)skb_put(skb,
				sizeof(struct tt_query_packet) + tt_len);
		tt_response->ttvn = req_ttvn;
		tt_response->tt_data = htons(tt_tot);

		tt_buff = skb->data + sizeof(struct tt_query_packet);
		/* Copy the last orig_node's OGM buffer */
		memcpy(tt_buff, req_dst_orig_node->tt_buff,
		       req_dst_orig_node->tt_buff_len);

		spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);
	} else {
		/* full reply: dump our whole global table view of the
		 * destination */
		tt_len = (uint16_t)atomic_read(&req_dst_orig_node->tt_size) *
			 sizeof(struct tt_change);
		ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);

		skb = tt_response_fill_table(tt_len, ttvn,
					     bat_priv->tt_global_hash,
					     primary_if, tt_global_valid_entry,
					     req_dst_orig_node);
		if (!skb)
			goto out;

		tt_response = (struct tt_query_packet *)skb->data;
	}

	tt_response->packet_type = BAT_TT_QUERY;
	tt_response->version = COMPAT_VERSION;
	tt_response->ttl = TTL;
	/* the reply is sourced from the REAL destination's address */
	memcpy(tt_response->src, req_dst_orig_node->orig, ETH_ALEN);
	memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
	tt_response->flags = TT_RESPONSE;

	if (full_table)
		tt_response->flags |= TT_FULL_TABLE;

	bat_dbg(DBG_TT, bat_priv,
		"Sending TT_RESPONSE %pM via %pM for %pM (ttvn: %u)\n",
		res_dst_orig_node->orig, neigh_node->addr,
		req_dst_orig_node->orig, req_ttvn);

	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = true;
	goto out;

unlock:
	spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);

out:
	if (res_dst_orig_node)
		orig_node_free_ref(res_dst_orig_node);
	if (req_dst_orig_node)
		orig_node_free_ref(req_dst_orig_node);
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (primary_if)
		hardif_free_ref(primary_if);
	if (!ret)
		kfree_skb(skb);
	return ret;

}
/* Answers a TT_REQUEST addressed to one of our own interfaces with our
 * local translation table: a full dump when explicitly requested, the
 * ttvn gap is too big or no change buffer exists, otherwise just the
 * buffered changes of our last OGM.
 * Always returns true: the packet was for us and must not be re-routed */
static bool send_my_tt_response(struct bat_priv *bat_priv,
				struct tt_query_packet *tt_request)
{
	struct orig_node *orig_node = NULL;
	struct neigh_node *neigh_node = NULL;
	struct hard_iface *primary_if = NULL;
	uint8_t my_ttvn, req_ttvn, ttvn;
	int ret = false;
	unsigned char *tt_buff;
	bool full_table;
	uint16_t tt_len, tt_tot;
	struct sk_buff *skb = NULL;
	struct tt_query_packet *tt_response;

	bat_dbg(DBG_TT, bat_priv,
		"Received TT_REQUEST from %pM for "
		"ttvn: %u (me) [%c]\n", tt_request->src,
		tt_request->ttvn,
		(tt_request->flags & TT_FULL_TABLE ? 'F' : '.'));


	my_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
	req_ttvn = tt_request->ttvn;

	orig_node = get_orig_node(bat_priv, tt_request->src);
	if (!orig_node)
		goto out;

	neigh_node = orig_node_get_router(orig_node);
	if (!neigh_node)
		goto out;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* If the full table has been explicitly requested or the gap
	 * is too big send the whole local translation table */
	if (tt_request->flags & TT_FULL_TABLE || my_ttvn != req_ttvn ||
	    !bat_priv->tt_buff)
		full_table = true;
	else
		full_table = false;

	/* In this version, fragmentation is not implemented, then
	 * I'll send only one packet with as much TT entries as I can */
	if (!full_table) {
		/* delta reply: replay the buffered changes of our last OGM */
		spin_lock_bh(&bat_priv->tt_buff_lock);
		tt_len = bat_priv->tt_buff_len;
		tt_tot = tt_len / sizeof(struct tt_change);

		skb = dev_alloc_skb(sizeof(struct tt_query_packet) +
				    tt_len + ETH_HLEN);
		if (!skb)
			goto unlock;

		skb_reserve(skb, ETH_HLEN);
		tt_response = (struct tt_query_packet *)skb_put(skb,
				sizeof(struct tt_query_packet) + tt_len);
		tt_response->ttvn = req_ttvn;
		tt_response->tt_data = htons(tt_tot);

		tt_buff = skb->data + sizeof(struct tt_query_packet);
		memcpy(tt_buff, bat_priv->tt_buff,
		       bat_priv->tt_buff_len);
		spin_unlock_bh(&bat_priv->tt_buff_lock);
	} else {
		/* full reply: dump the whole local table */
		tt_len = (uint16_t)atomic_read(&bat_priv->num_local_tt) *
			 sizeof(struct tt_change);
		ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);

		skb = tt_response_fill_table(tt_len, ttvn,
					     bat_priv->tt_local_hash,
					     primary_if, NULL, NULL);
		if (!skb)
			goto out;

		tt_response = (struct tt_query_packet *)skb->data;
	}

	tt_response->packet_type = BAT_TT_QUERY;
	tt_response->version = COMPAT_VERSION;
	tt_response->ttl = TTL;
	memcpy(tt_response->src, primary_if->net_dev->dev_addr, ETH_ALEN);
	memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
	tt_response->flags = TT_RESPONSE;

	if (full_table)
		tt_response->flags |= TT_FULL_TABLE;

	bat_dbg(DBG_TT, bat_priv,
		"Sending TT_RESPONSE to %pM via %pM [%c]\n",
		orig_node->orig, neigh_node->addr,
		(tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));

	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = true;
	goto out;

unlock:
	spin_unlock_bh(&bat_priv->tt_buff_lock);
out:
	if (orig_node)
		orig_node_free_ref(orig_node);
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (primary_if)
		hardif_free_ref(primary_if);
	if (!ret)
		kfree_skb(skb);
	/* This packet was for me, so it doesn't need to be re-routed */
	return true;
}
1233
1234bool send_tt_response(struct bat_priv *bat_priv,
1235 struct tt_query_packet *tt_request)
1236{
1237 if (is_my_mac(tt_request->dst))
1238 return send_my_tt_response(bat_priv, tt_request);
1239 else
1240 return send_other_tt_response(bat_priv, tt_request);
1241}
1242
1243static void _tt_update_changes(struct bat_priv *bat_priv,
1244 struct orig_node *orig_node,
1245 struct tt_change *tt_change,
1246 uint16_t tt_num_changes, uint8_t ttvn)
1247{
1248 int i;
1249
1250 for (i = 0; i < tt_num_changes; i++) {
1251 if ((tt_change + i)->flags & TT_CHANGE_DEL)
1252 tt_global_del(bat_priv, orig_node,
1253 (tt_change + i)->addr,
1254 "tt removed by changes");
1255 else
1256 if (!tt_global_add(bat_priv, orig_node,
1257 (tt_change + i)->addr, ttvn))
1258 /* In case of problem while storing a
1259 * global_entry, we stop the updating
1260 * procedure without committing the
1261 * ttvn change. This will avoid to send
1262 * corrupted data on tt_request
1263 */
1264 return;
1265 }
1266}
1267
1268static void tt_fill_gtable(struct bat_priv *bat_priv,
1269 struct tt_query_packet *tt_response)
1270{
1271 struct orig_node *orig_node = NULL;
1272
1273 orig_node = orig_hash_find(bat_priv, tt_response->src);
1274 if (!orig_node)
1275 goto out;
1276
1277 /* Purge the old table first.. */
1278 tt_global_del_orig(bat_priv, orig_node, "Received full table");
1279
1280 _tt_update_changes(bat_priv, orig_node,
1281 (struct tt_change *)(tt_response + 1),
1282 tt_response->tt_data, tt_response->ttvn);
1283
1284 spin_lock_bh(&orig_node->tt_buff_lock);
1285 kfree(orig_node->tt_buff);
1286 orig_node->tt_buff_len = 0;
1287 orig_node->tt_buff = NULL;
1288 spin_unlock_bh(&orig_node->tt_buff_lock);
1289
1290 atomic_set(&orig_node->last_ttvn, tt_response->ttvn);
1291
1292out:
1293 if (orig_node)
1294 orig_node_free_ref(orig_node);
1295}
1296
/* Applies the TT changes received for orig_node (via OGM or partial
 * TT_RESPONSE), saves them for later replay in our own TT_RESPONSEs and
 * commits the new ttvn.
 * tt_num_changes is expected in host byte order */
void tt_update_changes(struct bat_priv *bat_priv, struct orig_node *orig_node,
		       uint16_t tt_num_changes, uint8_t ttvn,
		       struct tt_change *tt_change)
{
	_tt_update_changes(bat_priv, orig_node, tt_change, tt_num_changes,
			   ttvn);

	/* keep a copy of the changes so a future TT_REQUEST for this ttvn
	 * can be answered without a full table dump */
	tt_save_orig_buffer(bat_priv, orig_node, (unsigned char *)tt_change,
			    tt_num_changes);
	atomic_set(&orig_node->last_ttvn, ttvn);
}
1308
1309bool is_my_client(struct bat_priv *bat_priv, const uint8_t *addr)
1310{
1311 struct tt_local_entry *tt_local_entry;
1312
1313 spin_lock_bh(&bat_priv->tt_lhash_lock);
1314 tt_local_entry = tt_local_hash_find(bat_priv, addr);
1315 spin_unlock_bh(&bat_priv->tt_lhash_lock);
1316
1317 if (tt_local_entry)
1318 return true;
1319 return false;
1320}
1321
1322void handle_tt_response(struct bat_priv *bat_priv,
1323 struct tt_query_packet *tt_response)
1324{
1325 struct tt_req_node *node, *safe;
1326 struct orig_node *orig_node = NULL;
1327
1328 bat_dbg(DBG_TT, bat_priv, "Received TT_RESPONSE from %pM for "
1329 "ttvn %d t_size: %d [%c]\n",
1330 tt_response->src, tt_response->ttvn,
1331 tt_response->tt_data,
1332 (tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));
1333
1334 orig_node = orig_hash_find(bat_priv, tt_response->src);
1335 if (!orig_node)
1336 goto out;
1337
1338 if (tt_response->flags & TT_FULL_TABLE)
1339 tt_fill_gtable(bat_priv, tt_response);
1340 else
1341 tt_update_changes(bat_priv, orig_node, tt_response->tt_data,
1342 tt_response->ttvn,
1343 (struct tt_change *)(tt_response + 1));
1344
1345 /* Delete the tt_req_node from pending tt_requests list */
1346 spin_lock_bh(&bat_priv->tt_req_list_lock);
1347 list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
1348 if (!compare_eth(node->addr, tt_response->src))
1349 continue;
1350 list_del(&node->list);
1351 kfree(node);
1352 }
1353 spin_unlock_bh(&bat_priv->tt_req_list_lock);
1354
1355 /* Recalculate the CRC for this orig_node and store it */
1356 spin_lock_bh(&bat_priv->tt_ghash_lock);
1357 orig_node->tt_crc = tt_global_crc(bat_priv, orig_node);
1358 spin_unlock_bh(&bat_priv->tt_ghash_lock);
1359out:
1360 if (orig_node)
1361 orig_node_free_ref(orig_node);
1362}
1363
/* Initializes the local and global translation tables and starts the
 * periodic purge worker.
 * Returns 1 on success, 0 on failure */
int tt_init(struct bat_priv *bat_priv)
{
	if (!tt_local_init(bat_priv) || !tt_global_init(bat_priv))
		return 0;

	tt_start_timer(bat_priv);

	return 1;
}
1376
/* Releases every translation table resource: stops the purge worker,
 * then frees the local/global tables, the pending request list, the
 * queued local changes and the OGM change buffer */
void tt_free(struct bat_priv *bat_priv)
{
	/* make sure tt_purge() is not running anymore before tearing down */
	cancel_delayed_work_sync(&bat_priv->tt_work);

	tt_local_table_free(bat_priv);
	tt_global_table_free(bat_priv);
	tt_req_list_free(bat_priv);
	tt_changes_list_free(bat_priv);

	kfree(bat_priv->tt_buff);
}
1388
/* Periodic worker: purges timed-out local TT entries and stale pending
 * TT_REQUESTs, then re-arms itself via tt_start_timer() */
static void tt_purge(struct work_struct *work)
{
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct bat_priv *bat_priv =
		container_of(delayed_work, struct bat_priv, tt_work);

	tt_local_purge(bat_priv);
	tt_req_purge(bat_priv);

	tt_start_timer(bat_priv);
}