author     Marek Lindner <lindner_marek@yahoo.de>     2012-05-06 16:22:05 -0400
committer  Antonio Quartulli <ordex@autistici.org>    2012-06-18 12:01:05 -0400
commit     be9aa4c1e0d7124cf976831db098f1e852fdbd14 (patch)
tree       652acbdcf0bc309de8e69ab76ca233d5a9c652a7
parent     beeb96a4142180c34ddf592aef5a278c2d676bf0 (diff)
batman-adv: turn tt commit code into routing protocol agnostic API
Prior to this patch the translation table code made assumptions about how
the routing protocol works and where its buffers are stored (to directly
modify them). Each protocol now calls the tt code with the relevant
pointers, thereby abstracting the code.

Signed-off-by: Marek Lindner <lindner_marek@yahoo.de>
Acked-by: Antonio Quartulli <ordex@autistici.org>
Signed-off-by: Sven Eckelmann <sven@narfation.org>
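For illustration, a minimal self-contained user-space sketch of the calling
convention this patch introduces. All names and types below
(sketch_tt_append_diff, CHANGE_LEN, the plain malloc'd buffer) are stand-ins
rather than the kernel structures; only the shape of the call mirrors
batadv_tt_append_diff(): the routing protocol keeps ownership of its packet
buffer and hands the tt code pointers to that buffer, its current length and
the protocol header size, and the tt code reallocates the buffer and reports
how many changes it appended.

/* sketch: buffer-pointer handshake between a routing protocol and the
 * tt code, modelled on batadv_tt_append_diff(); everything here is a
 * simplified stand-in, not the kernel implementation.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define CHANGE_LEN 8    /* stand-in for tt_len(1) */

/* tt side: knows nothing about the protocol, only sees the buffer */
static int sketch_tt_append_diff(unsigned char **packet_buff,
                                 int *packet_buff_len, int min_packet_len,
                                 int num_changes)
{
        int new_len = min_packet_len + num_changes * CHANGE_LEN;
        unsigned char *new_buff = malloc(new_len);

        if (!new_buff)
                return 0;       /* keep the old buffer on allocation failure */

        /* preserve the protocol header, then append the pending changes */
        memcpy(new_buff, *packet_buff, min_packet_len);
        memset(new_buff + min_packet_len, 0, num_changes * CHANGE_LEN);

        free(*packet_buff);
        *packet_buff = new_buff;
        *packet_buff_len = new_len;
        return num_changes;
}

/* protocol side: owns the buffer and passes pointers to it instead of
 * letting the tt code reach into protocol-private structures
 */
int main(void)
{
        int hdr_len = 24;               /* stand-in for BATMAN_OGM_HLEN */
        int packet_len = hdr_len;
        unsigned char *packet_buff = calloc(1, packet_len);
        int tt_num_changes;

        if (!packet_buff)
                return 1;

        tt_num_changes = sketch_tt_append_diff(&packet_buff, &packet_len,
                                               hdr_len, 3 /* pending changes */);
        printf("appended %d changes, packet is now %d bytes\n",
               tt_num_changes, packet_len);

        free(packet_buff);
        return 0;
}

The real helper additionally returns a negative value when there is nothing to
commit and shrinks the buffer back to the bare header once the diff has been
appended often enough (tt_ogm_append_cnt), as can be seen in the
translation-table.c hunks below.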
-rw-r--r--  net/batman-adv/bat_iv_ogm.c          14
-rw-r--r--  net/batman-adv/send.c                74
-rw-r--r--  net/batman-adv/translation-table.c  124
-rw-r--r--  net/batman-adv/translation-table.h    7
-rw-r--r--  net/batman-adv/types.h                3
5 files changed, 118 insertions(+), 104 deletions(-)
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index ec3542c3bf0c..6e0859f4a6a9 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -559,22 +559,28 @@ static void bat_iv_ogm_forward(struct orig_node *orig_node,
 			     if_incoming, 0, bat_iv_ogm_fwd_send_time());
 }
 
-static void bat_iv_ogm_schedule(struct hard_iface *hard_iface,
-				int tt_num_changes)
+static void bat_iv_ogm_schedule(struct hard_iface *hard_iface)
 {
 	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
 	struct batman_ogm_packet *batman_ogm_packet;
 	struct hard_iface *primary_if;
-	int vis_server;
+	int vis_server, tt_num_changes = 0;
 
 	vis_server = atomic_read(&bat_priv->vis_mode);
 	primary_if = primary_if_get_selected(bat_priv);
 
+	if (hard_iface == primary_if)
+		tt_num_changes = batadv_tt_append_diff(bat_priv,
+						       &hard_iface->packet_buff,
+						       &hard_iface->packet_len,
+						       BATMAN_OGM_HLEN);
+
 	batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;
 
 	/* change sequence number to network order */
 	batman_ogm_packet->seqno =
 		htonl((uint32_t)atomic_read(&hard_iface->seqno));
+	atomic_inc(&hard_iface->seqno);
 
 	batman_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn);
 	batman_ogm_packet->tt_crc = htons(bat_priv->tt_crc);
@@ -593,8 +599,6 @@ static void bat_iv_ogm_schedule(struct hard_iface *hard_iface,
 	else
 		batman_ogm_packet->gw_flags = NO_FLAGS;
 
-	atomic_inc(&hard_iface->seqno);
-
 	slide_own_bcast_window(hard_iface);
 	bat_iv_ogm_queue_add(bat_priv, hard_iface->packet_buff,
 			     hard_iface->packet_len, hard_iface, 1,
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index f5ff36492b2f..79f8973810c0 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -77,62 +77,9 @@ send_skb_err:
 	return NET_XMIT_DROP;
 }
 
-static void realloc_packet_buffer(struct hard_iface *hard_iface,
-				  int new_len)
-{
-	unsigned char *new_buff;
-
-	new_buff = kmalloc(new_len, GFP_ATOMIC);
-
-	/* keep old buffer if kmalloc should fail */
-	if (new_buff) {
-		memcpy(new_buff, hard_iface->packet_buff,
-		       BATMAN_OGM_HLEN);
-
-		kfree(hard_iface->packet_buff);
-		hard_iface->packet_buff = new_buff;
-		hard_iface->packet_len = new_len;
-	}
-}
-
-/* when calling this function (hard_iface == primary_if) has to be true */
-static int prepare_packet_buffer(struct bat_priv *bat_priv,
-				 struct hard_iface *hard_iface)
-{
-	int new_len;
-
-	new_len = BATMAN_OGM_HLEN +
-		  tt_len((uint8_t)atomic_read(&bat_priv->tt_local_changes));
-
-	/* if we have too many changes for one packet don't send any
-	 * and wait for the tt table request which will be fragmented */
-	if (new_len > hard_iface->soft_iface->mtu)
-		new_len = BATMAN_OGM_HLEN;
-
-	realloc_packet_buffer(hard_iface, new_len);
-
-	bat_priv->tt_crc = tt_local_crc(bat_priv);
-
-	/* reset the sending counter */
-	atomic_set(&bat_priv->tt_ogm_append_cnt, TT_OGM_APPEND_MAX);
-
-	return tt_changes_fill_buffer(bat_priv,
-				      hard_iface->packet_buff + BATMAN_OGM_HLEN,
-				      hard_iface->packet_len - BATMAN_OGM_HLEN);
-}
-
-static int reset_packet_buffer(struct bat_priv *bat_priv,
-			       struct hard_iface *hard_iface)
-{
-	realloc_packet_buffer(hard_iface, BATMAN_OGM_HLEN);
-	return 0;
-}
-
 void schedule_bat_ogm(struct hard_iface *hard_iface)
 {
 	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
-	struct hard_iface *primary_if;
-	int tt_num_changes = -1;
 
 	if ((hard_iface->if_status == IF_NOT_IN_USE) ||
 	    (hard_iface->if_status == IF_TO_BE_REMOVED))
@@ -148,26 +95,7 @@ void schedule_bat_ogm(struct hard_iface *hard_iface)
 	if (hard_iface->if_status == IF_TO_BE_ACTIVATED)
 		hard_iface->if_status = IF_ACTIVE;
 
-	primary_if = primary_if_get_selected(bat_priv);
-
-	if (hard_iface == primary_if) {
-		/* if at least one change happened */
-		if (atomic_read(&bat_priv->tt_local_changes) > 0) {
-			tt_commit_changes(bat_priv);
-			tt_num_changes = prepare_packet_buffer(bat_priv,
-							       hard_iface);
-		}
-
-		/* if the changes have been sent often enough */
-		if (!atomic_dec_not_zero(&bat_priv->tt_ogm_append_cnt))
-			tt_num_changes = reset_packet_buffer(bat_priv,
-							     hard_iface);
-	}
-
-	if (primary_if)
-		hardif_free_ref(primary_if);
-
-	bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface, tt_num_changes);
+	bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface);
 }
 
 static void forw_packet_free(struct forw_packet *forw_packet)
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 88cfe2a8ea4f..a1a51cc9d88e 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -275,14 +275,64 @@ out:
 	tt_global_entry_free_ref(tt_global_entry);
 }
 
-int tt_changes_fill_buffer(struct bat_priv *bat_priv,
-			   unsigned char *buff, int buff_len)
+static void tt_realloc_packet_buff(unsigned char **packet_buff,
+				   int *packet_buff_len, int min_packet_len,
+				   int new_packet_len)
+{
+	unsigned char *new_buff;
+
+	new_buff = kmalloc(new_packet_len, GFP_ATOMIC);
+
+	/* keep old buffer if kmalloc should fail */
+	if (new_buff) {
+		memcpy(new_buff, *packet_buff, min_packet_len);
+		kfree(*packet_buff);
+		*packet_buff = new_buff;
+		*packet_buff_len = new_packet_len;
+	}
+}
+
+static void tt_prepare_packet_buff(struct bat_priv *bat_priv,
+				   unsigned char **packet_buff,
+				   int *packet_buff_len, int min_packet_len)
+{
+	struct hard_iface *primary_if;
+	int req_len;
+
+	primary_if = primary_if_get_selected(bat_priv);
+
+	req_len = min_packet_len;
+	req_len += tt_len(atomic_read(&bat_priv->tt_local_changes));
+
+	/* if we have too many changes for one packet don't send any
+	 * and wait for the tt table request which will be fragmented
+	 */
+	if ((!primary_if) || (req_len > primary_if->soft_iface->mtu))
+		req_len = min_packet_len;
+
+	tt_realloc_packet_buff(packet_buff, packet_buff_len,
+			       min_packet_len, req_len);
+
+	if (primary_if)
+		hardif_free_ref(primary_if);
+}
+
+static int tt_changes_fill_buff(struct bat_priv *bat_priv,
+				unsigned char **packet_buff,
+				int *packet_buff_len, int min_packet_len)
 {
-	int count = 0, tot_changes = 0;
 	struct tt_change_node *entry, *safe;
+	int count = 0, tot_changes = 0, new_len;
+	unsigned char *tt_buff;
+
+	tt_prepare_packet_buff(bat_priv, packet_buff,
+			       packet_buff_len, min_packet_len);
 
-	if (buff_len > 0)
-		tot_changes = buff_len / tt_len(1);
+	new_len = *packet_buff_len - min_packet_len;
+	tt_buff = *packet_buff + min_packet_len;
+
+	if (new_len > 0)
+		tot_changes = new_len / tt_len(1);
 
 	spin_lock_bh(&bat_priv->tt_changes_list_lock);
 	atomic_set(&bat_priv->tt_local_changes, 0);
@@ -290,7 +340,7 @@ int tt_changes_fill_buffer(struct bat_priv *bat_priv,
 	list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
 				 list) {
 		if (count < tot_changes) {
-			memcpy(buff + tt_len(count),
+			memcpy(tt_buff + tt_len(count),
 			       &entry->change, sizeof(struct tt_change));
 			count++;
 		}
@@ -304,17 +354,15 @@ int tt_changes_fill_buffer(struct bat_priv *bat_priv,
 	kfree(bat_priv->tt_buff);
 	bat_priv->tt_buff_len = 0;
 	bat_priv->tt_buff = NULL;
-	/* We check whether this new OGM has no changes due to size
-	 * problems */
-	if (buff_len > 0) {
-		/**
-		 * if kmalloc() fails we will reply with the full table
+	/* check whether this new OGM has no changes due to size problems */
+	if (new_len > 0) {
+		/* if kmalloc() fails we will reply with the full table
 		 * instead of providing the diff
 		 */
-		bat_priv->tt_buff = kmalloc(buff_len, GFP_ATOMIC);
+		bat_priv->tt_buff = kmalloc(new_len, GFP_ATOMIC);
 		if (bat_priv->tt_buff) {
-			memcpy(bat_priv->tt_buff, buff, buff_len);
-			bat_priv->tt_buff_len = buff_len;
+			memcpy(bat_priv->tt_buff, tt_buff, new_len);
+			bat_priv->tt_buff_len = new_len;
 		}
 	}
 	spin_unlock_bh(&bat_priv->tt_buff_lock);
@@ -1105,7 +1153,7 @@ static uint16_t tt_global_crc(struct bat_priv *bat_priv,
 }
 
 /* Calculates the checksum of the local table */
-uint16_t tt_local_crc(struct bat_priv *bat_priv)
+static uint16_t batadv_tt_local_crc(struct bat_priv *bat_priv)
 {
 	uint16_t total = 0, total_one;
 	struct hashtable_t *hash = bat_priv->tt_local_hash;
@@ -2025,20 +2073,56 @@ static void tt_local_purge_pending_clients(struct bat_priv *bat_priv)
 
 }
 
-void tt_commit_changes(struct bat_priv *bat_priv)
+static int tt_commit_changes(struct bat_priv *bat_priv,
+			     unsigned char **packet_buff, int *packet_buff_len,
+			     int packet_min_len)
 {
-	uint16_t changed_num = tt_set_flags(bat_priv->tt_local_hash,
-					    TT_CLIENT_NEW, false);
-	/* all the reset entries have now to be effectively counted as local
-	 * entries */
+	uint16_t changed_num = 0;
+
+	if (atomic_read(&bat_priv->tt_local_changes) < 1)
+		return -ENOENT;
+
+	changed_num = tt_set_flags(bat_priv->tt_local_hash,
+				   TT_CLIENT_NEW, false);
+
+	/* all reset entries have to be counted as local entries */
 	atomic_add(changed_num, &bat_priv->num_local_tt);
 	tt_local_purge_pending_clients(bat_priv);
+	bat_priv->tt_crc = batadv_tt_local_crc(bat_priv);
 
 	/* Increment the TTVN only once per OGM interval */
 	atomic_inc(&bat_priv->ttvn);
 	bat_dbg(DBG_TT, bat_priv, "Local changes committed, updating to ttvn %u\n",
 		(uint8_t)atomic_read(&bat_priv->ttvn));
 	bat_priv->tt_poss_change = false;
+
+	/* reset the sending counter */
+	atomic_set(&bat_priv->tt_ogm_append_cnt, TT_OGM_APPEND_MAX);
+
+	return tt_changes_fill_buff(bat_priv, packet_buff,
+				    packet_buff_len, packet_min_len);
+}
+
+/* when calling this function (hard_iface == primary_if) has to be true */
+int batadv_tt_append_diff(struct bat_priv *bat_priv,
+			  unsigned char **packet_buff, int *packet_buff_len,
+			  int packet_min_len)
+{
+	int tt_num_changes;
+
+	/* if at least one change happened */
+	tt_num_changes = tt_commit_changes(bat_priv, packet_buff,
+					   packet_buff_len, packet_min_len);
+
+	/* if the changes have been sent often enough */
+	if ((tt_num_changes < 0) &&
+	    (!atomic_dec_not_zero(&bat_priv->tt_ogm_append_cnt))) {
+		tt_realloc_packet_buff(packet_buff, packet_buff_len,
+				       packet_min_len, packet_min_len);
+		tt_num_changes = 0;
+	}
+
+	return tt_num_changes;
 }
 
 bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst)
diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h
index c43374dc364d..d6ea30f9b026 100644
--- a/net/batman-adv/translation-table.h
+++ b/net/batman-adv/translation-table.h
@@ -23,8 +23,6 @@
 #define _NET_BATMAN_ADV_TRANSLATION_TABLE_H_
 
 int tt_len(int changes_num);
-int tt_changes_fill_buffer(struct bat_priv *bat_priv,
-			   unsigned char *buff, int buff_len);
 int tt_init(struct bat_priv *bat_priv);
 void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
 		  int ifindex);
@@ -41,18 +39,19 @@ void tt_global_del_orig(struct bat_priv *bat_priv,
 		       struct orig_node *orig_node, const char *message);
 struct orig_node *transtable_search(struct bat_priv *bat_priv,
 				    const uint8_t *src, const uint8_t *addr);
-uint16_t tt_local_crc(struct bat_priv *bat_priv);
 void tt_free(struct bat_priv *bat_priv);
 bool send_tt_response(struct bat_priv *bat_priv,
 		      struct tt_query_packet *tt_request);
 bool is_my_client(struct bat_priv *bat_priv, const uint8_t *addr);
 void handle_tt_response(struct bat_priv *bat_priv,
 			struct tt_query_packet *tt_response);
-void tt_commit_changes(struct bat_priv *bat_priv);
 bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst);
 void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
 		    const unsigned char *tt_buff, uint8_t tt_num_changes,
 		    uint8_t ttvn, uint16_t tt_crc);
+int batadv_tt_append_diff(struct bat_priv *bat_priv,
+			  unsigned char **packet_buff, int *packet_buff_len,
+			  int packet_min_len);
 bool tt_global_client_is_roaming(struct bat_priv *bat_priv, uint8_t *addr);
 
 
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 6b569debc1a6..bf71d525445a 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -405,8 +405,7 @@ struct bat_algo_ops {
 	/* called when primary interface is selected / changed */
 	void (*bat_primary_iface_set)(struct hard_iface *hard_iface);
 	/* prepare a new outgoing OGM for the send queue */
-	void (*bat_ogm_schedule)(struct hard_iface *hard_iface,
-				 int tt_num_changes);
+	void (*bat_ogm_schedule)(struct hard_iface *hard_iface);
 	/* send scheduled OGM */
 	void (*bat_ogm_emit)(struct forw_packet *forw_packet);
 };