author		Sven Eckelmann <sven@narfation.org>	2012-07-15 16:26:51 -0400
committer	Antonio Quartulli <ordex@autistici.org>	2012-08-23 08:20:13 -0400
commit		807736f6e00714fdeb443b31061d1c27fa903296
tree		a070c2e9316365424e4d08e2fa50e5a28729670d /net/batman-adv/gateway_client.c
parent		624463079e0af455a2d70d2a59b9e2f6b5827aea
batman-adv: Split batadv_priv in sub-structures for features
The structure batadv_priv grows every time a new feature is introduced. It
gets hard to find the parts of the struct that belong to a specific feature.
This becomes even harder because not every feature uses a prefix in the
member name.

The variables for bridge loop avoidance, gateway handling, translation table
and visualization server are moved into separate structs that are included
in the bat_priv main struct.
Signed-off-by: Sven Eckelmann <sven@narfation.org>
Signed-off-by: Antonio Quartulli <ordex@autistici.org>
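
The gateway portion of that split is visible in the diff below: the accesses
bat_priv->gw.curr_gw, gw.list, gw.list_lock and gw.reselect imply a sub-struct
roughly like the sketch here. Field names are taken from the diff; the
grouping and its name follow the batadv_priv_gw definition in
net/batman-adv/types.h, reconstructed here rather than quoted verbatim:

	/* sketch: gateway state grouped into one sub-struct */
	struct batadv_priv_gw {
		struct hlist_head list;                /* was bat_priv->gw_list */
		spinlock_t list_lock;                  /* was bat_priv->gw_list_lock */
		struct batadv_gw_node __rcu *curr_gw;  /* was bat_priv->curr_gw */
		atomic_t reselect;                     /* was bat_priv->gw_reselect */
	};

	struct batadv_priv {
		/* ... other members and per-feature sub-structs ... */
		struct batadv_priv_gw gw;
	};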
Diffstat (limited to 'net/batman-adv/gateway_client.c')
-rw-r--r--	net/batman-adv/gateway_client.c	32
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index fc866f2e4528..eef7cc739397 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -48,7 +48,7 @@ batadv_gw_get_selected_gw_node(struct batadv_priv *bat_priv)
 	struct batadv_gw_node *gw_node;
 
 	rcu_read_lock();
-	gw_node = rcu_dereference(bat_priv->curr_gw);
+	gw_node = rcu_dereference(bat_priv->gw.curr_gw);
 	if (!gw_node)
 		goto out;
 
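bat_priv->gw.curr_gw is an RCU-protected pointer, and this hunk is its read
side: rcu_read_lock() opens the read-side critical section and
rcu_dereference() loads the pointer with the ordering RCU requires; just past
the shown context the function pins the node with atomic_inc_not_zero()
before unlocking. A compact sketch of the same pattern (struct ctx, struct
item and their members are invented for illustration):

	static struct item *get_current(struct ctx *c)
	{
		struct item *it;

		rcu_read_lock();
		it = rcu_dereference(c->curr);	/* only stable inside the section */
		if (it && !atomic_inc_not_zero(&it->refcount))
			it = NULL;	/* racing with a free: treat as absent */
		rcu_read_unlock();

		return it;	/* caller now holds a reference */
	}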
@@ -91,23 +91,23 @@ static void batadv_gw_select(struct batadv_priv *bat_priv,
 {
 	struct batadv_gw_node *curr_gw_node;
 
-	spin_lock_bh(&bat_priv->gw_list_lock);
+	spin_lock_bh(&bat_priv->gw.list_lock);
 
 	if (new_gw_node && !atomic_inc_not_zero(&new_gw_node->refcount))
 		new_gw_node = NULL;
 
-	curr_gw_node = rcu_dereference_protected(bat_priv->curr_gw, 1);
-	rcu_assign_pointer(bat_priv->curr_gw, new_gw_node);
+	curr_gw_node = rcu_dereference_protected(bat_priv->gw.curr_gw, 1);
+	rcu_assign_pointer(bat_priv->gw.curr_gw, new_gw_node);
 
 	if (curr_gw_node)
 		batadv_gw_node_free_ref(curr_gw_node);
 
-	spin_unlock_bh(&bat_priv->gw_list_lock);
+	spin_unlock_bh(&bat_priv->gw.list_lock);
 }
 
 void batadv_gw_deselect(struct batadv_priv *bat_priv)
 {
-	atomic_set(&bat_priv->gw_reselect, 1);
+	atomic_set(&bat_priv->gw.reselect, 1);
 }
 
 static struct batadv_gw_node *
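batadv_gw_select() is the matching write side of that pointer: take the lock
that serializes updaters, read the old value with rcu_dereference_protected()
(the literal 1 asserts "I am the serialized updater"), publish the
replacement with rcu_assign_pointer(), and drop the old node's reference so
the refcount machinery frees it once readers are done. The same shape with
invented names:

	struct ctx {
		spinlock_t lock;		/* serializes updaters only */
		struct item __rcu *curr;	/* readers use rcu_dereference() */
	};

	static void set_current(struct ctx *c, struct item *new_item)
	{
		struct item *old;

		spin_lock_bh(&c->lock);
		old = rcu_dereference_protected(c->curr,
						lockdep_is_held(&c->lock));
		rcu_assign_pointer(c->curr, new_item);	/* publish */
		spin_unlock_bh(&c->lock);

		if (old)
			item_put(old);	/* like batadv_gw_node_free_ref() */
	}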
@@ -122,7 +122,7 @@ batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
 	struct batadv_orig_node *orig_node;
 
 	rcu_read_lock();
-	hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
+	hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw.list, list) {
 		if (gw_node->deleted)
 			continue;
 
@@ -202,7 +202,7 @@ void batadv_gw_election(struct batadv_priv *bat_priv)
 
 	curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
 
-	if (!batadv_atomic_dec_not_zero(&bat_priv->gw_reselect) && curr_gw)
+	if (!batadv_atomic_dec_not_zero(&bat_priv->gw.reselect) && curr_gw)
 		goto out;
 
 	next_gw = batadv_gw_get_best_gw_node(bat_priv);
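The reselect flag consumed here is the one batadv_gw_deselect() arms with
atomic_set(..., 1). batadv_atomic_dec_not_zero() is batman-adv's thin wrapper
around atomic_add_unless(): decrement only if the value is non-zero, and
return whether a decrement happened. Any number of deselect calls therefore
arm a single re-election, and exactly one election pass consumes it, with no
lock involved. Reproduced from memory of the era's main.h, so verify against
the tree:

	static inline int batadv_atomic_dec_not_zero(atomic_t *v)
	{
		/* add -1 unless the current value is 0 */
		return atomic_add_unless(v, -1, 0);
	}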
@@ -321,9 +321,9 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
 	gw_node->orig_node = orig_node;
 	atomic_set(&gw_node->refcount, 1);
 
-	spin_lock_bh(&bat_priv->gw_list_lock);
-	hlist_add_head_rcu(&gw_node->list, &bat_priv->gw_list);
-	spin_unlock_bh(&bat_priv->gw_list_lock);
+	spin_lock_bh(&bat_priv->gw.list_lock);
+	hlist_add_head_rcu(&gw_node->list, &bat_priv->gw.list);
+	spin_unlock_bh(&bat_priv->gw.list_lock);
 
 	batadv_gw_bandwidth_to_kbit(new_gwflags, &down, &up);
 	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
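The insert path shows the usual RCU list discipline: gw.list_lock only
excludes other writers, while hlist_add_head_rcu() orders the stores so that
the lockless hlist_for_each_entry_rcu() walks elsewhere in this file never
see a half-initialized node. A minimal sketch with an invented entry type:

	struct entry {
		struct hlist_node list;
		int value;
	};

	static void add_entry(struct hlist_head *head, spinlock_t *lock,
			      int value)
	{
		struct entry *e = kzalloc(sizeof(*e), GFP_ATOMIC);

		if (!e)
			return;
		e->value = value;	/* fully initialize before publishing */

		spin_lock_bh(lock);	/* writer vs. writer only */
		hlist_add_head_rcu(&e->list, head); /* readers may see it at once */
		spin_unlock_bh(lock);
	}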
@@ -350,7 +350,7 @@ void batadv_gw_node_update(struct batadv_priv *bat_priv,
 	curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
 
 	rcu_read_lock();
-	hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
+	hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw.list, list) {
 		if (gw_node->orig_node != orig_node)
 			continue;
 
@@ -404,10 +404,10 @@ void batadv_gw_node_purge(struct batadv_priv *bat_priv)
 
 	curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
 
-	spin_lock_bh(&bat_priv->gw_list_lock);
+	spin_lock_bh(&bat_priv->gw.list_lock);
 
 	hlist_for_each_entry_safe(gw_node, node, node_tmp,
-				  &bat_priv->gw_list, list) {
+				  &bat_priv->gw.list, list) {
 		if (((!gw_node->deleted) ||
 		     (time_before(jiffies, gw_node->deleted + timeout))) &&
 		    atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE)
@@ -420,7 +420,7 @@ void batadv_gw_node_purge(struct batadv_priv *bat_priv)
 		batadv_gw_node_free_ref(gw_node);
 	}
 
-	spin_unlock_bh(&bat_priv->gw_list_lock);
+	spin_unlock_bh(&bat_priv->gw.list_lock);
 
 	/* gw_deselect() needs to acquire the gw_list_lock */
 	if (do_deselect)
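Note the ordering at the end of the purge: the deselect call is deferred
until after spin_unlock_bh(). The comment says gw_deselect() needs
gw_list_lock, which appears to describe an earlier version (the
batadv_gw_deselect() shown above is a bare atomic_set()), but the
deferred-call shape is the safe default whenever a callee may take a lock the
caller holds, since kernel spinlocks are not recursive:

	spin_lock_bh(&bat_priv->gw.list_lock);
	/* ... prune the list, decide whether a re-election is needed ... */
	spin_unlock_bh(&bat_priv->gw.list_lock);

	if (do_deselect)
		batadv_gw_deselect(bat_priv);	/* called only after unlocking */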
@@ -496,7 +496,7 @@ int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset)
 		   primary_if->net_dev->dev_addr, net_dev->name);
 
 	rcu_read_lock();
-	hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
+	hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw.list, list) {
 		if (gw_node->deleted)
 			continue;
 