Diffstat (limited to 'net/batman-adv/gateway_client.c')
-rw-r--r--   net/batman-adv/gateway_client.c | 53 +++++++++++++-----------
1 file changed, 29 insertions(+), 24 deletions(-)
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index fc866f2e4528..15d67abc10a4 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -48,7 +48,7 @@ batadv_gw_get_selected_gw_node(struct batadv_priv *bat_priv)
         struct batadv_gw_node *gw_node;

         rcu_read_lock();
-        gw_node = rcu_dereference(bat_priv->curr_gw);
+        gw_node = rcu_dereference(bat_priv->gw.curr_gw);
         if (!gw_node)
                 goto out;

@@ -91,23 +91,23 @@ static void batadv_gw_select(struct batadv_priv *bat_priv,
 {
         struct batadv_gw_node *curr_gw_node;

-        spin_lock_bh(&bat_priv->gw_list_lock);
+        spin_lock_bh(&bat_priv->gw.list_lock);

         if (new_gw_node && !atomic_inc_not_zero(&new_gw_node->refcount))
                 new_gw_node = NULL;

-        curr_gw_node = rcu_dereference_protected(bat_priv->curr_gw, 1);
-        rcu_assign_pointer(bat_priv->curr_gw, new_gw_node);
+        curr_gw_node = rcu_dereference_protected(bat_priv->gw.curr_gw, 1);
+        rcu_assign_pointer(bat_priv->gw.curr_gw, new_gw_node);

         if (curr_gw_node)
                 batadv_gw_node_free_ref(curr_gw_node);

-        spin_unlock_bh(&bat_priv->gw_list_lock);
+        spin_unlock_bh(&bat_priv->gw.list_lock);
 }

 void batadv_gw_deselect(struct batadv_priv *bat_priv)
 {
-        atomic_set(&bat_priv->gw_reselect, 1);
+        atomic_set(&bat_priv->gw.reselect, 1);
 }

 static struct batadv_gw_node *
@@ -117,12 +117,17 @@ batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
         struct hlist_node *node;
         struct batadv_gw_node *gw_node, *curr_gw = NULL;
         uint32_t max_gw_factor = 0, tmp_gw_factor = 0;
+        uint32_t gw_divisor;
         uint8_t max_tq = 0;
         int down, up;
+        uint8_t tq_avg;
         struct batadv_orig_node *orig_node;

+        gw_divisor = BATADV_TQ_LOCAL_WINDOW_SIZE * BATADV_TQ_LOCAL_WINDOW_SIZE;
+        gw_divisor *= 64;
+
         rcu_read_lock();
-        hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
+        hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw.list, list) {
                 if (gw_node->deleted)
                         continue;

@@ -134,19 +139,19 @@ batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
                 if (!atomic_inc_not_zero(&gw_node->refcount))
                         goto next;

+                tq_avg = router->tq_avg;
+
                 switch (atomic_read(&bat_priv->gw_sel_class)) {
                 case 1: /* fast connection */
                         batadv_gw_bandwidth_to_kbit(orig_node->gw_flags,
                                                     &down, &up);

-                        tmp_gw_factor = (router->tq_avg * router->tq_avg *
-                                         down * 100 * 100) /
-                                        (BATADV_TQ_LOCAL_WINDOW_SIZE *
-                                         BATADV_TQ_LOCAL_WINDOW_SIZE * 64);
+                        tmp_gw_factor = tq_avg * tq_avg * down * 100 * 100;
+                        tmp_gw_factor /= gw_divisor;

                         if ((tmp_gw_factor > max_gw_factor) ||
                             ((tmp_gw_factor == max_gw_factor) &&
-                             (router->tq_avg > max_tq))) {
+                             (tq_avg > max_tq))) {
                                 if (curr_gw)
                                         batadv_gw_node_free_ref(curr_gw);
                                 curr_gw = gw_node;
@@ -161,7 +166,7 @@ batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
                          * soon as a better gateway appears which has
                          * $routing_class more tq points)
                          */
-                        if (router->tq_avg > max_tq) {
+                        if (tq_avg > max_tq) {
                                 if (curr_gw)
                                         batadv_gw_node_free_ref(curr_gw);
                                 curr_gw = gw_node;
@@ -170,8 +175,8 @@ batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
                         break;
                 }

-                if (router->tq_avg > max_tq)
-                        max_tq = router->tq_avg;
+                if (tq_avg > max_tq)
+                        max_tq = tq_avg;

                 if (tmp_gw_factor > max_gw_factor)
                         max_gw_factor = tmp_gw_factor;
@@ -202,7 +207,7 @@ void batadv_gw_election(struct batadv_priv *bat_priv)

         curr_gw = batadv_gw_get_selected_gw_node(bat_priv);

-        if (!batadv_atomic_dec_not_zero(&bat_priv->gw_reselect) && curr_gw)
+        if (!batadv_atomic_dec_not_zero(&bat_priv->gw.reselect) && curr_gw)
                 goto out;

         next_gw = batadv_gw_get_best_gw_node(bat_priv);
@@ -321,9 +326,9 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
         gw_node->orig_node = orig_node;
         atomic_set(&gw_node->refcount, 1);

-        spin_lock_bh(&bat_priv->gw_list_lock);
-        hlist_add_head_rcu(&gw_node->list, &bat_priv->gw_list);
-        spin_unlock_bh(&bat_priv->gw_list_lock);
+        spin_lock_bh(&bat_priv->gw.list_lock);
+        hlist_add_head_rcu(&gw_node->list, &bat_priv->gw.list);
+        spin_unlock_bh(&bat_priv->gw.list_lock);

         batadv_gw_bandwidth_to_kbit(new_gwflags, &down, &up);
         batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
@@ -350,7 +355,7 @@ void batadv_gw_node_update(struct batadv_priv *bat_priv,
         curr_gw = batadv_gw_get_selected_gw_node(bat_priv);

         rcu_read_lock();
-        hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
+        hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw.list, list) {
                 if (gw_node->orig_node != orig_node)
                         continue;

@@ -404,10 +409,10 @@ void batadv_gw_node_purge(struct batadv_priv *bat_priv)

         curr_gw = batadv_gw_get_selected_gw_node(bat_priv);

-        spin_lock_bh(&bat_priv->gw_list_lock);
+        spin_lock_bh(&bat_priv->gw.list_lock);

         hlist_for_each_entry_safe(gw_node, node, node_tmp,
-                                  &bat_priv->gw_list, list) {
+                                  &bat_priv->gw.list, list) {
                 if (((!gw_node->deleted) ||
                      (time_before(jiffies, gw_node->deleted + timeout))) &&
                     atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE)
@@ -420,7 +425,7 @@ void batadv_gw_node_purge(struct batadv_priv *bat_priv)
                 batadv_gw_node_free_ref(gw_node);
         }

-        spin_unlock_bh(&bat_priv->gw_list_lock);
+        spin_unlock_bh(&bat_priv->gw.list_lock);

         /* gw_deselect() needs to acquire the gw_list_lock */
         if (do_deselect)
@@ -496,7 +501,7 @@ int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset)
                    primary_if->net_dev->dev_addr, net_dev->name);

         rcu_read_lock();
-        hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
+        hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw.list, list) {
                 if (gw_node->deleted)
                         continue;

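Note on the refactored metric: in the "fast connection" case the change only restructures the arithmetic, hoisting the constant divisor (BATADV_TQ_LOCAL_WINDOW_SIZE squared, times 64) out of the loop and caching router->tq_avg in a local, so the selected gateway is unchanged. A minimal userspace sketch of the computation follows; the file name, helper name and test values are hypothetical, BATADV_TQ_LOCAL_WINDOW_SIZE mirrors the in-tree default of 64, and, unlike the 32-bit arithmetic in the diff, the intermediate product is widened to 64 bits here so large downlink values cannot overflow.

/* gw_metric.c - userspace sketch of the "fast connection" gateway
 * factor from batadv_gw_get_best_gw_node(); a hypothetical helper,
 * not the in-tree code.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define BATADV_TQ_LOCAL_WINDOW_SIZE 64

static uint32_t gw_factor(uint8_t tq_avg, uint32_t down_kbit)
{
        uint32_t gw_divisor;
        uint64_t tmp_gw_factor;

        /* constant divisor, computed once as in the refactored loop */
        gw_divisor = BATADV_TQ_LOCAL_WINDOW_SIZE * BATADV_TQ_LOCAL_WINDOW_SIZE;
        gw_divisor *= 64;

        /* squared TQ weights the path quality, down_kbit scales by
         * advertised bandwidth; widened to 64 bits here (an assumption,
         * the diff keeps 32-bit arithmetic) to avoid overflow
         */
        tmp_gw_factor = (uint64_t)tq_avg * tq_avg * down_kbit * 100 * 100;
        tmp_gw_factor /= gw_divisor;

        return (uint32_t)tmp_gw_factor;
}

int main(void)
{
        /* hypothetical test values: perfect TQ, 2048 kbit/s downlink */
        printf("%" PRIu32 "\n", gw_factor(255, 2048));
        return 0;
}

Compiled with e.g. "cc -o gw_metric gw_metric.c", a perfect TQ of 255 on a 2048 kbit/s downlink prints 5080078.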