Diffstat (limited to 'net/batman-adv/hard-interface.c')
-rw-r--r--	net/batman-adv/hard-interface.c	420
1 file changed, 218 insertions, 202 deletions
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 4f95777ce080..b3058e46ee6b 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -31,36 +31,40 @@
 
 #include <linux/if_arp.h>
 
-/* protect update critical side of if_list - but not the content */
-static DEFINE_SPINLOCK(if_list_lock);
+/* protect update critical side of hardif_list - but not the content */
+static DEFINE_SPINLOCK(hardif_list_lock);
 
-static void hardif_free_rcu(struct rcu_head *rcu)
+
+static int batman_skb_recv(struct sk_buff *skb,
+			   struct net_device *dev,
+			   struct packet_type *ptype,
+			   struct net_device *orig_dev);
+
+void hardif_free_rcu(struct rcu_head *rcu)
 {
-	struct batman_if *batman_if;
+	struct hard_iface *hard_iface;
 
-	batman_if = container_of(rcu, struct batman_if, rcu);
-	dev_put(batman_if->net_dev);
-	kref_put(&batman_if->refcount, hardif_free_ref);
+	hard_iface = container_of(rcu, struct hard_iface, rcu);
+	dev_put(hard_iface->net_dev);
+	kfree(hard_iface);
 }
 
-struct batman_if *get_batman_if_by_netdev(struct net_device *net_dev)
+struct hard_iface *hardif_get_by_netdev(struct net_device *net_dev)
 {
-	struct batman_if *batman_if;
+	struct hard_iface *hard_iface;
 
 	rcu_read_lock();
-	list_for_each_entry_rcu(batman_if, &if_list, list) {
-		if (batman_if->net_dev == net_dev)
+	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
+		if (hard_iface->net_dev == net_dev &&
+		    atomic_inc_not_zero(&hard_iface->refcount))
 			goto out;
 	}
 
-	batman_if = NULL;
+	hard_iface = NULL;
 
 out:
-	if (batman_if)
-		kref_get(&batman_if->refcount);
-
 	rcu_read_unlock();
-	return batman_if;
+	return hard_iface;
 }
 
 static int is_valid_iface(struct net_device *net_dev)
@@ -75,13 +79,8 @@ static int is_valid_iface(struct net_device *net_dev)
 		return 0;
 
 	/* no batman over batman */
-#ifdef HAVE_NET_DEVICE_OPS
-	if (net_dev->netdev_ops->ndo_start_xmit == interface_tx)
-		return 0;
-#else
-	if (net_dev->hard_start_xmit == interface_tx)
+	if (softif_is_valid(net_dev))
 		return 0;
-#endif
 
 	/* Device is being bridged */
 	/* if (net_dev->priv_flags & IFF_BRIDGE_PORT)
@@ -90,27 +89,25 @@ static int is_valid_iface(struct net_device *net_dev)
 	return 1;
 }
 
-static struct batman_if *get_active_batman_if(struct net_device *soft_iface)
+static struct hard_iface *hardif_get_active(struct net_device *soft_iface)
 {
-	struct batman_if *batman_if;
+	struct hard_iface *hard_iface;
 
 	rcu_read_lock();
-	list_for_each_entry_rcu(batman_if, &if_list, list) {
-		if (batman_if->soft_iface != soft_iface)
+	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
+		if (hard_iface->soft_iface != soft_iface)
 			continue;
 
-		if (batman_if->if_status == IF_ACTIVE)
+		if (hard_iface->if_status == IF_ACTIVE &&
+		    atomic_inc_not_zero(&hard_iface->refcount))
 			goto out;
 	}
 
-	batman_if = NULL;
+	hard_iface = NULL;
 
 out:
-	if (batman_if)
-		kref_get(&batman_if->refcount);
-
 	rcu_read_unlock();
-	return batman_if;
+	return hard_iface;
 }
 
 static void update_primary_addr(struct bat_priv *bat_priv)
@@ -126,24 +123,24 @@ static void update_primary_addr(struct bat_priv *bat_priv)
 }
 
 static void set_primary_if(struct bat_priv *bat_priv,
-			   struct batman_if *batman_if)
+			   struct hard_iface *hard_iface)
 {
 	struct batman_packet *batman_packet;
-	struct batman_if *old_if;
+	struct hard_iface *old_if;
 
-	if (batman_if)
-		kref_get(&batman_if->refcount);
+	if (hard_iface && !atomic_inc_not_zero(&hard_iface->refcount))
+		hard_iface = NULL;
 
 	old_if = bat_priv->primary_if;
-	bat_priv->primary_if = batman_if;
+	bat_priv->primary_if = hard_iface;
 
 	if (old_if)
-		kref_put(&old_if->refcount, hardif_free_ref);
+		hardif_free_ref(old_if);
 
 	if (!bat_priv->primary_if)
 		return;
 
-	batman_packet = (struct batman_packet *)(batman_if->packet_buff);
+	batman_packet = (struct batman_packet *)(hard_iface->packet_buff);
 	batman_packet->flags = PRIMARIES_FIRST_HOP;
 	batman_packet->ttl = TTL;
 
@@ -156,42 +153,42 @@ static void set_primary_if(struct bat_priv *bat_priv,
 	atomic_set(&bat_priv->hna_local_changed, 1);
 }
 
-static bool hardif_is_iface_up(struct batman_if *batman_if)
+static bool hardif_is_iface_up(struct hard_iface *hard_iface)
 {
-	if (batman_if->net_dev->flags & IFF_UP)
+	if (hard_iface->net_dev->flags & IFF_UP)
 		return true;
 
 	return false;
 }
 
-static void update_mac_addresses(struct batman_if *batman_if)
+static void update_mac_addresses(struct hard_iface *hard_iface)
 {
-	memcpy(((struct batman_packet *)(batman_if->packet_buff))->orig,
-	       batman_if->net_dev->dev_addr, ETH_ALEN);
-	memcpy(((struct batman_packet *)(batman_if->packet_buff))->prev_sender,
-	       batman_if->net_dev->dev_addr, ETH_ALEN);
+	memcpy(((struct batman_packet *)(hard_iface->packet_buff))->orig,
+	       hard_iface->net_dev->dev_addr, ETH_ALEN);
+	memcpy(((struct batman_packet *)(hard_iface->packet_buff))->prev_sender,
+	       hard_iface->net_dev->dev_addr, ETH_ALEN);
 }
 
 static void check_known_mac_addr(struct net_device *net_dev)
 {
-	struct batman_if *batman_if;
+	struct hard_iface *hard_iface;
 
 	rcu_read_lock();
-	list_for_each_entry_rcu(batman_if, &if_list, list) {
-		if ((batman_if->if_status != IF_ACTIVE) &&
-		    (batman_if->if_status != IF_TO_BE_ACTIVATED))
+	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
+		if ((hard_iface->if_status != IF_ACTIVE) &&
+		    (hard_iface->if_status != IF_TO_BE_ACTIVATED))
 			continue;
 
-		if (batman_if->net_dev == net_dev)
+		if (hard_iface->net_dev == net_dev)
 			continue;
 
-		if (!compare_orig(batman_if->net_dev->dev_addr,
+		if (!compare_eth(hard_iface->net_dev->dev_addr,
 				 net_dev->dev_addr))
 			continue;
 
 		pr_warning("The newly added mac address (%pM) already exists "
 			   "on: %s\n", net_dev->dev_addr,
-			   batman_if->net_dev->name);
+			   hard_iface->net_dev->name);
 		pr_warning("It is strongly recommended to keep mac addresses "
 			   "unique to avoid problems!\n");
 	}
@@ -201,7 +198,7 @@ static void check_known_mac_addr(struct net_device *net_dev)
 int hardif_min_mtu(struct net_device *soft_iface)
 {
 	struct bat_priv *bat_priv = netdev_priv(soft_iface);
-	struct batman_if *batman_if;
+	struct hard_iface *hard_iface;
 	/* allow big frames if all devices are capable to do so
 	 * (have MTU > 1500 + BAT_HEADER_LEN) */
 	int min_mtu = ETH_DATA_LEN;
@@ -210,15 +207,15 @@ int hardif_min_mtu(struct net_device *soft_iface)
 		goto out;
 
 	rcu_read_lock();
-	list_for_each_entry_rcu(batman_if, &if_list, list) {
-		if ((batman_if->if_status != IF_ACTIVE) &&
-		    (batman_if->if_status != IF_TO_BE_ACTIVATED))
+	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
+		if ((hard_iface->if_status != IF_ACTIVE) &&
+		    (hard_iface->if_status != IF_TO_BE_ACTIVATED))
 			continue;
 
-		if (batman_if->soft_iface != soft_iface)
+		if (hard_iface->soft_iface != soft_iface)
 			continue;
 
-		min_mtu = min_t(int, batman_if->net_dev->mtu - BAT_HEADER_LEN,
+		min_mtu = min_t(int, hard_iface->net_dev->mtu - BAT_HEADER_LEN,
 				min_mtu);
 	}
 	rcu_read_unlock();
@@ -236,77 +233,95 @@ void update_min_mtu(struct net_device *soft_iface)
 		soft_iface->mtu = min_mtu;
 }
 
-static void hardif_activate_interface(struct batman_if *batman_if)
+static void hardif_activate_interface(struct hard_iface *hard_iface)
 {
 	struct bat_priv *bat_priv;
 
-	if (batman_if->if_status != IF_INACTIVE)
+	if (hard_iface->if_status != IF_INACTIVE)
 		return;
 
-	bat_priv = netdev_priv(batman_if->soft_iface);
+	bat_priv = netdev_priv(hard_iface->soft_iface);
 
-	update_mac_addresses(batman_if);
-	batman_if->if_status = IF_TO_BE_ACTIVATED;
+	update_mac_addresses(hard_iface);
+	hard_iface->if_status = IF_TO_BE_ACTIVATED;
 
 	/**
 	 * the first active interface becomes our primary interface or
 	 * the next active interface after the old primay interface was removed
 	 */
 	if (!bat_priv->primary_if)
-		set_primary_if(bat_priv, batman_if);
+		set_primary_if(bat_priv, hard_iface);
 
-	bat_info(batman_if->soft_iface, "Interface activated: %s\n",
-		 batman_if->net_dev->name);
+	bat_info(hard_iface->soft_iface, "Interface activated: %s\n",
+		 hard_iface->net_dev->name);
 
-	update_min_mtu(batman_if->soft_iface);
+	update_min_mtu(hard_iface->soft_iface);
 	return;
 }
 
-static void hardif_deactivate_interface(struct batman_if *batman_if)
+static void hardif_deactivate_interface(struct hard_iface *hard_iface)
 {
-	if ((batman_if->if_status != IF_ACTIVE) &&
-	    (batman_if->if_status != IF_TO_BE_ACTIVATED))
+	if ((hard_iface->if_status != IF_ACTIVE) &&
+	    (hard_iface->if_status != IF_TO_BE_ACTIVATED))
 		return;
 
-	batman_if->if_status = IF_INACTIVE;
+	hard_iface->if_status = IF_INACTIVE;
 
-	bat_info(batman_if->soft_iface, "Interface deactivated: %s\n",
-		 batman_if->net_dev->name);
+	bat_info(hard_iface->soft_iface, "Interface deactivated: %s\n",
+		 hard_iface->net_dev->name);
 
-	update_min_mtu(batman_if->soft_iface);
+	update_min_mtu(hard_iface->soft_iface);
 }
 
-int hardif_enable_interface(struct batman_if *batman_if, char *iface_name)
+int hardif_enable_interface(struct hard_iface *hard_iface, char *iface_name)
 {
 	struct bat_priv *bat_priv;
 	struct batman_packet *batman_packet;
+	struct net_device *soft_iface;
+	int ret;
 
-	if (batman_if->if_status != IF_NOT_IN_USE)
+	if (hard_iface->if_status != IF_NOT_IN_USE)
 		goto out;
 
-	batman_if->soft_iface = dev_get_by_name(&init_net, iface_name);
+	if (!atomic_inc_not_zero(&hard_iface->refcount))
+		goto out;
 
-	if (!batman_if->soft_iface) {
-		batman_if->soft_iface = softif_create(iface_name);
+	soft_iface = dev_get_by_name(&init_net, iface_name);
 
-		if (!batman_if->soft_iface)
+	if (!soft_iface) {
+		soft_iface = softif_create(iface_name);
+
+		if (!soft_iface) {
+			ret = -ENOMEM;
 			goto err;
+		}
 
 		/* dev_get_by_name() increases the reference counter for us */
-		dev_hold(batman_if->soft_iface);
+		dev_hold(soft_iface);
+	}
+
+	if (!softif_is_valid(soft_iface)) {
+		pr_err("Can't create batman mesh interface %s: "
+		       "already exists as regular interface\n",
+		       soft_iface->name);
+		dev_put(soft_iface);
+		ret = -EINVAL;
+		goto err;
 	}
 
-	bat_priv = netdev_priv(batman_if->soft_iface);
-	batman_if->packet_len = BAT_PACKET_LEN;
-	batman_if->packet_buff = kmalloc(batman_if->packet_len, GFP_ATOMIC);
+	hard_iface->soft_iface = soft_iface;
+	bat_priv = netdev_priv(hard_iface->soft_iface);
+	hard_iface->packet_len = BAT_PACKET_LEN;
+	hard_iface->packet_buff = kmalloc(hard_iface->packet_len, GFP_ATOMIC);
 
-	if (!batman_if->packet_buff) {
-		bat_err(batman_if->soft_iface, "Can't add interface packet "
-			"(%s): out of memory\n", batman_if->net_dev->name);
+	if (!hard_iface->packet_buff) {
+		bat_err(hard_iface->soft_iface, "Can't add interface packet "
			"(%s): out of memory\n", hard_iface->net_dev->name);
+		ret = -ENOMEM;
 		goto err;
 	}
 
-	batman_packet = (struct batman_packet *)(batman_if->packet_buff);
+	batman_packet = (struct batman_packet *)(hard_iface->packet_buff);
 	batman_packet->packet_type = BAT_PACKET;
 	batman_packet->version = COMPAT_VERSION;
 	batman_packet->flags = 0;
@@ -314,107 +329,107 @@ int hardif_enable_interface(struct batman_if *batman_if, char *iface_name)
 	batman_packet->tq = TQ_MAX_VALUE;
 	batman_packet->num_hna = 0;
 
-	batman_if->if_num = bat_priv->num_ifaces;
+	hard_iface->if_num = bat_priv->num_ifaces;
 	bat_priv->num_ifaces++;
-	batman_if->if_status = IF_INACTIVE;
-	orig_hash_add_if(batman_if, bat_priv->num_ifaces);
+	hard_iface->if_status = IF_INACTIVE;
+	orig_hash_add_if(hard_iface, bat_priv->num_ifaces);
 
-	batman_if->batman_adv_ptype.type = __constant_htons(ETH_P_BATMAN);
-	batman_if->batman_adv_ptype.func = batman_skb_recv;
-	batman_if->batman_adv_ptype.dev = batman_if->net_dev;
-	kref_get(&batman_if->refcount);
-	dev_add_pack(&batman_if->batman_adv_ptype);
+	hard_iface->batman_adv_ptype.type = __constant_htons(ETH_P_BATMAN);
+	hard_iface->batman_adv_ptype.func = batman_skb_recv;
+	hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
+	dev_add_pack(&hard_iface->batman_adv_ptype);
 
-	atomic_set(&batman_if->seqno, 1);
-	atomic_set(&batman_if->frag_seqno, 1);
-	bat_info(batman_if->soft_iface, "Adding interface: %s\n",
-		 batman_if->net_dev->name);
+	atomic_set(&hard_iface->seqno, 1);
+	atomic_set(&hard_iface->frag_seqno, 1);
+	bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
+		 hard_iface->net_dev->name);
 
-	if (atomic_read(&bat_priv->fragmentation) && batman_if->net_dev->mtu <
+	if (atomic_read(&bat_priv->fragmentation) && hard_iface->net_dev->mtu <
 		ETH_DATA_LEN + BAT_HEADER_LEN)
-		bat_info(batman_if->soft_iface,
+		bat_info(hard_iface->soft_iface,
 			 "The MTU of interface %s is too small (%i) to handle "
 			 "the transport of batman-adv packets. Packets going "
 			 "over this interface will be fragmented on layer2 "
 			 "which could impact the performance. Setting the MTU "
 			 "to %zi would solve the problem.\n",
-			 batman_if->net_dev->name, batman_if->net_dev->mtu,
+			 hard_iface->net_dev->name, hard_iface->net_dev->mtu,
 			 ETH_DATA_LEN + BAT_HEADER_LEN);
 
-	if (!atomic_read(&bat_priv->fragmentation) && batman_if->net_dev->mtu <
+	if (!atomic_read(&bat_priv->fragmentation) && hard_iface->net_dev->mtu <
 		ETH_DATA_LEN + BAT_HEADER_LEN)
-		bat_info(batman_if->soft_iface,
+		bat_info(hard_iface->soft_iface,
 			 "The MTU of interface %s is too small (%i) to handle "
 			 "the transport of batman-adv packets. If you experience"
 			 " problems getting traffic through try increasing the "
 			 "MTU to %zi.\n",
-			 batman_if->net_dev->name, batman_if->net_dev->mtu,
+			 hard_iface->net_dev->name, hard_iface->net_dev->mtu,
 			 ETH_DATA_LEN + BAT_HEADER_LEN);
 
-	if (hardif_is_iface_up(batman_if))
-		hardif_activate_interface(batman_if);
+	if (hardif_is_iface_up(hard_iface))
+		hardif_activate_interface(hard_iface);
 	else
-		bat_err(batman_if->soft_iface, "Not using interface %s "
+		bat_err(hard_iface->soft_iface, "Not using interface %s "
 			"(retrying later): interface not active\n",
-			batman_if->net_dev->name);
+			hard_iface->net_dev->name);
 
 	/* begin scheduling originator messages on that interface */
-	schedule_own_packet(batman_if);
+	schedule_own_packet(hard_iface);
 
 out:
 	return 0;
 
 err:
-	return -ENOMEM;
+	hardif_free_ref(hard_iface);
+	return ret;
 }
 
-void hardif_disable_interface(struct batman_if *batman_if)
+void hardif_disable_interface(struct hard_iface *hard_iface)
 {
-	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
+	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
 
-	if (batman_if->if_status == IF_ACTIVE)
-		hardif_deactivate_interface(batman_if);
+	if (hard_iface->if_status == IF_ACTIVE)
+		hardif_deactivate_interface(hard_iface);
 
-	if (batman_if->if_status != IF_INACTIVE)
+	if (hard_iface->if_status != IF_INACTIVE)
 		return;
 
-	bat_info(batman_if->soft_iface, "Removing interface: %s\n",
-		 batman_if->net_dev->name);
-	dev_remove_pack(&batman_if->batman_adv_ptype);
-	kref_put(&batman_if->refcount, hardif_free_ref);
+	bat_info(hard_iface->soft_iface, "Removing interface: %s\n",
+		 hard_iface->net_dev->name);
+	dev_remove_pack(&hard_iface->batman_adv_ptype);
 
 	bat_priv->num_ifaces--;
-	orig_hash_del_if(batman_if, bat_priv->num_ifaces);
+	orig_hash_del_if(hard_iface, bat_priv->num_ifaces);
 
-	if (batman_if == bat_priv->primary_if) {
-		struct batman_if *new_if;
+	if (hard_iface == bat_priv->primary_if) {
+		struct hard_iface *new_if;
 
-		new_if = get_active_batman_if(batman_if->soft_iface);
+		new_if = hardif_get_active(hard_iface->soft_iface);
 		set_primary_if(bat_priv, new_if);
 
 		if (new_if)
-			kref_put(&new_if->refcount, hardif_free_ref);
+			hardif_free_ref(new_if);
 	}
 
-	kfree(batman_if->packet_buff);
-	batman_if->packet_buff = NULL;
-	batman_if->if_status = IF_NOT_IN_USE;
+	kfree(hard_iface->packet_buff);
+	hard_iface->packet_buff = NULL;
+	hard_iface->if_status = IF_NOT_IN_USE;
 
-	/* delete all references to this batman_if */
+	/* delete all references to this hard_iface */
 	purge_orig_ref(bat_priv);
-	purge_outstanding_packets(bat_priv, batman_if);
-	dev_put(batman_if->soft_iface);
+	purge_outstanding_packets(bat_priv, hard_iface);
+	dev_put(hard_iface->soft_iface);
 
 	/* nobody uses this interface anymore */
 	if (!bat_priv->num_ifaces)
-		softif_destroy(batman_if->soft_iface);
+		softif_destroy(hard_iface->soft_iface);
 
-	batman_if->soft_iface = NULL;
+	hard_iface->soft_iface = NULL;
+	hardif_free_ref(hard_iface);
 }
 
-static struct batman_if *hardif_add_interface(struct net_device *net_dev)
+static struct hard_iface *hardif_add_interface(struct net_device *net_dev)
 {
-	struct batman_if *batman_if;
+	struct hard_iface *hard_iface;
 	int ret;
 
 	ret = is_valid_iface(net_dev);
@@ -423,73 +438,73 @@ static struct batman_if *hardif_add_interface(struct net_device *net_dev)
 
 	dev_hold(net_dev);
 
-	batman_if = kmalloc(sizeof(struct batman_if), GFP_ATOMIC);
-	if (!batman_if) {
+	hard_iface = kmalloc(sizeof(struct hard_iface), GFP_ATOMIC);
+	if (!hard_iface) {
 		pr_err("Can't add interface (%s): out of memory\n",
 		       net_dev->name);
 		goto release_dev;
 	}
 
-	ret = sysfs_add_hardif(&batman_if->hardif_obj, net_dev);
+	ret = sysfs_add_hardif(&hard_iface->hardif_obj, net_dev);
 	if (ret)
 		goto free_if;
 
-	batman_if->if_num = -1;
-	batman_if->net_dev = net_dev;
-	batman_if->soft_iface = NULL;
-	batman_if->if_status = IF_NOT_IN_USE;
-	INIT_LIST_HEAD(&batman_if->list);
-	kref_init(&batman_if->refcount);
+	hard_iface->if_num = -1;
+	hard_iface->net_dev = net_dev;
+	hard_iface->soft_iface = NULL;
+	hard_iface->if_status = IF_NOT_IN_USE;
+	INIT_LIST_HEAD(&hard_iface->list);
+	/* extra reference for return */
+	atomic_set(&hard_iface->refcount, 2);
 
-	check_known_mac_addr(batman_if->net_dev);
+	check_known_mac_addr(hard_iface->net_dev);
 
-	spin_lock(&if_list_lock);
-	list_add_tail_rcu(&batman_if->list, &if_list);
-	spin_unlock(&if_list_lock);
+	spin_lock(&hardif_list_lock);
+	list_add_tail_rcu(&hard_iface->list, &hardif_list);
+	spin_unlock(&hardif_list_lock);
 
-	/* extra reference for return */
-	kref_get(&batman_if->refcount);
-	return batman_if;
+	return hard_iface;
 
 free_if:
-	kfree(batman_if);
+	kfree(hard_iface);
 release_dev:
 	dev_put(net_dev);
 out:
 	return NULL;
 }
 
-static void hardif_remove_interface(struct batman_if *batman_if)
+static void hardif_remove_interface(struct hard_iface *hard_iface)
 {
 	/* first deactivate interface */
-	if (batman_if->if_status != IF_NOT_IN_USE)
-		hardif_disable_interface(batman_if);
+	if (hard_iface->if_status != IF_NOT_IN_USE)
+		hardif_disable_interface(hard_iface);
 
-	if (batman_if->if_status != IF_NOT_IN_USE)
+	if (hard_iface->if_status != IF_NOT_IN_USE)
 		return;
 
-	batman_if->if_status = IF_TO_BE_REMOVED;
-	sysfs_del_hardif(&batman_if->hardif_obj);
-	call_rcu(&batman_if->rcu, hardif_free_rcu);
+	hard_iface->if_status = IF_TO_BE_REMOVED;
+	sysfs_del_hardif(&hard_iface->hardif_obj);
+	hardif_free_ref(hard_iface);
 }
 
 void hardif_remove_interfaces(void)
 {
-	struct batman_if *batman_if, *batman_if_tmp;
+	struct hard_iface *hard_iface, *hard_iface_tmp;
 	struct list_head if_queue;
 
 	INIT_LIST_HEAD(&if_queue);
 
-	spin_lock(&if_list_lock);
-	list_for_each_entry_safe(batman_if, batman_if_tmp, &if_list, list) {
-		list_del_rcu(&batman_if->list);
-		list_add_tail(&batman_if->list, &if_queue);
+	spin_lock(&hardif_list_lock);
+	list_for_each_entry_safe(hard_iface, hard_iface_tmp,
+				 &hardif_list, list) {
+		list_del_rcu(&hard_iface->list);
+		list_add_tail(&hard_iface->list, &if_queue);
 	}
-	spin_unlock(&if_list_lock);
+	spin_unlock(&hardif_list_lock);
 
 	rtnl_lock();
-	list_for_each_entry_safe(batman_if, batman_if_tmp, &if_queue, list) {
-		hardif_remove_interface(batman_if);
+	list_for_each_entry_safe(hard_iface, hard_iface_tmp, &if_queue, list) {
+		hardif_remove_interface(hard_iface);
 	}
 	rtnl_unlock();
 }
@@ -498,43 +513,43 @@ static int hard_if_event(struct notifier_block *this,
 			 unsigned long event, void *ptr)
 {
 	struct net_device *net_dev = (struct net_device *)ptr;
-	struct batman_if *batman_if = get_batman_if_by_netdev(net_dev);
+	struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev);
 	struct bat_priv *bat_priv;
 
-	if (!batman_if && event == NETDEV_REGISTER)
-		batman_if = hardif_add_interface(net_dev);
+	if (!hard_iface && event == NETDEV_REGISTER)
+		hard_iface = hardif_add_interface(net_dev);
 
-	if (!batman_if)
+	if (!hard_iface)
 		goto out;
 
 	switch (event) {
 	case NETDEV_UP:
-		hardif_activate_interface(batman_if);
+		hardif_activate_interface(hard_iface);
 		break;
 	case NETDEV_GOING_DOWN:
 	case NETDEV_DOWN:
-		hardif_deactivate_interface(batman_if);
+		hardif_deactivate_interface(hard_iface);
 		break;
 	case NETDEV_UNREGISTER:
-		spin_lock(&if_list_lock);
-		list_del_rcu(&batman_if->list);
-		spin_unlock(&if_list_lock);
+		spin_lock(&hardif_list_lock);
+		list_del_rcu(&hard_iface->list);
+		spin_unlock(&hardif_list_lock);
 
-		hardif_remove_interface(batman_if);
+		hardif_remove_interface(hard_iface);
 		break;
 	case NETDEV_CHANGEMTU:
-		if (batman_if->soft_iface)
-			update_min_mtu(batman_if->soft_iface);
+		if (hard_iface->soft_iface)
+			update_min_mtu(hard_iface->soft_iface);
 		break;
 	case NETDEV_CHANGEADDR:
-		if (batman_if->if_status == IF_NOT_IN_USE)
+		if (hard_iface->if_status == IF_NOT_IN_USE)
 			goto hardif_put;
 
-		check_known_mac_addr(batman_if->net_dev);
-		update_mac_addresses(batman_if);
+		check_known_mac_addr(hard_iface->net_dev);
+		update_mac_addresses(hard_iface);
 
-		bat_priv = netdev_priv(batman_if->soft_iface);
-		if (batman_if == bat_priv->primary_if)
+		bat_priv = netdev_priv(hard_iface->soft_iface);
+		if (hard_iface == bat_priv->primary_if)
 			update_primary_addr(bat_priv);
 		break;
 	default:
@@ -542,22 +557,23 @@ static int hard_if_event(struct notifier_block *this,
 	};
 
 hardif_put:
-	kref_put(&batman_if->refcount, hardif_free_ref);
+	hardif_free_ref(hard_iface);
 out:
 	return NOTIFY_DONE;
 }
 
 /* receive a packet with the batman ethertype coming on a hard
  * interface */
-int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
-		    struct packet_type *ptype, struct net_device *orig_dev)
+static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
+			   struct packet_type *ptype,
+			   struct net_device *orig_dev)
 {
 	struct bat_priv *bat_priv;
 	struct batman_packet *batman_packet;
-	struct batman_if *batman_if;
+	struct hard_iface *hard_iface;
 	int ret;
 
-	batman_if = container_of(ptype, struct batman_if, batman_adv_ptype);
+	hard_iface = container_of(ptype, struct hard_iface, batman_adv_ptype);
 	skb = skb_share_check(skb, GFP_ATOMIC);
 
 	/* skb was released by skb_share_check() */
@@ -573,16 +589,16 @@ int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
 				|| !skb_mac_header(skb)))
 		goto err_free;
 
-	if (!batman_if->soft_iface)
+	if (!hard_iface->soft_iface)
 		goto err_free;
 
-	bat_priv = netdev_priv(batman_if->soft_iface);
+	bat_priv = netdev_priv(hard_iface->soft_iface);
 
 	if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
 		goto err_free;
 
 	/* discard frames on not active interfaces */
-	if (batman_if->if_status != IF_ACTIVE)
+	if (hard_iface->if_status != IF_ACTIVE)
 		goto err_free;
 
 	batman_packet = (struct batman_packet *)skb->data;
@@ -600,32 +616,32 @@ int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
 	switch (batman_packet->packet_type) {
 		/* batman originator packet */
 	case BAT_PACKET:
-		ret = recv_bat_packet(skb, batman_if);
+		ret = recv_bat_packet(skb, hard_iface);
 		break;
 
 		/* batman icmp packet */
 	case BAT_ICMP:
-		ret = recv_icmp_packet(skb, batman_if);
+		ret = recv_icmp_packet(skb, hard_iface);
 		break;
 
 		/* unicast packet */
 	case BAT_UNICAST:
-		ret = recv_unicast_packet(skb, batman_if);
+		ret = recv_unicast_packet(skb, hard_iface);
 		break;
 
 		/* fragmented unicast packet */
 	case BAT_UNICAST_FRAG:
-		ret = recv_ucast_frag_packet(skb, batman_if);
+		ret = recv_ucast_frag_packet(skb, hard_iface);
 		break;
 
 		/* broadcast packet */
 	case BAT_BCAST:
-		ret = recv_bcast_packet(skb, batman_if);
+		ret = recv_bcast_packet(skb, hard_iface);
 		break;
 
 		/* vis packet */
 	case BAT_VIS:
-		ret = recv_vis_packet(skb, batman_if);
+		ret = recv_vis_packet(skb, hard_iface);
 		break;
 	default:
 		ret = NET_RX_DROP;