Diffstat (limited to 'net/batman-adv')
-rw-r--r--  net/batman-adv/aggregation.c    |   8
-rw-r--r--  net/batman-adv/aggregation.h    |   4
-rw-r--r--  net/batman-adv/bat_sysfs.c      |  41
-rw-r--r--  net/batman-adv/hard-interface.c | 353
-rw-r--r--  net/batman-adv/hard-interface.h |  12
-rw-r--r--  net/batman-adv/main.c           |   8
-rw-r--r--  net/batman-adv/originator.c     |  28
-rw-r--r--  net/batman-adv/originator.h     |   6
-rw-r--r--  net/batman-adv/routing.c        |  48
-rw-r--r--  net/batman-adv/routing.h        |  20
-rw-r--r--  net/batman-adv/send.c           | 101
-rw-r--r--  net/batman-adv/send.h           |   8
-rw-r--r--  net/batman-adv/soft-interface.c |   2
-rw-r--r--  net/batman-adv/soft-interface.h |   2
-rw-r--r--  net/batman-adv/types.h          |   8
-rw-r--r--  net/batman-adv/unicast.c        |   8
-rw-r--r--  net/batman-adv/unicast.h        |   2
-rw-r--r--  net/batman-adv/vis.c            |   6
18 files changed, 335 insertions, 330 deletions
diff --git a/net/batman-adv/aggregation.c b/net/batman-adv/aggregation.c
index 1997725a243b..af45d6b2031f 100644
--- a/net/batman-adv/aggregation.c
+++ b/net/batman-adv/aggregation.c
@@ -35,7 +35,7 @@ static bool can_aggregate_with(struct batman_packet *new_batman_packet,
 			       int packet_len,
 			       unsigned long send_time,
 			       bool directlink,
-			       struct batman_if *if_incoming,
+			       struct hard_iface *if_incoming,
 			       struct forw_packet *forw_packet)
 {
 	struct batman_packet *batman_packet =
@@ -99,7 +99,7 @@ static bool can_aggregate_with(struct batman_packet *new_batman_packet,
 /* create a new aggregated packet and add this packet to it */
 static void new_aggregated_packet(unsigned char *packet_buff, int packet_len,
 				  unsigned long send_time, bool direct_link,
-				  struct batman_if *if_incoming,
+				  struct hard_iface *if_incoming,
 				  int own_packet)
 {
 	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
@@ -188,7 +188,7 @@ static void aggregate(struct forw_packet *forw_packet_aggr,
 
 void add_bat_packet_to_list(struct bat_priv *bat_priv,
 			    unsigned char *packet_buff, int packet_len,
-			    struct batman_if *if_incoming, char own_packet,
+			    struct hard_iface *if_incoming, char own_packet,
 			    unsigned long send_time)
 {
 	/**
@@ -247,7 +247,7 @@ void add_bat_packet_to_list(struct bat_priv *bat_priv,
 
 /* unpack the aggregated packets and process them one by one */
 void receive_aggr_bat_packet(struct ethhdr *ethhdr, unsigned char *packet_buff,
-			     int packet_len, struct batman_if *if_incoming)
+			     int packet_len, struct hard_iface *if_incoming)
 {
 	struct batman_packet *batman_packet;
 	int buff_pos = 0;
diff --git a/net/batman-adv/aggregation.h b/net/batman-adv/aggregation.h
index 6ce305b40017..062204289d1f 100644
--- a/net/batman-adv/aggregation.h
+++ b/net/batman-adv/aggregation.h
@@ -35,9 +35,9 @@ static inline int aggregated_packet(int buff_pos, int packet_len, int num_hna)
 
 void add_bat_packet_to_list(struct bat_priv *bat_priv,
 			    unsigned char *packet_buff, int packet_len,
-			    struct batman_if *if_incoming, char own_packet,
+			    struct hard_iface *if_incoming, char own_packet,
 			    unsigned long send_time);
 void receive_aggr_bat_packet(struct ethhdr *ethhdr, unsigned char *packet_buff,
-			     int packet_len, struct batman_if *if_incoming);
+			     int packet_len, struct hard_iface *if_incoming);
 
 #endif /* _NET_BATMAN_ADV_AGGREGATION_H_ */
diff --git a/net/batman-adv/bat_sysfs.c b/net/batman-adv/bat_sysfs.c
index 93ae20aaad0a..e449bf6353e0 100644
--- a/net/batman-adv/bat_sysfs.c
+++ b/net/batman-adv/bat_sysfs.c
@@ -441,16 +441,16 @@ static ssize_t show_mesh_iface(struct kobject *kobj, struct attribute *attr,
 			       char *buff)
 {
 	struct net_device *net_dev = kobj_to_netdev(kobj);
-	struct batman_if *batman_if = get_batman_if_by_netdev(net_dev);
+	struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev);
 	ssize_t length;
 
-	if (!batman_if)
+	if (!hard_iface)
 		return 0;
 
-	length = sprintf(buff, "%s\n", batman_if->if_status == IF_NOT_IN_USE ?
-			 "none" : batman_if->soft_iface->name);
+	length = sprintf(buff, "%s\n", hard_iface->if_status == IF_NOT_IN_USE ?
+			 "none" : hard_iface->soft_iface->name);
 
-	hardif_free_ref(batman_if);
+	hardif_free_ref(hard_iface);
 
 	return length;
 }
@@ -459,11 +459,11 @@ static ssize_t store_mesh_iface(struct kobject *kobj, struct attribute *attr,
 				char *buff, size_t count)
 {
 	struct net_device *net_dev = kobj_to_netdev(kobj);
-	struct batman_if *batman_if = get_batman_if_by_netdev(net_dev);
+	struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev);
 	int status_tmp = -1;
 	int ret = count;
 
-	if (!batman_if)
+	if (!hard_iface)
 		return count;
 
 	if (buff[count - 1] == '\n')
@@ -472,7 +472,7 @@ static ssize_t store_mesh_iface(struct kobject *kobj, struct attribute *attr,
 	if (strlen(buff) >= IFNAMSIZ) {
 		pr_err("Invalid parameter for 'mesh_iface' setting received: "
 		       "interface name too long '%s'\n", buff);
-		hardif_free_ref(batman_if);
+		hardif_free_ref(hard_iface);
 		return -EINVAL;
 	}
 
@@ -481,28 +481,31 @@ static ssize_t store_mesh_iface(struct kobject *kobj, struct attribute *attr,
 	else
 		status_tmp = IF_I_WANT_YOU;
 
-	if ((batman_if->if_status == status_tmp) || ((batman_if->soft_iface) &&
-	    (strncmp(batman_if->soft_iface->name, buff, IFNAMSIZ) == 0)))
+	if (hard_iface->if_status == status_tmp)
+		goto out;
+
+	if ((hard_iface->soft_iface) &&
+	    (strncmp(hard_iface->soft_iface->name, buff, IFNAMSIZ) == 0))
 		goto out;
 
 	if (status_tmp == IF_NOT_IN_USE) {
 		rtnl_lock();
-		hardif_disable_interface(batman_if);
+		hardif_disable_interface(hard_iface);
 		rtnl_unlock();
 		goto out;
 	}
 
 	/* if the interface already is in use */
-	if (batman_if->if_status != IF_NOT_IN_USE) {
+	if (hard_iface->if_status != IF_NOT_IN_USE) {
 		rtnl_lock();
-		hardif_disable_interface(batman_if);
+		hardif_disable_interface(hard_iface);
 		rtnl_unlock();
 	}
 
-	ret = hardif_enable_interface(batman_if, buff);
+	ret = hardif_enable_interface(hard_iface, buff);
 
 out:
-	hardif_free_ref(batman_if);
+	hardif_free_ref(hard_iface);
 	return ret;
 }
 
@@ -510,13 +513,13 @@ static ssize_t show_iface_status(struct kobject *kobj, struct attribute *attr,
 				 char *buff)
 {
 	struct net_device *net_dev = kobj_to_netdev(kobj);
-	struct batman_if *batman_if = get_batman_if_by_netdev(net_dev);
+	struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev);
 	ssize_t length;
 
-	if (!batman_if)
+	if (!hard_iface)
 		return 0;
 
-	switch (batman_if->if_status) {
+	switch (hard_iface->if_status) {
 	case IF_TO_BE_REMOVED:
 		length = sprintf(buff, "disabling\n");
 		break;
@@ -535,7 +538,7 @@ static ssize_t show_iface_status(struct kobject *kobj, struct attribute *attr,
 		break;
 	}
 
-	hardif_free_ref(batman_if);
+	hardif_free_ref(hard_iface);
 
 	return length;
 }
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 4a2e6e33ebc0..95a35b695700 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -42,29 +42,29 @@ static int batman_skb_recv(struct sk_buff *skb,
 
 void hardif_free_rcu(struct rcu_head *rcu)
 {
-	struct batman_if *batman_if;
+	struct hard_iface *hard_iface;
 
-	batman_if = container_of(rcu, struct batman_if, rcu);
-	dev_put(batman_if->net_dev);
-	kfree(batman_if);
+	hard_iface = container_of(rcu, struct hard_iface, rcu);
+	dev_put(hard_iface->net_dev);
+	kfree(hard_iface);
 }
 
-struct batman_if *get_batman_if_by_netdev(struct net_device *net_dev)
+struct hard_iface *hardif_get_by_netdev(struct net_device *net_dev)
 {
-	struct batman_if *batman_if;
+	struct hard_iface *hard_iface;
 
 	rcu_read_lock();
-	list_for_each_entry_rcu(batman_if, &hardif_list, list) {
-		if (batman_if->net_dev == net_dev &&
-		    atomic_inc_not_zero(&batman_if->refcount))
+	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
+		if (hard_iface->net_dev == net_dev &&
+		    atomic_inc_not_zero(&hard_iface->refcount))
 			goto out;
 	}
 
-	batman_if = NULL;
+	hard_iface = NULL;
 
 out:
 	rcu_read_unlock();
-	return batman_if;
+	return hard_iface;
 }
 
 static int is_valid_iface(struct net_device *net_dev)
@@ -94,25 +94,25 @@ static int is_valid_iface(struct net_device *net_dev)
 	return 1;
 }
 
-static struct batman_if *get_active_batman_if(struct net_device *soft_iface)
+static struct hard_iface *hardif_get_active(struct net_device *soft_iface)
 {
-	struct batman_if *batman_if;
+	struct hard_iface *hard_iface;
 
 	rcu_read_lock();
-	list_for_each_entry_rcu(batman_if, &hardif_list, list) {
-		if (batman_if->soft_iface != soft_iface)
+	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
+		if (hard_iface->soft_iface != soft_iface)
 			continue;
 
-		if (batman_if->if_status == IF_ACTIVE &&
-		    atomic_inc_not_zero(&batman_if->refcount))
+		if (hard_iface->if_status == IF_ACTIVE &&
+		    atomic_inc_not_zero(&hard_iface->refcount))
 			goto out;
 	}
 
-	batman_if = NULL;
+	hard_iface = NULL;
 
 out:
 	rcu_read_unlock();
-	return batman_if;
+	return hard_iface;
 }
 
 static void update_primary_addr(struct bat_priv *bat_priv)
@@ -128,16 +128,16 @@ static void update_primary_addr(struct bat_priv *bat_priv)
 }
 
 static void set_primary_if(struct bat_priv *bat_priv,
-			   struct batman_if *batman_if)
+			   struct hard_iface *hard_iface)
 {
 	struct batman_packet *batman_packet;
-	struct batman_if *old_if;
+	struct hard_iface *old_if;
 
-	if (batman_if && !atomic_inc_not_zero(&batman_if->refcount))
-		batman_if = NULL;
+	if (hard_iface && !atomic_inc_not_zero(&hard_iface->refcount))
+		hard_iface = NULL;
 
 	old_if = bat_priv->primary_if;
-	bat_priv->primary_if = batman_if;
+	bat_priv->primary_if = hard_iface;
 
 	if (old_if)
 		hardif_free_ref(old_if);
@@ -145,7 +145,7 @@ static void set_primary_if(struct bat_priv *bat_priv,
 	if (!bat_priv->primary_if)
 		return;
 
-	batman_packet = (struct batman_packet *)(batman_if->packet_buff);
+	batman_packet = (struct batman_packet *)(hard_iface->packet_buff);
 	batman_packet->flags = PRIMARIES_FIRST_HOP;
 	batman_packet->ttl = TTL;
 
@@ -158,42 +158,42 @@ static void set_primary_if(struct bat_priv *bat_priv,
 	atomic_set(&bat_priv->hna_local_changed, 1);
 }
 
-static bool hardif_is_iface_up(struct batman_if *batman_if)
+static bool hardif_is_iface_up(struct hard_iface *hard_iface)
 {
-	if (batman_if->net_dev->flags & IFF_UP)
+	if (hard_iface->net_dev->flags & IFF_UP)
 		return true;
 
 	return false;
 }
 
-static void update_mac_addresses(struct batman_if *batman_if)
+static void update_mac_addresses(struct hard_iface *hard_iface)
 {
-	memcpy(((struct batman_packet *)(batman_if->packet_buff))->orig,
-	       batman_if->net_dev->dev_addr, ETH_ALEN);
-	memcpy(((struct batman_packet *)(batman_if->packet_buff))->prev_sender,
-	       batman_if->net_dev->dev_addr, ETH_ALEN);
+	memcpy(((struct batman_packet *)(hard_iface->packet_buff))->orig,
+	       hard_iface->net_dev->dev_addr, ETH_ALEN);
+	memcpy(((struct batman_packet *)(hard_iface->packet_buff))->prev_sender,
+	       hard_iface->net_dev->dev_addr, ETH_ALEN);
 }
 
 static void check_known_mac_addr(struct net_device *net_dev)
 {
-	struct batman_if *batman_if;
+	struct hard_iface *hard_iface;
 
 	rcu_read_lock();
-	list_for_each_entry_rcu(batman_if, &hardif_list, list) {
-		if ((batman_if->if_status != IF_ACTIVE) &&
-		    (batman_if->if_status != IF_TO_BE_ACTIVATED))
+	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
+		if ((hard_iface->if_status != IF_ACTIVE) &&
+		    (hard_iface->if_status != IF_TO_BE_ACTIVATED))
 			continue;
 
-		if (batman_if->net_dev == net_dev)
+		if (hard_iface->net_dev == net_dev)
 			continue;
 
-		if (!compare_eth(batman_if->net_dev->dev_addr,
+		if (!compare_eth(hard_iface->net_dev->dev_addr,
 				 net_dev->dev_addr))
 			continue;
 
 		pr_warning("The newly added mac address (%pM) already exists "
 			   "on: %s\n", net_dev->dev_addr,
-			   batman_if->net_dev->name);
+			   hard_iface->net_dev->name);
 		pr_warning("It is strongly recommended to keep mac addresses "
 			   "unique to avoid problems!\n");
 	}
@@ -203,7 +203,7 @@ static void check_known_mac_addr(struct net_device *net_dev)
 int hardif_min_mtu(struct net_device *soft_iface)
 {
 	struct bat_priv *bat_priv = netdev_priv(soft_iface);
-	struct batman_if *batman_if;
+	struct hard_iface *hard_iface;
 	/* allow big frames if all devices are capable to do so
 	 * (have MTU > 1500 + BAT_HEADER_LEN) */
 	int min_mtu = ETH_DATA_LEN;
@@ -212,15 +212,15 @@ int hardif_min_mtu(struct net_device *soft_iface)
 		goto out;
 
 	rcu_read_lock();
-	list_for_each_entry_rcu(batman_if, &hardif_list, list) {
-		if ((batman_if->if_status != IF_ACTIVE) &&
-		    (batman_if->if_status != IF_TO_BE_ACTIVATED))
+	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
+		if ((hard_iface->if_status != IF_ACTIVE) &&
+		    (hard_iface->if_status != IF_TO_BE_ACTIVATED))
 			continue;
 
-		if (batman_if->soft_iface != soft_iface)
+		if (hard_iface->soft_iface != soft_iface)
 			continue;
 
-		min_mtu = min_t(int, batman_if->net_dev->mtu - BAT_HEADER_LEN,
+		min_mtu = min_t(int, hard_iface->net_dev->mtu - BAT_HEADER_LEN,
 				min_mtu);
 	}
 	rcu_read_unlock();
@@ -238,80 +238,80 @@ void update_min_mtu(struct net_device *soft_iface)
 		soft_iface->mtu = min_mtu;
 }
 
-static void hardif_activate_interface(struct batman_if *batman_if)
+static void hardif_activate_interface(struct hard_iface *hard_iface)
 {
 	struct bat_priv *bat_priv;
 
-	if (batman_if->if_status != IF_INACTIVE)
+	if (hard_iface->if_status != IF_INACTIVE)
 		return;
 
-	bat_priv = netdev_priv(batman_if->soft_iface);
+	bat_priv = netdev_priv(hard_iface->soft_iface);
 
-	update_mac_addresses(batman_if);
-	batman_if->if_status = IF_TO_BE_ACTIVATED;
+	update_mac_addresses(hard_iface);
+	hard_iface->if_status = IF_TO_BE_ACTIVATED;
 
 	/**
 	 * the first active interface becomes our primary interface or
 	 * the next active interface after the old primay interface was removed
 	 */
 	if (!bat_priv->primary_if)
-		set_primary_if(bat_priv, batman_if);
+		set_primary_if(bat_priv, hard_iface);
 
-	bat_info(batman_if->soft_iface, "Interface activated: %s\n",
-		 batman_if->net_dev->name);
+	bat_info(hard_iface->soft_iface, "Interface activated: %s\n",
+		 hard_iface->net_dev->name);
 
-	update_min_mtu(batman_if->soft_iface);
+	update_min_mtu(hard_iface->soft_iface);
 	return;
 }
 
-static void hardif_deactivate_interface(struct batman_if *batman_if)
+static void hardif_deactivate_interface(struct hard_iface *hard_iface)
 {
-	if ((batman_if->if_status != IF_ACTIVE) &&
-	    (batman_if->if_status != IF_TO_BE_ACTIVATED))
+	if ((hard_iface->if_status != IF_ACTIVE) &&
+	    (hard_iface->if_status != IF_TO_BE_ACTIVATED))
 		return;
 
-	batman_if->if_status = IF_INACTIVE;
+	hard_iface->if_status = IF_INACTIVE;
 
-	bat_info(batman_if->soft_iface, "Interface deactivated: %s\n",
-		 batman_if->net_dev->name);
+	bat_info(hard_iface->soft_iface, "Interface deactivated: %s\n",
+		 hard_iface->net_dev->name);
 
-	update_min_mtu(batman_if->soft_iface);
+	update_min_mtu(hard_iface->soft_iface);
 }
 
-int hardif_enable_interface(struct batman_if *batman_if, char *iface_name)
+int hardif_enable_interface(struct hard_iface *hard_iface, char *iface_name)
 {
 	struct bat_priv *bat_priv;
 	struct batman_packet *batman_packet;
 
-	if (batman_if->if_status != IF_NOT_IN_USE)
+	if (hard_iface->if_status != IF_NOT_IN_USE)
 		goto out;
 
-	if (!atomic_inc_not_zero(&batman_if->refcount))
+	if (!atomic_inc_not_zero(&hard_iface->refcount))
 		goto out;
 
-	batman_if->soft_iface = dev_get_by_name(&init_net, iface_name);
+	hard_iface->soft_iface = dev_get_by_name(&init_net, iface_name);
 
-	if (!batman_if->soft_iface) {
-		batman_if->soft_iface = softif_create(iface_name);
+	if (!hard_iface->soft_iface) {
+		hard_iface->soft_iface = softif_create(iface_name);
 
-		if (!batman_if->soft_iface)
+		if (!hard_iface->soft_iface)
 			goto err;
 
 		/* dev_get_by_name() increases the reference counter for us */
-		dev_hold(batman_if->soft_iface);
+		dev_hold(hard_iface->soft_iface);
 	}
 
-	bat_priv = netdev_priv(batman_if->soft_iface);
-	batman_if->packet_len = BAT_PACKET_LEN;
-	batman_if->packet_buff = kmalloc(batman_if->packet_len, GFP_ATOMIC);
+	bat_priv = netdev_priv(hard_iface->soft_iface);
+	hard_iface->packet_len = BAT_PACKET_LEN;
+	hard_iface->packet_buff = kmalloc(hard_iface->packet_len, GFP_ATOMIC);
 
-	if (!batman_if->packet_buff) {
-		bat_err(batman_if->soft_iface, "Can't add interface packet "
-			"(%s): out of memory\n", batman_if->net_dev->name);
+	if (!hard_iface->packet_buff) {
+		bat_err(hard_iface->soft_iface, "Can't add interface packet "
+			"(%s): out of memory\n", hard_iface->net_dev->name);
 		goto err;
 	}
 
-	batman_packet = (struct batman_packet *)(batman_if->packet_buff);
+	batman_packet = (struct batman_packet *)(hard_iface->packet_buff);
 	batman_packet->packet_type = BAT_PACKET;
 	batman_packet->version = COMPAT_VERSION;
 	batman_packet->flags = 0;
@@ -319,107 +319,107 @@ int hardif_enable_interface(struct batman_if *batman_if, char *iface_name)
 	batman_packet->tq = TQ_MAX_VALUE;
 	batman_packet->num_hna = 0;
 
-	batman_if->if_num = bat_priv->num_ifaces;
+	hard_iface->if_num = bat_priv->num_ifaces;
 	bat_priv->num_ifaces++;
-	batman_if->if_status = IF_INACTIVE;
-	orig_hash_add_if(batman_if, bat_priv->num_ifaces);
+	hard_iface->if_status = IF_INACTIVE;
+	orig_hash_add_if(hard_iface, bat_priv->num_ifaces);
 
-	batman_if->batman_adv_ptype.type = __constant_htons(ETH_P_BATMAN);
-	batman_if->batman_adv_ptype.func = batman_skb_recv;
-	batman_if->batman_adv_ptype.dev = batman_if->net_dev;
-	dev_add_pack(&batman_if->batman_adv_ptype);
+	hard_iface->batman_adv_ptype.type = __constant_htons(ETH_P_BATMAN);
+	hard_iface->batman_adv_ptype.func = batman_skb_recv;
+	hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
+	dev_add_pack(&hard_iface->batman_adv_ptype);
 
-	atomic_set(&batman_if->seqno, 1);
-	atomic_set(&batman_if->frag_seqno, 1);
-	bat_info(batman_if->soft_iface, "Adding interface: %s\n",
-		 batman_if->net_dev->name);
+	atomic_set(&hard_iface->seqno, 1);
+	atomic_set(&hard_iface->frag_seqno, 1);
+	bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
+		 hard_iface->net_dev->name);
 
-	if (atomic_read(&bat_priv->fragmentation) && batman_if->net_dev->mtu <
+	if (atomic_read(&bat_priv->fragmentation) && hard_iface->net_dev->mtu <
 		ETH_DATA_LEN + BAT_HEADER_LEN)
-		bat_info(batman_if->soft_iface,
+		bat_info(hard_iface->soft_iface,
 			 "The MTU of interface %s is too small (%i) to handle "
 			 "the transport of batman-adv packets. Packets going "
 			 "over this interface will be fragmented on layer2 "
 			 "which could impact the performance. Setting the MTU "
 			 "to %zi would solve the problem.\n",
-			 batman_if->net_dev->name, batman_if->net_dev->mtu,
+			 hard_iface->net_dev->name, hard_iface->net_dev->mtu,
 			 ETH_DATA_LEN + BAT_HEADER_LEN);
 
-	if (!atomic_read(&bat_priv->fragmentation) && batman_if->net_dev->mtu <
+	if (!atomic_read(&bat_priv->fragmentation) && hard_iface->net_dev->mtu <
 		ETH_DATA_LEN + BAT_HEADER_LEN)
-		bat_info(batman_if->soft_iface,
+		bat_info(hard_iface->soft_iface,
 			 "The MTU of interface %s is too small (%i) to handle "
 			 "the transport of batman-adv packets. If you experience"
 			 " problems getting traffic through try increasing the "
 			 "MTU to %zi.\n",
-			 batman_if->net_dev->name, batman_if->net_dev->mtu,
+			 hard_iface->net_dev->name, hard_iface->net_dev->mtu,
 			 ETH_DATA_LEN + BAT_HEADER_LEN);
 
-	if (hardif_is_iface_up(batman_if))
-		hardif_activate_interface(batman_if);
+	if (hardif_is_iface_up(hard_iface))
+		hardif_activate_interface(hard_iface);
 	else
-		bat_err(batman_if->soft_iface, "Not using interface %s "
+		bat_err(hard_iface->soft_iface, "Not using interface %s "
 			"(retrying later): interface not active\n",
-			batman_if->net_dev->name);
+			hard_iface->net_dev->name);
 
 	/* begin scheduling originator messages on that interface */
-	schedule_own_packet(batman_if);
+	schedule_own_packet(hard_iface);
 
 out:
 	return 0;
 
 err:
-	hardif_free_ref(batman_if);
+	hardif_free_ref(hard_iface);
 	return -ENOMEM;
 }
 
-void hardif_disable_interface(struct batman_if *batman_if)
+void hardif_disable_interface(struct hard_iface *hard_iface)
 {
-	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
+	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
 
-	if (batman_if->if_status == IF_ACTIVE)
-		hardif_deactivate_interface(batman_if);
+	if (hard_iface->if_status == IF_ACTIVE)
+		hardif_deactivate_interface(hard_iface);
 
-	if (batman_if->if_status != IF_INACTIVE)
+	if (hard_iface->if_status != IF_INACTIVE)
 		return;
 
-	bat_info(batman_if->soft_iface, "Removing interface: %s\n",
-		 batman_if->net_dev->name);
-	dev_remove_pack(&batman_if->batman_adv_ptype);
+	bat_info(hard_iface->soft_iface, "Removing interface: %s\n",
+		 hard_iface->net_dev->name);
+	dev_remove_pack(&hard_iface->batman_adv_ptype);
 
 	bat_priv->num_ifaces--;
-	orig_hash_del_if(batman_if, bat_priv->num_ifaces);
+	orig_hash_del_if(hard_iface, bat_priv->num_ifaces);
 
-	if (batman_if == bat_priv->primary_if) {
-		struct batman_if *new_if;
+	if (hard_iface == bat_priv->primary_if) {
+		struct hard_iface *new_if;
 
-		new_if = get_active_batman_if(batman_if->soft_iface);
+		new_if = hardif_get_active(hard_iface->soft_iface);
 		set_primary_if(bat_priv, new_if);
 
 		if (new_if)
 			hardif_free_ref(new_if);
 	}
 
-	kfree(batman_if->packet_buff);
-	batman_if->packet_buff = NULL;
-	batman_if->if_status = IF_NOT_IN_USE;
+	kfree(hard_iface->packet_buff);
+	hard_iface->packet_buff = NULL;
+	hard_iface->if_status = IF_NOT_IN_USE;
 
-	/* delete all references to this batman_if */
+	/* delete all references to this hard_iface */
 	purge_orig_ref(bat_priv);
-	purge_outstanding_packets(bat_priv, batman_if);
-	dev_put(batman_if->soft_iface);
+	purge_outstanding_packets(bat_priv, hard_iface);
+	dev_put(hard_iface->soft_iface);
 
 	/* nobody uses this interface anymore */
 	if (!bat_priv->num_ifaces)
-		softif_destroy(batman_if->soft_iface);
+		softif_destroy(hard_iface->soft_iface);
 
-	batman_if->soft_iface = NULL;
-	hardif_free_ref(batman_if);
+	hard_iface->soft_iface = NULL;
+	hardif_free_ref(hard_iface);
 }
 
-static struct batman_if *hardif_add_interface(struct net_device *net_dev)
+static struct hard_iface *hardif_add_interface(struct net_device *net_dev)
 {
-	struct batman_if *batman_if;
+	struct hard_iface *hard_iface;
 	int ret;
 
 	ret = is_valid_iface(net_dev);
@@ -428,72 +428,73 @@ static struct batman_if *hardif_add_interface(struct net_device *net_dev)
 
 	dev_hold(net_dev);
 
-	batman_if = kmalloc(sizeof(struct batman_if), GFP_ATOMIC);
-	if (!batman_if) {
+	hard_iface = kmalloc(sizeof(struct hard_iface), GFP_ATOMIC);
+	if (!hard_iface) {
 		pr_err("Can't add interface (%s): out of memory\n",
 		       net_dev->name);
 		goto release_dev;
 	}
 
-	ret = sysfs_add_hardif(&batman_if->hardif_obj, net_dev);
+	ret = sysfs_add_hardif(&hard_iface->hardif_obj, net_dev);
 	if (ret)
 		goto free_if;
 
-	batman_if->if_num = -1;
-	batman_if->net_dev = net_dev;
-	batman_if->soft_iface = NULL;
-	batman_if->if_status = IF_NOT_IN_USE;
-	INIT_LIST_HEAD(&batman_if->list);
+	hard_iface->if_num = -1;
+	hard_iface->net_dev = net_dev;
+	hard_iface->soft_iface = NULL;
+	hard_iface->if_status = IF_NOT_IN_USE;
+	INIT_LIST_HEAD(&hard_iface->list);
 	/* extra reference for return */
-	atomic_set(&batman_if->refcount, 2);
+	atomic_set(&hard_iface->refcount, 2);
 
-	check_known_mac_addr(batman_if->net_dev);
+	check_known_mac_addr(hard_iface->net_dev);
 
 	spin_lock(&hardif_list_lock);
-	list_add_tail_rcu(&batman_if->list, &hardif_list);
+	list_add_tail_rcu(&hard_iface->list, &hardif_list);
 	spin_unlock(&hardif_list_lock);
 
-	return batman_if;
+	return hard_iface;
 
 free_if:
-	kfree(batman_if);
+	kfree(hard_iface);
 release_dev:
 	dev_put(net_dev);
 out:
 	return NULL;
 }
 
-static void hardif_remove_interface(struct batman_if *batman_if)
+static void hardif_remove_interface(struct hard_iface *hard_iface)
 {
 	/* first deactivate interface */
-	if (batman_if->if_status != IF_NOT_IN_USE)
-		hardif_disable_interface(batman_if);
+	if (hard_iface->if_status != IF_NOT_IN_USE)
+		hardif_disable_interface(hard_iface);
 
-	if (batman_if->if_status != IF_NOT_IN_USE)
+	if (hard_iface->if_status != IF_NOT_IN_USE)
 		return;
 
-	batman_if->if_status = IF_TO_BE_REMOVED;
-	sysfs_del_hardif(&batman_if->hardif_obj);
-	hardif_free_ref(batman_if);
+	hard_iface->if_status = IF_TO_BE_REMOVED;
+	sysfs_del_hardif(&hard_iface->hardif_obj);
+	hardif_free_ref(hard_iface);
 }
 
 void hardif_remove_interfaces(void)
 {
-	struct batman_if *batman_if, *batman_if_tmp;
+	struct hard_iface *hard_iface, *hard_iface_tmp;
 	struct list_head if_queue;
 
 	INIT_LIST_HEAD(&if_queue);
 
 	spin_lock(&hardif_list_lock);
-	list_for_each_entry_safe(batman_if, batman_if_tmp, &hardif_list, list) {
-		list_del_rcu(&batman_if->list);
-		list_add_tail(&batman_if->list, &if_queue);
+	list_for_each_entry_safe(hard_iface, hard_iface_tmp,
+				 &hardif_list, list) {
+		list_del_rcu(&hard_iface->list);
+		list_add_tail(&hard_iface->list, &if_queue);
 	}
 	spin_unlock(&hardif_list_lock);
 
 	rtnl_lock();
-	list_for_each_entry_safe(batman_if, batman_if_tmp, &if_queue, list) {
-		hardif_remove_interface(batman_if);
+	list_for_each_entry_safe(hard_iface, hard_iface_tmp, &if_queue, list) {
+		hardif_remove_interface(hard_iface);
 	}
 	rtnl_unlock();
 }
@@ -502,43 +503,43 @@ static int hard_if_event(struct notifier_block *this,
 			 unsigned long event, void *ptr)
 {
 	struct net_device *net_dev = (struct net_device *)ptr;
-	struct batman_if *batman_if = get_batman_if_by_netdev(net_dev);
+	struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev);
 	struct bat_priv *bat_priv;
 
-	if (!batman_if && event == NETDEV_REGISTER)
-		batman_if = hardif_add_interface(net_dev);
+	if (!hard_iface && event == NETDEV_REGISTER)
+		hard_iface = hardif_add_interface(net_dev);
 
-	if (!batman_if)
+	if (!hard_iface)
 		goto out;
 
 	switch (event) {
 	case NETDEV_UP:
-		hardif_activate_interface(batman_if);
+		hardif_activate_interface(hard_iface);
 		break;
 	case NETDEV_GOING_DOWN:
 	case NETDEV_DOWN:
-		hardif_deactivate_interface(batman_if);
+		hardif_deactivate_interface(hard_iface);
 		break;
 	case NETDEV_UNREGISTER:
 		spin_lock(&hardif_list_lock);
-		list_del_rcu(&batman_if->list);
+		list_del_rcu(&hard_iface->list);
 		spin_unlock(&hardif_list_lock);
 
-		hardif_remove_interface(batman_if);
+		hardif_remove_interface(hard_iface);
 		break;
 	case NETDEV_CHANGEMTU:
-		if (batman_if->soft_iface)
-			update_min_mtu(batman_if->soft_iface);
+		if (hard_iface->soft_iface)
+			update_min_mtu(hard_iface->soft_iface);
 		break;
 	case NETDEV_CHANGEADDR:
-		if (batman_if->if_status == IF_NOT_IN_USE)
+		if (hard_iface->if_status == IF_NOT_IN_USE)
 			goto hardif_put;
 
-		check_known_mac_addr(batman_if->net_dev);
-		update_mac_addresses(batman_if);
+		check_known_mac_addr(hard_iface->net_dev);
+		update_mac_addresses(hard_iface);
 
-		bat_priv = netdev_priv(batman_if->soft_iface);
-		if (batman_if == bat_priv->primary_if)
+		bat_priv = netdev_priv(hard_iface->soft_iface);
+		if (hard_iface == bat_priv->primary_if)
 			update_primary_addr(bat_priv);
 		break;
 	default:
@@ -546,7 +547,7 @@ static int hard_if_event(struct notifier_block *this,
 	};
 
 hardif_put:
-	hardif_free_ref(batman_if);
+	hardif_free_ref(hard_iface);
 out:
 	return NOTIFY_DONE;
 }
@@ -559,10 +560,10 @@ static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
 {
 	struct bat_priv *bat_priv;
 	struct batman_packet *batman_packet;
-	struct batman_if *batman_if;
+	struct hard_iface *hard_iface;
 	int ret;
 
-	batman_if = container_of(ptype, struct batman_if, batman_adv_ptype);
+	hard_iface = container_of(ptype, struct hard_iface, batman_adv_ptype);
 	skb = skb_share_check(skb, GFP_ATOMIC);
 
 	/* skb was released by skb_share_check() */
@@ -578,16 +579,16 @@ static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
 		|| !skb_mac_header(skb)))
 		goto err_free;
 
-	if (!batman_if->soft_iface)
+	if (!hard_iface->soft_iface)
 		goto err_free;
 
-	bat_priv = netdev_priv(batman_if->soft_iface);
+	bat_priv = netdev_priv(hard_iface->soft_iface);
 
 	if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
 		goto err_free;
 
 	/* discard frames on not active interfaces */
-	if (batman_if->if_status != IF_ACTIVE)
+	if (hard_iface->if_status != IF_ACTIVE)
 		goto err_free;
 
 	batman_packet = (struct batman_packet *)skb->data;
@@ -605,32 +606,32 @@ static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
 	switch (batman_packet->packet_type) {
 		/* batman originator packet */
 	case BAT_PACKET:
-		ret = recv_bat_packet(skb, batman_if);
+		ret = recv_bat_packet(skb, hard_iface);
 		break;
 
 		/* batman icmp packet */
 	case BAT_ICMP:
-		ret = recv_icmp_packet(skb, batman_if);
+		ret = recv_icmp_packet(skb, hard_iface);
 		break;
 
 		/* unicast packet */
 	case BAT_UNICAST:
-		ret = recv_unicast_packet(skb, batman_if);
+		ret = recv_unicast_packet(skb, hard_iface);
 		break;
 
 		/* fragmented unicast packet */
 	case BAT_UNICAST_FRAG:
-		ret = recv_ucast_frag_packet(skb, batman_if);
+		ret = recv_ucast_frag_packet(skb, hard_iface);
 		break;
 
 		/* broadcast packet */
 	case BAT_BCAST:
-		ret = recv_bcast_packet(skb, batman_if);
+		ret = recv_bcast_packet(skb, hard_iface);
 		break;
 
 		/* vis packet */
 	case BAT_VIS:
-		ret = recv_vis_packet(skb, batman_if);
+		ret = recv_vis_packet(skb, hard_iface);
 		break;
 	default:
 		ret = NET_RX_DROP;
diff --git a/net/batman-adv/hard-interface.h b/net/batman-adv/hard-interface.h
index e488b90b8fea..a9ddf36e51c8 100644
--- a/net/batman-adv/hard-interface.h
+++ b/net/batman-adv/hard-interface.h
@@ -31,18 +31,18 @@
 
 extern struct notifier_block hard_if_notifier;
 
-struct batman_if *get_batman_if_by_netdev(struct net_device *net_dev);
-int hardif_enable_interface(struct batman_if *batman_if, char *iface_name);
-void hardif_disable_interface(struct batman_if *batman_if);
+struct hard_iface *hardif_get_by_netdev(struct net_device *net_dev);
+int hardif_enable_interface(struct hard_iface *hard_iface, char *iface_name);
+void hardif_disable_interface(struct hard_iface *hard_iface);
 void hardif_remove_interfaces(void);
 int hardif_min_mtu(struct net_device *soft_iface);
 void update_min_mtu(struct net_device *soft_iface);
 void hardif_free_rcu(struct rcu_head *rcu);
 
-static inline void hardif_free_ref(struct batman_if *batman_if)
+static inline void hardif_free_ref(struct hard_iface *hard_iface)
 {
-	if (atomic_dec_and_test(&batman_if->refcount))
-		call_rcu(&batman_if->rcu, hardif_free_rcu);
+	if (atomic_dec_and_test(&hard_iface->refcount))
+		call_rcu(&hard_iface->rcu, hardif_free_rcu);
 }
 
 #endif /* _NET_BATMAN_ADV_HARD_INTERFACE_H_ */
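The reference-counting contract visible in this header is unchanged by the rename: hardif_get_by_netdev() only returns a hard_iface after taking a reference with atomic_inc_not_zero() (see hard-interface.c above), and hardif_free_ref() drops that reference, deferring the actual kfree() to an RCU grace period via hardif_free_rcu(). A minimal usage sketch, assuming only the declarations shown in this patch (the function name and error handling are illustrative, not taken from the tree):

static int example_iface_is_active(struct net_device *net_dev)
{
	struct hard_iface *hard_iface;
	int active;

	/* takes a reference via atomic_inc_not_zero(), or returns NULL */
	hard_iface = hardif_get_by_netdev(net_dev);
	if (!hard_iface)
		return -ENODEV;

	active = (hard_iface->if_status == IF_ACTIVE);

	/* drop the reference; the struct is freed after an RCU grace period */
	hardif_free_ref(hard_iface);
	return active;
}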
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 57aea9bcdb33..709b33bbdf43 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -153,14 +153,14 @@ void dec_module_count(void)
 
 int is_my_mac(uint8_t *addr)
 {
-	struct batman_if *batman_if;
+	struct hard_iface *hard_iface;
 
 	rcu_read_lock();
-	list_for_each_entry_rcu(batman_if, &hardif_list, list) {
-		if (batman_if->if_status != IF_ACTIVE)
+	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
+		if (hard_iface->if_status != IF_ACTIVE)
 			continue;
 
-		if (compare_eth(batman_if->net_dev->dev_addr, addr)) {
+		if (compare_eth(hard_iface->net_dev->dev_addr, addr)) {
 			rcu_read_unlock();
 			return 1;
 		}
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 84ef9ae6c770..0b9133022d2d 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -73,7 +73,7 @@ void neigh_node_free_ref(struct neigh_node *neigh_node)
 struct neigh_node *create_neighbor(struct orig_node *orig_node,
 				   struct orig_node *orig_neigh_node,
 				   uint8_t *neigh,
-				   struct batman_if *if_incoming)
+				   struct hard_iface *if_incoming)
 {
 	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
 	struct neigh_node *neigh_node;
@@ -487,9 +487,9 @@ static int orig_node_add_if(struct orig_node *orig_node, int max_if_num)
 	return 0;
 }
 
-int orig_hash_add_if(struct batman_if *batman_if, int max_if_num)
+int orig_hash_add_if(struct hard_iface *hard_iface, int max_if_num)
 {
-	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
+	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
 	struct hashtable_t *hash = bat_priv->orig_hash;
 	struct hlist_node *node;
 	struct hlist_head *head;
@@ -572,13 +572,13 @@ free_own_sum:
 	return 0;
 }
 
-int orig_hash_del_if(struct batman_if *batman_if, int max_if_num)
+int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num)
 {
-	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
+	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
 	struct hashtable_t *hash = bat_priv->orig_hash;
 	struct hlist_node *node;
 	struct hlist_head *head;
-	struct batman_if *batman_if_tmp;
+	struct hard_iface *hard_iface_tmp;
 	struct orig_node *orig_node;
 	int i, ret;
 
@@ -591,7 +591,7 @@ int orig_hash_del_if(struct batman_if *batman_if, int max_if_num)
 		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
 			spin_lock_bh(&orig_node->ogm_cnt_lock);
 			ret = orig_node_del_if(orig_node, max_if_num,
-					       batman_if->if_num);
+					       hard_iface->if_num);
 			spin_unlock_bh(&orig_node->ogm_cnt_lock);
 
 			if (ret == -1)
@@ -602,22 +602,22 @@ int orig_hash_del_if(struct batman_if *batman_if, int max_if_num)
 
 	/* renumber remaining batman interfaces _inside_ of orig_hash_lock */
 	rcu_read_lock();
-	list_for_each_entry_rcu(batman_if_tmp, &hardif_list, list) {
-		if (batman_if_tmp->if_status == IF_NOT_IN_USE)
+	list_for_each_entry_rcu(hard_iface_tmp, &hardif_list, list) {
+		if (hard_iface_tmp->if_status == IF_NOT_IN_USE)
 			continue;
 
-		if (batman_if == batman_if_tmp)
+		if (hard_iface == hard_iface_tmp)
 			continue;
 
-		if (batman_if->soft_iface != batman_if_tmp->soft_iface)
+		if (hard_iface->soft_iface != hard_iface_tmp->soft_iface)
 			continue;
 
-		if (batman_if_tmp->if_num > batman_if->if_num)
-			batman_if_tmp->if_num--;
+		if (hard_iface_tmp->if_num > hard_iface->if_num)
+			hard_iface_tmp->if_num--;
 	}
 	rcu_read_unlock();
 
-	batman_if->if_num = -1;
+	hard_iface->if_num = -1;
 	return 0;
 
 err:
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index 3d7a39d4df0f..5cc011057da1 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -32,11 +32,11 @@ struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr);
 struct neigh_node *create_neighbor(struct orig_node *orig_node,
 				   struct orig_node *orig_neigh_node,
 				   uint8_t *neigh,
-				   struct batman_if *if_incoming);
+				   struct hard_iface *if_incoming);
 void neigh_node_free_ref(struct neigh_node *neigh_node);
 int orig_seq_print_text(struct seq_file *seq, void *offset);
-int orig_hash_add_if(struct batman_if *batman_if, int max_if_num);
-int orig_hash_del_if(struct batman_if *batman_if, int max_if_num);
+int orig_hash_add_if(struct hard_iface *hard_iface, int max_if_num);
+int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num);
 
 
 /* returns 1 if they are the same originator */
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 21e93b39b2a4..42cb6e2e44f5 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -35,9 +35,9 @@
 #include "gateway_client.h"
 #include "unicast.h"
 
-void slide_own_bcast_window(struct batman_if *batman_if)
+void slide_own_bcast_window(struct hard_iface *hard_iface)
 {
-	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
+	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
 	struct hashtable_t *hash = bat_priv->orig_hash;
 	struct hlist_node *node;
 	struct hlist_head *head;
@@ -52,11 +52,11 @@ void slide_own_bcast_window(struct batman_if *batman_if)
 		rcu_read_lock();
 		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
 			spin_lock_bh(&orig_node->ogm_cnt_lock);
-			word_index = batman_if->if_num * NUM_WORDS;
+			word_index = hard_iface->if_num * NUM_WORDS;
 			word = &(orig_node->bcast_own[word_index]);
 
 			bit_get_packet(bat_priv, word, 1, 0);
-			orig_node->bcast_own_sum[batman_if->if_num] =
+			orig_node->bcast_own_sum[hard_iface->if_num] =
 				bit_packet_count(word);
 			spin_unlock_bh(&orig_node->ogm_cnt_lock);
 		}
@@ -143,7 +143,7 @@ void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
 static int is_bidirectional_neigh(struct orig_node *orig_node,
 				  struct orig_node *orig_neigh_node,
 				  struct batman_packet *batman_packet,
-				  struct batman_if *if_incoming)
+				  struct hard_iface *if_incoming)
 {
 	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
 	struct neigh_node *neigh_node = NULL, *tmp_neigh_node;
@@ -368,7 +368,7 @@ static void update_orig(struct bat_priv *bat_priv,
 			struct orig_node *orig_node,
 			struct ethhdr *ethhdr,
 			struct batman_packet *batman_packet,
-			struct batman_if *if_incoming,
+			struct hard_iface *if_incoming,
 			unsigned char *hna_buff, int hna_buff_len,
 			char is_duplicate)
 {
@@ -533,7 +533,7 @@ static int window_protected(struct bat_priv *bat_priv,
  */
 static char count_real_packets(struct ethhdr *ethhdr,
 			       struct batman_packet *batman_packet,
-			       struct batman_if *if_incoming)
+			       struct hard_iface *if_incoming)
 {
 	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
 	struct orig_node *orig_node;
@@ -598,10 +598,10 @@ out:
 void receive_bat_packet(struct ethhdr *ethhdr,
 			struct batman_packet *batman_packet,
 			unsigned char *hna_buff, int hna_buff_len,
-			struct batman_if *if_incoming)
+			struct hard_iface *if_incoming)
 {
 	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
-	struct batman_if *batman_if;
+	struct hard_iface *hard_iface;
 	struct orig_node *orig_neigh_node, *orig_node;
 	char has_directlink_flag;
 	char is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
@@ -643,23 +643,23 @@ void receive_bat_packet(struct ethhdr *ethhdr, | |||
643 | has_directlink_flag); | 643 | has_directlink_flag); |
644 | 644 | ||
645 | rcu_read_lock(); | 645 | rcu_read_lock(); |
646 | list_for_each_entry_rcu(batman_if, &hardif_list, list) { | 646 | list_for_each_entry_rcu(hard_iface, &hardif_list, list) { |
647 | if (batman_if->if_status != IF_ACTIVE) | 647 | if (hard_iface->if_status != IF_ACTIVE) |
648 | continue; | 648 | continue; |
649 | 649 | ||
650 | if (batman_if->soft_iface != if_incoming->soft_iface) | 650 | if (hard_iface->soft_iface != if_incoming->soft_iface) |
651 | continue; | 651 | continue; |
652 | 652 | ||
653 | if (compare_eth(ethhdr->h_source, | 653 | if (compare_eth(ethhdr->h_source, |
654 | batman_if->net_dev->dev_addr)) | 654 | hard_iface->net_dev->dev_addr)) |
655 | is_my_addr = 1; | 655 | is_my_addr = 1; |
656 | 656 | ||
657 | if (compare_eth(batman_packet->orig, | 657 | if (compare_eth(batman_packet->orig, |
658 | batman_if->net_dev->dev_addr)) | 658 | hard_iface->net_dev->dev_addr)) |
659 | is_my_orig = 1; | 659 | is_my_orig = 1; |
660 | 660 | ||
661 | if (compare_eth(batman_packet->prev_sender, | 661 | if (compare_eth(batman_packet->prev_sender, |
662 | batman_if->net_dev->dev_addr)) | 662 | hard_iface->net_dev->dev_addr)) |
663 | is_my_oldorig = 1; | 663 | is_my_oldorig = 1; |
664 | 664 | ||
665 | if (compare_eth(ethhdr->h_source, broadcast_addr)) | 665 | if (compare_eth(ethhdr->h_source, broadcast_addr)) |
@@ -828,7 +828,7 @@ out: | |||
828 | orig_node_free_ref(orig_node); | 828 | orig_node_free_ref(orig_node); |
829 | } | 829 | } |
830 | 830 | ||
831 | int recv_bat_packet(struct sk_buff *skb, struct batman_if *batman_if) | 831 | int recv_bat_packet(struct sk_buff *skb, struct hard_iface *hard_iface) |
832 | { | 832 | { |
833 | struct ethhdr *ethhdr; | 833 | struct ethhdr *ethhdr; |
834 | 834 | ||
@@ -859,7 +859,7 @@ int recv_bat_packet(struct sk_buff *skb, struct batman_if *batman_if) | |||
859 | receive_aggr_bat_packet(ethhdr, | 859 | receive_aggr_bat_packet(ethhdr, |
860 | skb->data, | 860 | skb->data, |
861 | skb_headlen(skb), | 861 | skb_headlen(skb), |
862 | batman_if); | 862 | hard_iface); |
863 | 863 | ||
864 | kfree_skb(skb); | 864 | kfree_skb(skb); |
865 | return NET_RX_SUCCESS; | 865 | return NET_RX_SUCCESS; |
@@ -997,7 +997,7 @@ out: | |||
997 | } | 997 | } |
998 | 998 | ||
999 | 999 | ||
1000 | int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if) | 1000 | int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if) |
1001 | { | 1001 | { |
1002 | struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); | 1002 | struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); |
1003 | struct icmp_packet_rr *icmp_packet; | 1003 | struct icmp_packet_rr *icmp_packet; |
@@ -1097,7 +1097,7 @@ out: | |||
1097 | * refcount.*/ | 1097 | * refcount.*/ |
1098 | struct neigh_node *find_router(struct bat_priv *bat_priv, | 1098 | struct neigh_node *find_router(struct bat_priv *bat_priv, |
1099 | struct orig_node *orig_node, | 1099 | struct orig_node *orig_node, |
1100 | struct batman_if *recv_if) | 1100 | struct hard_iface *recv_if) |
1101 | { | 1101 | { |
1102 | struct orig_node *primary_orig_node; | 1102 | struct orig_node *primary_orig_node; |
1103 | struct orig_node *router_orig; | 1103 | struct orig_node *router_orig; |
@@ -1263,7 +1263,7 @@ static int check_unicast_packet(struct sk_buff *skb, int hdr_size) | |||
1263 | return 0; | 1263 | return 0; |
1264 | } | 1264 | } |
1265 | 1265 | ||
1266 | int route_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if, | 1266 | int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if, |
1267 | int hdr_size) | 1267 | int hdr_size) |
1268 | { | 1268 | { |
1269 | struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); | 1269 | struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); |
@@ -1349,7 +1349,7 @@ out: | |||
1349 | return ret; | 1349 | return ret; |
1350 | } | 1350 | } |
1351 | 1351 | ||
1352 | int recv_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if) | 1352 | int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if) |
1353 | { | 1353 | { |
1354 | struct unicast_packet *unicast_packet; | 1354 | struct unicast_packet *unicast_packet; |
1355 | int hdr_size = sizeof(struct unicast_packet); | 1355 | int hdr_size = sizeof(struct unicast_packet); |
@@ -1368,7 +1368,7 @@ int recv_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if) | |||
1368 | return route_unicast_packet(skb, recv_if, hdr_size); | 1368 | return route_unicast_packet(skb, recv_if, hdr_size); |
1369 | } | 1369 | } |
1370 | 1370 | ||
1371 | int recv_ucast_frag_packet(struct sk_buff *skb, struct batman_if *recv_if) | 1371 | int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if) |
1372 | { | 1372 | { |
1373 | struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); | 1373 | struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); |
1374 | struct unicast_frag_packet *unicast_packet; | 1374 | struct unicast_frag_packet *unicast_packet; |
@@ -1402,7 +1402,7 @@ int recv_ucast_frag_packet(struct sk_buff *skb, struct batman_if *recv_if) | |||
1402 | } | 1402 | } |
1403 | 1403 | ||
1404 | 1404 | ||
1405 | int recv_bcast_packet(struct sk_buff *skb, struct batman_if *recv_if) | 1405 | int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if) |
1406 | { | 1406 | { |
1407 | struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); | 1407 | struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); |
1408 | struct orig_node *orig_node = NULL; | 1408 | struct orig_node *orig_node = NULL; |
@@ -1487,7 +1487,7 @@ out: | |||
1487 | return ret; | 1487 | return ret; |
1488 | } | 1488 | } |
1489 | 1489 | ||
1490 | int recv_vis_packet(struct sk_buff *skb, struct batman_if *recv_if) | 1490 | int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if) |
1491 | { | 1491 | { |
1492 | struct vis_packet *vis_packet; | 1492 | struct vis_packet *vis_packet; |
1493 | struct ethhdr *ethhdr; | 1493 | struct ethhdr *ethhdr; |
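
Across routing.c the pattern the rename touches most is the walk over hardif_list in receive_bat_packet(): every active interface bound to the same soft_iface is compared against the Ethernet source, the OGM originator and the previous sender to classify the packet. The compilable user-space sketch below mirrors that classification; compare_eth() and the three flag names come from the hunk above, while the interface table and addresses are illustrative.

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    #define ETH_ALEN 6

    struct iface {                       /* stand-in for the active hard_iface entries */
        uint8_t dev_addr[ETH_ALEN];
    };

    static bool compare_eth(const uint8_t *a, const uint8_t *b)
    {
        return memcmp(a, b, ETH_ALEN) == 0;
    }

    /* classify an incoming OGM against all of our own interface addresses */
    static void classify(const struct iface *ifs, int n,
                         const uint8_t *h_source, const uint8_t *orig,
                         const uint8_t *prev_sender,
                         int *is_my_addr, int *is_my_orig, int *is_my_oldorig)
    {
        *is_my_addr = *is_my_orig = *is_my_oldorig = 0;

        for (int i = 0; i < n; i++) {
            if (compare_eth(h_source, ifs[i].dev_addr))
                *is_my_addr = 1;          /* we sent this frame ourselves */
            if (compare_eth(orig, ifs[i].dev_addr))
                *is_my_orig = 1;          /* the OGM originated from us */
            if (compare_eth(prev_sender, ifs[i].dev_addr))
                *is_my_oldorig = 1;       /* we were the previous hop */
        }
    }

    int main(void)
    {
        struct iface ifs[2] = {
            { { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 } },
            { { 0x02, 0x00, 0x00, 0x00, 0x00, 0x02 } },
        };
        uint8_t src[ETH_ALEN]  = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x02 };
        uint8_t orig[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x09 };
        int my_addr, my_orig, my_oldorig;

        classify(ifs, 2, src, orig, src, &my_addr, &my_orig, &my_oldorig);
        printf("is_my_addr=%d is_my_orig=%d is_my_oldorig=%d\n",
               my_addr, my_orig, my_oldorig);
        return 0;
    }
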
diff --git a/net/batman-adv/routing.h b/net/batman-adv/routing.h index e2a9872a1589..5efceaf4b978 100644 --- a/net/batman-adv/routing.h +++ b/net/batman-adv/routing.h | |||
@@ -22,25 +22,25 @@ | |||
22 | #ifndef _NET_BATMAN_ADV_ROUTING_H_ | 22 | #ifndef _NET_BATMAN_ADV_ROUTING_H_ |
23 | #define _NET_BATMAN_ADV_ROUTING_H_ | 23 | #define _NET_BATMAN_ADV_ROUTING_H_ |
24 | 24 | ||
25 | void slide_own_bcast_window(struct batman_if *batman_if); | 25 | void slide_own_bcast_window(struct hard_iface *hard_iface); |
26 | void receive_bat_packet(struct ethhdr *ethhdr, | 26 | void receive_bat_packet(struct ethhdr *ethhdr, |
27 | struct batman_packet *batman_packet, | 27 | struct batman_packet *batman_packet, |
28 | unsigned char *hna_buff, int hna_buff_len, | 28 | unsigned char *hna_buff, int hna_buff_len, |
29 | struct batman_if *if_incoming); | 29 | struct hard_iface *if_incoming); |
30 | void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node, | 30 | void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node, |
31 | struct neigh_node *neigh_node, unsigned char *hna_buff, | 31 | struct neigh_node *neigh_node, unsigned char *hna_buff, |
32 | int hna_buff_len); | 32 | int hna_buff_len); |
33 | int route_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if, | 33 | int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if, |
34 | int hdr_size); | 34 | int hdr_size); |
35 | int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if); | 35 | int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if); |
36 | int recv_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if); | 36 | int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if); |
37 | int recv_ucast_frag_packet(struct sk_buff *skb, struct batman_if *recv_if); | 37 | int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if); |
38 | int recv_bcast_packet(struct sk_buff *skb, struct batman_if *recv_if); | 38 | int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if); |
39 | int recv_vis_packet(struct sk_buff *skb, struct batman_if *recv_if); | 39 | int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if); |
40 | int recv_bat_packet(struct sk_buff *skb, struct batman_if *recv_if); | 40 | int recv_bat_packet(struct sk_buff *skb, struct hard_iface *recv_if); |
41 | struct neigh_node *find_router(struct bat_priv *bat_priv, | 41 | struct neigh_node *find_router(struct bat_priv *bat_priv, |
42 | struct orig_node *orig_node, | 42 | struct orig_node *orig_node, |
43 | struct batman_if *recv_if); | 43 | struct hard_iface *recv_if); |
44 | void bonding_candidate_del(struct orig_node *orig_node, | 44 | void bonding_candidate_del(struct orig_node *orig_node, |
45 | struct neigh_node *neigh_node); | 45 | struct neigh_node *neigh_node); |
46 | 46 | ||
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c index c4f3e4988b63..d49e54d932af 100644 --- a/net/batman-adv/send.c +++ b/net/batman-adv/send.c | |||
@@ -56,20 +56,20 @@ static unsigned long forward_send_time(void) | |||
56 | /* send out an already prepared packet to the given address via the | 56 | /* send out an already prepared packet to the given address via the |
57 | * specified batman interface */ | 57 | * specified batman interface */ |
58 | int send_skb_packet(struct sk_buff *skb, | 58 | int send_skb_packet(struct sk_buff *skb, |
59 | struct batman_if *batman_if, | 59 | struct hard_iface *hard_iface, |
60 | uint8_t *dst_addr) | 60 | uint8_t *dst_addr) |
61 | { | 61 | { |
62 | struct ethhdr *ethhdr; | 62 | struct ethhdr *ethhdr; |
63 | 63 | ||
64 | if (batman_if->if_status != IF_ACTIVE) | 64 | if (hard_iface->if_status != IF_ACTIVE) |
65 | goto send_skb_err; | 65 | goto send_skb_err; |
66 | 66 | ||
67 | if (unlikely(!batman_if->net_dev)) | 67 | if (unlikely(!hard_iface->net_dev)) |
68 | goto send_skb_err; | 68 | goto send_skb_err; |
69 | 69 | ||
70 | if (!(batman_if->net_dev->flags & IFF_UP)) { | 70 | if (!(hard_iface->net_dev->flags & IFF_UP)) { |
71 | pr_warning("Interface %s is not up - can't send packet via " | 71 | pr_warning("Interface %s is not up - can't send packet via " |
72 | "that interface!\n", batman_if->net_dev->name); | 72 | "that interface!\n", hard_iface->net_dev->name); |
73 | goto send_skb_err; | 73 | goto send_skb_err; |
74 | } | 74 | } |
75 | 75 | ||
@@ -80,7 +80,7 @@ int send_skb_packet(struct sk_buff *skb, | |||
80 | skb_reset_mac_header(skb); | 80 | skb_reset_mac_header(skb); |
81 | 81 | ||
82 | ethhdr = (struct ethhdr *) skb_mac_header(skb); | 82 | ethhdr = (struct ethhdr *) skb_mac_header(skb); |
83 | memcpy(ethhdr->h_source, batman_if->net_dev->dev_addr, ETH_ALEN); | 83 | memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN); |
84 | memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN); | 84 | memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN); |
85 | ethhdr->h_proto = __constant_htons(ETH_P_BATMAN); | 85 | ethhdr->h_proto = __constant_htons(ETH_P_BATMAN); |
86 | 86 | ||
@@ -88,7 +88,7 @@ int send_skb_packet(struct sk_buff *skb, | |||
88 | skb->priority = TC_PRIO_CONTROL; | 88 | skb->priority = TC_PRIO_CONTROL; |
89 | skb->protocol = __constant_htons(ETH_P_BATMAN); | 89 | skb->protocol = __constant_htons(ETH_P_BATMAN); |
90 | 90 | ||
91 | skb->dev = batman_if->net_dev; | 91 | skb->dev = hard_iface->net_dev; |
92 | 92 | ||
93 | /* dev_queue_xmit() returns a negative result on error. However on | 93 | /* dev_queue_xmit() returns a negative result on error. However on |
94 | * congestion and traffic shaping, it drops and returns NET_XMIT_DROP | 94 | * congestion and traffic shaping, it drops and returns NET_XMIT_DROP |
@@ -102,16 +102,16 @@ send_skb_err: | |||
102 | 102 | ||
103 | /* Send a packet to a given interface */ | 103 | /* Send a packet to a given interface */ |
104 | static void send_packet_to_if(struct forw_packet *forw_packet, | 104 | static void send_packet_to_if(struct forw_packet *forw_packet, |
105 | struct batman_if *batman_if) | 105 | struct hard_iface *hard_iface) |
106 | { | 106 | { |
107 | struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface); | 107 | struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface); |
108 | char *fwd_str; | 108 | char *fwd_str; |
109 | uint8_t packet_num; | 109 | uint8_t packet_num; |
110 | int16_t buff_pos; | 110 | int16_t buff_pos; |
111 | struct batman_packet *batman_packet; | 111 | struct batman_packet *batman_packet; |
112 | struct sk_buff *skb; | 112 | struct sk_buff *skb; |
113 | 113 | ||
114 | if (batman_if->if_status != IF_ACTIVE) | 114 | if (hard_iface->if_status != IF_ACTIVE) |
115 | return; | 115 | return; |
116 | 116 | ||
117 | packet_num = 0; | 117 | packet_num = 0; |
@@ -126,7 +126,7 @@ static void send_packet_to_if(struct forw_packet *forw_packet, | |||
126 | /* we might have aggregated direct link packets with an | 126 | /* we might have aggregated direct link packets with an |
127 | * ordinary base packet */ | 127 | * ordinary base packet */ |
128 | if ((forw_packet->direct_link_flags & (1 << packet_num)) && | 128 | if ((forw_packet->direct_link_flags & (1 << packet_num)) && |
129 | (forw_packet->if_incoming == batman_if)) | 129 | (forw_packet->if_incoming == hard_iface)) |
130 | batman_packet->flags |= DIRECTLINK; | 130 | batman_packet->flags |= DIRECTLINK; |
131 | else | 131 | else |
132 | batman_packet->flags &= ~DIRECTLINK; | 132 | batman_packet->flags &= ~DIRECTLINK; |
@@ -142,7 +142,8 @@ static void send_packet_to_if(struct forw_packet *forw_packet, | |||
142 | batman_packet->tq, batman_packet->ttl, | 142 | batman_packet->tq, batman_packet->ttl, |
143 | (batman_packet->flags & DIRECTLINK ? | 143 | (batman_packet->flags & DIRECTLINK ? |
144 | "on" : "off"), | 144 | "on" : "off"), |
145 | batman_if->net_dev->name, batman_if->net_dev->dev_addr); | 145 | hard_iface->net_dev->name, |
146 | hard_iface->net_dev->dev_addr); | ||
146 | 147 | ||
147 | buff_pos += sizeof(struct batman_packet) + | 148 | buff_pos += sizeof(struct batman_packet) + |
148 | (batman_packet->num_hna * ETH_ALEN); | 149 | (batman_packet->num_hna * ETH_ALEN); |
@@ -154,13 +155,13 @@ static void send_packet_to_if(struct forw_packet *forw_packet, | |||
154 | /* create clone because function is called more than once */ | 155 | /* create clone because function is called more than once */ |
155 | skb = skb_clone(forw_packet->skb, GFP_ATOMIC); | 156 | skb = skb_clone(forw_packet->skb, GFP_ATOMIC); |
156 | if (skb) | 157 | if (skb) |
157 | send_skb_packet(skb, batman_if, broadcast_addr); | 158 | send_skb_packet(skb, hard_iface, broadcast_addr); |
158 | } | 159 | } |
159 | 160 | ||
160 | /* send a batman packet */ | 161 | /* send a batman packet */ |
161 | static void send_packet(struct forw_packet *forw_packet) | 162 | static void send_packet(struct forw_packet *forw_packet) |
162 | { | 163 | { |
163 | struct batman_if *batman_if; | 164 | struct hard_iface *hard_iface; |
164 | struct net_device *soft_iface; | 165 | struct net_device *soft_iface; |
165 | struct bat_priv *bat_priv; | 166 | struct bat_priv *bat_priv; |
166 | struct batman_packet *batman_packet = | 167 | struct batman_packet *batman_packet = |
@@ -204,17 +205,17 @@ static void send_packet(struct forw_packet *forw_packet) | |||
204 | 205 | ||
205 | /* broadcast on every interface */ | 206 | /* broadcast on every interface */ |
206 | rcu_read_lock(); | 207 | rcu_read_lock(); |
207 | list_for_each_entry_rcu(batman_if, &hardif_list, list) { | 208 | list_for_each_entry_rcu(hard_iface, &hardif_list, list) { |
208 | if (batman_if->soft_iface != soft_iface) | 209 | if (hard_iface->soft_iface != soft_iface) |
209 | continue; | 210 | continue; |
210 | 211 | ||
211 | send_packet_to_if(forw_packet, batman_if); | 212 | send_packet_to_if(forw_packet, hard_iface); |
212 | } | 213 | } |
213 | rcu_read_unlock(); | 214 | rcu_read_unlock(); |
214 | } | 215 | } |
215 | 216 | ||
216 | static void rebuild_batman_packet(struct bat_priv *bat_priv, | 217 | static void rebuild_batman_packet(struct bat_priv *bat_priv, |
217 | struct batman_if *batman_if) | 218 | struct hard_iface *hard_iface) |
218 | { | 219 | { |
219 | int new_len; | 220 | int new_len; |
220 | unsigned char *new_buff; | 221 | unsigned char *new_buff; |
@@ -226,7 +227,7 @@ static void rebuild_batman_packet(struct bat_priv *bat_priv, | |||
226 | 227 | ||
227 | /* keep old buffer if kmalloc should fail */ | 228 | /* keep old buffer if kmalloc should fail */ |
228 | if (new_buff) { | 229 | if (new_buff) { |
229 | memcpy(new_buff, batman_if->packet_buff, | 230 | memcpy(new_buff, hard_iface->packet_buff, |
230 | sizeof(struct batman_packet)); | 231 | sizeof(struct batman_packet)); |
231 | batman_packet = (struct batman_packet *)new_buff; | 232 | batman_packet = (struct batman_packet *)new_buff; |
232 | 233 | ||
@@ -234,21 +235,21 @@ static void rebuild_batman_packet(struct bat_priv *bat_priv, | |||
234 | new_buff + sizeof(struct batman_packet), | 235 | new_buff + sizeof(struct batman_packet), |
235 | new_len - sizeof(struct batman_packet)); | 236 | new_len - sizeof(struct batman_packet)); |
236 | 237 | ||
237 | kfree(batman_if->packet_buff); | 238 | kfree(hard_iface->packet_buff); |
238 | batman_if->packet_buff = new_buff; | 239 | hard_iface->packet_buff = new_buff; |
239 | batman_if->packet_len = new_len; | 240 | hard_iface->packet_len = new_len; |
240 | } | 241 | } |
241 | } | 242 | } |
242 | 243 | ||
243 | void schedule_own_packet(struct batman_if *batman_if) | 244 | void schedule_own_packet(struct hard_iface *hard_iface) |
244 | { | 245 | { |
245 | struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface); | 246 | struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface); |
246 | unsigned long send_time; | 247 | unsigned long send_time; |
247 | struct batman_packet *batman_packet; | 248 | struct batman_packet *batman_packet; |
248 | int vis_server; | 249 | int vis_server; |
249 | 250 | ||
250 | if ((batman_if->if_status == IF_NOT_IN_USE) || | 251 | if ((hard_iface->if_status == IF_NOT_IN_USE) || |
251 | (batman_if->if_status == IF_TO_BE_REMOVED)) | 252 | (hard_iface->if_status == IF_TO_BE_REMOVED)) |
252 | return; | 253 | return; |
253 | 254 | ||
254 | vis_server = atomic_read(&bat_priv->vis_mode); | 255 | vis_server = atomic_read(&bat_priv->vis_mode); |
@@ -260,51 +261,51 @@ void schedule_own_packet(struct batman_if *batman_if) | |||
260 | * outdated packets (especially uninitialized mac addresses) in the | 261 | * outdated packets (especially uninitialized mac addresses) in the |
261 | * packet queue | 262 | * packet queue |
262 | */ | 263 | */ |
263 | if (batman_if->if_status == IF_TO_BE_ACTIVATED) | 264 | if (hard_iface->if_status == IF_TO_BE_ACTIVATED) |
264 | batman_if->if_status = IF_ACTIVE; | 265 | hard_iface->if_status = IF_ACTIVE; |
265 | 266 | ||
266 | /* if local hna has changed and interface is a primary interface */ | 267 | /* if local hna has changed and interface is a primary interface */ |
267 | if ((atomic_read(&bat_priv->hna_local_changed)) && | 268 | if ((atomic_read(&bat_priv->hna_local_changed)) && |
268 | (batman_if == bat_priv->primary_if)) | 269 | (hard_iface == bat_priv->primary_if)) |
269 | rebuild_batman_packet(bat_priv, batman_if); | 270 | rebuild_batman_packet(bat_priv, hard_iface); |
270 | 271 | ||
271 | /** | 272 | /** |
272 | * NOTE: packet_buff might just have been re-allocated in | 273 | * NOTE: packet_buff might just have been re-allocated in |
273 | * rebuild_batman_packet() | 274 | * rebuild_batman_packet() |
274 | */ | 275 | */ |
275 | batman_packet = (struct batman_packet *)batman_if->packet_buff; | 276 | batman_packet = (struct batman_packet *)hard_iface->packet_buff; |
276 | 277 | ||
277 | /* change sequence number to network order */ | 278 | /* change sequence number to network order */ |
278 | batman_packet->seqno = | 279 | batman_packet->seqno = |
279 | htonl((uint32_t)atomic_read(&batman_if->seqno)); | 280 | htonl((uint32_t)atomic_read(&hard_iface->seqno)); |
280 | 281 | ||
281 | if (vis_server == VIS_TYPE_SERVER_SYNC) | 282 | if (vis_server == VIS_TYPE_SERVER_SYNC) |
282 | batman_packet->flags |= VIS_SERVER; | 283 | batman_packet->flags |= VIS_SERVER; |
283 | else | 284 | else |
284 | batman_packet->flags &= ~VIS_SERVER; | 285 | batman_packet->flags &= ~VIS_SERVER; |
285 | 286 | ||
286 | if ((batman_if == bat_priv->primary_if) && | 287 | if ((hard_iface == bat_priv->primary_if) && |
287 | (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER)) | 288 | (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER)) |
288 | batman_packet->gw_flags = | 289 | batman_packet->gw_flags = |
289 | (uint8_t)atomic_read(&bat_priv->gw_bandwidth); | 290 | (uint8_t)atomic_read(&bat_priv->gw_bandwidth); |
290 | else | 291 | else |
291 | batman_packet->gw_flags = 0; | 292 | batman_packet->gw_flags = 0; |
292 | 293 | ||
293 | atomic_inc(&batman_if->seqno); | 294 | atomic_inc(&hard_iface->seqno); |
294 | 295 | ||
295 | slide_own_bcast_window(batman_if); | 296 | slide_own_bcast_window(hard_iface); |
296 | send_time = own_send_time(bat_priv); | 297 | send_time = own_send_time(bat_priv); |
297 | add_bat_packet_to_list(bat_priv, | 298 | add_bat_packet_to_list(bat_priv, |
298 | batman_if->packet_buff, | 299 | hard_iface->packet_buff, |
299 | batman_if->packet_len, | 300 | hard_iface->packet_len, |
300 | batman_if, 1, send_time); | 301 | hard_iface, 1, send_time); |
301 | } | 302 | } |
302 | 303 | ||
303 | void schedule_forward_packet(struct orig_node *orig_node, | 304 | void schedule_forward_packet(struct orig_node *orig_node, |
304 | struct ethhdr *ethhdr, | 305 | struct ethhdr *ethhdr, |
305 | struct batman_packet *batman_packet, | 306 | struct batman_packet *batman_packet, |
306 | uint8_t directlink, int hna_buff_len, | 307 | uint8_t directlink, int hna_buff_len, |
307 | struct batman_if *if_incoming) | 308 | struct hard_iface *if_incoming) |
308 | { | 309 | { |
309 | struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); | 310 | struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); |
310 | unsigned char in_tq, in_ttl, tq_avg = 0; | 311 | unsigned char in_tq, in_ttl, tq_avg = 0; |
@@ -443,7 +444,7 @@ out: | |||
443 | 444 | ||
444 | static void send_outstanding_bcast_packet(struct work_struct *work) | 445 | static void send_outstanding_bcast_packet(struct work_struct *work) |
445 | { | 446 | { |
446 | struct batman_if *batman_if; | 447 | struct hard_iface *hard_iface; |
447 | struct delayed_work *delayed_work = | 448 | struct delayed_work *delayed_work = |
448 | container_of(work, struct delayed_work, work); | 449 | container_of(work, struct delayed_work, work); |
449 | struct forw_packet *forw_packet = | 450 | struct forw_packet *forw_packet = |
@@ -461,14 +462,14 @@ static void send_outstanding_bcast_packet(struct work_struct *work) | |||
461 | 462 | ||
462 | /* rebroadcast packet */ | 463 | /* rebroadcast packet */ |
463 | rcu_read_lock(); | 464 | rcu_read_lock(); |
464 | list_for_each_entry_rcu(batman_if, &hardif_list, list) { | 465 | list_for_each_entry_rcu(hard_iface, &hardif_list, list) { |
465 | if (batman_if->soft_iface != soft_iface) | 466 | if (hard_iface->soft_iface != soft_iface) |
466 | continue; | 467 | continue; |
467 | 468 | ||
468 | /* send a copy of the saved skb */ | 469 | /* send a copy of the saved skb */ |
469 | skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC); | 470 | skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC); |
470 | if (skb1) | 471 | if (skb1) |
471 | send_skb_packet(skb1, batman_if, broadcast_addr); | 472 | send_skb_packet(skb1, hard_iface, broadcast_addr); |
472 | } | 473 | } |
473 | rcu_read_unlock(); | 474 | rcu_read_unlock(); |
474 | 475 | ||
@@ -521,15 +522,15 @@ out: | |||
521 | } | 522 | } |
522 | 523 | ||
523 | void purge_outstanding_packets(struct bat_priv *bat_priv, | 524 | void purge_outstanding_packets(struct bat_priv *bat_priv, |
524 | struct batman_if *batman_if) | 525 | struct hard_iface *hard_iface) |
525 | { | 526 | { |
526 | struct forw_packet *forw_packet; | 527 | struct forw_packet *forw_packet; |
527 | struct hlist_node *tmp_node, *safe_tmp_node; | 528 | struct hlist_node *tmp_node, *safe_tmp_node; |
528 | 529 | ||
529 | if (batman_if) | 530 | if (hard_iface) |
530 | bat_dbg(DBG_BATMAN, bat_priv, | 531 | bat_dbg(DBG_BATMAN, bat_priv, |
531 | "purge_outstanding_packets(): %s\n", | 532 | "purge_outstanding_packets(): %s\n", |
532 | batman_if->net_dev->name); | 533 | hard_iface->net_dev->name); |
533 | else | 534 | else |
534 | bat_dbg(DBG_BATMAN, bat_priv, | 535 | bat_dbg(DBG_BATMAN, bat_priv, |
535 | "purge_outstanding_packets()\n"); | 536 | "purge_outstanding_packets()\n"); |
@@ -543,8 +544,8 @@ void purge_outstanding_packets(struct bat_priv *bat_priv, | |||
543 | * if purge_outstanding_packets() was called with an argument | 544 | * if purge_outstanding_packets() was called with an argument |
544 | * we delete only packets belonging to the given interface | 545 | * we delete only packets belonging to the given interface |
545 | */ | 546 | */ |
546 | if ((batman_if) && | 547 | if ((hard_iface) && |
547 | (forw_packet->if_incoming != batman_if)) | 548 | (forw_packet->if_incoming != hard_iface)) |
548 | continue; | 549 | continue; |
549 | 550 | ||
550 | spin_unlock_bh(&bat_priv->forw_bcast_list_lock); | 551 | spin_unlock_bh(&bat_priv->forw_bcast_list_lock); |
@@ -567,8 +568,8 @@ void purge_outstanding_packets(struct bat_priv *bat_priv, | |||
567 | * if purge_outstanding_packets() was called with an argument | 568 | * if purge_outstanding_packets() was called with an argument |
568 | * we delete only packets belonging to the given interface | 569 | * we delete only packets belonging to the given interface |
569 | */ | 570 | */ |
570 | if ((batman_if) && | 571 | if ((hard_iface) && |
571 | (forw_packet->if_incoming != batman_if)) | 572 | (forw_packet->if_incoming != hard_iface)) |
572 | continue; | 573 | continue; |
573 | 574 | ||
574 | spin_unlock_bh(&bat_priv->forw_bat_list_lock); | 575 | spin_unlock_bh(&bat_priv->forw_bat_list_lock); |
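
The send_packet_to_if() hunks above keep the aggregation logic intact: direct_link_flags is a bitmask with one bit per OGM in the aggregated buffer, and DIRECTLINK is set only for packets whose bit is set and whose forw_packet came in on the interface now being transmitted on. A small user-space sketch of that per-position decision follows; the struct layout and flag value are illustrative, not the on-wire batman_packet.

    #include <stdint.h>
    #include <stdio.h>

    #define DIRECTLINK 0x01               /* illustrative flag value */

    struct ogm {                          /* reduced stand-in for one aggregated OGM */
        uint8_t flags;
    };

    /* apply the DIRECTLINK bit per aggregated packet, mirroring the hunk above:
     * bit N of direct_link_flags corresponds to the N-th OGM in the buffer, and
     * it only counts when we retransmit on the interface the OGM came in on */
    static void mark_direct_links(struct ogm *ogms, int num_packets,
                                  uint32_t direct_link_flags,
                                  int tx_iface, int incoming_iface)
    {
        for (int packet_num = 0; packet_num < num_packets; packet_num++) {
            if ((direct_link_flags & (1u << packet_num)) &&
                (incoming_iface == tx_iface))
                ogms[packet_num].flags |= DIRECTLINK;
            else
                ogms[packet_num].flags &= ~DIRECTLINK;
        }
    }

    int main(void)
    {
        struct ogm buf[3] = { { 0 }, { 0 }, { 0 } };

        /* packets 0 and 2 were received over a direct link */
        mark_direct_links(buf, 3, 0x5, /*tx_iface=*/1, /*incoming_iface=*/1);

        for (int i = 0; i < 3; i++)
            printf("packet %d: DIRECTLINK %s\n", i,
                   (buf[i].flags & DIRECTLINK) ? "on" : "off");
        return 0;
    }
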
diff --git a/net/batman-adv/send.h b/net/batman-adv/send.h index b68c272cb84f..7b2ff19c05e7 100644 --- a/net/batman-adv/send.h +++ b/net/batman-adv/send.h | |||
@@ -23,17 +23,17 @@ | |||
23 | #define _NET_BATMAN_ADV_SEND_H_ | 23 | #define _NET_BATMAN_ADV_SEND_H_ |
24 | 24 | ||
25 | int send_skb_packet(struct sk_buff *skb, | 25 | int send_skb_packet(struct sk_buff *skb, |
26 | struct batman_if *batman_if, | 26 | struct hard_iface *hard_iface, |
27 | uint8_t *dst_addr); | 27 | uint8_t *dst_addr); |
28 | void schedule_own_packet(struct batman_if *batman_if); | 28 | void schedule_own_packet(struct hard_iface *hard_iface); |
29 | void schedule_forward_packet(struct orig_node *orig_node, | 29 | void schedule_forward_packet(struct orig_node *orig_node, |
30 | struct ethhdr *ethhdr, | 30 | struct ethhdr *ethhdr, |
31 | struct batman_packet *batman_packet, | 31 | struct batman_packet *batman_packet, |
32 | uint8_t directlink, int hna_buff_len, | 32 | uint8_t directlink, int hna_buff_len, |
33 | struct batman_if *if_outgoing); | 33 | struct hard_iface *if_outgoing); |
34 | int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb); | 34 | int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb); |
35 | void send_outstanding_bat_packet(struct work_struct *work); | 35 | void send_outstanding_bat_packet(struct work_struct *work); |
36 | void purge_outstanding_packets(struct bat_priv *bat_priv, | 36 | void purge_outstanding_packets(struct bat_priv *bat_priv, |
37 | struct batman_if *batman_if); | 37 | struct hard_iface *hard_iface); |
38 | 38 | ||
39 | #endif /* _NET_BATMAN_ADV_SEND_H_ */ | 39 | #endif /* _NET_BATMAN_ADV_SEND_H_ */ |
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index bea2dcf6bef5..95d1c3f86a66 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c | |||
@@ -414,7 +414,7 @@ end: | |||
414 | } | 414 | } |
415 | 415 | ||
416 | void interface_rx(struct net_device *soft_iface, | 416 | void interface_rx(struct net_device *soft_iface, |
417 | struct sk_buff *skb, struct batman_if *recv_if, | 417 | struct sk_buff *skb, struct hard_iface *recv_if, |
418 | int hdr_size) | 418 | int hdr_size) |
419 | { | 419 | { |
420 | struct bat_priv *bat_priv = netdev_priv(soft_iface); | 420 | struct bat_priv *bat_priv = netdev_priv(soft_iface); |
diff --git a/net/batman-adv/soft-interface.h b/net/batman-adv/soft-interface.h index e7b0e1a34a55..80a3607df186 100644 --- a/net/batman-adv/soft-interface.h +++ b/net/batman-adv/soft-interface.h | |||
@@ -27,7 +27,7 @@ int softif_neigh_seq_print_text(struct seq_file *seq, void *offset); | |||
27 | void softif_neigh_purge(struct bat_priv *bat_priv); | 27 | void softif_neigh_purge(struct bat_priv *bat_priv); |
28 | int interface_tx(struct sk_buff *skb, struct net_device *soft_iface); | 28 | int interface_tx(struct sk_buff *skb, struct net_device *soft_iface); |
29 | void interface_rx(struct net_device *soft_iface, | 29 | void interface_rx(struct net_device *soft_iface, |
30 | struct sk_buff *skb, struct batman_if *recv_if, | 30 | struct sk_buff *skb, struct hard_iface *recv_if, |
31 | int hdr_size); | 31 | int hdr_size); |
32 | struct net_device *softif_create(char *name); | 32 | struct net_device *softif_create(char *name); |
33 | void softif_destroy(struct net_device *soft_iface); | 33 | void softif_destroy(struct net_device *soft_iface); |
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h index a9bf1860819d..83445cf0cc9f 100644 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h | |||
@@ -33,7 +33,7 @@ | |||
33 | sizeof(struct bcast_packet)))) | 33 | sizeof(struct bcast_packet)))) |
34 | 34 | ||
35 | 35 | ||
36 | struct batman_if { | 36 | struct hard_iface { |
37 | struct list_head list; | 37 | struct list_head list; |
38 | int16_t if_num; | 38 | int16_t if_num; |
39 | char if_status; | 39 | char if_status; |
@@ -124,7 +124,7 @@ struct neigh_node { | |||
124 | atomic_t refcount; | 124 | atomic_t refcount; |
125 | struct rcu_head rcu; | 125 | struct rcu_head rcu; |
126 | struct orig_node *orig_node; | 126 | struct orig_node *orig_node; |
127 | struct batman_if *if_incoming; | 127 | struct hard_iface *if_incoming; |
128 | }; | 128 | }; |
129 | 129 | ||
130 | 130 | ||
@@ -148,7 +148,7 @@ struct bat_priv { | |||
148 | struct hlist_head softif_neigh_list; | 148 | struct hlist_head softif_neigh_list; |
149 | struct softif_neigh *softif_neigh; | 149 | struct softif_neigh *softif_neigh; |
150 | struct debug_log *debug_log; | 150 | struct debug_log *debug_log; |
151 | struct batman_if *primary_if; | 151 | struct hard_iface *primary_if; |
152 | struct kobject *mesh_obj; | 152 | struct kobject *mesh_obj; |
153 | struct dentry *debug_dir; | 153 | struct dentry *debug_dir; |
154 | struct hlist_head forw_bat_list; | 154 | struct hlist_head forw_bat_list; |
@@ -217,7 +217,7 @@ struct forw_packet { | |||
217 | uint32_t direct_link_flags; | 217 | uint32_t direct_link_flags; |
218 | uint8_t num_packets; | 218 | uint8_t num_packets; |
219 | struct delayed_work delayed_work; | 219 | struct delayed_work delayed_work; |
220 | struct batman_if *if_incoming; | 220 | struct hard_iface *if_incoming; |
221 | }; | 221 | }; |
222 | 222 | ||
223 | /* While scanning for vis-entries of a particular vis-originator | 223 | /* While scanning for vis-entries of a particular vis-originator |
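
The types.h hunks show only the lines the rename touches, but the members dereferenced throughout this diff give a fair picture of what struct hard_iface carries. The reduced user-space mirror below lists just those members; the field types are approximations for illustration, and the authoritative layout is the full definition in types.h.

    #include <stdint.h>
    #include <stdio.h>

    /* user-space approximation: only the members this diff actually touches */
    struct hard_iface_view {
        int16_t  if_num;            /* index into per-originator bcast_own words   */
        char     if_status;         /* IF_ACTIVE, IF_NOT_IN_USE, ...               */
        void    *net_dev;           /* underlying net_device (name, dev_addr, ...) */
        void    *soft_iface;        /* the batman soft interface it is bound to    */
        unsigned char *packet_buff; /* pre-built OGM, rebuilt on HNA changes       */
        int      packet_len;
        uint32_t seqno;             /* atomic_t in the kernel: own OGM sequence    */
        uint32_t frag_seqno;        /* atomic_t in the kernel: unicast fragments   */
    };

    int main(void)
    {
        printf("illustrative hard_iface view: %zu bytes\n",
               sizeof(struct hard_iface_view));
        return 0;
    }
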
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c index b4114385dc56..7238f041d3c5 100644 --- a/net/batman-adv/unicast.c +++ b/net/batman-adv/unicast.c | |||
@@ -213,7 +213,7 @@ out: | |||
213 | } | 213 | } |
214 | 214 | ||
215 | int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv, | 215 | int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv, |
216 | struct batman_if *batman_if, uint8_t dstaddr[]) | 216 | struct hard_iface *hard_iface, uint8_t dstaddr[]) |
217 | { | 217 | { |
218 | struct unicast_packet tmp_uc, *unicast_packet; | 218 | struct unicast_packet tmp_uc, *unicast_packet; |
219 | struct sk_buff *frag_skb; | 219 | struct sk_buff *frag_skb; |
@@ -258,12 +258,12 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv, | |||
258 | frag1->flags = UNI_FRAG_HEAD | large_tail; | 258 | frag1->flags = UNI_FRAG_HEAD | large_tail; |
259 | frag2->flags = large_tail; | 259 | frag2->flags = large_tail; |
260 | 260 | ||
261 | seqno = atomic_add_return(2, &batman_if->frag_seqno); | 261 | seqno = atomic_add_return(2, &hard_iface->frag_seqno); |
262 | frag1->seqno = htons(seqno - 1); | 262 | frag1->seqno = htons(seqno - 1); |
263 | frag2->seqno = htons(seqno); | 263 | frag2->seqno = htons(seqno); |
264 | 264 | ||
265 | send_skb_packet(skb, batman_if, dstaddr); | 265 | send_skb_packet(skb, hard_iface, dstaddr); |
266 | send_skb_packet(frag_skb, batman_if, dstaddr); | 266 | send_skb_packet(frag_skb, hard_iface, dstaddr); |
267 | return NET_RX_SUCCESS; | 267 | return NET_RX_SUCCESS; |
268 | 268 | ||
269 | drop_frag: | 269 | drop_frag: |
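
frag_send_skb() reserves two consecutive sequence numbers for the fragment pair by adding two to the per-interface frag_seqno and giving seqno-1 to the head fragment and seqno to the tail. A self-contained sketch of that allocation using C11 atomics follows; the flag value and the struct are illustrative, and htons() is left as a comment since byte order is irrelevant to the point.

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    #define UNI_FRAG_HEAD 0x01            /* illustrative flag bit */

    struct frag_hdr {
        uint8_t  flags;
        uint16_t seqno;                   /* stored in network order in the kernel */
    };

    /* allocate a consecutive seqno pair for a head/tail fragment, as in the hunk:
     * seqno = atomic_add_return(2, &frag_seqno); head gets seqno-1, tail gets seqno */
    static void number_fragments(atomic_uint *frag_seqno,
                                 struct frag_hdr *head, struct frag_hdr *tail,
                                 uint8_t large_tail)
    {
        unsigned int seqno = atomic_fetch_add(frag_seqno, 2) + 2;

        head->flags = UNI_FRAG_HEAD | large_tail;
        tail->flags = large_tail;
        head->seqno = (uint16_t)(seqno - 1);   /* htons() in the kernel */
        tail->seqno = (uint16_t)seqno;
    }

    int main(void)
    {
        atomic_uint frag_seqno = 100;
        struct frag_hdr head, tail;

        number_fragments(&frag_seqno, &head, &tail, 0);
        printf("head seqno=%u tail seqno=%u\n", head.seqno, tail.seqno);
        return 0;
    }
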
diff --git a/net/batman-adv/unicast.h b/net/batman-adv/unicast.h index 8897308281d4..16ad7a9242b5 100644 --- a/net/batman-adv/unicast.h +++ b/net/batman-adv/unicast.h | |||
@@ -32,7 +32,7 @@ int frag_reassemble_skb(struct sk_buff *skb, struct bat_priv *bat_priv, | |||
32 | void frag_list_free(struct list_head *head); | 32 | void frag_list_free(struct list_head *head); |
33 | int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv); | 33 | int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv); |
34 | int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv, | 34 | int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv, |
35 | struct batman_if *batman_if, uint8_t dstaddr[]); | 35 | struct hard_iface *hard_iface, uint8_t dstaddr[]); |
36 | 36 | ||
37 | static inline int frag_can_reassemble(struct sk_buff *skb, int mtu) | 37 | static inline int frag_can_reassemble(struct sk_buff *skb, int mtu) |
38 | { | 38 | { |
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c index e8911cbb8699..3da499baf591 100644 --- a/net/batman-adv/vis.c +++ b/net/batman-adv/vis.c | |||
@@ -730,7 +730,7 @@ static void broadcast_vis_packet(struct bat_priv *bat_priv, | |||
730 | struct orig_node *orig_node; | 730 | struct orig_node *orig_node; |
731 | struct vis_packet *packet; | 731 | struct vis_packet *packet; |
732 | struct sk_buff *skb; | 732 | struct sk_buff *skb; |
733 | struct batman_if *batman_if; | 733 | struct hard_iface *hard_iface; |
734 | uint8_t dstaddr[ETH_ALEN]; | 734 | uint8_t dstaddr[ETH_ALEN]; |
735 | int i; | 735 | int i; |
736 | 736 | ||
@@ -755,12 +755,12 @@ static void broadcast_vis_packet(struct bat_priv *bat_priv, | |||
755 | continue; | 755 | continue; |
756 | 756 | ||
757 | memcpy(packet->target_orig, orig_node->orig, ETH_ALEN); | 757 | memcpy(packet->target_orig, orig_node->orig, ETH_ALEN); |
758 | batman_if = orig_node->router->if_incoming; | 758 | hard_iface = orig_node->router->if_incoming; |
759 | memcpy(dstaddr, orig_node->router->addr, ETH_ALEN); | 759 | memcpy(dstaddr, orig_node->router->addr, ETH_ALEN); |
760 | 760 | ||
761 | skb = skb_clone(info->skb_packet, GFP_ATOMIC); | 761 | skb = skb_clone(info->skb_packet, GFP_ATOMIC); |
762 | if (skb) | 762 | if (skb) |
763 | send_skb_packet(skb, batman_if, dstaddr); | 763 | send_skb_packet(skb, hard_iface, dstaddr); |
764 | 764 | ||
765 | } | 765 | } |
766 | rcu_read_unlock(); | 766 | rcu_read_unlock(); |
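
broadcast_vis_packet() above uses the same clone-then-send idiom as send.c: because send_skb_packet() consumes the buffer it is given, the cached vis skb is cloned once per recipient and the clone is handed to the router's incoming interface. The short user-space sketch below models only that ownership rule; plain malloc/free stands in for skb_clone/kfree_skb and all names are illustrative.

    #include <stdlib.h>
    #include <string.h>
    #include <stdio.h>

    struct buf {
        size_t len;
        unsigned char data[64];
    };

    /* stand-in for send_skb_packet(): takes ownership of (and frees) its buffer */
    static void send_consuming(struct buf *b, int iface)
    {
        printf("tx %zu bytes on iface %d\n", b->len, iface);
        free(b);
    }

    /* stand-in for skb_clone(): duplicate the cached packet for one recipient */
    static struct buf *clone_buf(const struct buf *src)
    {
        struct buf *copy = malloc(sizeof(*copy));
        if (copy)
            memcpy(copy, src, sizeof(*copy));
        return copy;
    }

    int main(void)
    {
        struct buf master = { .len = 20 };   /* the cached vis packet */
        int recipients[] = { 0, 1, 2 };      /* routers reached via different interfaces */

        for (size_t i = 0; i < sizeof(recipients) / sizeof(recipients[0]); i++) {
            struct buf *copy = clone_buf(&master);
            if (copy)                        /* mirrors the "if (skb)" check above */
                send_consuming(copy, recipients[i]);
        }
        return 0;
    }
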