Diffstat (limited to 'net/batman-adv/send.c')
 -rw-r--r--  net/batman-adv/send.c | 147
 1 file changed, 101 insertions, 46 deletions
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index 33779278f1b..58d14472068 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -33,14 +33,14 @@
 static void send_outstanding_bcast_packet(struct work_struct *work);
 
 /* apply hop penalty for a normal link */
-static uint8_t hop_penalty(const uint8_t tq, struct bat_priv *bat_priv)
+static uint8_t hop_penalty(uint8_t tq, const struct bat_priv *bat_priv)
 {
 	int hop_penalty = atomic_read(&bat_priv->hop_penalty);
 	return (tq * (TQ_MAX_VALUE - hop_penalty)) / (TQ_MAX_VALUE);
 }
 
 /* when do we schedule our own packet to be sent */
-static unsigned long own_send_time(struct bat_priv *bat_priv)
+static unsigned long own_send_time(const struct bat_priv *bat_priv)
 {
 	return jiffies + msecs_to_jiffies(
 			atomic_read(&bat_priv->orig_interval) -
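The hunk above also shows the TQ hop-penalty arithmetic: each hop scales the received transmit quality by (TQ_MAX_VALUE - hop_penalty) / TQ_MAX_VALUE using integer math. A minimal userspace sketch, assuming batman-adv's usual TQ_MAX_VALUE of 255 and an illustrative penalty of 10 (the real value is runtime-tunable through bat_priv->hop_penalty):

	#include <stdint.h>
	#include <stdio.h>

	#define TQ_MAX_VALUE 255	/* batman-adv's TQ ceiling */

	/* same formula as the kernel helper above, minus the atomic read */
	static uint8_t hop_penalty(uint8_t tq, int penalty)
	{
		return (tq * (TQ_MAX_VALUE - penalty)) / TQ_MAX_VALUE;
	}

	int main(void)
	{
		printf("%u\n", hop_penalty(255, 10));	/* perfect link: 255 -> 245 */
		printf("%u\n", hop_penalty(100, 10));	/* scales linearly: 100 -> 96 */
		return 0;
	}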
@@ -55,9 +55,8 @@ static unsigned long forward_send_time(void)
 
 /* send out an already prepared packet to the given address via the
  * specified batman interface */
-int send_skb_packet(struct sk_buff *skb,
-		    struct hard_iface *hard_iface,
-		    uint8_t *dst_addr)
+int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
+		    const uint8_t *dst_addr)
 {
 	struct ethhdr *ethhdr;
 
@@ -74,7 +73,7 @@ int send_skb_packet(struct sk_buff *skb,
 	}
 
 	/* push to the ethernet header. */
-	if (my_skb_head_push(skb, sizeof(struct ethhdr)) < 0)
+	if (my_skb_head_push(skb, sizeof(*ethhdr)) < 0)
 		goto send_skb_err;
 
 	skb_reset_mac_header(skb);
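The sizeof(struct ethhdr) to sizeof(*ethhdr) change above (and its siblings later in this patch) follows the kernel's preferred style: sizing off the dereferenced pointer keeps the expression correct if the variable's type ever changes. The canonical form, from the kernel's CodingStyle chapter on allocating memory:

	struct foo *p;

	p = kmalloc(sizeof(*p), GFP_KERNEL);	/* size tracks p's type */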
@@ -121,7 +120,7 @@ static void send_packet_to_if(struct forw_packet *forw_packet,
 	/* adjust all flags and log packets */
 	while (aggregated_packet(buff_pos,
 				 forw_packet->packet_len,
-				 batman_packet->num_tt)) {
+				 batman_packet->tt_num_changes)) {
 
 		/* we might have aggregated direct link packets with an
 		 * ordinary base packet */
@@ -136,17 +135,17 @@ static void send_packet_to_if(struct forw_packet *forw_packet,
 			  "Forwarding"));
 		bat_dbg(DBG_BATMAN, bat_priv,
 			"%s %spacket (originator %pM, seqno %d, TQ %d, TTL %d,"
-			" IDF %s) on interface %s [%pM]\n",
+			" IDF %s, hvn %d) on interface %s [%pM]\n",
 			fwd_str, (packet_num > 0 ? "aggregated " : ""),
 			batman_packet->orig, ntohl(batman_packet->seqno),
 			batman_packet->tq, batman_packet->ttl,
 			(batman_packet->flags & DIRECTLINK ?
 			 "on" : "off"),
-			hard_iface->net_dev->name,
+			batman_packet->ttvn, hard_iface->net_dev->name,
 			hard_iface->net_dev->dev_addr);
 
-		buff_pos += sizeof(struct batman_packet) +
-			(batman_packet->num_tt * ETH_ALEN);
+		buff_pos += sizeof(*batman_packet) +
+			tt_len(batman_packet->tt_num_changes);
 		packet_num++;
 		batman_packet = (struct batman_packet *)
 			(forw_packet->skb->data + buff_pos);
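The aggregation walk above now advances by tt_len(tt_num_changes) instead of num_tt * ETH_ALEN: an OGM no longer appends a bare list of client MAC addresses but a list of translation-table change records. tt_len() itself is not part of this file; a plausible definition, assuming each announced change is one fixed-size record as elsewhere in this patch series:

	/* assumed helper, not taken from this diff: the TT payload is
	 * tt_num_changes fixed-size change records after the OGM header */
	#define tt_len(changes_num) ((changes_num) * sizeof(struct tt_change))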
@@ -164,26 +163,31 @@ static void send_packet(struct forw_packet *forw_packet)
 	struct hard_iface *hard_iface;
 	struct net_device *soft_iface;
 	struct bat_priv *bat_priv;
+	struct hard_iface *primary_if = NULL;
 	struct batman_packet *batman_packet =
 		(struct batman_packet *)(forw_packet->skb->data);
-	unsigned char directlink = (batman_packet->flags & DIRECTLINK ? 1 : 0);
+	int directlink = (batman_packet->flags & DIRECTLINK ? 1 : 0);
 
 	if (!forw_packet->if_incoming) {
 		pr_err("Error - can't forward packet: incoming iface not "
 		       "specified\n");
-		return;
+		goto out;
 	}
 
 	soft_iface = forw_packet->if_incoming->soft_iface;
 	bat_priv = netdev_priv(soft_iface);
 
 	if (forw_packet->if_incoming->if_status != IF_ACTIVE)
-		return;
+		goto out;
+
+	primary_if = primary_if_get_selected(bat_priv);
+	if (!primary_if)
+		goto out;
 
 	/* multihomed peer assumed */
 	/* non-primary OGMs are only broadcasted on their interface */
 	if ((directlink && (batman_packet->ttl == 1)) ||
-	    (forw_packet->own && (forw_packet->if_incoming->if_num > 0))) {
+	    (forw_packet->own && (forw_packet->if_incoming != primary_if))) {
 
 		/* FIXME: what about aggregated packets ? */
 		bat_dbg(DBG_BATMAN, bat_priv,
@@ -200,7 +204,7 @@ static void send_packet(struct forw_packet *forw_packet)
 				broadcast_addr);
 		forw_packet->skb = NULL;
 
-		return;
+		goto out;
 	}
 
 	/* broadcast on every interface */
@@ -212,28 +216,24 @@ static void send_packet(struct forw_packet *forw_packet)
 		send_packet_to_if(forw_packet, hard_iface);
 	}
 	rcu_read_unlock();
+
+out:
+	if (primary_if)
+		hardif_free_ref(primary_if);
 }
 
-static void rebuild_batman_packet(struct bat_priv *bat_priv,
-				  struct hard_iface *hard_iface)
+static void realloc_packet_buffer(struct hard_iface *hard_iface,
+				  int new_len)
 {
-	int new_len;
 	unsigned char *new_buff;
 	struct batman_packet *batman_packet;
 
-	new_len = sizeof(struct batman_packet) +
-			(bat_priv->num_local_tt * ETH_ALEN);
 	new_buff = kmalloc(new_len, GFP_ATOMIC);
 
 	/* keep old buffer if kmalloc should fail */
 	if (new_buff) {
 		memcpy(new_buff, hard_iface->packet_buff,
-		       sizeof(struct batman_packet));
-		batman_packet = (struct batman_packet *)new_buff;
-
-		batman_packet->num_tt = tt_local_fill_buffer(bat_priv,
-				new_buff + sizeof(struct batman_packet),
-				new_len - sizeof(struct batman_packet));
+		       sizeof(*batman_packet));
 
 		kfree(hard_iface->packet_buff);
 		hard_iface->packet_buff = new_buff;
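With this hunk, send_packet() holds a counted reference on the primary interface and funnels every early return through the single out: label, so the reference is released exactly once on all paths. The pattern, reduced to its skeleton (function name illustrative):

	static void use_primary_if(struct bat_priv *bat_priv)
	{
		struct hard_iface *primary_if = NULL;

		primary_if = primary_if_get_selected(bat_priv);	/* takes a ref */
		if (!primary_if)
			goto out;

		/* ... any amount of work, bailing out with goto out ... */

	out:
		if (primary_if)
			hardif_free_ref(primary_if);	/* drops the ref */
	}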
@@ -241,6 +241,46 @@ static void rebuild_batman_packet(struct bat_priv *bat_priv,
 	}
 }
 
+/* when calling this function (hard_iface == primary_if) has to be true */
+static void prepare_packet_buffer(struct bat_priv *bat_priv,
+				  struct hard_iface *hard_iface)
+{
+	int new_len;
+	struct batman_packet *batman_packet;
+
+	new_len = BAT_PACKET_LEN +
+		  tt_len((uint8_t)atomic_read(&bat_priv->tt_local_changes));
+
+	/* if we have too many changes for one packet don't send any
+	 * and wait for the tt table request which will be fragmented */
+	if (new_len > hard_iface->soft_iface->mtu)
+		new_len = BAT_PACKET_LEN;
+
+	realloc_packet_buffer(hard_iface, new_len);
+	batman_packet = (struct batman_packet *)hard_iface->packet_buff;
+
+	atomic_set(&bat_priv->tt_crc, tt_local_crc(bat_priv));
+
+	/* reset the sending counter */
+	atomic_set(&bat_priv->tt_ogm_append_cnt, TT_OGM_APPEND_MAX);
+
+	batman_packet->tt_num_changes = tt_changes_fill_buffer(bat_priv,
+				hard_iface->packet_buff + BAT_PACKET_LEN,
+				hard_iface->packet_len - BAT_PACKET_LEN);
+
+}
+
+static void reset_packet_buffer(struct bat_priv *bat_priv,
+				struct hard_iface *hard_iface)
+{
+	struct batman_packet *batman_packet;
+
+	realloc_packet_buffer(hard_iface, BAT_PACKET_LEN);
+
+	batman_packet = (struct batman_packet *)hard_iface->packet_buff;
+	batman_packet->tt_num_changes = 0;
+}
+
 void schedule_own_packet(struct hard_iface *hard_iface)
 {
 	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
@@ -266,14 +306,21 @@ void schedule_own_packet(struct hard_iface *hard_iface)
 	if (hard_iface->if_status == IF_TO_BE_ACTIVATED)
 		hard_iface->if_status = IF_ACTIVE;
 
-	/* if local tt has changed and interface is a primary interface */
-	if ((atomic_read(&bat_priv->tt_local_changed)) &&
-	    (hard_iface == primary_if))
-		rebuild_batman_packet(bat_priv, hard_iface);
+	if (hard_iface == primary_if) {
+		/* if at least one change happened */
+		if (atomic_read(&bat_priv->tt_local_changes) > 0) {
+			tt_commit_changes(bat_priv);
+			prepare_packet_buffer(bat_priv, hard_iface);
+		}
+
+		/* if the changes have been sent enough times */
+		if (!atomic_dec_not_zero(&bat_priv->tt_ogm_append_cnt))
+			reset_packet_buffer(bat_priv, hard_iface);
+	}
 
 	/**
 	 * NOTE: packet_buff might just have been re-allocated in
-	 * rebuild_batman_packet()
+	 * prepare_packet_buffer() or in reset_packet_buffer()
 	 */
 	batman_packet = (struct batman_packet *)hard_iface->packet_buff;
 
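Both this hunk and add_bcast_packet_to_list() further down lean on atomic_dec_not_zero(), which is a batman-adv wrapper rather than a core kernel primitive. Assuming the definition from the module's main.h of this era, it decrements the counter only while it is still positive and reports whether it did, which is what lets the append counter disarm the TT payload exactly once after TT_OGM_APPEND_MAX transmissions:

	/* assumed definition from batman-adv's main.h: decrement v unless
	 * it already reached 0; non-zero return means the decrement happened */
	#define atomic_dec_not_zero(v)	atomic_add_unless((v), -1, 0)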
@@ -281,6 +328,9 @@ void schedule_own_packet(struct hard_iface *hard_iface)
 	batman_packet->seqno =
 		htonl((uint32_t)atomic_read(&hard_iface->seqno));
 
+	batman_packet->ttvn = atomic_read(&bat_priv->ttvn);
+	batman_packet->tt_crc = htons((uint16_t)atomic_read(&bat_priv->tt_crc));
+
 	if (vis_server == VIS_TYPE_SERVER_SYNC)
 		batman_packet->flags |= VIS_SERVER;
 	else
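Note the asymmetry between the two new fields above: ttvn is a single byte and goes on the wire as-is, while the 16-bit tt_crc is converted to network byte order with htons(). A receiver reads them back accordingly (sketch, variable names illustrative):

	uint8_t ttvn = batman_packet->ttvn;		/* no conversion needed */
	uint16_t tt_crc = ntohs(batman_packet->tt_crc);	/* back to host order */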
@@ -291,7 +341,7 @@ void schedule_own_packet(struct hard_iface *hard_iface)
 		batman_packet->gw_flags =
 			(uint8_t)atomic_read(&bat_priv->gw_bandwidth);
 	else
-		batman_packet->gw_flags = 0;
+		batman_packet->gw_flags = NO_FLAGS;
 
 	atomic_inc(&hard_iface->seqno);
 
@@ -307,15 +357,16 @@
 }
 
 void schedule_forward_packet(struct orig_node *orig_node,
-			     struct ethhdr *ethhdr,
+			     const struct ethhdr *ethhdr,
 			     struct batman_packet *batman_packet,
-			     uint8_t directlink, int tt_buff_len,
+			     int directlink,
 			     struct hard_iface *if_incoming)
 {
 	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
 	struct neigh_node *router;
-	unsigned char in_tq, in_ttl, tq_avg = 0;
+	uint8_t in_tq, in_ttl, tq_avg = 0;
 	unsigned long send_time;
+	uint8_t tt_num_changes;
 
 	if (batman_packet->ttl <= 1) {
 		bat_dbg(DBG_BATMAN, bat_priv, "ttl exceeded\n");
@@ -326,6 +377,7 @@ void schedule_forward_packet(struct orig_node *orig_node,
 
 	in_tq = batman_packet->tq;
 	in_ttl = batman_packet->ttl;
+	tt_num_changes = batman_packet->tt_num_changes;
 
 	batman_packet->ttl--;
 	memcpy(batman_packet->prev_sender, ethhdr->h_source, ETH_ALEN);
@@ -358,6 +410,7 @@ void schedule_forward_packet(struct orig_node *orig_node,
 			batman_packet->ttl);
 
 	batman_packet->seqno = htonl(batman_packet->seqno);
+	batman_packet->tt_crc = htons(batman_packet->tt_crc);
 
 	/* switch of primaries first hop flag when forwarding */
 	batman_packet->flags &= ~PRIMARIES_FIRST_HOP;
@@ -369,7 +422,7 @@ void schedule_forward_packet(struct orig_node *orig_node,
 	send_time = forward_send_time();
 	add_bat_packet_to_list(bat_priv,
 			       (unsigned char *)batman_packet,
-			       sizeof(struct batman_packet) + tt_buff_len,
+			       sizeof(*batman_packet) + tt_len(tt_num_changes),
 			       if_incoming, 0, send_time);
 }
 
@@ -408,11 +461,13 @@ static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
  *
  * The skb is not consumed, so the caller should make sure that the
  * skb is freed. */
-int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb)
+int add_bcast_packet_to_list(struct bat_priv *bat_priv,
+			     const struct sk_buff *skb, unsigned long delay)
 {
 	struct hard_iface *primary_if = NULL;
 	struct forw_packet *forw_packet;
 	struct bcast_packet *bcast_packet;
+	struct sk_buff *newskb;
 
 	if (!atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
 		bat_dbg(DBG_BATMAN, bat_priv, "bcast packet queue full\n");
@@ -423,28 +478,28 @@ int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb)
 	if (!primary_if)
 		goto out_and_inc;
 
-	forw_packet = kmalloc(sizeof(struct forw_packet), GFP_ATOMIC);
+	forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);
 
 	if (!forw_packet)
 		goto out_and_inc;
 
-	skb = skb_copy(skb, GFP_ATOMIC);
-	if (!skb)
+	newskb = skb_copy(skb, GFP_ATOMIC);
+	if (!newskb)
 		goto packet_free;
 
 	/* as we have a copy now, it is safe to decrease the TTL */
-	bcast_packet = (struct bcast_packet *)skb->data;
+	bcast_packet = (struct bcast_packet *)newskb->data;
 	bcast_packet->ttl--;
 
-	skb_reset_mac_header(skb);
+	skb_reset_mac_header(newskb);
 
-	forw_packet->skb = skb;
+	forw_packet->skb = newskb;
 	forw_packet->if_incoming = primary_if;
 
 	/* how often did we send the bcast packet ? */
 	forw_packet->num_packets = 0;
 
-	_add_bcast_packet_to_list(bat_priv, forw_packet, 1);
+	_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
 	return NETDEV_TX_OK;
 
 packet_free:
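With skb now const-qualified, the function may no longer scribble on the caller's buffer; it copies first, and only the copy has its TTL decremented, which is exactly why the comment "as we have a copy now, it is safe to decrease the TTL" holds. The pattern in isolation (sketch; labels as in the function above):

	struct sk_buff *newskb;

	newskb = skb_copy(skb, GFP_ATOMIC);	/* caller keeps ownership of skb */
	if (!newskb)
		goto packet_free;		/* original is left untouched */

	bcast_packet = (struct bcast_packet *)newskb->data;
	bcast_packet->ttl--;			/* safe: we own newskb */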
@@ -537,7 +592,7 @@ out:
 }
 
 void purge_outstanding_packets(struct bat_priv *bat_priv,
-			       struct hard_iface *hard_iface)
+			       const struct hard_iface *hard_iface)
 {
 	struct forw_packet *forw_packet;
 	struct hlist_node *tmp_node, *safe_tmp_node;