 net/tipc/bcast.c | 226 ++++++++++++++++----------------------------------
 net/tipc/bcast.h |   2 +-
 net/tipc/link.c  |  21 +++++---
 net/tipc/node.c  |   4 +-
 net/tipc/node.h  |  12 ++--
 5 files changed, 100 insertions(+), 165 deletions(-)
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index facc216c6a9..1f3b1607d9d 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -157,39 +157,14 @@ u32 tipc_bclink_get_last_sent(void)
         return bcl->fsm_msg_cnt;
 }
 
-/**
- * bclink_set_gap - set gap according to contents of current deferred pkt queue
- *
- * Called with 'node' locked, bc_lock unlocked
- */
-
-static void bclink_set_gap(struct tipc_node *n_ptr)
-{
-        struct sk_buff *buf = n_ptr->bclink.deferred_head;
-
-        n_ptr->bclink.gap_after = n_ptr->bclink.gap_to =
-                mod(n_ptr->bclink.last_in);
-        if (unlikely(buf != NULL))
-                n_ptr->bclink.gap_to = mod(buf_seqno(buf) - 1);
-}
-
-/**
- * bclink_ack_allowed - test if ACK or NACK message can be sent at this moment
- *
- * This mechanism endeavours to prevent all nodes in network from trying
- * to ACK or NACK at the same time.
- *
- * Note: TIPC uses a different trigger to distribute ACKs than it does to
- * distribute NACKs, but tries to use the same spacing (divide by 16).
- */
-
-static int bclink_ack_allowed(u32 n)
+static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
 {
-        return (n % TIPC_MIN_LINK_WIN) == tipc_own_tag;
+        node->bclink.last_sent = less_eq(node->bclink.last_sent, seqno) ?
+                seqno : node->bclink.last_sent;
 }
 
 
-/**
+/*
  * tipc_bclink_retransmit_to - get most recent node to request retransmission
  *
  * Called with bc_lock locked
@@ -300,44 +275,56 @@ exit:
         spin_unlock_bh(&bc_lock);
 }
 
-/**
- * bclink_send_ack - unicast an ACK msg
+/*
+ * tipc_bclink_update_link_state - update broadcast link state
  *
  * tipc_net_lock and node lock set
  */
 
-static void bclink_send_ack(struct tipc_node *n_ptr)
+void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
 {
-        struct tipc_link *l_ptr = n_ptr->active_links[n_ptr->addr & 1];
+        struct sk_buff *buf;
 
-        if (l_ptr != NULL)
-                tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
-}
+        /* Ignore "stale" link state info */
 
-/**
- * bclink_send_nack- broadcast a NACK msg
- *
- * tipc_net_lock and node lock set
- */
+        if (less_eq(last_sent, n_ptr->bclink.last_in))
+                return;
 
-static void bclink_send_nack(struct tipc_node *n_ptr)
-{
-        struct sk_buff *buf;
-        struct tipc_msg *msg;
+        /* Update link synchronization state; quit if in sync */
+
+        bclink_update_last_sent(n_ptr, last_sent);
+
+        if (n_ptr->bclink.last_sent == n_ptr->bclink.last_in)
+                return;
+
+        /* Update out-of-sync state; quit if loss is still unconfirmed */
 
-        if (!less(n_ptr->bclink.gap_after, n_ptr->bclink.gap_to))
+        if ((++n_ptr->bclink.oos_state) == 1) {
+                if (n_ptr->bclink.deferred_size < (TIPC_MIN_LINK_WIN / 2))
+                        return;
+                n_ptr->bclink.oos_state++;
+        }
+
+        /* Don't NACK if one has been recently sent (or seen) */
+
+        if (n_ptr->bclink.oos_state & 0x1)
                 return;
 
+        /* Send NACK */
+
         buf = tipc_buf_acquire(INT_H_SIZE);
         if (buf) {
-                msg = buf_msg(buf);
+                struct tipc_msg *msg = buf_msg(buf);
+
                 tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG,
                               INT_H_SIZE, n_ptr->addr);
                 msg_set_non_seq(msg, 1);
                 msg_set_mc_netid(msg, tipc_net_id);
-                msg_set_bcast_ack(msg, mod(n_ptr->bclink.last_in));
-                msg_set_bcgap_after(msg, n_ptr->bclink.gap_after);
-                msg_set_bcgap_to(msg, n_ptr->bclink.gap_to);
+                msg_set_bcast_ack(msg, n_ptr->bclink.last_in);
+                msg_set_bcgap_after(msg, n_ptr->bclink.last_in);
+                msg_set_bcgap_to(msg, n_ptr->bclink.deferred_head
+                                 ? buf_seqno(n_ptr->bclink.deferred_head) - 1
+                                 : n_ptr->bclink.last_sent);
                 msg_set_bcast_tag(msg, tipc_own_tag);
 
                 spin_lock_bh(&bc_lock);
@@ -346,96 +333,37 @@ static void bclink_send_nack(struct tipc_node *n_ptr)
                 spin_unlock_bh(&bc_lock);
                 buf_discard(buf);
 
-                /*
-                 * Ensure we doesn't send another NACK msg to the node
-                 * until 16 more deferred messages arrive from it
-                 * (i.e. helps prevent all nodes from NACK'ing at same time)
-                 */
-
-                n_ptr->bclink.nack_sync = tipc_own_tag;
+                n_ptr->bclink.oos_state++;
         }
 }
 
-/**
- * tipc_bclink_check_gap - send a NACK if a sequence gap exists
+/*
+ * bclink_peek_nack - monitor retransmission requests sent by other nodes
  *
- * tipc_net_lock and node lock set
- */
-
-void tipc_bclink_check_gap(struct tipc_node *n_ptr, u32 last_sent)
-{
-        if (!n_ptr->bclink.supported ||
-            less_eq(last_sent, mod(n_ptr->bclink.last_in)))
-                return;
-
-        bclink_set_gap(n_ptr);
-        if (n_ptr->bclink.gap_after == n_ptr->bclink.gap_to)
-                n_ptr->bclink.gap_to = last_sent;
-        bclink_send_nack(n_ptr);
-}
-
-/**
- * tipc_bclink_peek_nack - process a NACK msg meant for another node
+ * Delay any upcoming NACK by this node if another node has already
+ * requested the first message this node is going to ask for.
  *
  * Only tipc_net_lock set.
  */
 
-static void tipc_bclink_peek_nack(u32 dest, u32 sender_tag, u32 gap_after, u32 gap_to)
+static void bclink_peek_nack(struct tipc_msg *msg)
 {
-        struct tipc_node *n_ptr = tipc_node_find(dest);
-        u32 my_after, my_to;
+        struct tipc_node *n_ptr = tipc_node_find(msg_destnode(msg));
 
-        if (unlikely(!n_ptr || !tipc_node_is_up(n_ptr)))
+        if (unlikely(!n_ptr))
                 return;
+
         tipc_node_lock(n_ptr);
-        /*
-         * Modify gap to suppress unnecessary NACKs from this node
-         */
-        my_after = n_ptr->bclink.gap_after;
-        my_to = n_ptr->bclink.gap_to;
-
-        if (less_eq(gap_after, my_after)) {
-                if (less(my_after, gap_to) && less(gap_to, my_to))
-                        n_ptr->bclink.gap_after = gap_to;
-                else if (less_eq(my_to, gap_to))
-                        n_ptr->bclink.gap_to = n_ptr->bclink.gap_after;
-        } else if (less_eq(gap_after, my_to)) {
-                if (less_eq(my_to, gap_to))
-                        n_ptr->bclink.gap_to = gap_after;
-        } else {
-                /*
-                 * Expand gap if missing bufs not in deferred queue:
-                 */
-                struct sk_buff *buf = n_ptr->bclink.deferred_head;
-                u32 prev = n_ptr->bclink.gap_to;
 
-                for (; buf; buf = buf->next) {
-                        u32 seqno = buf_seqno(buf);
+        if (n_ptr->bclink.supported &&
+            (n_ptr->bclink.last_in != n_ptr->bclink.last_sent) &&
+            (n_ptr->bclink.last_in == msg_bcgap_after(msg)))
+                n_ptr->bclink.oos_state = 2;
 
-                        if (mod(seqno - prev) != 1) {
-                                buf = NULL;
-                                break;
-                        }
-                        if (seqno == gap_after)
-                                break;
-                        prev = seqno;
-                }
-                if (buf == NULL)
-                        n_ptr->bclink.gap_to = gap_after;
-        }
-        /*
-         * Some nodes may send a complementary NACK now:
-         */
-        if (bclink_ack_allowed(sender_tag + 1)) {
-                if (n_ptr->bclink.gap_to != n_ptr->bclink.gap_after) {
-                        bclink_send_nack(n_ptr);
-                        bclink_set_gap(n_ptr);
-                }
-        }
         tipc_node_unlock(n_ptr);
 }
 
-/**
+/*
  * tipc_bclink_send_msg - broadcast a packet to all nodes in cluster
  */
 
@@ -505,10 +433,7 @@ void tipc_bclink_recv_pkt(struct sk_buff *buf)
                         spin_unlock_bh(&bc_lock);
                 } else {
                         tipc_node_unlock(node);
-                        tipc_bclink_peek_nack(msg_destnode(msg),
-                                              msg_bcast_tag(msg),
-                                              msg_bcgap_after(msg),
-                                              msg_bcgap_to(msg));
+                        bclink_peek_nack(msg);
                 }
                 goto exit;
         }
@@ -519,16 +444,28 @@ void tipc_bclink_recv_pkt(struct sk_buff *buf)
         next_in = mod(node->bclink.last_in + 1);
 
         if (likely(seqno == next_in)) {
+                bclink_update_last_sent(node, seqno);
 receive:
+                node->bclink.last_in = seqno;
+                node->bclink.oos_state = 0;
+
                 spin_lock_bh(&bc_lock);
                 bcl->stats.recv_info++;
-                node->bclink.last_in++;
-                bclink_set_gap(node);
-                if (unlikely(bclink_ack_allowed(seqno))) {
-                        bclink_send_ack(node);
+
+                /*
+                 * Unicast an ACK periodically, ensuring that
+                 * all nodes in the cluster don't ACK at the same time
+                 */
+
+                if (((seqno - tipc_own_addr) % TIPC_MIN_LINK_WIN) == 0) {
+                        tipc_link_send_proto_msg(
+                                node->active_links[node->addr & 1],
+                                STATE_MSG, 0, 0, 0, 0, 0);
                         bcl->stats.sent_acks++;
                 }
 
+                /* Deliver message to destination */
+
                 if (likely(msg_isdata(msg))) {
                         spin_unlock_bh(&bc_lock);
                         tipc_node_unlock(node);
@@ -567,9 +504,14 @@ receive:
         if (unlikely(!tipc_node_is_up(node)))
                 goto unlock;
 
-        if (!node->bclink.deferred_head)
+        if (node->bclink.last_in == node->bclink.last_sent)
                 goto unlock;
 
+        if (!node->bclink.deferred_head) {
+                node->bclink.oos_state = 1;
+                goto unlock;
+        }
+
         msg = buf_msg(node->bclink.deferred_head);
         seqno = msg_seqno(msg);
         next_in = mod(next_in + 1);
@@ -580,31 +522,19 @@ receive:
 
                 buf = node->bclink.deferred_head;
                 node->bclink.deferred_head = buf->next;
+                node->bclink.deferred_size--;
                 goto receive;
         }
 
         /* Handle out-of-sequence broadcast message */
 
         if (less(next_in, seqno)) {
-                u32 gap_after = node->bclink.gap_after;
-                u32 gap_to = node->bclink.gap_to;
-
                 deferred = tipc_link_defer_pkt(&node->bclink.deferred_head,
                                                &node->bclink.deferred_tail,
                                                buf);
-                if (deferred) {
-                        node->bclink.nack_sync++;
-                        if (seqno == mod(gap_after + 1))
-                                node->bclink.gap_after = seqno;
-                        else if (less(gap_after, seqno) && less(seqno, gap_to))
-                                node->bclink.gap_to = seqno;
-                }
+                node->bclink.deferred_size += deferred;
+                bclink_update_last_sent(node, seqno);
                 buf = NULL;
-                if (bclink_ack_allowed(node->bclink.nack_sync)) {
-                        if (gap_to != gap_after)
-                                bclink_send_nack(node);
-                        bclink_set_gap(node);
-                }
         } else
                 deferred = 0;
 
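Reviewer note: taken together, the bcast.c hunks replace the gap_after/gap_to window bookkeeping with a counter-driven state machine. oos_state is cleared whenever an in-sequence packet is delivered, incremented as evidence of loss accumulates, and its low bit gates NACK transmission. The userspace sketch below models only that decision flow; it is not code from the patch, the node struct is a pared-down stand-in, and wrap-around sequence arithmetic is simplified to plain comparisons.

/* Userspace model of the new NACK pacing logic (illustrative only;
 * not kernel code, and wrap-around arithmetic is simplified away).
 */
#include <stdio.h>

#define TIPC_MIN_LINK_WIN 16

struct bclink_model {
        unsigned int last_in;       /* last in-sequence seqno delivered */
        unsigned int last_sent;     /* highest seqno peer is known to have sent */
        unsigned int oos_state;     /* out-of-sync state tracker */
        unsigned int deferred_size; /* out-of-sequence messages queued */
};

/* Mirrors the control flow of tipc_bclink_update_link_state():
 * returns 1 when a NACK would be sent, 0 when it is suppressed.
 */
static int update_link_state(struct bclink_model *n, unsigned int last_sent)
{
        if (last_sent <= n->last_in)            /* stale link state info */
                return 0;

        if (last_sent > n->last_sent)           /* bclink_update_last_sent() */
                n->last_sent = last_sent;

        if (n->last_sent == n->last_in)         /* in sync: nothing to do */
                return 0;

        if (++n->oos_state == 1) {              /* loss still unconfirmed */
                if (n->deferred_size < TIPC_MIN_LINK_WIN / 2)
                        return 0;
                n->oos_state++;                 /* large backlog confirms it */
        }

        if (n->oos_state & 0x1)                 /* NACK recently sent/seen */
                return 0;

        n->oos_state++;                         /* NACK goes out; turn odd */
        return 1;
}

int main(void)
{
        struct bclink_model n = { .last_in = 10, .last_sent = 10,
                                  .deferred_size = 1 };

        printf("probe 11: NACK=%d oos=%u\n", update_link_state(&n, 11),
               n.oos_state);                    /* 0: loss unconfirmed */
        printf("probe 12: NACK=%d oos=%u\n", update_link_state(&n, 12),
               n.oos_state);                    /* 1: gap confirmed */

        n.oos_state = 2;                        /* bclink_peek_nack(): peer
                                                 * already NACKed this gap */
        printf("probe 13: NACK=%d oos=%u\n", update_link_state(&n, 13),
               n.oos_state);                    /* 0: deferred one round */
        return 0;
}

The parity gate is what lets bclink_peek_nack() cooperate with this logic: when another node is seen requesting the same packets, oos_state is forced to 2, so this node's next state update lands on an odd value and stays quiet for that round.
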
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
index b009666c60b..5571394098f 100644
--- a/net/tipc/bcast.h
+++ b/net/tipc/bcast.h
@@ -96,7 +96,7 @@ int tipc_bclink_send_msg(struct sk_buff *buf);
 void tipc_bclink_recv_pkt(struct sk_buff *buf);
 u32 tipc_bclink_get_last_sent(void);
 u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr);
-void tipc_bclink_check_gap(struct tipc_node *n_ptr, u32 seqno);
+void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent);
 int tipc_bclink_stats(char *stats_buf, const u32 buf_size);
 int tipc_bclink_reset_stats(void);
 int tipc_bclink_set_queue_limits(u32 limit);
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 1150ba5a648..cce953723dd 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1501,14 +1501,13 @@ static void link_retransmit_failure(struct tipc_link *l_ptr,
         tipc_node_lock(n_ptr);
 
         tipc_addr_string_fill(addr_string, n_ptr->addr);
-        info("Multicast link info for %s\n", addr_string);
+        info("Broadcast link info for %s\n", addr_string);
         info("Supportable: %d, ", n_ptr->bclink.supportable);
         info("Supported: %d, ", n_ptr->bclink.supported);
         info("Acked: %u\n", n_ptr->bclink.acked);
         info("Last in: %u, ", n_ptr->bclink.last_in);
-        info("Gap after: %u, ", n_ptr->bclink.gap_after);
-        info("Gap to: %u\n", n_ptr->bclink.gap_to);
-        info("Nack sync: %u\n\n", n_ptr->bclink.nack_sync);
+        info("Oos state: %u, ", n_ptr->bclink.oos_state);
+        info("Last sent: %u\n", n_ptr->bclink.last_sent);
 
         tipc_k_signal((Handler)link_reset_all, (unsigned long)n_ptr->addr);
 
@@ -1974,7 +1973,7 @@ void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ,
 
         msg_set_type(msg, msg_typ);
         msg_set_net_plane(msg, l_ptr->b_ptr->net_plane);
-        msg_set_bcast_ack(msg, mod(l_ptr->owner->bclink.last_in));
+        msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
         msg_set_last_bcast(msg, tipc_bclink_get_last_sent());
 
         if (msg_typ == STATE_MSG) {
@@ -2133,8 +2132,12 @@ static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf)
 
         /* Synchronize broadcast link info, if not done previously */
 
-        if (!tipc_node_is_up(l_ptr->owner))
-                l_ptr->owner->bclink.last_in = msg_last_bcast(msg);
+        if (!tipc_node_is_up(l_ptr->owner)) {
+                l_ptr->owner->bclink.last_sent =
+                        l_ptr->owner->bclink.last_in =
+                        msg_last_bcast(msg);
+                l_ptr->owner->bclink.oos_state = 0;
+        }
 
         l_ptr->peer_session = msg_session(msg);
         l_ptr->peer_bearer_id = msg_bearer_id(msg);
@@ -2181,7 +2184,9 @@ static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf)
 
         /* Protocol message before retransmits, reduce loss risk */
 
-        tipc_bclink_check_gap(l_ptr->owner, msg_last_bcast(msg));
+        if (l_ptr->owner->bclink.supported)
+                tipc_bclink_update_link_state(l_ptr->owner,
+                                              msg_last_bcast(msg));
 
         if (rec_gap || (msg_probe(msg))) {
                 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
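Reviewer note: two link.c hunks drop the mod() wrapper when stamping bclink.last_in into outgoing protocol messages. That is safe because last_in now only ever holds values already reduced to the 16-bit sequence space (it is assigned from packet seqno fields or from msg_last_bcast(), both 16-bit on the wire). For reference, the sequence-number helpers the patch leans on behave roughly as in the sketch below, which follows the definitions in net/tipc/link.h of this era; treat it as an illustration, not a verbatim copy.

/* Sketch of TIPC's circular 16-bit sequence-number helpers, roughly as
 * defined in net/tipc/link.h of this era (illustrative, not verbatim).
 */
#include <stdio.h>

typedef unsigned int u32;

static inline u32 mod(u32 x)
{
        return x & 0xffffu;             /* sequence numbers live mod 2^16 */
}

static inline int less_eq(u32 left, u32 right)
{
        return mod(right - left) < 32768u;  /* ahead by under half window */
}

static inline int less(u32 left, u32 right)
{
        return less_eq(left, right) && (mod(right) != mod(left));
}

int main(void)
{
        /* 65535 precedes 3 once wrap-around is taken into account */
        printf("less(65535, 3) = %d\n", less(65535u, 3u));  /* prints 1 */
        printf("less(3, 65535) = %d\n", less(3u, 65535u));  /* prints 0 */
        return 0;
}

The half-window test in less_eq() keeps comparisons such as less_eq(last_sent, n_ptr->bclink.last_in) correct across wrap-around, provided the two values never drift more than 2^15 apart.
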
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 9196f943b83..6d8bdfd95cd 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -339,12 +339,12 @@ static void node_lost_contact(struct tipc_node *n_ptr)
         /* Flush broadcast link info associated with lost node */
 
         if (n_ptr->bclink.supported) {
-                n_ptr->bclink.gap_after = n_ptr->bclink.gap_to = 0;
                 while (n_ptr->bclink.deferred_head) {
                         struct sk_buff *buf = n_ptr->bclink.deferred_head;
                         n_ptr->bclink.deferred_head = buf->next;
                         buf_discard(buf);
                 }
+                n_ptr->bclink.deferred_size = 0;
 
                 if (n_ptr->bclink.defragm) {
                         buf_discard(n_ptr->bclink.defragm);
@@ -450,7 +450,7 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
 
         read_lock_bh(&tipc_net_lock);
 
-        /* Get space for all unicast links + multicast link */
+        /* Get space for all unicast links + broadcast link */
 
         payload_size = TLV_SPACE(sizeof(link_info)) *
                 (atomic_read(&tipc_num_links) + 1);
diff --git a/net/tipc/node.h b/net/tipc/node.h
index 90689f48761..c88ce64f8a3 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -66,9 +66,9 @@
  * @supported: non-zero if node supports TIPC b'cast capability
  * @acked: sequence # of last outbound b'cast message acknowledged by node
  * @last_in: sequence # of last in-sequence b'cast message received from node
- * @gap_after: sequence # of last message not requiring a NAK request
- * @gap_to: sequence # of last message requiring a NAK request
- * @nack_sync: counter that determines when NAK requests should be sent
+ * @last_sent: sequence # of last b'cast message sent by node
+ * @oos_state: state tracker for handling OOS b'cast messages
+ * @deferred_size: number of OOS b'cast messages in deferred queue
  * @deferred_head: oldest OOS b'cast message received from node
  * @deferred_tail: newest OOS b'cast message received from node
  * @defragm: list of partially reassembled b'cast message fragments from node
@@ -91,9 +91,9 @@ struct tipc_node {
         u8 supported;
         u32 acked;
         u32 last_in;
-        u32 gap_after;
-        u32 gap_to;
-        u32 nack_sync;
+        u32 last_sent;
+        u32 oos_state;
+        u32 deferred_size;
         struct sk_buff *deferred_head;
         struct sk_buff *deferred_tail;
         struct sk_buff *defragm;
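Reviewer note: the node.h hunks summarize the new per-node broadcast-link state. A few invariants are implied by how the other files maintain these fields: last_in can never run ahead of last_sent, and a node that is fully in sync carries neither out-of-sync state nor deferred packets. The sketch below states those invariants explicitly; it is illustrative reviewer code, not part of the patch, and uses plain comparisons where the kernel would use the wrap-aware helpers.

/* Reviewer sketch: invariants implied by the reworked bclink fields.
 * Not part of the patch; wrap-around arithmetic is ignored for brevity.
 */
#include <assert.h>

typedef unsigned int u32;

struct bclink_model {
        u32 last_in;        /* last in-sequence b'cast seqno received */
        u32 last_sent;      /* last b'cast seqno known sent by the node */
        u32 oos_state;      /* 0 = in sync; otherwise NACK pacing counter */
        u32 deferred_size;  /* messages held in the deferred queue */
};

static void check_bclink_invariants(const struct bclink_model *b)
{
        /* Delivery can never outrun what the peer is known to have sent
         * (kernel code would use less_eq() here). */
        assert(b->last_in <= b->last_sent);

        /* A fully synchronized node has no out-of-sync state and an
         * empty deferred queue. */
        if (b->last_in == b->last_sent) {
                assert(b->oos_state == 0);
                assert(b->deferred_size == 0);
        }
}

int main(void)
{
        struct bclink_model b = { .last_in = 7, .last_sent = 7 };

        check_bclink_invariants(&b);    /* in sync: passes */

        b.last_sent = 9;                /* peer sent 8 and 9; 8 was lost */
        b.oos_state = 1;
        b.deferred_size = 1;            /* seqno 9 parked in deferred queue */
        check_bclink_invariants(&b);    /* out of sync: still consistent */
        return 0;
}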