author     Linus Torvalds <torvalds@linux-foundation.org>   2009-01-05 21:44:59 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2009-01-05 21:44:59 -0500
commit     15b0669072127f282896b3bef2e9df4ec5d7264f (patch)
tree       8480e09bbc7c26cd5c9ef048b734664cb6fe76be /net
parent     c155b914651753f843445d2f860bc00137df5d52 (diff)
parent     3537d54c0c39de5738bba8d19f128478b0b96a71 (diff)

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (44 commits)
qlge: Fix sparse warnings for tx ring indexes.
qlge: Fix sparse warning regarding rx buffer queues.
qlge: Fix sparse endian warning in ql_hw_csum_setup().
qlge: Fix sparse endian warning for inbound packet control block flags.
qlge: Fix sparse warnings for byte swapping in qlge_ethool.c
myri10ge: print MAC and serial number on probe failure
pkt_sched: cls_u32: Fix locking in u32_change()
iucv: fix cpu hotplug
af_iucv: Free iucv path/socket in path_pending callback
af_iucv: avoid left over IUCV connections from failing connects
af_iucv: New error return codes for connect()
net/ehea: bitops work on unsigned longs
Revert "net: Fix for initial link state in 2.6.28"
tcp: Kill extraneous SPLICE_F_NONBLOCK checks.
tcp: don't mask EOF and socket errors on nonblocking splice receive
dccp: Integrate the TFRC library with DCCP
dccp: Clean up ccid.c after integration of CCID plugins
dccp: Lockless integration of CCID congestion-control plugins
qeth: get rid of extra argument after printk to dev_* conversion
qeth: No large send using EDDP for HiperSockets.
...
Diffstat (limited to 'net')
-rw-r--r--  net/can/bcm.c                        | 208
-rw-r--r--  net/core/dev.c                       |  93
-rw-r--r--  net/core/skbuff.c                    |  15
-rw-r--r--  net/dcb/dcbnl.c                      |  14
-rw-r--r--  net/dccp/Kconfig                     |   4
-rw-r--r--  net/dccp/Makefile                    |  15
-rw-r--r--  net/dccp/ackvec.h                    |  49
-rw-r--r--  net/dccp/ccid.c                      | 254
-rw-r--r--  net/dccp/ccid.h                      |  14
-rw-r--r--  net/dccp/ccids/Kconfig               |  79
-rw-r--r--  net/dccp/ccids/Makefile              |   9
-rw-r--r--  net/dccp/ccids/ccid2.c               |  22
-rw-r--r--  net/dccp/ccids/ccid3.c               |  23
-rw-r--r--  net/dccp/ccids/lib/Makefile          |   3
-rw-r--r--  net/dccp/ccids/lib/loss_interval.c   |   3
-rw-r--r--  net/dccp/ccids/lib/packet_history.c  |   9
-rw-r--r--  net/dccp/ccids/lib/tfrc.c            |  19
-rw-r--r--  net/dccp/ccids/lib/tfrc.h            |  11
-rw-r--r--  net/dccp/ccids/lib/tfrc_equation.c   |   4
-rw-r--r--  net/dccp/dccp.h                      |   2
-rw-r--r--  net/dccp/feat.c                      |   6
-rw-r--r--  net/dccp/input.c                     |   2
-rw-r--r--  net/dccp/proto.c                     |   7
-rw-r--r--  net/ipv4/tcp.c                       |   9
-rw-r--r--  net/ipv6/ipv6_sockglue.c             |   2
-rw-r--r--  net/ipv6/route.c                     |  52
-rw-r--r--  net/iucv/af_iucv.c                   |  28
-rw-r--r--  net/iucv/iucv.c                      |  18
-rw-r--r--  net/rfkill/rfkill.c                  |   4
-rw-r--r--  net/sched/cls_u32.c                  |   3
30 files changed, 488 insertions, 493 deletions
diff --git a/net/can/bcm.c b/net/can/bcm.c
index da0d426c0ce4..6248ae2502c7 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -70,7 +70,7 @@ | |||
70 | 70 | ||
71 | #define CAN_BCM_VERSION CAN_VERSION | 71 | #define CAN_BCM_VERSION CAN_VERSION |
72 | static __initdata const char banner[] = KERN_INFO | 72 | static __initdata const char banner[] = KERN_INFO |
73 | "can: broadcast manager protocol (rev " CAN_BCM_VERSION ")\n"; | 73 | "can: broadcast manager protocol (rev " CAN_BCM_VERSION " t)\n"; |
74 | 74 | ||
75 | MODULE_DESCRIPTION("PF_CAN broadcast manager protocol"); | 75 | MODULE_DESCRIPTION("PF_CAN broadcast manager protocol"); |
76 | MODULE_LICENSE("Dual BSD/GPL"); | 76 | MODULE_LICENSE("Dual BSD/GPL"); |
@@ -90,6 +90,7 @@ struct bcm_op { | |||
90 | unsigned long frames_abs, frames_filtered; | 90 | unsigned long frames_abs, frames_filtered; |
91 | struct timeval ival1, ival2; | 91 | struct timeval ival1, ival2; |
92 | struct hrtimer timer, thrtimer; | 92 | struct hrtimer timer, thrtimer; |
93 | struct tasklet_struct tsklet, thrtsklet; | ||
93 | ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg; | 94 | ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg; |
94 | int rx_ifindex; | 95 | int rx_ifindex; |
95 | int count; | 96 | int count; |
@@ -341,6 +342,23 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head, | |||
341 | } | 342 | } |
342 | } | 343 | } |
343 | 344 | ||
345 | static void bcm_tx_timeout_tsklet(unsigned long data) | ||
346 | { | ||
347 | struct bcm_op *op = (struct bcm_op *)data; | ||
348 | struct bcm_msg_head msg_head; | ||
349 | |||
350 | /* create notification to user */ | ||
351 | msg_head.opcode = TX_EXPIRED; | ||
352 | msg_head.flags = op->flags; | ||
353 | msg_head.count = op->count; | ||
354 | msg_head.ival1 = op->ival1; | ||
355 | msg_head.ival2 = op->ival2; | ||
356 | msg_head.can_id = op->can_id; | ||
357 | msg_head.nframes = 0; | ||
358 | |||
359 | bcm_send_to_user(op, &msg_head, NULL, 0); | ||
360 | } | ||
361 | |||
344 | /* | 362 | /* |
345 | * bcm_tx_timeout_handler - performes cyclic CAN frame transmissions | 363 | * bcm_tx_timeout_handler - performes cyclic CAN frame transmissions |
346 | */ | 364 | */ |
@@ -352,20 +370,8 @@ static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer) | |||
352 | if (op->kt_ival1.tv64 && (op->count > 0)) { | 370 | if (op->kt_ival1.tv64 && (op->count > 0)) { |
353 | 371 | ||
354 | op->count--; | 372 | op->count--; |
355 | if (!op->count && (op->flags & TX_COUNTEVT)) { | 373 | if (!op->count && (op->flags & TX_COUNTEVT)) |
356 | struct bcm_msg_head msg_head; | 374 | tasklet_schedule(&op->tsklet); |
357 | |||
358 | /* create notification to user */ | ||
359 | msg_head.opcode = TX_EXPIRED; | ||
360 | msg_head.flags = op->flags; | ||
361 | msg_head.count = op->count; | ||
362 | msg_head.ival1 = op->ival1; | ||
363 | msg_head.ival2 = op->ival2; | ||
364 | msg_head.can_id = op->can_id; | ||
365 | msg_head.nframes = 0; | ||
366 | |||
367 | bcm_send_to_user(op, &msg_head, NULL, 0); | ||
368 | } | ||
369 | } | 375 | } |
370 | 376 | ||
371 | if (op->kt_ival1.tv64 && (op->count > 0)) { | 377 | if (op->kt_ival1.tv64 && (op->count > 0)) { |
@@ -402,6 +408,9 @@ static void bcm_rx_changed(struct bcm_op *op, struct can_frame *data) | |||
402 | if (op->frames_filtered > ULONG_MAX/100) | 408 | if (op->frames_filtered > ULONG_MAX/100) |
403 | op->frames_filtered = op->frames_abs = 0; | 409 | op->frames_filtered = op->frames_abs = 0; |
404 | 410 | ||
411 | /* this element is not throttled anymore */ | ||
412 | data->can_dlc &= (BCM_CAN_DLC_MASK|RX_RECV); | ||
413 | |||
405 | head.opcode = RX_CHANGED; | 414 | head.opcode = RX_CHANGED; |
406 | head.flags = op->flags; | 415 | head.flags = op->flags; |
407 | head.count = op->count; | 416 | head.count = op->count; |
@@ -420,37 +429,32 @@ static void bcm_rx_changed(struct bcm_op *op, struct can_frame *data) | |||
420 | */ | 429 | */ |
421 | static void bcm_rx_update_and_send(struct bcm_op *op, | 430 | static void bcm_rx_update_and_send(struct bcm_op *op, |
422 | struct can_frame *lastdata, | 431 | struct can_frame *lastdata, |
423 | struct can_frame *rxdata) | 432 | const struct can_frame *rxdata) |
424 | { | 433 | { |
425 | memcpy(lastdata, rxdata, CFSIZ); | 434 | memcpy(lastdata, rxdata, CFSIZ); |
426 | 435 | ||
427 | /* mark as used */ | 436 | /* mark as used and throttled by default */ |
428 | lastdata->can_dlc |= RX_RECV; | 437 | lastdata->can_dlc |= (RX_RECV|RX_THR); |
429 | 438 | ||
430 | /* throtteling mode inactive OR data update already on the run ? */ | 439 | /* throtteling mode inactive ? */ |
431 | if (!op->kt_ival2.tv64 || hrtimer_callback_running(&op->thrtimer)) { | 440 | if (!op->kt_ival2.tv64) { |
432 | /* send RX_CHANGED to the user immediately */ | 441 | /* send RX_CHANGED to the user immediately */ |
433 | bcm_rx_changed(op, rxdata); | 442 | bcm_rx_changed(op, lastdata); |
434 | return; | 443 | return; |
435 | } | 444 | } |
436 | 445 | ||
437 | if (hrtimer_active(&op->thrtimer)) { | 446 | /* with active throttling timer we are just done here */ |
438 | /* mark as 'throttled' */ | 447 | if (hrtimer_active(&op->thrtimer)) |
439 | lastdata->can_dlc |= RX_THR; | ||
440 | return; | 448 | return; |
441 | } | ||
442 | 449 | ||
443 | if (!op->kt_lastmsg.tv64) { | 450 | /* first receiption with enabled throttling mode */ |
444 | /* send first RX_CHANGED to the user immediately */ | 451 | if (!op->kt_lastmsg.tv64) |
445 | bcm_rx_changed(op, rxdata); | 452 | goto rx_changed_settime; |
446 | op->kt_lastmsg = ktime_get(); | ||
447 | return; | ||
448 | } | ||
449 | 453 | ||
454 | /* got a second frame inside a potential throttle period? */ | ||
450 | if (ktime_us_delta(ktime_get(), op->kt_lastmsg) < | 455 | if (ktime_us_delta(ktime_get(), op->kt_lastmsg) < |
451 | ktime_to_us(op->kt_ival2)) { | 456 | ktime_to_us(op->kt_ival2)) { |
452 | /* mark as 'throttled' and start timer */ | 457 | /* do not send the saved data - only start throttle timer */ |
453 | lastdata->can_dlc |= RX_THR; | ||
454 | hrtimer_start(&op->thrtimer, | 458 | hrtimer_start(&op->thrtimer, |
455 | ktime_add(op->kt_lastmsg, op->kt_ival2), | 459 | ktime_add(op->kt_lastmsg, op->kt_ival2), |
456 | HRTIMER_MODE_ABS); | 460 | HRTIMER_MODE_ABS); |
@@ -458,7 +462,8 @@ static void bcm_rx_update_and_send(struct bcm_op *op, | |||
458 | } | 462 | } |
459 | 463 | ||
460 | /* the gap was that big, that throttling was not needed here */ | 464 | /* the gap was that big, that throttling was not needed here */ |
461 | bcm_rx_changed(op, rxdata); | 465 | rx_changed_settime: |
466 | bcm_rx_changed(op, lastdata); | ||
462 | op->kt_lastmsg = ktime_get(); | 467 | op->kt_lastmsg = ktime_get(); |
463 | } | 468 | } |
464 | 469 | ||
@@ -467,7 +472,7 @@ static void bcm_rx_update_and_send(struct bcm_op *op, | |||
467 | * received data stored in op->last_frames[] | 472 | * received data stored in op->last_frames[] |
468 | */ | 473 | */ |
469 | static void bcm_rx_cmp_to_index(struct bcm_op *op, int index, | 474 | static void bcm_rx_cmp_to_index(struct bcm_op *op, int index, |
470 | struct can_frame *rxdata) | 475 | const struct can_frame *rxdata) |
471 | { | 476 | { |
472 | /* | 477 | /* |
473 | * no one uses the MSBs of can_dlc for comparation, | 478 | * no one uses the MSBs of can_dlc for comparation, |
@@ -511,14 +516,12 @@ static void bcm_rx_starttimer(struct bcm_op *op) | |||
511 | hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL); | 516 | hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL); |
512 | } | 517 | } |
513 | 518 | ||
514 | /* | 519 | static void bcm_rx_timeout_tsklet(unsigned long data) |
515 | * bcm_rx_timeout_handler - when the (cyclic) CAN frame receiption timed out | ||
516 | */ | ||
517 | static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer) | ||
518 | { | 520 | { |
519 | struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer); | 521 | struct bcm_op *op = (struct bcm_op *)data; |
520 | struct bcm_msg_head msg_head; | 522 | struct bcm_msg_head msg_head; |
521 | 523 | ||
524 | /* create notification to user */ | ||
522 | msg_head.opcode = RX_TIMEOUT; | 525 | msg_head.opcode = RX_TIMEOUT; |
523 | msg_head.flags = op->flags; | 526 | msg_head.flags = op->flags; |
524 | msg_head.count = op->count; | 527 | msg_head.count = op->count; |
@@ -528,6 +531,17 @@ static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer) | |||
528 | msg_head.nframes = 0; | 531 | msg_head.nframes = 0; |
529 | 532 | ||
530 | bcm_send_to_user(op, &msg_head, NULL, 0); | 533 | bcm_send_to_user(op, &msg_head, NULL, 0); |
534 | } | ||
535 | |||
536 | /* | ||
537 | * bcm_rx_timeout_handler - when the (cyclic) CAN frame receiption timed out | ||
538 | */ | ||
539 | static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer) | ||
540 | { | ||
541 | struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer); | ||
542 | |||
543 | /* schedule before NET_RX_SOFTIRQ */ | ||
544 | tasklet_hi_schedule(&op->tsklet); | ||
531 | 545 | ||
532 | /* no restart of the timer is done here! */ | 546 | /* no restart of the timer is done here! */ |
533 | 547 | ||
@@ -541,9 +555,25 @@ static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer) | |||
541 | } | 555 | } |
542 | 556 | ||
543 | /* | 557 | /* |
558 | * bcm_rx_do_flush - helper for bcm_rx_thr_flush | ||
559 | */ | ||
560 | static inline int bcm_rx_do_flush(struct bcm_op *op, int update, int index) | ||
561 | { | ||
562 | if ((op->last_frames) && (op->last_frames[index].can_dlc & RX_THR)) { | ||
563 | if (update) | ||
564 | bcm_rx_changed(op, &op->last_frames[index]); | ||
565 | return 1; | ||
566 | } | ||
567 | return 0; | ||
568 | } | ||
569 | |||
570 | /* | ||
544 | * bcm_rx_thr_flush - Check for throttled data and send it to the userspace | 571 | * bcm_rx_thr_flush - Check for throttled data and send it to the userspace |
572 | * | ||
573 | * update == 0 : just check if throttled data is available (any irq context) | ||
574 | * update == 1 : check and send throttled data to userspace (soft_irq context) | ||
545 | */ | 575 | */ |
546 | static int bcm_rx_thr_flush(struct bcm_op *op) | 576 | static int bcm_rx_thr_flush(struct bcm_op *op, int update) |
547 | { | 577 | { |
548 | int updated = 0; | 578 | int updated = 0; |
549 | 579 | ||
@@ -551,27 +581,25 @@ static int bcm_rx_thr_flush(struct bcm_op *op) | |||
551 | int i; | 581 | int i; |
552 | 582 | ||
553 | /* for MUX filter we start at index 1 */ | 583 | /* for MUX filter we start at index 1 */ |
554 | for (i = 1; i < op->nframes; i++) { | 584 | for (i = 1; i < op->nframes; i++) |
555 | if ((op->last_frames) && | 585 | updated += bcm_rx_do_flush(op, update, i); |
556 | (op->last_frames[i].can_dlc & RX_THR)) { | ||
557 | op->last_frames[i].can_dlc &= ~RX_THR; | ||
558 | bcm_rx_changed(op, &op->last_frames[i]); | ||
559 | updated++; | ||
560 | } | ||
561 | } | ||
562 | 586 | ||
563 | } else { | 587 | } else { |
564 | /* for RX_FILTER_ID and simple filter */ | 588 | /* for RX_FILTER_ID and simple filter */ |
565 | if (op->last_frames && (op->last_frames[0].can_dlc & RX_THR)) { | 589 | updated += bcm_rx_do_flush(op, update, 0); |
566 | op->last_frames[0].can_dlc &= ~RX_THR; | ||
567 | bcm_rx_changed(op, &op->last_frames[0]); | ||
568 | updated++; | ||
569 | } | ||
570 | } | 590 | } |
571 | 591 | ||
572 | return updated; | 592 | return updated; |
573 | } | 593 | } |
574 | 594 | ||
595 | static void bcm_rx_thr_tsklet(unsigned long data) | ||
596 | { | ||
597 | struct bcm_op *op = (struct bcm_op *)data; | ||
598 | |||
599 | /* push the changed data to the userspace */ | ||
600 | bcm_rx_thr_flush(op, 1); | ||
601 | } | ||
602 | |||
575 | /* | 603 | /* |
576 | * bcm_rx_thr_handler - the time for blocked content updates is over now: | 604 | * bcm_rx_thr_handler - the time for blocked content updates is over now: |
577 | * Check for throttled data and send it to the userspace | 605 | * Check for throttled data and send it to the userspace |
@@ -580,7 +608,9 @@ static enum hrtimer_restart bcm_rx_thr_handler(struct hrtimer *hrtimer) | |||
580 | { | 608 | { |
581 | struct bcm_op *op = container_of(hrtimer, struct bcm_op, thrtimer); | 609 | struct bcm_op *op = container_of(hrtimer, struct bcm_op, thrtimer); |
582 | 610 | ||
583 | if (bcm_rx_thr_flush(op)) { | 611 | tasklet_schedule(&op->thrtsklet); |
612 | |||
613 | if (bcm_rx_thr_flush(op, 0)) { | ||
584 | hrtimer_forward(hrtimer, ktime_get(), op->kt_ival2); | 614 | hrtimer_forward(hrtimer, ktime_get(), op->kt_ival2); |
585 | return HRTIMER_RESTART; | 615 | return HRTIMER_RESTART; |
586 | } else { | 616 | } else { |
@@ -596,48 +626,38 @@ static enum hrtimer_restart bcm_rx_thr_handler(struct hrtimer *hrtimer) | |||
596 | static void bcm_rx_handler(struct sk_buff *skb, void *data) | 626 | static void bcm_rx_handler(struct sk_buff *skb, void *data) |
597 | { | 627 | { |
598 | struct bcm_op *op = (struct bcm_op *)data; | 628 | struct bcm_op *op = (struct bcm_op *)data; |
599 | struct can_frame rxframe; | 629 | const struct can_frame *rxframe = (struct can_frame *)skb->data; |
600 | int i; | 630 | int i; |
601 | 631 | ||
602 | /* disable timeout */ | 632 | /* disable timeout */ |
603 | hrtimer_cancel(&op->timer); | 633 | hrtimer_cancel(&op->timer); |
604 | 634 | ||
605 | if (skb->len == sizeof(rxframe)) { | 635 | if (op->can_id != rxframe->can_id) |
606 | memcpy(&rxframe, skb->data, sizeof(rxframe)); | 636 | goto rx_freeskb; |
607 | /* save rx timestamp */ | ||
608 | op->rx_stamp = skb->tstamp; | ||
609 | /* save originator for recvfrom() */ | ||
610 | op->rx_ifindex = skb->dev->ifindex; | ||
611 | /* update statistics */ | ||
612 | op->frames_abs++; | ||
613 | kfree_skb(skb); | ||
614 | 637 | ||
615 | } else { | 638 | /* save rx timestamp */ |
616 | kfree_skb(skb); | 639 | op->rx_stamp = skb->tstamp; |
617 | return; | 640 | /* save originator for recvfrom() */ |
618 | } | 641 | op->rx_ifindex = skb->dev->ifindex; |
619 | 642 | /* update statistics */ | |
620 | if (op->can_id != rxframe.can_id) | 643 | op->frames_abs++; |
621 | return; | ||
622 | 644 | ||
623 | if (op->flags & RX_RTR_FRAME) { | 645 | if (op->flags & RX_RTR_FRAME) { |
624 | /* send reply for RTR-request (placed in op->frames[0]) */ | 646 | /* send reply for RTR-request (placed in op->frames[0]) */ |
625 | bcm_can_tx(op); | 647 | bcm_can_tx(op); |
626 | return; | 648 | goto rx_freeskb; |
627 | } | 649 | } |
628 | 650 | ||
629 | if (op->flags & RX_FILTER_ID) { | 651 | if (op->flags & RX_FILTER_ID) { |
630 | /* the easiest case */ | 652 | /* the easiest case */ |
631 | bcm_rx_update_and_send(op, &op->last_frames[0], &rxframe); | 653 | bcm_rx_update_and_send(op, &op->last_frames[0], rxframe); |
632 | bcm_rx_starttimer(op); | 654 | goto rx_freeskb_starttimer; |
633 | return; | ||
634 | } | 655 | } |
635 | 656 | ||
636 | if (op->nframes == 1) { | 657 | if (op->nframes == 1) { |
637 | /* simple compare with index 0 */ | 658 | /* simple compare with index 0 */ |
638 | bcm_rx_cmp_to_index(op, 0, &rxframe); | 659 | bcm_rx_cmp_to_index(op, 0, rxframe); |
639 | bcm_rx_starttimer(op); | 660 | goto rx_freeskb_starttimer; |
640 | return; | ||
641 | } | 661 | } |
642 | 662 | ||
643 | if (op->nframes > 1) { | 663 | if (op->nframes > 1) { |
@@ -649,15 +669,19 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data) | |||
649 | */ | 669 | */ |
650 | 670 | ||
651 | for (i = 1; i < op->nframes; i++) { | 671 | for (i = 1; i < op->nframes; i++) { |
652 | if ((GET_U64(&op->frames[0]) & GET_U64(&rxframe)) == | 672 | if ((GET_U64(&op->frames[0]) & GET_U64(rxframe)) == |
653 | (GET_U64(&op->frames[0]) & | 673 | (GET_U64(&op->frames[0]) & |
654 | GET_U64(&op->frames[i]))) { | 674 | GET_U64(&op->frames[i]))) { |
655 | bcm_rx_cmp_to_index(op, i, &rxframe); | 675 | bcm_rx_cmp_to_index(op, i, rxframe); |
656 | break; | 676 | break; |
657 | } | 677 | } |
658 | } | 678 | } |
659 | bcm_rx_starttimer(op); | ||
660 | } | 679 | } |
680 | |||
681 | rx_freeskb_starttimer: | ||
682 | bcm_rx_starttimer(op); | ||
683 | rx_freeskb: | ||
684 | kfree_skb(skb); | ||
661 | } | 685 | } |
662 | 686 | ||
663 | /* | 687 | /* |
@@ -681,6 +705,12 @@ static void bcm_remove_op(struct bcm_op *op) | |||
681 | hrtimer_cancel(&op->timer); | 705 | hrtimer_cancel(&op->timer); |
682 | hrtimer_cancel(&op->thrtimer); | 706 | hrtimer_cancel(&op->thrtimer); |
683 | 707 | ||
708 | if (op->tsklet.func) | ||
709 | tasklet_kill(&op->tsklet); | ||
710 | |||
711 | if (op->thrtsklet.func) | ||
712 | tasklet_kill(&op->thrtsklet); | ||
713 | |||
684 | if ((op->frames) && (op->frames != &op->sframe)) | 714 | if ((op->frames) && (op->frames != &op->sframe)) |
685 | kfree(op->frames); | 715 | kfree(op->frames); |
686 | 716 | ||
@@ -891,6 +921,10 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, | |||
891 | hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | 921 | hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
892 | op->timer.function = bcm_tx_timeout_handler; | 922 | op->timer.function = bcm_tx_timeout_handler; |
893 | 923 | ||
924 | /* initialize tasklet for tx countevent notification */ | ||
925 | tasklet_init(&op->tsklet, bcm_tx_timeout_tsklet, | ||
926 | (unsigned long) op); | ||
927 | |||
894 | /* currently unused in tx_ops */ | 928 | /* currently unused in tx_ops */ |
895 | hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | 929 | hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
896 | 930 | ||
@@ -1054,9 +1088,17 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, | |||
1054 | hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | 1088 | hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
1055 | op->timer.function = bcm_rx_timeout_handler; | 1089 | op->timer.function = bcm_rx_timeout_handler; |
1056 | 1090 | ||
1091 | /* initialize tasklet for rx timeout notification */ | ||
1092 | tasklet_init(&op->tsklet, bcm_rx_timeout_tsklet, | ||
1093 | (unsigned long) op); | ||
1094 | |||
1057 | hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | 1095 | hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
1058 | op->thrtimer.function = bcm_rx_thr_handler; | 1096 | op->thrtimer.function = bcm_rx_thr_handler; |
1059 | 1097 | ||
1098 | /* initialize tasklet for rx throttle handling */ | ||
1099 | tasklet_init(&op->thrtsklet, bcm_rx_thr_tsklet, | ||
1100 | (unsigned long) op); | ||
1101 | |||
1060 | /* add this bcm_op to the list of the rx_ops */ | 1102 | /* add this bcm_op to the list of the rx_ops */ |
1061 | list_add(&op->list, &bo->rx_ops); | 1103 | list_add(&op->list, &bo->rx_ops); |
1062 | 1104 | ||
@@ -1102,7 +1144,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, | |||
1102 | */ | 1144 | */ |
1103 | op->kt_lastmsg = ktime_set(0, 0); | 1145 | op->kt_lastmsg = ktime_set(0, 0); |
1104 | hrtimer_cancel(&op->thrtimer); | 1146 | hrtimer_cancel(&op->thrtimer); |
1105 | bcm_rx_thr_flush(op); | 1147 | bcm_rx_thr_flush(op, 1); |
1106 | } | 1148 | } |
1107 | 1149 | ||
1108 | if ((op->flags & STARTTIMER) && op->kt_ival1.tv64) | 1150 | if ((op->flags & STARTTIMER) && op->kt_ival1.tv64) |
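The bcm.c hunks above move the TX_EXPIRED / RX_TIMEOUT user notifications and the throttle flush out of hrtimer (hard-irq) context and into tasklets, so the hrtimer callbacks now only schedule work. Below is a minimal stand-alone sketch of that hrtimer-plus-tasklet deferral pattern, assuming era-appropriate kernel APIs; every demo_* name is a placeholder for this sketch, not part of the patch.

#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/ktime.h>

static struct hrtimer demo_timer;
static struct tasklet_struct demo_tsklet;

/* Runs in softirq context: the safe place for the heavier work
 * (bcm.c does its bcm_send_to_user() notification here). */
static void demo_tsklet_fn(unsigned long data)
{
	pr_info("demo: doing the deferred work in a tasklet\n");
}

/* Runs in hard-irq context: limited to scheduling the tasklet. */
static enum hrtimer_restart demo_timer_fn(struct hrtimer *t)
{
	tasklet_hi_schedule(&demo_tsklet);
	return HRTIMER_NORESTART;
}

static int __init demo_init(void)
{
	tasklet_init(&demo_tsklet, demo_tsklet_fn, 0);

	hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	demo_timer.function = demo_timer_fn;
	hrtimer_start(&demo_timer, ktime_set(1, 0), HRTIMER_MODE_REL);
	return 0;
}

static void __exit demo_exit(void)
{
	hrtimer_cancel(&demo_timer);
	tasklet_kill(&demo_tsklet);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

As in bcm_remove_op() above, the teardown path cancels the timer first and then runs tasklet_kill() so no callback can still be pending when the state is freed.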
diff --git a/net/core/dev.c b/net/core/dev.c
index 09c66a449da6..382df6c09eec 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -132,6 +132,9 @@ | |||
132 | /* Instead of increasing this, you should create a hash table. */ | 132 | /* Instead of increasing this, you should create a hash table. */ |
133 | #define MAX_GRO_SKBS 8 | 133 | #define MAX_GRO_SKBS 8 |
134 | 134 | ||
135 | /* This should be increased if a protocol with a bigger head is added. */ | ||
136 | #define GRO_MAX_HEAD (MAX_HEADER + 128) | ||
137 | |||
135 | /* | 138 | /* |
136 | * The list of packet types we will receive (as opposed to discard) | 139 | * The list of packet types we will receive (as opposed to discard) |
137 | * and the routines to invoke. | 140 | * and the routines to invoke. |
@@ -2345,7 +2348,7 @@ static int napi_gro_complete(struct sk_buff *skb) | |||
2345 | struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK]; | 2348 | struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK]; |
2346 | int err = -ENOENT; | 2349 | int err = -ENOENT; |
2347 | 2350 | ||
2348 | if (!skb_shinfo(skb)->frag_list) | 2351 | if (NAPI_GRO_CB(skb)->count == 1) |
2349 | goto out; | 2352 | goto out; |
2350 | 2353 | ||
2351 | rcu_read_lock(); | 2354 | rcu_read_lock(); |
@@ -2365,6 +2368,7 @@ static int napi_gro_complete(struct sk_buff *skb) | |||
2365 | } | 2368 | } |
2366 | 2369 | ||
2367 | out: | 2370 | out: |
2371 | skb_shinfo(skb)->gso_size = 0; | ||
2368 | __skb_push(skb, -skb_network_offset(skb)); | 2372 | __skb_push(skb, -skb_network_offset(skb)); |
2369 | return netif_receive_skb(skb); | 2373 | return netif_receive_skb(skb); |
2370 | } | 2374 | } |
@@ -2383,7 +2387,7 @@ void napi_gro_flush(struct napi_struct *napi) | |||
2383 | } | 2387 | } |
2384 | EXPORT_SYMBOL(napi_gro_flush); | 2388 | EXPORT_SYMBOL(napi_gro_flush); |
2385 | 2389 | ||
2386 | int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) | 2390 | static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) |
2387 | { | 2391 | { |
2388 | struct sk_buff **pp = NULL; | 2392 | struct sk_buff **pp = NULL; |
2389 | struct packet_type *ptype; | 2393 | struct packet_type *ptype; |
@@ -2392,6 +2396,7 @@ int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) | |||
2392 | int count = 0; | 2396 | int count = 0; |
2393 | int same_flow; | 2397 | int same_flow; |
2394 | int mac_len; | 2398 | int mac_len; |
2399 | int free; | ||
2395 | 2400 | ||
2396 | if (!(skb->dev->features & NETIF_F_GRO)) | 2401 | if (!(skb->dev->features & NETIF_F_GRO)) |
2397 | goto normal; | 2402 | goto normal; |
@@ -2408,6 +2413,7 @@ int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) | |||
2408 | skb->mac_len = mac_len; | 2413 | skb->mac_len = mac_len; |
2409 | NAPI_GRO_CB(skb)->same_flow = 0; | 2414 | NAPI_GRO_CB(skb)->same_flow = 0; |
2410 | NAPI_GRO_CB(skb)->flush = 0; | 2415 | NAPI_GRO_CB(skb)->flush = 0; |
2416 | NAPI_GRO_CB(skb)->free = 0; | ||
2411 | 2417 | ||
2412 | for (p = napi->gro_list; p; p = p->next) { | 2418 | for (p = napi->gro_list; p; p = p->next) { |
2413 | count++; | 2419 | count++; |
@@ -2427,6 +2433,7 @@ int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) | |||
2427 | goto normal; | 2433 | goto normal; |
2428 | 2434 | ||
2429 | same_flow = NAPI_GRO_CB(skb)->same_flow; | 2435 | same_flow = NAPI_GRO_CB(skb)->same_flow; |
2436 | free = NAPI_GRO_CB(skb)->free; | ||
2430 | 2437 | ||
2431 | if (pp) { | 2438 | if (pp) { |
2432 | struct sk_buff *nskb = *pp; | 2439 | struct sk_buff *nskb = *pp; |
@@ -2446,17 +2453,91 @@ int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) | |||
2446 | } | 2453 | } |
2447 | 2454 | ||
2448 | NAPI_GRO_CB(skb)->count = 1; | 2455 | NAPI_GRO_CB(skb)->count = 1; |
2456 | skb_shinfo(skb)->gso_size = skb->len; | ||
2449 | skb->next = napi->gro_list; | 2457 | skb->next = napi->gro_list; |
2450 | napi->gro_list = skb; | 2458 | napi->gro_list = skb; |
2451 | 2459 | ||
2452 | ok: | 2460 | ok: |
2453 | return NET_RX_SUCCESS; | 2461 | return free; |
2454 | 2462 | ||
2455 | normal: | 2463 | normal: |
2456 | return netif_receive_skb(skb); | 2464 | return -1; |
2465 | } | ||
2466 | |||
2467 | int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) | ||
2468 | { | ||
2469 | switch (__napi_gro_receive(napi, skb)) { | ||
2470 | case -1: | ||
2471 | return netif_receive_skb(skb); | ||
2472 | |||
2473 | case 1: | ||
2474 | kfree_skb(skb); | ||
2475 | break; | ||
2476 | } | ||
2477 | |||
2478 | return NET_RX_SUCCESS; | ||
2457 | } | 2479 | } |
2458 | EXPORT_SYMBOL(napi_gro_receive); | 2480 | EXPORT_SYMBOL(napi_gro_receive); |
2459 | 2481 | ||
2482 | int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info) | ||
2483 | { | ||
2484 | struct net_device *dev = napi->dev; | ||
2485 | struct sk_buff *skb = napi->skb; | ||
2486 | int err = NET_RX_DROP; | ||
2487 | |||
2488 | napi->skb = NULL; | ||
2489 | |||
2490 | if (!skb) { | ||
2491 | skb = netdev_alloc_skb(dev, GRO_MAX_HEAD + NET_IP_ALIGN); | ||
2492 | if (!skb) | ||
2493 | goto out; | ||
2494 | |||
2495 | skb_reserve(skb, NET_IP_ALIGN); | ||
2496 | } | ||
2497 | |||
2498 | BUG_ON(info->nr_frags > MAX_SKB_FRAGS); | ||
2499 | skb_shinfo(skb)->nr_frags = info->nr_frags; | ||
2500 | memcpy(skb_shinfo(skb)->frags, info->frags, sizeof(info->frags)); | ||
2501 | |||
2502 | skb->data_len = info->len; | ||
2503 | skb->len += info->len; | ||
2504 | skb->truesize += info->len; | ||
2505 | |||
2506 | if (!pskb_may_pull(skb, ETH_HLEN)) | ||
2507 | goto reuse; | ||
2508 | |||
2509 | err = NET_RX_SUCCESS; | ||
2510 | |||
2511 | skb->protocol = eth_type_trans(skb, dev); | ||
2512 | |||
2513 | skb->ip_summed = info->ip_summed; | ||
2514 | skb->csum = info->csum; | ||
2515 | |||
2516 | switch (__napi_gro_receive(napi, skb)) { | ||
2517 | case -1: | ||
2518 | return netif_receive_skb(skb); | ||
2519 | |||
2520 | case 0: | ||
2521 | goto out; | ||
2522 | } | ||
2523 | |||
2524 | reuse: | ||
2525 | skb_shinfo(skb)->nr_frags = 0; | ||
2526 | |||
2527 | skb->len -= skb->data_len; | ||
2528 | skb->truesize -= skb->data_len; | ||
2529 | skb->data_len = 0; | ||
2530 | |||
2531 | __skb_pull(skb, skb_headlen(skb)); | ||
2532 | skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb)); | ||
2533 | |||
2534 | napi->skb = skb; | ||
2535 | |||
2536 | out: | ||
2537 | return err; | ||
2538 | } | ||
2539 | EXPORT_SYMBOL(napi_gro_frags); | ||
2540 | |||
2460 | static int process_backlog(struct napi_struct *napi, int quota) | 2541 | static int process_backlog(struct napi_struct *napi, int quota) |
2461 | { | 2542 | { |
2462 | int work = 0; | 2543 | int work = 0; |
@@ -2535,11 +2616,12 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi, | |||
2535 | { | 2616 | { |
2536 | INIT_LIST_HEAD(&napi->poll_list); | 2617 | INIT_LIST_HEAD(&napi->poll_list); |
2537 | napi->gro_list = NULL; | 2618 | napi->gro_list = NULL; |
2619 | napi->skb = NULL; | ||
2538 | napi->poll = poll; | 2620 | napi->poll = poll; |
2539 | napi->weight = weight; | 2621 | napi->weight = weight; |
2540 | list_add(&napi->dev_list, &dev->napi_list); | 2622 | list_add(&napi->dev_list, &dev->napi_list); |
2541 | #ifdef CONFIG_NETPOLL | ||
2542 | napi->dev = dev; | 2623 | napi->dev = dev; |
2624 | #ifdef CONFIG_NETPOLL | ||
2543 | spin_lock_init(&napi->poll_lock); | 2625 | spin_lock_init(&napi->poll_lock); |
2544 | napi->poll_owner = -1; | 2626 | napi->poll_owner = -1; |
2545 | #endif | 2627 | #endif |
@@ -2552,6 +2634,7 @@ void netif_napi_del(struct napi_struct *napi) | |||
2552 | struct sk_buff *skb, *next; | 2634 | struct sk_buff *skb, *next; |
2553 | 2635 | ||
2554 | list_del_init(&napi->dev_list); | 2636 | list_del_init(&napi->dev_list); |
2637 | kfree(napi->skb); | ||
2555 | 2638 | ||
2556 | for (skb = napi->gro_list; skb; skb = next) { | 2639 | for (skb = napi->gro_list; skb; skb = next) { |
2557 | next = skb->next; | 2640 | next = skb->next; |
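For orientation, the napi_gro_receive()/napi_gro_frags() paths reworked above are what a NAPI driver feeds from its poll routine in place of netif_receive_skb(); GRO then either merges the frame into an skb held on napi->gro_list or hands it up the stack. The skeleton below is illustrative only: struct foo_priv and foo_rx_pop() are hypothetical stand-ins for a real driver's receive machinery.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct foo_priv {
	struct napi_struct napi;
	/* device-specific RX ring state would live here */
};

/* Stub for the sketch; a real driver would pull the next completed
 * frame off its RX ring and return NULL once the ring is drained. */
static struct sk_buff *foo_rx_pop(struct foo_priv *priv)
{
	return NULL;
}

static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
	struct sk_buff *skb;
	int work_done = 0;

	while (work_done < budget && (skb = foo_rx_pop(priv)) != NULL) {
		/* Instead of netif_receive_skb(): let GRO try to merge the
		 * frame with one already held on napi->gro_list. */
		napi_gro_receive(napi, skb);
		work_done++;
	}

	if (work_done < budget)
		napi_complete(napi);	/* frames still held by GRO are flushed
					 * as part of completing the poll */
	return work_done;
}

Drivers that receive into page fragments can instead fill a struct napi_gro_fraginfo and call the new napi_gro_frags() added above.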
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index b8d0abb26433..5110b359c758 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2594,6 +2594,17 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) | |||
2594 | 2594 | ||
2595 | if (skb_shinfo(p)->frag_list) | 2595 | if (skb_shinfo(p)->frag_list) |
2596 | goto merge; | 2596 | goto merge; |
2597 | else if (!skb_headlen(p) && !skb_headlen(skb) && | ||
2598 | skb_shinfo(p)->nr_frags + skb_shinfo(skb)->nr_frags < | ||
2599 | MAX_SKB_FRAGS) { | ||
2600 | memcpy(skb_shinfo(p)->frags + skb_shinfo(p)->nr_frags, | ||
2601 | skb_shinfo(skb)->frags, | ||
2602 | skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t)); | ||
2603 | |||
2604 | skb_shinfo(p)->nr_frags += skb_shinfo(skb)->nr_frags; | ||
2605 | NAPI_GRO_CB(skb)->free = 1; | ||
2606 | goto done; | ||
2607 | } | ||
2597 | 2608 | ||
2598 | headroom = skb_headroom(p); | 2609 | headroom = skb_headroom(p); |
2599 | nskb = netdev_alloc_skb(p->dev, headroom); | 2610 | nskb = netdev_alloc_skb(p->dev, headroom); |
@@ -2613,6 +2624,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) | |||
2613 | 2624 | ||
2614 | *NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p); | 2625 | *NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p); |
2615 | skb_shinfo(nskb)->frag_list = p; | 2626 | skb_shinfo(nskb)->frag_list = p; |
2627 | skb_shinfo(nskb)->gso_size = skb_shinfo(p)->gso_size; | ||
2616 | skb_header_release(p); | 2628 | skb_header_release(p); |
2617 | nskb->prev = p; | 2629 | nskb->prev = p; |
2618 | 2630 | ||
@@ -2627,11 +2639,12 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) | |||
2627 | p = nskb; | 2639 | p = nskb; |
2628 | 2640 | ||
2629 | merge: | 2641 | merge: |
2630 | NAPI_GRO_CB(p)->count++; | ||
2631 | p->prev->next = skb; | 2642 | p->prev->next = skb; |
2632 | p->prev = skb; | 2643 | p->prev = skb; |
2633 | skb_header_release(skb); | 2644 | skb_header_release(skb); |
2634 | 2645 | ||
2646 | done: | ||
2647 | NAPI_GRO_CB(p)->count++; | ||
2635 | p->data_len += skb->len; | 2648 | p->data_len += skb->len; |
2636 | p->truesize += skb->len; | 2649 | p->truesize += skb->len; |
2637 | p->len += skb->len; | 2650 | p->len += skb->len; |
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index 5dbfe5fdc0d6..8379496de82b 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -191,7 +191,7 @@ static int dcbnl_reply(u8 value, u8 event, u8 cmd, u8 attr, u32 pid, | |||
191 | return 0; | 191 | return 0; |
192 | nlmsg_failure: | 192 | nlmsg_failure: |
193 | err: | 193 | err: |
194 | kfree(dcbnl_skb); | 194 | kfree_skb(dcbnl_skb); |
195 | return ret; | 195 | return ret; |
196 | } | 196 | } |
197 | 197 | ||
@@ -272,7 +272,7 @@ static int dcbnl_getpfccfg(struct net_device *netdev, struct nlattr **tb, | |||
272 | return 0; | 272 | return 0; |
273 | nlmsg_failure: | 273 | nlmsg_failure: |
274 | err: | 274 | err: |
275 | kfree(dcbnl_skb); | 275 | kfree_skb(dcbnl_skb); |
276 | err_out: | 276 | err_out: |
277 | return -EINVAL; | 277 | return -EINVAL; |
278 | } | 278 | } |
@@ -314,7 +314,7 @@ static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlattr **tb, | |||
314 | 314 | ||
315 | nlmsg_failure: | 315 | nlmsg_failure: |
316 | err: | 316 | err: |
317 | kfree(dcbnl_skb); | 317 | kfree_skb(dcbnl_skb); |
318 | err_out: | 318 | err_out: |
319 | return -EINVAL; | 319 | return -EINVAL; |
320 | } | 320 | } |
@@ -380,7 +380,7 @@ static int dcbnl_getcap(struct net_device *netdev, struct nlattr **tb, | |||
380 | return 0; | 380 | return 0; |
381 | nlmsg_failure: | 381 | nlmsg_failure: |
382 | err: | 382 | err: |
383 | kfree(dcbnl_skb); | 383 | kfree_skb(dcbnl_skb); |
384 | err_out: | 384 | err_out: |
385 | return -EINVAL; | 385 | return -EINVAL; |
386 | } | 386 | } |
@@ -458,7 +458,7 @@ static int dcbnl_getnumtcs(struct net_device *netdev, struct nlattr **tb, | |||
458 | return 0; | 458 | return 0; |
459 | nlmsg_failure: | 459 | nlmsg_failure: |
460 | err: | 460 | err: |
461 | kfree(dcbnl_skb); | 461 | kfree_skb(dcbnl_skb); |
462 | err_out: | 462 | err_out: |
463 | return ret; | 463 | return ret; |
464 | } | 464 | } |
@@ -687,7 +687,7 @@ err_pg: | |||
687 | nla_nest_cancel(dcbnl_skb, pg_nest); | 687 | nla_nest_cancel(dcbnl_skb, pg_nest); |
688 | nlmsg_failure: | 688 | nlmsg_failure: |
689 | err: | 689 | err: |
690 | kfree(dcbnl_skb); | 690 | kfree_skb(dcbnl_skb); |
691 | err_out: | 691 | err_out: |
692 | ret = -EINVAL; | 692 | ret = -EINVAL; |
693 | return ret; | 693 | return ret; |
@@ -949,7 +949,7 @@ err_bcn: | |||
949 | nla_nest_cancel(dcbnl_skb, bcn_nest); | 949 | nla_nest_cancel(dcbnl_skb, bcn_nest); |
950 | nlmsg_failure: | 950 | nlmsg_failure: |
951 | err: | 951 | err: |
952 | kfree(dcbnl_skb); | 952 | kfree_skb(dcbnl_skb); |
953 | err_out: | 953 | err_out: |
954 | ret = -EINVAL; | 954 | ret = -EINVAL; |
955 | return ret; | 955 | return ret; |
diff --git a/net/dccp/Kconfig b/net/dccp/Kconfig
index 7aa2a7acc7ec..ad6dffd9070e 100644
--- a/net/dccp/Kconfig
+++ b/net/dccp/Kconfig
@@ -1,7 +1,6 @@ | |||
1 | menuconfig IP_DCCP | 1 | menuconfig IP_DCCP |
2 | tristate "The DCCP Protocol (EXPERIMENTAL)" | 2 | tristate "The DCCP Protocol (EXPERIMENTAL)" |
3 | depends on INET && EXPERIMENTAL | 3 | depends on INET && EXPERIMENTAL |
4 | select IP_DCCP_CCID2 | ||
5 | ---help--- | 4 | ---help--- |
6 | Datagram Congestion Control Protocol (RFC 4340) | 5 | Datagram Congestion Control Protocol (RFC 4340) |
7 | 6 | ||
@@ -25,9 +24,6 @@ config INET_DCCP_DIAG | |||
25 | def_tristate y if (IP_DCCP = y && INET_DIAG = y) | 24 | def_tristate y if (IP_DCCP = y && INET_DIAG = y) |
26 | def_tristate m | 25 | def_tristate m |
27 | 26 | ||
28 | config IP_DCCP_ACKVEC | ||
29 | bool | ||
30 | |||
31 | source "net/dccp/ccids/Kconfig" | 27 | source "net/dccp/ccids/Kconfig" |
32 | 28 | ||
33 | menu "DCCP Kernel Hacking" | 29 | menu "DCCP Kernel Hacking" |
diff --git a/net/dccp/Makefile b/net/dccp/Makefile
index f4f8793aafff..2991efcc8dea 100644
--- a/net/dccp/Makefile
+++ b/net/dccp/Makefile
@@ -2,14 +2,23 @@ obj-$(CONFIG_IP_DCCP) += dccp.o dccp_ipv4.o | |||
2 | 2 | ||
3 | dccp-y := ccid.o feat.o input.o minisocks.o options.o output.o proto.o timer.o | 3 | dccp-y := ccid.o feat.o input.o minisocks.o options.o output.o proto.o timer.o |
4 | 4 | ||
5 | # | ||
6 | # CCID algorithms to be used by dccp.ko | ||
7 | # | ||
8 | # CCID-2 is default (RFC 4340, p. 77) and has Ack Vectors as dependency | ||
9 | dccp-y += ccids/ccid2.o ackvec.o | ||
10 | dccp-$(CONFIG_IP_DCCP_CCID3) += ccids/ccid3.o | ||
11 | dccp-$(CONFIG_IP_DCCP_TFRC_LIB) += ccids/lib/tfrc.o \ | ||
12 | ccids/lib/tfrc_equation.o \ | ||
13 | ccids/lib/packet_history.o \ | ||
14 | ccids/lib/loss_interval.o | ||
15 | |||
5 | dccp_ipv4-y := ipv4.o | 16 | dccp_ipv4-y := ipv4.o |
6 | 17 | ||
7 | # build dccp_ipv6 as module whenever either IPv6 or DCCP is a module | 18 | # build dccp_ipv6 as module whenever either IPv6 or DCCP is a module |
8 | obj-$(subst y,$(CONFIG_IP_DCCP),$(CONFIG_IPV6)) += dccp_ipv6.o | 19 | obj-$(subst y,$(CONFIG_IP_DCCP),$(CONFIG_IPV6)) += dccp_ipv6.o |
9 | dccp_ipv6-y := ipv6.o | 20 | dccp_ipv6-y := ipv6.o |
10 | 21 | ||
11 | dccp-$(CONFIG_IP_DCCP_ACKVEC) += ackvec.o | ||
12 | |||
13 | obj-$(CONFIG_INET_DCCP_DIAG) += dccp_diag.o | 22 | obj-$(CONFIG_INET_DCCP_DIAG) += dccp_diag.o |
14 | obj-$(CONFIG_NET_DCCPPROBE) += dccp_probe.o | 23 | obj-$(CONFIG_NET_DCCPPROBE) += dccp_probe.o |
15 | 24 | ||
@@ -17,5 +26,3 @@ dccp-$(CONFIG_SYSCTL) += sysctl.o | |||
17 | 26 | ||
18 | dccp_diag-y := diag.o | 27 | dccp_diag-y := diag.o |
19 | dccp_probe-y := probe.o | 28 | dccp_probe-y := probe.o |
20 | |||
21 | obj-y += ccids/ | ||
diff --git a/net/dccp/ackvec.h b/net/dccp/ackvec.h
index 4ccee030524e..45f95e55f873 100644
--- a/net/dccp/ackvec.h
+++ b/net/dccp/ackvec.h
@@ -84,7 +84,6 @@ struct dccp_ackvec_record { | |||
84 | struct sock; | 84 | struct sock; |
85 | struct sk_buff; | 85 | struct sk_buff; |
86 | 86 | ||
87 | #ifdef CONFIG_IP_DCCP_ACKVEC | ||
88 | extern int dccp_ackvec_init(void); | 87 | extern int dccp_ackvec_init(void); |
89 | extern void dccp_ackvec_exit(void); | 88 | extern void dccp_ackvec_exit(void); |
90 | 89 | ||
@@ -106,52 +105,4 @@ static inline int dccp_ackvec_pending(const struct dccp_ackvec *av) | |||
106 | { | 105 | { |
107 | return av->av_vec_len; | 106 | return av->av_vec_len; |
108 | } | 107 | } |
109 | #else /* CONFIG_IP_DCCP_ACKVEC */ | ||
110 | static inline int dccp_ackvec_init(void) | ||
111 | { | ||
112 | return 0; | ||
113 | } | ||
114 | |||
115 | static inline void dccp_ackvec_exit(void) | ||
116 | { | ||
117 | } | ||
118 | |||
119 | static inline struct dccp_ackvec *dccp_ackvec_alloc(const gfp_t priority) | ||
120 | { | ||
121 | return NULL; | ||
122 | } | ||
123 | |||
124 | static inline void dccp_ackvec_free(struct dccp_ackvec *av) | ||
125 | { | ||
126 | } | ||
127 | |||
128 | static inline int dccp_ackvec_add(struct dccp_ackvec *av, const struct sock *sk, | ||
129 | const u64 ackno, const u8 state) | ||
130 | { | ||
131 | return -1; | ||
132 | } | ||
133 | |||
134 | static inline void dccp_ackvec_check_rcv_ackno(struct dccp_ackvec *av, | ||
135 | struct sock *sk, const u64 ackno) | ||
136 | { | ||
137 | } | ||
138 | |||
139 | static inline int dccp_ackvec_parse(struct sock *sk, const struct sk_buff *skb, | ||
140 | const u64 *ackno, const u8 opt, | ||
141 | const u8 *value, const u8 len) | ||
142 | { | ||
143 | return -1; | ||
144 | } | ||
145 | |||
146 | static inline int dccp_insert_option_ackvec(const struct sock *sk, | ||
147 | const struct sk_buff *skb) | ||
148 | { | ||
149 | return -1; | ||
150 | } | ||
151 | |||
152 | static inline int dccp_ackvec_pending(const struct dccp_ackvec *av) | ||
153 | { | ||
154 | return 0; | ||
155 | } | ||
156 | #endif /* CONFIG_IP_DCCP_ACKVEC */ | ||
157 | #endif /* _ACKVEC_H */ | 108 | #endif /* _ACKVEC_H */ |
diff --git a/net/dccp/ccid.c b/net/dccp/ccid.c
index bcc643f992ae..f3e9ba1cfd01 100644
--- a/net/dccp/ccid.c
+++ b/net/dccp/ccid.c
@@ -12,56 +12,70 @@ | |||
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include "ccid.h" | 14 | #include "ccid.h" |
15 | #include "ccids/lib/tfrc.h" | ||
15 | 16 | ||
16 | static u8 builtin_ccids[] = { | 17 | static struct ccid_operations *ccids[] = { |
17 | DCCPC_CCID2, /* CCID2 is supported by default */ | 18 | &ccid2_ops, |
18 | #if defined(CONFIG_IP_DCCP_CCID3) || defined(CONFIG_IP_DCCP_CCID3_MODULE) | 19 | #ifdef CONFIG_IP_DCCP_CCID3 |
19 | DCCPC_CCID3, | 20 | &ccid3_ops, |
20 | #endif | 21 | #endif |
21 | }; | 22 | }; |
22 | 23 | ||
23 | static struct ccid_operations *ccids[CCID_MAX]; | 24 | static struct ccid_operations *ccid_by_number(const u8 id) |
24 | #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT) | ||
25 | static atomic_t ccids_lockct = ATOMIC_INIT(0); | ||
26 | static DEFINE_SPINLOCK(ccids_lock); | ||
27 | |||
28 | /* | ||
29 | * The strategy is: modifications ccids vector are short, do not sleep and | ||
30 | * veeery rare, but read access should be free of any exclusive locks. | ||
31 | */ | ||
32 | static void ccids_write_lock(void) | ||
33 | { | 25 | { |
34 | spin_lock(&ccids_lock); | 26 | int i; |
35 | while (atomic_read(&ccids_lockct) != 0) { | 27 | |
36 | spin_unlock(&ccids_lock); | 28 | for (i = 0; i < ARRAY_SIZE(ccids); i++) |
37 | yield(); | 29 | if (ccids[i]->ccid_id == id) |
38 | spin_lock(&ccids_lock); | 30 | return ccids[i]; |
39 | } | 31 | return NULL; |
40 | } | 32 | } |
41 | 33 | ||
42 | static inline void ccids_write_unlock(void) | 34 | /* check that up to @array_len members in @ccid_array are supported */ |
35 | bool ccid_support_check(u8 const *ccid_array, u8 array_len) | ||
43 | { | 36 | { |
44 | spin_unlock(&ccids_lock); | 37 | while (array_len > 0) |
38 | if (ccid_by_number(ccid_array[--array_len]) == NULL) | ||
39 | return false; | ||
40 | return true; | ||
45 | } | 41 | } |
46 | 42 | ||
47 | static inline void ccids_read_lock(void) | 43 | /** |
44 | * ccid_get_builtin_ccids - Populate a list of built-in CCIDs | ||
45 | * @ccid_array: pointer to copy into | ||
46 | * @array_len: value to return length into | ||
47 | * This function allocates memory - caller must see that it is freed after use. | ||
48 | */ | ||
49 | int ccid_get_builtin_ccids(u8 **ccid_array, u8 *array_len) | ||
48 | { | 50 | { |
49 | atomic_inc(&ccids_lockct); | 51 | *ccid_array = kmalloc(ARRAY_SIZE(ccids), gfp_any()); |
50 | smp_mb__after_atomic_inc(); | 52 | if (*ccid_array == NULL) |
51 | spin_unlock_wait(&ccids_lock); | 53 | return -ENOBUFS; |
54 | |||
55 | for (*array_len = 0; *array_len < ARRAY_SIZE(ccids); *array_len += 1) | ||
56 | (*ccid_array)[*array_len] = ccids[*array_len]->ccid_id; | ||
57 | return 0; | ||
52 | } | 58 | } |
53 | 59 | ||
54 | static inline void ccids_read_unlock(void) | 60 | int ccid_getsockopt_builtin_ccids(struct sock *sk, int len, |
61 | char __user *optval, int __user *optlen) | ||
55 | { | 62 | { |
56 | atomic_dec(&ccids_lockct); | 63 | u8 *ccid_array, array_len; |
57 | } | 64 | int err = 0; |
58 | 65 | ||
59 | #else | 66 | if (len < ARRAY_SIZE(ccids)) |
60 | #define ccids_write_lock() do { } while(0) | 67 | return -EINVAL; |
61 | #define ccids_write_unlock() do { } while(0) | 68 | |
62 | #define ccids_read_lock() do { } while(0) | 69 | if (ccid_get_builtin_ccids(&ccid_array, &array_len)) |
63 | #define ccids_read_unlock() do { } while(0) | 70 | return -ENOBUFS; |
64 | #endif | 71 | |
72 | if (put_user(array_len, optlen) || | ||
73 | copy_to_user(optval, ccid_array, array_len)) | ||
74 | err = -EFAULT; | ||
75 | |||
76 | kfree(ccid_array); | ||
77 | return err; | ||
78 | } | ||
65 | 79 | ||
66 | static struct kmem_cache *ccid_kmem_cache_create(int obj_size, const char *fmt,...) | 80 | static struct kmem_cache *ccid_kmem_cache_create(int obj_size, const char *fmt,...) |
67 | { | 81 | { |
@@ -93,48 +107,7 @@ static void ccid_kmem_cache_destroy(struct kmem_cache *slab) | |||
93 | } | 107 | } |
94 | } | 108 | } |
95 | 109 | ||
96 | /* check that up to @array_len members in @ccid_array are supported */ | 110 | static int ccid_activate(struct ccid_operations *ccid_ops) |
97 | bool ccid_support_check(u8 const *ccid_array, u8 array_len) | ||
98 | { | ||
99 | u8 i, j, found; | ||
100 | |||
101 | for (i = 0, found = 0; i < array_len; i++, found = 0) { | ||
102 | for (j = 0; !found && j < ARRAY_SIZE(builtin_ccids); j++) | ||
103 | found = (ccid_array[i] == builtin_ccids[j]); | ||
104 | if (!found) | ||
105 | return false; | ||
106 | } | ||
107 | return true; | ||
108 | } | ||
109 | |||
110 | /** | ||
111 | * ccid_get_builtin_ccids - Provide copy of `builtin' CCID array | ||
112 | * @ccid_array: pointer to copy into | ||
113 | * @array_len: value to return length into | ||
114 | * This function allocates memory - caller must see that it is freed after use. | ||
115 | */ | ||
116 | int ccid_get_builtin_ccids(u8 **ccid_array, u8 *array_len) | ||
117 | { | ||
118 | *ccid_array = kmemdup(builtin_ccids, sizeof(builtin_ccids), gfp_any()); | ||
119 | if (*ccid_array == NULL) | ||
120 | return -ENOBUFS; | ||
121 | *array_len = ARRAY_SIZE(builtin_ccids); | ||
122 | return 0; | ||
123 | } | ||
124 | |||
125 | int ccid_getsockopt_builtin_ccids(struct sock *sk, int len, | ||
126 | char __user *optval, int __user *optlen) | ||
127 | { | ||
128 | if (len < sizeof(builtin_ccids)) | ||
129 | return -EINVAL; | ||
130 | |||
131 | if (put_user(sizeof(builtin_ccids), optlen) || | ||
132 | copy_to_user(optval, builtin_ccids, sizeof(builtin_ccids))) | ||
133 | return -EFAULT; | ||
134 | return 0; | ||
135 | } | ||
136 | |||
137 | int ccid_register(struct ccid_operations *ccid_ops) | ||
138 | { | 111 | { |
139 | int err = -ENOBUFS; | 112 | int err = -ENOBUFS; |
140 | 113 | ||
@@ -152,79 +125,40 @@ int ccid_register(struct ccid_operations *ccid_ops) | |||
152 | if (ccid_ops->ccid_hc_tx_slab == NULL) | 125 | if (ccid_ops->ccid_hc_tx_slab == NULL) |
153 | goto out_free_rx_slab; | 126 | goto out_free_rx_slab; |
154 | 127 | ||
155 | ccids_write_lock(); | 128 | pr_info("CCID: Activated CCID %d (%s)\n", |
156 | err = -EEXIST; | ||
157 | if (ccids[ccid_ops->ccid_id] == NULL) { | ||
158 | ccids[ccid_ops->ccid_id] = ccid_ops; | ||
159 | err = 0; | ||
160 | } | ||
161 | ccids_write_unlock(); | ||
162 | if (err != 0) | ||
163 | goto out_free_tx_slab; | ||
164 | |||
165 | pr_info("CCID: Registered CCID %d (%s)\n", | ||
166 | ccid_ops->ccid_id, ccid_ops->ccid_name); | 129 | ccid_ops->ccid_id, ccid_ops->ccid_name); |
130 | err = 0; | ||
167 | out: | 131 | out: |
168 | return err; | 132 | return err; |
169 | out_free_tx_slab: | ||
170 | ccid_kmem_cache_destroy(ccid_ops->ccid_hc_tx_slab); | ||
171 | ccid_ops->ccid_hc_tx_slab = NULL; | ||
172 | goto out; | ||
173 | out_free_rx_slab: | 133 | out_free_rx_slab: |
174 | ccid_kmem_cache_destroy(ccid_ops->ccid_hc_rx_slab); | 134 | ccid_kmem_cache_destroy(ccid_ops->ccid_hc_rx_slab); |
175 | ccid_ops->ccid_hc_rx_slab = NULL; | 135 | ccid_ops->ccid_hc_rx_slab = NULL; |
176 | goto out; | 136 | goto out; |
177 | } | 137 | } |
178 | 138 | ||
179 | EXPORT_SYMBOL_GPL(ccid_register); | 139 | static void ccid_deactivate(struct ccid_operations *ccid_ops) |
180 | |||
181 | int ccid_unregister(struct ccid_operations *ccid_ops) | ||
182 | { | 140 | { |
183 | ccids_write_lock(); | ||
184 | ccids[ccid_ops->ccid_id] = NULL; | ||
185 | ccids_write_unlock(); | ||
186 | |||
187 | ccid_kmem_cache_destroy(ccid_ops->ccid_hc_tx_slab); | 141 | ccid_kmem_cache_destroy(ccid_ops->ccid_hc_tx_slab); |
188 | ccid_ops->ccid_hc_tx_slab = NULL; | 142 | ccid_ops->ccid_hc_tx_slab = NULL; |
189 | ccid_kmem_cache_destroy(ccid_ops->ccid_hc_rx_slab); | 143 | ccid_kmem_cache_destroy(ccid_ops->ccid_hc_rx_slab); |
190 | ccid_ops->ccid_hc_rx_slab = NULL; | 144 | ccid_ops->ccid_hc_rx_slab = NULL; |
191 | 145 | ||
192 | pr_info("CCID: Unregistered CCID %d (%s)\n", | 146 | pr_info("CCID: Deactivated CCID %d (%s)\n", |
193 | ccid_ops->ccid_id, ccid_ops->ccid_name); | 147 | ccid_ops->ccid_id, ccid_ops->ccid_name); |
194 | return 0; | ||
195 | } | 148 | } |
196 | 149 | ||
197 | EXPORT_SYMBOL_GPL(ccid_unregister); | 150 | struct ccid *ccid_new(const u8 id, struct sock *sk, bool rx) |
198 | |||
199 | struct ccid *ccid_new(unsigned char id, struct sock *sk, int rx, gfp_t gfp) | ||
200 | { | 151 | { |
201 | struct ccid_operations *ccid_ops; | 152 | struct ccid_operations *ccid_ops = ccid_by_number(id); |
202 | struct ccid *ccid = NULL; | 153 | struct ccid *ccid = NULL; |
203 | 154 | ||
204 | ccids_read_lock(); | ||
205 | #ifdef CONFIG_MODULES | ||
206 | if (ccids[id] == NULL) { | ||
207 | /* We only try to load if in process context */ | ||
208 | ccids_read_unlock(); | ||
209 | if (gfp & GFP_ATOMIC) | ||
210 | goto out; | ||
211 | request_module("net-dccp-ccid-%d", id); | ||
212 | ccids_read_lock(); | ||
213 | } | ||
214 | #endif | ||
215 | ccid_ops = ccids[id]; | ||
216 | if (ccid_ops == NULL) | 155 | if (ccid_ops == NULL) |
217 | goto out_unlock; | 156 | goto out; |
218 | |||
219 | if (!try_module_get(ccid_ops->ccid_owner)) | ||
220 | goto out_unlock; | ||
221 | |||
222 | ccids_read_unlock(); | ||
223 | 157 | ||
224 | ccid = kmem_cache_alloc(rx ? ccid_ops->ccid_hc_rx_slab : | 158 | ccid = kmem_cache_alloc(rx ? ccid_ops->ccid_hc_rx_slab : |
225 | ccid_ops->ccid_hc_tx_slab, gfp); | 159 | ccid_ops->ccid_hc_tx_slab, gfp_any()); |
226 | if (ccid == NULL) | 160 | if (ccid == NULL) |
227 | goto out_module_put; | 161 | goto out; |
228 | ccid->ccid_ops = ccid_ops; | 162 | ccid->ccid_ops = ccid_ops; |
229 | if (rx) { | 163 | if (rx) { |
230 | memset(ccid + 1, 0, ccid_ops->ccid_hc_rx_obj_size); | 164 | memset(ccid + 1, 0, ccid_ops->ccid_hc_rx_obj_size); |
@@ -239,53 +173,57 @@ struct ccid *ccid_new(unsigned char id, struct sock *sk, int rx, gfp_t gfp) | |||
239 | } | 173 | } |
240 | out: | 174 | out: |
241 | return ccid; | 175 | return ccid; |
242 | out_unlock: | ||
243 | ccids_read_unlock(); | ||
244 | goto out; | ||
245 | out_free_ccid: | 176 | out_free_ccid: |
246 | kmem_cache_free(rx ? ccid_ops->ccid_hc_rx_slab : | 177 | kmem_cache_free(rx ? ccid_ops->ccid_hc_rx_slab : |
247 | ccid_ops->ccid_hc_tx_slab, ccid); | 178 | ccid_ops->ccid_hc_tx_slab, ccid); |
248 | ccid = NULL; | 179 | ccid = NULL; |
249 | out_module_put: | ||
250 | module_put(ccid_ops->ccid_owner); | ||
251 | goto out; | 180 | goto out; |
252 | } | 181 | } |
253 | 182 | ||
254 | EXPORT_SYMBOL_GPL(ccid_new); | 183 | void ccid_hc_rx_delete(struct ccid *ccid, struct sock *sk) |
255 | |||
256 | static void ccid_delete(struct ccid *ccid, struct sock *sk, int rx) | ||
257 | { | 184 | { |
258 | struct ccid_operations *ccid_ops; | 185 | if (ccid != NULL) { |
259 | 186 | if (ccid->ccid_ops->ccid_hc_rx_exit != NULL) | |
260 | if (ccid == NULL) | 187 | ccid->ccid_ops->ccid_hc_rx_exit(sk); |
261 | return; | 188 | kmem_cache_free(ccid->ccid_ops->ccid_hc_rx_slab, ccid); |
262 | |||
263 | ccid_ops = ccid->ccid_ops; | ||
264 | if (rx) { | ||
265 | if (ccid_ops->ccid_hc_rx_exit != NULL) | ||
266 | ccid_ops->ccid_hc_rx_exit(sk); | ||
267 | kmem_cache_free(ccid_ops->ccid_hc_rx_slab, ccid); | ||
268 | } else { | ||
269 | if (ccid_ops->ccid_hc_tx_exit != NULL) | ||
270 | ccid_ops->ccid_hc_tx_exit(sk); | ||
271 | kmem_cache_free(ccid_ops->ccid_hc_tx_slab, ccid); | ||
272 | } | 189 | } |
273 | ccids_read_lock(); | ||
274 | if (ccids[ccid_ops->ccid_id] != NULL) | ||
275 | module_put(ccid_ops->ccid_owner); | ||
276 | ccids_read_unlock(); | ||
277 | } | 190 | } |
278 | 191 | ||
279 | void ccid_hc_rx_delete(struct ccid *ccid, struct sock *sk) | 192 | void ccid_hc_tx_delete(struct ccid *ccid, struct sock *sk) |
280 | { | 193 | { |
281 | ccid_delete(ccid, sk, 1); | 194 | if (ccid != NULL) { |
195 | if (ccid->ccid_ops->ccid_hc_tx_exit != NULL) | ||
196 | ccid->ccid_ops->ccid_hc_tx_exit(sk); | ||
197 | kmem_cache_free(ccid->ccid_ops->ccid_hc_tx_slab, ccid); | ||
198 | } | ||
282 | } | 199 | } |
283 | 200 | ||
284 | EXPORT_SYMBOL_GPL(ccid_hc_rx_delete); | 201 | int __init ccid_initialize_builtins(void) |
285 | |||
286 | void ccid_hc_tx_delete(struct ccid *ccid, struct sock *sk) | ||
287 | { | 202 | { |
288 | ccid_delete(ccid, sk, 0); | 203 | int i, err = tfrc_lib_init(); |
204 | |||
205 | if (err) | ||
206 | return err; | ||
207 | |||
208 | for (i = 0; i < ARRAY_SIZE(ccids); i++) { | ||
209 | err = ccid_activate(ccids[i]); | ||
210 | if (err) | ||
211 | goto unwind_registrations; | ||
212 | } | ||
213 | return 0; | ||
214 | |||
215 | unwind_registrations: | ||
216 | while(--i >= 0) | ||
217 | ccid_deactivate(ccids[i]); | ||
218 | tfrc_lib_exit(); | ||
219 | return err; | ||
289 | } | 220 | } |
290 | 221 | ||
291 | EXPORT_SYMBOL_GPL(ccid_hc_tx_delete); | 222 | void ccid_cleanup_builtins(void) |
223 | { | ||
224 | int i; | ||
225 | |||
226 | for (i = 0; i < ARRAY_SIZE(ccids); i++) | ||
227 | ccid_deactivate(ccids[i]); | ||
228 | tfrc_lib_exit(); | ||
229 | } | ||
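The ccid.c rewrite above drops run-time ccid_register()/ccid_unregister() and the ccids_lockct juggling in favour of a fixed table of built-in CCIDs, a linear ccid_by_number() lookup, and an activate-all/unwind-on-failure init path. The user-space toy program below (all toy_* names invented for the sketch) illustrates that static-table-plus-unwind pattern in isolation.

#include <stdio.h>

/* Toy stand-ins for ccid_operations and its activate/deactivate hooks. */
struct toy_ops {
	int id;
	const char *name;
	int (*activate)(const struct toy_ops *ops);
	void (*deactivate)(const struct toy_ops *ops);
};

static int toy_activate(const struct toy_ops *ops)
{
	printf("activated %d (%s)\n", ops->id, ops->name);
	return 0;	/* a real hook could fail, e.g. on allocation */
}

static void toy_deactivate(const struct toy_ops *ops)
{
	printf("deactivated %d (%s)\n", ops->id, ops->name);
}

/* Fixed, compile-time table -- the analogue of the new static ccids[]. */
static const struct toy_ops toys[] = {
	{ 2, "tcp-like",     toy_activate, toy_deactivate },
	{ 3, "tcp-friendly", toy_activate, toy_deactivate },
};

#define NUM_TOYS ((int)(sizeof(toys) / sizeof(toys[0])))

/* Linear search by numeric id, as ccid_by_number() now does. */
static const struct toy_ops *toy_by_number(int id)
{
	int i;

	for (i = 0; i < NUM_TOYS; i++)
		if (toys[i].id == id)
			return &toys[i];
	return NULL;
}

/* Activate every entry; on failure, unwind the ones already activated. */
static int toy_initialize_builtins(void)
{
	int i, err;

	for (i = 0; i < NUM_TOYS; i++) {
		err = toys[i].activate(&toys[i]);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	while (--i >= 0)
		toys[i].deactivate(&toys[i]);
	return err;
}

int main(void)
{
	const struct toy_ops *ops;

	if (toy_initialize_builtins())
		return 1;

	ops = toy_by_number(3);
	printf("lookup 3 -> %s\n", ops ? ops->name : "not supported");
	return 0;
}

The kernel version additionally couples tfrc_lib_init()/tfrc_lib_exit() into the same init/cleanup pair, which the toy omits.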
diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h
index 18f69423a708..facedd20b531 100644
--- a/net/dccp/ccid.h
+++ b/net/dccp/ccid.h
@@ -29,7 +29,6 @@ struct tcp_info; | |||
29 | * @ccid_id: numerical CCID ID (up to %CCID_MAX, cf. table 5 in RFC 4340, 10.) | 29 | * @ccid_id: numerical CCID ID (up to %CCID_MAX, cf. table 5 in RFC 4340, 10.) |
30 | * @ccid_ccmps: the CCMPS including network/transport headers (0 when disabled) | 30 | * @ccid_ccmps: the CCMPS including network/transport headers (0 when disabled) |
31 | * @ccid_name: alphabetical identifier string for @ccid_id | 31 | * @ccid_name: alphabetical identifier string for @ccid_id |
32 | * @ccid_owner: module which implements/owns this CCID | ||
33 | * @ccid_hc_{r,t}x_slab: memory pool for the receiver/sender half-connection | 32 | * @ccid_hc_{r,t}x_slab: memory pool for the receiver/sender half-connection |
34 | * @ccid_hc_{r,t}x_obj_size: size of the receiver/sender half-connection socket | 33 | * @ccid_hc_{r,t}x_obj_size: size of the receiver/sender half-connection socket |
35 | * | 34 | * |
@@ -48,7 +47,6 @@ struct ccid_operations { | |||
48 | unsigned char ccid_id; | 47 | unsigned char ccid_id; |
49 | __u32 ccid_ccmps; | 48 | __u32 ccid_ccmps; |
50 | const char *ccid_name; | 49 | const char *ccid_name; |
51 | struct module *ccid_owner; | ||
52 | struct kmem_cache *ccid_hc_rx_slab, | 50 | struct kmem_cache *ccid_hc_rx_slab, |
53 | *ccid_hc_tx_slab; | 51 | *ccid_hc_tx_slab; |
54 | __u32 ccid_hc_rx_obj_size, | 52 | __u32 ccid_hc_rx_obj_size, |
@@ -90,8 +88,13 @@ struct ccid_operations { | |||
90 | int __user *optlen); | 88 | int __user *optlen); |
91 | }; | 89 | }; |
92 | 90 | ||
93 | extern int ccid_register(struct ccid_operations *ccid_ops); | 91 | extern struct ccid_operations ccid2_ops; |
94 | extern int ccid_unregister(struct ccid_operations *ccid_ops); | 92 | #ifdef CONFIG_IP_DCCP_CCID3 |
93 | extern struct ccid_operations ccid3_ops; | ||
94 | #endif | ||
95 | |||
96 | extern int ccid_initialize_builtins(void); | ||
97 | extern void ccid_cleanup_builtins(void); | ||
95 | 98 | ||
96 | struct ccid { | 99 | struct ccid { |
97 | struct ccid_operations *ccid_ops; | 100 | struct ccid_operations *ccid_ops; |
@@ -108,8 +111,7 @@ extern int ccid_get_builtin_ccids(u8 **ccid_array, u8 *array_len); | |||
108 | extern int ccid_getsockopt_builtin_ccids(struct sock *sk, int len, | 111 | extern int ccid_getsockopt_builtin_ccids(struct sock *sk, int len, |
109 | char __user *, int __user *); | 112 | char __user *, int __user *); |
110 | 113 | ||
111 | extern struct ccid *ccid_new(unsigned char id, struct sock *sk, int rx, | 114 | extern struct ccid *ccid_new(const u8 id, struct sock *sk, bool rx); |
112 | gfp_t gfp); | ||
113 | 115 | ||
114 | static inline int ccid_get_current_rx_ccid(struct dccp_sock *dp) | 116 | static inline int ccid_get_current_rx_ccid(struct dccp_sock *dp) |
115 | { | 117 | { |
diff --git a/net/dccp/ccids/Kconfig b/net/dccp/ccids/Kconfig
index 12275943eab8..b28bf962edc3 100644
--- a/net/dccp/ccids/Kconfig
+++ b/net/dccp/ccids/Kconfig
@@ -1,80 +1,51 @@ | |||
1 | menu "DCCP CCIDs Configuration (EXPERIMENTAL)" | 1 | menu "DCCP CCIDs Configuration (EXPERIMENTAL)" |
2 | depends on EXPERIMENTAL | 2 | depends on EXPERIMENTAL |
3 | 3 | ||
4 | config IP_DCCP_CCID2 | ||
5 | tristate "CCID2 (TCP-Like) (EXPERIMENTAL)" | ||
6 | def_tristate IP_DCCP | ||
7 | select IP_DCCP_ACKVEC | ||
8 | ---help--- | ||
9 | CCID 2, TCP-like Congestion Control, denotes Additive Increase, | ||
10 | Multiplicative Decrease (AIMD) congestion control with behavior | ||
11 | modelled directly on TCP, including congestion window, slow start, | ||
12 | timeouts, and so forth [RFC 2581]. CCID 2 achieves maximum | ||
13 | bandwidth over the long term, consistent with the use of end-to-end | ||
14 | congestion control, but halves its congestion window in response to | ||
15 | each congestion event. This leads to the abrupt rate changes | ||
16 | typical of TCP. Applications should use CCID 2 if they prefer | ||
17 | maximum bandwidth utilization to steadiness of rate. This is often | ||
18 | the case for applications that are not playing their data directly | ||
19 | to the user. For example, a hypothetical application that | ||
20 | transferred files over DCCP, using application-level retransmissions | ||
21 | for lost packets, would prefer CCID 2 to CCID 3. On-line games may | ||
22 | also prefer CCID 2. See RFC 4341 for further details. | ||
23 | |||
24 | CCID2 is the default CCID used by DCCP. | ||
25 | |||
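The removed help text above describes CCID-2's AIMD behaviour (RFC 2581/4341): grow the congestion window additively while ACKs arrive, halve it on each congestion event. A minimal illustration of that rule, covering only the congestion-avoidance phase (slow start omitted) and not CCID-2's actual implementation:

/* Illustration of the AIMD rule from the help text, not kernel code. */
struct aimd_state {
	unsigned int cwnd;	/* congestion window, in packets */
	unsigned int acked;	/* packets acknowledged in the current window */
};

static void aimd_on_ack(struct aimd_state *s)
{
	if (++s->acked >= s->cwnd) {	/* one full window acknowledged */
		s->acked = 0;
		s->cwnd++;		/* additive increase: +1 packet per RTT */
	}
}

static void aimd_on_congestion_event(struct aimd_state *s)
{
	s->cwnd = s->cwnd > 1 ? s->cwnd / 2 : 1;	/* multiplicative decrease */
}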
26 | config IP_DCCP_CCID2_DEBUG | 4 | config IP_DCCP_CCID2_DEBUG |
27 | bool "CCID2 debugging messages" | 5 | bool "CCID-2 debugging messages" |
28 | depends on IP_DCCP_CCID2 | 6 | ---help--- |
29 | ---help--- | 7 | Enable CCID-2 specific debugging messages. |
30 | Enable CCID2-specific debugging messages. | ||
31 | 8 | ||
32 | When compiling CCID2 as a module, this debugging output can | 9 | The debugging output can additionally be toggled by setting the |
33 | additionally be toggled by setting the ccid2_debug module | 10 | ccid2_debug parameter to 0 or 1. |
34 | parameter to 0 or 1. | ||
35 | 11 | ||
36 | If in doubt, say N. | 12 | If in doubt, say N. |
37 | 13 | ||
38 | config IP_DCCP_CCID3 | 14 | config IP_DCCP_CCID3 |
39 | tristate "CCID3 (TCP-Friendly) (EXPERIMENTAL)" | 15 | bool "CCID-3 (TCP-Friendly) (EXPERIMENTAL)" |
40 | def_tristate IP_DCCP | 16 | def_bool y if (IP_DCCP = y || IP_DCCP = m) |
41 | select IP_DCCP_TFRC_LIB | ||
42 | ---help--- | 17 | ---help--- |
43 | CCID 3 denotes TCP-Friendly Rate Control (TFRC), an equation-based | 18 | CCID-3 denotes TCP-Friendly Rate Control (TFRC), an equation-based |
44 | rate-controlled congestion control mechanism. TFRC is designed to | 19 | rate-controlled congestion control mechanism. TFRC is designed to |
45 | be reasonably fair when competing for bandwidth with TCP-like flows, | 20 | be reasonably fair when competing for bandwidth with TCP-like flows, |
46 | where a flow is "reasonably fair" if its sending rate is generally | 21 | where a flow is "reasonably fair" if its sending rate is generally |
47 | within a factor of two of the sending rate of a TCP flow under the | 22 | within a factor of two of the sending rate of a TCP flow under the |
48 | same conditions. However, TFRC has a much lower variation of | 23 | same conditions. However, TFRC has a much lower variation of |
49 | throughput over time compared with TCP, which makes CCID 3 more | 24 | throughput over time compared with TCP, which makes CCID-3 more |
50 | suitable than CCID 2 for applications such streaming media where a | 25 | suitable than CCID-2 for applications such streaming media where a |
51 | relatively smooth sending rate is of importance. | 26 | relatively smooth sending rate is of importance. |
52 | 27 | ||
53 | CCID 3 is further described in RFC 4342, | 28 | CCID-3 is further described in RFC 4342, |
54 | http://www.ietf.org/rfc/rfc4342.txt | 29 | http://www.ietf.org/rfc/rfc4342.txt |
55 | 30 | ||
56 | The TFRC congestion control algorithms were initially described in | 31 | The TFRC congestion control algorithms were initially described in |
57 | RFC 3448. | 32 | RFC 5348. |
58 | 33 | ||
59 | This text was extracted from RFC 4340 (sec. 10.2), | 34 | This text was extracted from RFC 4340 (sec. 10.2), |
60 | http://www.ietf.org/rfc/rfc4340.txt | 35 | http://www.ietf.org/rfc/rfc4340.txt |
61 | |||
62 | To compile this CCID as a module, choose M here: the module will be | ||
63 | called dccp_ccid3. | ||
64 | 36 | ||
65 | If in doubt, say M. | 37 | If in doubt, say N. |
66 | 38 | ||
67 | config IP_DCCP_CCID3_DEBUG | 39 | config IP_DCCP_CCID3_DEBUG |
68 | bool "CCID3 debugging messages" | 40 | bool "CCID-3 debugging messages" |
69 | depends on IP_DCCP_CCID3 | 41 | depends on IP_DCCP_CCID3 |
70 | ---help--- | 42 | ---help--- |
71 | Enable CCID3-specific debugging messages. | 43 | Enable CCID-3 specific debugging messages. |
72 | 44 | ||
73 | When compiling CCID3 as a module, this debugging output can | 45 | The debugging output can additionally be toggled by setting the |
74 | additionally be toggled by setting the ccid3_debug module | 46 | ccid3_debug parameter to 0 or 1. |
75 | parameter to 0 or 1. | ||
76 | 47 | ||
77 | If in doubt, say N. | 48 | If in doubt, say N. |
78 | 49 | ||
79 | config IP_DCCP_CCID3_RTO | 50 | config IP_DCCP_CCID3_RTO |
80 | int "Use higher bound for nofeedback timer" | 51 | int "Use higher bound for nofeedback timer" |
@@ -108,12 +79,8 @@ config IP_DCCP_CCID3_RTO | |||
108 | therefore not be performed on WANs. | 79 | therefore not be performed on WANs. |
109 | 80 | ||
110 | config IP_DCCP_TFRC_LIB | 81 | config IP_DCCP_TFRC_LIB |
111 | tristate | 82 | def_bool y if IP_DCCP_CCID3 |
112 | default n | ||
113 | 83 | ||
114 | config IP_DCCP_TFRC_DEBUG | 84 | config IP_DCCP_TFRC_DEBUG |
115 | bool | 85 | def_bool y if IP_DCCP_CCID3_DEBUG |
116 | depends on IP_DCCP_TFRC_LIB | ||
117 | default y if IP_DCCP_CCID3_DEBUG | ||
118 | |||
119 | endmenu | 86 | endmenu |
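The CCID-3 help text above refers to TFRC's equation-based rate control: the allowed sending rate X follows the TCP throughput equation of RFC 3448/5348, which the TFRC library evaluates with a fixed-point lookup table (tfrc_calc_x() and tfrc_calc_x_reverse_lookup() in the tfrc_equation.c hunk further below). A floating-point, user-space illustration of the same formula, using the usual simplifications b = 1 and t_RTO = 4R; this is not the kernel's implementation:

#include <math.h>

/*
 * RFC 3448/5348 throughput equation:
 *   X = s / (R*sqrt(2p/3) + t_RTO * 3*sqrt(3p/8) * p * (1 + 32p^2))
 * s: segment size in bytes, R: round-trip time in seconds,
 * p: loss event rate (0 < p <= 1), t_RTO approximated as 4*R.
 */
static double tfrc_x_bytes_per_sec(double s, double R, double p)
{
	double t_rto = 4.0 * R;
	double denom = R * sqrt(2.0 * p / 3.0) +
		       t_rto * 3.0 * sqrt(3.0 * p / 8.0) * p * (1.0 + 32.0 * p * p);

	return s / denom;
}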
diff --git a/net/dccp/ccids/Makefile b/net/dccp/ccids/Makefile deleted file mode 100644 index 438f20bccff7..000000000000 --- a/net/dccp/ccids/Makefile +++ /dev/null | |||
@@ -1,9 +0,0 @@ | |||
1 | obj-$(CONFIG_IP_DCCP_CCID3) += dccp_ccid3.o | ||
2 | |||
3 | dccp_ccid3-y := ccid3.o | ||
4 | |||
5 | obj-$(CONFIG_IP_DCCP_CCID2) += dccp_ccid2.o | ||
6 | |||
7 | dccp_ccid2-y := ccid2.o | ||
8 | |||
9 | obj-y += lib/ | ||
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c index c9ea19a4d85e..d235294ace23 100644 --- a/net/dccp/ccids/ccid2.c +++ b/net/dccp/ccids/ccid2.c | |||
@@ -768,10 +768,9 @@ static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
768 | } | 768 | } |
769 | } | 769 | } |
770 | 770 | ||
771 | static struct ccid_operations ccid2 = { | 771 | struct ccid_operations ccid2_ops = { |
772 | .ccid_id = DCCPC_CCID2, | 772 | .ccid_id = DCCPC_CCID2, |
773 | .ccid_name = "TCP-like", | 773 | .ccid_name = "TCP-like", |
774 | .ccid_owner = THIS_MODULE, | ||
775 | .ccid_hc_tx_obj_size = sizeof(struct ccid2_hc_tx_sock), | 774 | .ccid_hc_tx_obj_size = sizeof(struct ccid2_hc_tx_sock), |
776 | .ccid_hc_tx_init = ccid2_hc_tx_init, | 775 | .ccid_hc_tx_init = ccid2_hc_tx_init, |
777 | .ccid_hc_tx_exit = ccid2_hc_tx_exit, | 776 | .ccid_hc_tx_exit = ccid2_hc_tx_exit, |
@@ -784,22 +783,5 @@ static struct ccid_operations ccid2 = { | |||
784 | 783 | ||
785 | #ifdef CONFIG_IP_DCCP_CCID2_DEBUG | 784 | #ifdef CONFIG_IP_DCCP_CCID2_DEBUG |
786 | module_param(ccid2_debug, bool, 0644); | 785 | module_param(ccid2_debug, bool, 0644); |
787 | MODULE_PARM_DESC(ccid2_debug, "Enable debug messages"); | 786 | MODULE_PARM_DESC(ccid2_debug, "Enable CCID-2 debug messages"); |
788 | #endif | 787 | #endif |
789 | |||
790 | static __init int ccid2_module_init(void) | ||
791 | { | ||
792 | return ccid_register(&ccid2); | ||
793 | } | ||
794 | module_init(ccid2_module_init); | ||
795 | |||
796 | static __exit void ccid2_module_exit(void) | ||
797 | { | ||
798 | ccid_unregister(&ccid2); | ||
799 | } | ||
800 | module_exit(ccid2_module_exit); | ||
801 | |||
802 | MODULE_AUTHOR("Andrea Bittau <a.bittau@cs.ucl.ac.uk>"); | ||
803 | MODULE_DESCRIPTION("DCCP TCP-Like (CCID2) CCID"); | ||
804 | MODULE_LICENSE("GPL"); | ||
805 | MODULE_ALIAS("net-dccp-ccid-2"); | ||
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c index 3b8bd7ca6761..a27b7f4c19c5 100644 --- a/net/dccp/ccids/ccid3.c +++ b/net/dccp/ccids/ccid3.c | |||
@@ -940,10 +940,9 @@ static int ccid3_hc_rx_getsockopt(struct sock *sk, const int optname, int len, | |||
940 | return 0; | 940 | return 0; |
941 | } | 941 | } |
942 | 942 | ||
943 | static struct ccid_operations ccid3 = { | 943 | struct ccid_operations ccid3_ops = { |
944 | .ccid_id = DCCPC_CCID3, | 944 | .ccid_id = DCCPC_CCID3, |
945 | .ccid_name = "TCP-Friendly Rate Control", | 945 | .ccid_name = "TCP-Friendly Rate Control", |
946 | .ccid_owner = THIS_MODULE, | ||
947 | .ccid_hc_tx_obj_size = sizeof(struct ccid3_hc_tx_sock), | 946 | .ccid_hc_tx_obj_size = sizeof(struct ccid3_hc_tx_sock), |
948 | .ccid_hc_tx_init = ccid3_hc_tx_init, | 947 | .ccid_hc_tx_init = ccid3_hc_tx_init, |
949 | .ccid_hc_tx_exit = ccid3_hc_tx_exit, | 948 | .ccid_hc_tx_exit = ccid3_hc_tx_exit, |
@@ -964,23 +963,5 @@ static struct ccid_operations ccid3 = { | |||
964 | 963 | ||
965 | #ifdef CONFIG_IP_DCCP_CCID3_DEBUG | 964 | #ifdef CONFIG_IP_DCCP_CCID3_DEBUG |
966 | module_param(ccid3_debug, bool, 0644); | 965 | module_param(ccid3_debug, bool, 0644); |
967 | MODULE_PARM_DESC(ccid3_debug, "Enable debug messages"); | 966 | MODULE_PARM_DESC(ccid3_debug, "Enable CCID-3 debug messages"); |
968 | #endif | 967 | #endif |
969 | |||
970 | static __init int ccid3_module_init(void) | ||
971 | { | ||
972 | return ccid_register(&ccid3); | ||
973 | } | ||
974 | module_init(ccid3_module_init); | ||
975 | |||
976 | static __exit void ccid3_module_exit(void) | ||
977 | { | ||
978 | ccid_unregister(&ccid3); | ||
979 | } | ||
980 | module_exit(ccid3_module_exit); | ||
981 | |||
982 | MODULE_AUTHOR("Ian McDonald <ian.mcdonald@jandi.co.nz>, " | ||
983 | "Arnaldo Carvalho de Melo <acme@ghostprotocols.net>"); | ||
984 | MODULE_DESCRIPTION("DCCP TFRC CCID3 CCID"); | ||
985 | MODULE_LICENSE("GPL"); | ||
986 | MODULE_ALIAS("net-dccp-ccid-3"); | ||
diff --git a/net/dccp/ccids/lib/Makefile b/net/dccp/ccids/lib/Makefile deleted file mode 100644 index 68c93e3d89dc..000000000000 --- a/net/dccp/ccids/lib/Makefile +++ /dev/null | |||
@@ -1,3 +0,0 @@ | |||
1 | obj-$(CONFIG_IP_DCCP_TFRC_LIB) += dccp_tfrc_lib.o | ||
2 | |||
3 | dccp_tfrc_lib-y := tfrc.o tfrc_equation.o packet_history.o loss_interval.o | ||
diff --git a/net/dccp/ccids/lib/loss_interval.c b/net/dccp/ccids/lib/loss_interval.c index 5b3ce0688c5c..4d1e40127264 100644 --- a/net/dccp/ccids/lib/loss_interval.c +++ b/net/dccp/ccids/lib/loss_interval.c | |||
@@ -60,7 +60,6 @@ void tfrc_lh_cleanup(struct tfrc_loss_hist *lh) | |||
60 | lh->ring[LIH_INDEX(lh->counter)] = NULL; | 60 | lh->ring[LIH_INDEX(lh->counter)] = NULL; |
61 | } | 61 | } |
62 | } | 62 | } |
63 | EXPORT_SYMBOL_GPL(tfrc_lh_cleanup); | ||
64 | 63 | ||
65 | static void tfrc_lh_calc_i_mean(struct tfrc_loss_hist *lh) | 64 | static void tfrc_lh_calc_i_mean(struct tfrc_loss_hist *lh) |
66 | { | 65 | { |
@@ -121,7 +120,6 @@ u8 tfrc_lh_update_i_mean(struct tfrc_loss_hist *lh, struct sk_buff *skb) | |||
121 | 120 | ||
122 | return (lh->i_mean < old_i_mean); | 121 | return (lh->i_mean < old_i_mean); |
123 | } | 122 | } |
124 | EXPORT_SYMBOL_GPL(tfrc_lh_update_i_mean); | ||
125 | 123 | ||
126 | /* Determine if `new_loss' does begin a new loss interval [RFC 4342, 10.2] */ | 124 | /* Determine if `new_loss' does begin a new loss interval [RFC 4342, 10.2] */ |
127 | static inline u8 tfrc_lh_is_new_loss(struct tfrc_loss_interval *cur, | 125 | static inline u8 tfrc_lh_is_new_loss(struct tfrc_loss_interval *cur, |
@@ -169,7 +167,6 @@ int tfrc_lh_interval_add(struct tfrc_loss_hist *lh, struct tfrc_rx_hist *rh, | |||
169 | } | 167 | } |
170 | return 1; | 168 | return 1; |
171 | } | 169 | } |
172 | EXPORT_SYMBOL_GPL(tfrc_lh_interval_add); | ||
173 | 170 | ||
174 | int __init tfrc_li_init(void) | 171 | int __init tfrc_li_init(void) |
175 | { | 172 | { |
diff --git a/net/dccp/ccids/lib/packet_history.c b/net/dccp/ccids/lib/packet_history.c index 6cc108afdc3b..b7785b3581ec 100644 --- a/net/dccp/ccids/lib/packet_history.c +++ b/net/dccp/ccids/lib/packet_history.c | |||
@@ -94,7 +94,6 @@ int tfrc_tx_hist_add(struct tfrc_tx_hist_entry **headp, u64 seqno) | |||
94 | *headp = entry; | 94 | *headp = entry; |
95 | return 0; | 95 | return 0; |
96 | } | 96 | } |
97 | EXPORT_SYMBOL_GPL(tfrc_tx_hist_add); | ||
98 | 97 | ||
99 | void tfrc_tx_hist_purge(struct tfrc_tx_hist_entry **headp) | 98 | void tfrc_tx_hist_purge(struct tfrc_tx_hist_entry **headp) |
100 | { | 99 | { |
@@ -109,7 +108,6 @@ void tfrc_tx_hist_purge(struct tfrc_tx_hist_entry **headp) | |||
109 | 108 | ||
110 | *headp = NULL; | 109 | *headp = NULL; |
111 | } | 110 | } |
112 | EXPORT_SYMBOL_GPL(tfrc_tx_hist_purge); | ||
113 | 111 | ||
114 | u32 tfrc_tx_hist_rtt(struct tfrc_tx_hist_entry *head, const u64 seqno, | 112 | u32 tfrc_tx_hist_rtt(struct tfrc_tx_hist_entry *head, const u64 seqno, |
115 | const ktime_t now) | 113 | const ktime_t now) |
@@ -127,7 +125,6 @@ u32 tfrc_tx_hist_rtt(struct tfrc_tx_hist_entry *head, const u64 seqno, | |||
127 | 125 | ||
128 | return rtt; | 126 | return rtt; |
129 | } | 127 | } |
130 | EXPORT_SYMBOL_GPL(tfrc_tx_hist_rtt); | ||
131 | 128 | ||
132 | 129 | ||
133 | /* | 130 | /* |
@@ -172,7 +169,6 @@ void tfrc_rx_hist_add_packet(struct tfrc_rx_hist *h, | |||
172 | 169 | ||
173 | tfrc_rx_hist_entry_from_skb(entry, skb, ndp); | 170 | tfrc_rx_hist_entry_from_skb(entry, skb, ndp); |
174 | } | 171 | } |
175 | EXPORT_SYMBOL_GPL(tfrc_rx_hist_add_packet); | ||
176 | 172 | ||
177 | /* has the packet contained in skb been seen before? */ | 173 | /* has the packet contained in skb been seen before? */ |
178 | int tfrc_rx_hist_duplicate(struct tfrc_rx_hist *h, struct sk_buff *skb) | 174 | int tfrc_rx_hist_duplicate(struct tfrc_rx_hist *h, struct sk_buff *skb) |
@@ -189,7 +185,6 @@ int tfrc_rx_hist_duplicate(struct tfrc_rx_hist *h, struct sk_buff *skb) | |||
189 | 185 | ||
190 | return 0; | 186 | return 0; |
191 | } | 187 | } |
192 | EXPORT_SYMBOL_GPL(tfrc_rx_hist_duplicate); | ||
193 | 188 | ||
194 | static void tfrc_rx_hist_swap(struct tfrc_rx_hist *h, const u8 a, const u8 b) | 189 | static void tfrc_rx_hist_swap(struct tfrc_rx_hist *h, const u8 a, const u8 b) |
195 | { | 190 | { |
@@ -390,7 +385,6 @@ int tfrc_rx_handle_loss(struct tfrc_rx_hist *h, | |||
390 | } | 385 | } |
391 | return is_new_loss; | 386 | return is_new_loss; |
392 | } | 387 | } |
393 | EXPORT_SYMBOL_GPL(tfrc_rx_handle_loss); | ||
394 | 388 | ||
395 | int tfrc_rx_hist_alloc(struct tfrc_rx_hist *h) | 389 | int tfrc_rx_hist_alloc(struct tfrc_rx_hist *h) |
396 | { | 390 | { |
@@ -412,7 +406,6 @@ out_free: | |||
412 | } | 406 | } |
413 | return -ENOBUFS; | 407 | return -ENOBUFS; |
414 | } | 408 | } |
415 | EXPORT_SYMBOL_GPL(tfrc_rx_hist_alloc); | ||
416 | 409 | ||
417 | void tfrc_rx_hist_purge(struct tfrc_rx_hist *h) | 410 | void tfrc_rx_hist_purge(struct tfrc_rx_hist *h) |
418 | { | 411 | { |
@@ -424,7 +417,6 @@ void tfrc_rx_hist_purge(struct tfrc_rx_hist *h) | |||
424 | h->ring[i] = NULL; | 417 | h->ring[i] = NULL; |
425 | } | 418 | } |
426 | } | 419 | } |
427 | EXPORT_SYMBOL_GPL(tfrc_rx_hist_purge); | ||
428 | 420 | ||
429 | /** | 421 | /** |
430 | * tfrc_rx_hist_rtt_last_s - reference entry to compute RTT samples against | 422 | * tfrc_rx_hist_rtt_last_s - reference entry to compute RTT samples against |
@@ -495,4 +487,3 @@ keep_ref_for_next_time: | |||
495 | 487 | ||
496 | return sample; | 488 | return sample; |
497 | } | 489 | } |
498 | EXPORT_SYMBOL_GPL(tfrc_rx_hist_sample_rtt); | ||
diff --git a/net/dccp/ccids/lib/tfrc.c b/net/dccp/ccids/lib/tfrc.c index 185916218e07..60c412ccfeef 100644 --- a/net/dccp/ccids/lib/tfrc.c +++ b/net/dccp/ccids/lib/tfrc.c | |||
@@ -1,20 +1,18 @@ | |||
1 | /* | 1 | /* |
2 | * TFRC: main module holding the pieces of the TFRC library together | 2 | * TFRC library initialisation |
3 | * | 3 | * |
4 | * Copyright (c) 2007 The University of Aberdeen, Scotland, UK | 4 | * Copyright (c) 2007 The University of Aberdeen, Scotland, UK |
5 | * Copyright (c) 2007 Arnaldo Carvalho de Melo <acme@redhat.com> | 5 | * Copyright (c) 2007 Arnaldo Carvalho de Melo <acme@redhat.com> |
6 | */ | 6 | */ |
7 | #include <linux/module.h> | ||
8 | #include <linux/moduleparam.h> | ||
9 | #include "tfrc.h" | 7 | #include "tfrc.h" |
10 | 8 | ||
11 | #ifdef CONFIG_IP_DCCP_TFRC_DEBUG | 9 | #ifdef CONFIG_IP_DCCP_TFRC_DEBUG |
12 | int tfrc_debug; | 10 | int tfrc_debug; |
13 | module_param(tfrc_debug, bool, 0644); | 11 | module_param(tfrc_debug, bool, 0644); |
14 | MODULE_PARM_DESC(tfrc_debug, "Enable debug messages"); | 12 | MODULE_PARM_DESC(tfrc_debug, "Enable TFRC debug messages"); |
15 | #endif | 13 | #endif |
16 | 14 | ||
17 | static int __init tfrc_module_init(void) | 15 | int __init tfrc_lib_init(void) |
18 | { | 16 | { |
19 | int rc = tfrc_li_init(); | 17 | int rc = tfrc_li_init(); |
20 | 18 | ||
@@ -38,18 +36,9 @@ out: | |||
38 | return rc; | 36 | return rc; |
39 | } | 37 | } |
40 | 38 | ||
41 | static void __exit tfrc_module_exit(void) | 39 | void __exit tfrc_lib_exit(void) |
42 | { | 40 | { |
43 | tfrc_rx_packet_history_exit(); | 41 | tfrc_rx_packet_history_exit(); |
44 | tfrc_tx_packet_history_exit(); | 42 | tfrc_tx_packet_history_exit(); |
45 | tfrc_li_exit(); | 43 | tfrc_li_exit(); |
46 | } | 44 | } |
47 | |||
48 | module_init(tfrc_module_init); | ||
49 | module_exit(tfrc_module_exit); | ||
50 | |||
51 | MODULE_AUTHOR("Gerrit Renker <gerrit@erg.abdn.ac.uk>, " | ||
52 | "Ian McDonald <ian.mcdonald@jandi.co.nz>, " | ||
53 | "Arnaldo Carvalho de Melo <acme@redhat.com>"); | ||
54 | MODULE_DESCRIPTION("DCCP TFRC library"); | ||
55 | MODULE_LICENSE("GPL"); | ||
diff --git a/net/dccp/ccids/lib/tfrc.h b/net/dccp/ccids/lib/tfrc.h index ed9857527acf..e9720b143275 100644 --- a/net/dccp/ccids/lib/tfrc.h +++ b/net/dccp/ccids/lib/tfrc.h | |||
@@ -17,7 +17,8 @@ | |||
17 | #include <linux/types.h> | 17 | #include <linux/types.h> |
18 | #include <linux/math64.h> | 18 | #include <linux/math64.h> |
19 | #include "../../dccp.h" | 19 | #include "../../dccp.h" |
20 | /* internal includes that this module exports: */ | 20 | |
21 | /* internal includes that this library exports: */ | ||
21 | #include "loss_interval.h" | 22 | #include "loss_interval.h" |
22 | #include "packet_history.h" | 23 | #include "packet_history.h" |
23 | 24 | ||
@@ -66,4 +67,12 @@ extern void tfrc_rx_packet_history_exit(void); | |||
66 | 67 | ||
67 | extern int tfrc_li_init(void); | 68 | extern int tfrc_li_init(void); |
68 | extern void tfrc_li_exit(void); | 69 | extern void tfrc_li_exit(void); |
70 | |||
71 | #ifdef CONFIG_IP_DCCP_TFRC_LIB | ||
72 | extern int tfrc_lib_init(void); | ||
73 | extern void tfrc_lib_exit(void); | ||
74 | #else | ||
75 | #define tfrc_lib_init() (0) | ||
76 | #define tfrc_lib_exit() | ||
77 | #endif | ||
69 | #endif /* _TFRC_H_ */ | 78 | #endif /* _TFRC_H_ */ |
diff --git a/net/dccp/ccids/lib/tfrc_equation.c b/net/dccp/ccids/lib/tfrc_equation.c index 2f20a29cffe4..c5d3a9e5a5a4 100644 --- a/net/dccp/ccids/lib/tfrc_equation.c +++ b/net/dccp/ccids/lib/tfrc_equation.c | |||
@@ -659,8 +659,6 @@ u32 tfrc_calc_x(u16 s, u32 R, u32 p) | |||
659 | return scaled_div32(result, f); | 659 | return scaled_div32(result, f); |
660 | } | 660 | } |
661 | 661 | ||
662 | EXPORT_SYMBOL_GPL(tfrc_calc_x); | ||
663 | |||
664 | /** | 662 | /** |
665 | * tfrc_calc_x_reverse_lookup - try to find p given f(p) | 663 | * tfrc_calc_x_reverse_lookup - try to find p given f(p) |
666 | * | 664 | * |
@@ -693,5 +691,3 @@ u32 tfrc_calc_x_reverse_lookup(u32 fvalue) | |||
693 | index = tfrc_binsearch(fvalue, 0); | 691 | index = tfrc_binsearch(fvalue, 0); |
694 | return (index + 1) * 1000000 / TFRC_CALC_X_ARRSIZE; | 692 | return (index + 1) * 1000000 / TFRC_CALC_X_ARRSIZE; |
695 | } | 693 | } |
696 | |||
697 | EXPORT_SYMBOL_GPL(tfrc_calc_x_reverse_lookup); | ||
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h index 0bc4c9a02e19..f2230fc168e1 100644 --- a/net/dccp/dccp.h +++ b/net/dccp/dccp.h | |||
@@ -432,10 +432,8 @@ static inline int dccp_ack_pending(const struct sock *sk) | |||
432 | { | 432 | { |
433 | const struct dccp_sock *dp = dccp_sk(sk); | 433 | const struct dccp_sock *dp = dccp_sk(sk); |
434 | return dp->dccps_timestamp_echo != 0 || | 434 | return dp->dccps_timestamp_echo != 0 || |
435 | #ifdef CONFIG_IP_DCCP_ACKVEC | ||
436 | (dp->dccps_hc_rx_ackvec != NULL && | 435 | (dp->dccps_hc_rx_ackvec != NULL && |
437 | dccp_ackvec_pending(dp->dccps_hc_rx_ackvec)) || | 436 | dccp_ackvec_pending(dp->dccps_hc_rx_ackvec)) || |
438 | #endif | ||
439 | inet_csk_ack_scheduled(sk); | 437 | inet_csk_ack_scheduled(sk); |
440 | } | 438 | } |
441 | 439 | ||
diff --git a/net/dccp/feat.c b/net/dccp/feat.c index 30f9fb76b921..4152308958ab 100644 --- a/net/dccp/feat.c +++ b/net/dccp/feat.c | |||
@@ -34,7 +34,7 @@ | |||
34 | static int dccp_hdlr_ccid(struct sock *sk, u64 ccid, bool rx) | 34 | static int dccp_hdlr_ccid(struct sock *sk, u64 ccid, bool rx) |
35 | { | 35 | { |
36 | struct dccp_sock *dp = dccp_sk(sk); | 36 | struct dccp_sock *dp = dccp_sk(sk); |
37 | struct ccid *new_ccid = ccid_new(ccid, sk, rx, gfp_any()); | 37 | struct ccid *new_ccid = ccid_new(ccid, sk, rx); |
38 | 38 | ||
39 | if (new_ccid == NULL) | 39 | if (new_ccid == NULL) |
40 | return -ENOMEM; | 40 | return -ENOMEM; |
@@ -1214,8 +1214,6 @@ const char *dccp_feat_typename(const u8 type) | |||
1214 | return NULL; | 1214 | return NULL; |
1215 | } | 1215 | } |
1216 | 1216 | ||
1217 | EXPORT_SYMBOL_GPL(dccp_feat_typename); | ||
1218 | |||
1219 | const char *dccp_feat_name(const u8 feat) | 1217 | const char *dccp_feat_name(const u8 feat) |
1220 | { | 1218 | { |
1221 | static const char *feature_names[] = { | 1219 | static const char *feature_names[] = { |
@@ -1240,6 +1238,4 @@ const char *dccp_feat_name(const u8 feat) | |||
1240 | 1238 | ||
1241 | return feature_names[feat]; | 1239 | return feature_names[feat]; |
1242 | } | 1240 | } |
1243 | |||
1244 | EXPORT_SYMBOL_GPL(dccp_feat_name); | ||
1245 | #endif /* CONFIG_IP_DCCP_DEBUG */ | 1241 | #endif /* CONFIG_IP_DCCP_DEBUG */ |
diff --git a/net/dccp/input.c b/net/dccp/input.c index 5eb443f656c1..7648f316310f 100644 --- a/net/dccp/input.c +++ b/net/dccp/input.c | |||
@@ -741,5 +741,3 @@ u32 dccp_sample_rtt(struct sock *sk, long delta) | |||
741 | 741 | ||
742 | return delta; | 742 | return delta; |
743 | } | 743 | } |
744 | |||
745 | EXPORT_SYMBOL_GPL(dccp_sample_rtt); | ||
diff --git a/net/dccp/proto.c b/net/dccp/proto.c index 1747ccae8e8d..945b4d5d23b3 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c | |||
@@ -1118,9 +1118,15 @@ static int __init dccp_init(void) | |||
1118 | if (rc) | 1118 | if (rc) |
1119 | goto out_ackvec_exit; | 1119 | goto out_ackvec_exit; |
1120 | 1120 | ||
1121 | rc = ccid_initialize_builtins(); | ||
1122 | if (rc) | ||
1123 | goto out_sysctl_exit; | ||
1124 | |||
1121 | dccp_timestamping_init(); | 1125 | dccp_timestamping_init(); |
1122 | out: | 1126 | out: |
1123 | return rc; | 1127 | return rc; |
1128 | out_sysctl_exit: | ||
1129 | dccp_sysctl_exit(); | ||
1124 | out_ackvec_exit: | 1130 | out_ackvec_exit: |
1125 | dccp_ackvec_exit(); | 1131 | dccp_ackvec_exit(); |
1126 | out_free_dccp_mib: | 1132 | out_free_dccp_mib: |
@@ -1143,6 +1149,7 @@ out_free_percpu: | |||
1143 | 1149 | ||
1144 | static void __exit dccp_fini(void) | 1150 | static void __exit dccp_fini(void) |
1145 | { | 1151 | { |
1152 | ccid_cleanup_builtins(); | ||
1146 | dccp_mib_exit(); | 1153 | dccp_mib_exit(); |
1147 | free_pages((unsigned long)dccp_hashinfo.bhash, | 1154 | free_pages((unsigned long)dccp_hashinfo.bhash, |
1148 | get_order(dccp_hashinfo.bhash_size * | 1155 | get_order(dccp_hashinfo.bhash_size * |
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index f28acf11fc67..35bcddf8a932 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -580,10 +580,6 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos, | |||
580 | else if (!ret) { | 580 | else if (!ret) { |
581 | if (spliced) | 581 | if (spliced) |
582 | break; | 582 | break; |
583 | if (flags & SPLICE_F_NONBLOCK) { | ||
584 | ret = -EAGAIN; | ||
585 | break; | ||
586 | } | ||
587 | if (sock_flag(sk, SOCK_DONE)) | 583 | if (sock_flag(sk, SOCK_DONE)) |
588 | break; | 584 | break; |
589 | if (sk->sk_err) { | 585 | if (sk->sk_err) { |
@@ -2519,9 +2515,7 @@ found: | |||
2519 | flush |= memcmp(th + 1, th2 + 1, thlen - sizeof(*th)); | 2515 | flush |= memcmp(th + 1, th2 + 1, thlen - sizeof(*th)); |
2520 | 2516 | ||
2521 | total = p->len; | 2517 | total = p->len; |
2522 | mss = total; | 2518 | mss = skb_shinfo(p)->gso_size; |
2523 | if (skb_shinfo(p)->frag_list) | ||
2524 | mss = skb_shinfo(p)->frag_list->len; | ||
2525 | 2519 | ||
2526 | flush |= skb->len > mss || skb->len <= 0; | 2520 | flush |= skb->len > mss || skb->len <= 0; |
2527 | flush |= ntohl(th2->seq) + total != ntohl(th->seq); | 2521 | flush |= ntohl(th2->seq) + total != ntohl(th->seq); |
@@ -2557,7 +2551,6 @@ int tcp_gro_complete(struct sk_buff *skb) | |||
2557 | skb->csum_offset = offsetof(struct tcphdr, check); | 2551 | skb->csum_offset = offsetof(struct tcphdr, check); |
2558 | skb->ip_summed = CHECKSUM_PARTIAL; | 2552 | skb->ip_summed = CHECKSUM_PARTIAL; |
2559 | 2553 | ||
2560 | skb_shinfo(skb)->gso_size = skb_shinfo(skb)->frag_list->len; | ||
2561 | skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count; | 2554 | skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count; |
2562 | 2555 | ||
2563 | if (th->cwr) | 2556 | if (th->cwr) |
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index eeeaad2e8b5c..40f324655e24 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c | |||
@@ -404,7 +404,7 @@ sticky_done: | |||
404 | else if (optlen < sizeof(struct in6_pktinfo) || optval == NULL) | 404 | else if (optlen < sizeof(struct in6_pktinfo) || optval == NULL) |
405 | goto e_inval; | 405 | goto e_inval; |
406 | 406 | ||
407 | if (copy_from_user(&pkt, optval, optlen)) { | 407 | if (copy_from_user(&pkt, optval, sizeof(struct in6_pktinfo))) { |
408 | retv = -EFAULT; | 408 | retv = -EFAULT; |
409 | break; | 409 | break; |
410 | } | 410 | } |
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 18c486cf4987..76f06b94ab9f 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -627,6 +627,9 @@ static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, struct in6_addr *dad | |||
627 | rt = ip6_rt_copy(ort); | 627 | rt = ip6_rt_copy(ort); |
628 | 628 | ||
629 | if (rt) { | 629 | if (rt) { |
630 | struct neighbour *neigh; | ||
631 | int attempts = !in_softirq(); | ||
632 | |||
630 | if (!(rt->rt6i_flags&RTF_GATEWAY)) { | 633 | if (!(rt->rt6i_flags&RTF_GATEWAY)) { |
631 | if (rt->rt6i_dst.plen != 128 && | 634 | if (rt->rt6i_dst.plen != 128 && |
632 | ipv6_addr_equal(&rt->rt6i_dst.addr, daddr)) | 635 | ipv6_addr_equal(&rt->rt6i_dst.addr, daddr)) |
@@ -646,7 +649,35 @@ static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, struct in6_addr *dad | |||
646 | } | 649 | } |
647 | #endif | 650 | #endif |
648 | 651 | ||
649 | rt->rt6i_nexthop = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway); | 652 | retry: |
653 | neigh = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway); | ||
654 | if (IS_ERR(neigh)) { | ||
655 | struct net *net = dev_net(rt->rt6i_dev); | ||
656 | int saved_rt_min_interval = | ||
657 | net->ipv6.sysctl.ip6_rt_gc_min_interval; | ||
658 | int saved_rt_elasticity = | ||
659 | net->ipv6.sysctl.ip6_rt_gc_elasticity; | ||
660 | |||
661 | if (attempts-- > 0) { | ||
662 | net->ipv6.sysctl.ip6_rt_gc_elasticity = 1; | ||
663 | net->ipv6.sysctl.ip6_rt_gc_min_interval = 0; | ||
664 | |||
665 | ip6_dst_gc(net->ipv6.ip6_dst_ops); | ||
666 | |||
667 | net->ipv6.sysctl.ip6_rt_gc_elasticity = | ||
668 | saved_rt_elasticity; | ||
669 | net->ipv6.sysctl.ip6_rt_gc_min_interval = | ||
670 | saved_rt_min_interval; | ||
671 | goto retry; | ||
672 | } | ||
673 | |||
674 | if (net_ratelimit()) | ||
675 | printk(KERN_WARNING | ||
676 | "Neighbour table overflow.\n"); | ||
677 | dst_free(&rt->u.dst); | ||
678 | return NULL; | ||
679 | } | ||
680 | rt->rt6i_nexthop = neigh; | ||
650 | 681 | ||
651 | } | 682 | } |
652 | 683 | ||
@@ -945,8 +976,11 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev, | |||
945 | dev_hold(dev); | 976 | dev_hold(dev); |
946 | if (neigh) | 977 | if (neigh) |
947 | neigh_hold(neigh); | 978 | neigh_hold(neigh); |
948 | else | 979 | else { |
949 | neigh = ndisc_get_neigh(dev, addr); | 980 | neigh = ndisc_get_neigh(dev, addr); |
981 | if (IS_ERR(neigh)) | ||
982 | neigh = NULL; | ||
983 | } | ||
950 | 984 | ||
951 | rt->rt6i_dev = dev; | 985 | rt->rt6i_dev = dev; |
952 | rt->rt6i_idev = idev; | 986 | rt->rt6i_idev = idev; |
@@ -1887,6 +1921,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev, | |||
1887 | { | 1921 | { |
1888 | struct net *net = dev_net(idev->dev); | 1922 | struct net *net = dev_net(idev->dev); |
1889 | struct rt6_info *rt = ip6_dst_alloc(net->ipv6.ip6_dst_ops); | 1923 | struct rt6_info *rt = ip6_dst_alloc(net->ipv6.ip6_dst_ops); |
1924 | struct neighbour *neigh; | ||
1890 | 1925 | ||
1891 | if (rt == NULL) | 1926 | if (rt == NULL) |
1892 | return ERR_PTR(-ENOMEM); | 1927 | return ERR_PTR(-ENOMEM); |
@@ -1909,11 +1944,18 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev, | |||
1909 | rt->rt6i_flags |= RTF_ANYCAST; | 1944 | rt->rt6i_flags |= RTF_ANYCAST; |
1910 | else | 1945 | else |
1911 | rt->rt6i_flags |= RTF_LOCAL; | 1946 | rt->rt6i_flags |= RTF_LOCAL; |
1912 | rt->rt6i_nexthop = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway); | 1947 | neigh = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway); |
1913 | if (rt->rt6i_nexthop == NULL) { | 1948 | if (IS_ERR(neigh)) { |
1914 | dst_free(&rt->u.dst); | 1949 | dst_free(&rt->u.dst); |
1915 | return ERR_PTR(-ENOMEM); | 1950 | |
1951 | /* We are casting this because that is the return | ||
1952 | * value type. But an errno encoded pointer is the | ||
1953 | * same regardless of the underlying pointer type, | ||
1954 | * and that's what we are returning. So this is OK. | ||
1955 | */ | ||
1956 | return (struct rt6_info *) neigh; | ||
1916 | } | 1957 | } |
1958 | rt->rt6i_nexthop = neigh; | ||
1917 | 1959 | ||
1918 | ipv6_addr_copy(&rt->rt6i_dst.addr, addr); | 1960 | ipv6_addr_copy(&rt->rt6i_dst.addr, addr); |
1919 | rt->rt6i_dst.plen = 128; | 1961 | rt->rt6i_dst.plen = 128; |
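The "We are casting this ..." comment in the addrconf_dst_alloc() hunk above relies on the kernel's errno-encoded pointer convention from <linux/err.h>. A minimal illustration of that convention, assuming the usual kernel headers; the function below is an example, not code from this patch:

#include <linux/err.h>

/*
 * ERR_PTR() stores a negative errno in a pointer value, IS_ERR() detects it,
 * and PTR_ERR() recovers the errno.  The encoding does not depend on the
 * pointee type, so the value can be cast to whatever pointer type the caller
 * expects and returned unchanged, as the patch does with the neighbour.
 */
static struct rt6_info *example_return_encoded_err(struct neighbour *neigh)
{
	/* assume a lookup produced either a real neighbour or ERR_PTR(-errno) */
	if (IS_ERR(neigh))
		return (struct rt6_info *)neigh;	/* same errno, new pointer type */

	return NULL;	/* real route construction elided in this sketch */
}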
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index af3192d2a5a3..eb8a2a0b6eb7 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c | |||
@@ -494,7 +494,21 @@ static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr, | |||
494 | if (err) { | 494 | if (err) { |
495 | iucv_path_free(iucv->path); | 495 | iucv_path_free(iucv->path); |
496 | iucv->path = NULL; | 496 | iucv->path = NULL; |
497 | err = -ECONNREFUSED; | 497 | switch (err) { |
498 | case 0x0b: /* Target communicator is not logged on */ | ||
499 | err = -ENETUNREACH; | ||
500 | break; | ||
501 | case 0x0d: /* Max connections for this guest exceeded */ | ||
502 | case 0x0e: /* Max connections for target guest exceeded */ | ||
503 | err = -EAGAIN; | ||
504 | break; | ||
505 | case 0x0f: /* Missing IUCV authorization */ | ||
506 | err = -EACCES; | ||
507 | break; | ||
508 | default: | ||
509 | err = -ECONNREFUSED; | ||
510 | break; | ||
511 | } | ||
498 | goto done; | 512 | goto done; |
499 | } | 513 | } |
500 | 514 | ||
@@ -507,6 +521,13 @@ static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr, | |||
507 | release_sock(sk); | 521 | release_sock(sk); |
508 | return -ECONNREFUSED; | 522 | return -ECONNREFUSED; |
509 | } | 523 | } |
524 | |||
525 | if (err) { | ||
526 | iucv_path_sever(iucv->path, NULL); | ||
527 | iucv_path_free(iucv->path); | ||
528 | iucv->path = NULL; | ||
529 | } | ||
530 | |||
510 | done: | 531 | done: |
511 | release_sock(sk); | 532 | release_sock(sk); |
512 | return err; | 533 | return err; |
@@ -1021,12 +1042,14 @@ static int iucv_callback_connreq(struct iucv_path *path, | |||
1021 | ASCEBC(user_data, sizeof(user_data)); | 1042 | ASCEBC(user_data, sizeof(user_data)); |
1022 | if (sk->sk_state != IUCV_LISTEN) { | 1043 | if (sk->sk_state != IUCV_LISTEN) { |
1023 | err = iucv_path_sever(path, user_data); | 1044 | err = iucv_path_sever(path, user_data); |
1045 | iucv_path_free(path); | ||
1024 | goto fail; | 1046 | goto fail; |
1025 | } | 1047 | } |
1026 | 1048 | ||
1027 | /* Check for backlog size */ | 1049 | /* Check for backlog size */ |
1028 | if (sk_acceptq_is_full(sk)) { | 1050 | if (sk_acceptq_is_full(sk)) { |
1029 | err = iucv_path_sever(path, user_data); | 1051 | err = iucv_path_sever(path, user_data); |
1052 | iucv_path_free(path); | ||
1030 | goto fail; | 1053 | goto fail; |
1031 | } | 1054 | } |
1032 | 1055 | ||
@@ -1034,6 +1057,7 @@ static int iucv_callback_connreq(struct iucv_path *path, | |||
1034 | nsk = iucv_sock_alloc(NULL, SOCK_STREAM, GFP_ATOMIC); | 1057 | nsk = iucv_sock_alloc(NULL, SOCK_STREAM, GFP_ATOMIC); |
1035 | if (!nsk) { | 1058 | if (!nsk) { |
1036 | err = iucv_path_sever(path, user_data); | 1059 | err = iucv_path_sever(path, user_data); |
1060 | iucv_path_free(path); | ||
1037 | goto fail; | 1061 | goto fail; |
1038 | } | 1062 | } |
1039 | 1063 | ||
@@ -1057,6 +1081,8 @@ static int iucv_callback_connreq(struct iucv_path *path, | |||
1057 | err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk); | 1081 | err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk); |
1058 | if (err) { | 1082 | if (err) { |
1059 | err = iucv_path_sever(path, user_data); | 1083 | err = iucv_path_sever(path, user_data); |
1084 | iucv_path_free(path); | ||
1085 | iucv_sock_kill(nsk); | ||
1060 | goto fail; | 1086 | goto fail; |
1061 | } | 1087 | } |
1062 | 1088 | ||
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c index 8f57d4f4328a..032f61e98595 100644 --- a/net/iucv/iucv.c +++ b/net/iucv/iucv.c | |||
@@ -517,6 +517,7 @@ static int iucv_enable(void) | |||
517 | size_t alloc_size; | 517 | size_t alloc_size; |
518 | int cpu, rc; | 518 | int cpu, rc; |
519 | 519 | ||
520 | get_online_cpus(); | ||
520 | rc = -ENOMEM; | 521 | rc = -ENOMEM; |
521 | alloc_size = iucv_max_pathid * sizeof(struct iucv_path); | 522 | alloc_size = iucv_max_pathid * sizeof(struct iucv_path); |
522 | iucv_path_table = kzalloc(alloc_size, GFP_KERNEL); | 523 | iucv_path_table = kzalloc(alloc_size, GFP_KERNEL); |
@@ -524,19 +525,17 @@ static int iucv_enable(void) | |||
524 | goto out; | 525 | goto out; |
525 | /* Declare per cpu buffers. */ | 526 | /* Declare per cpu buffers. */ |
526 | rc = -EIO; | 527 | rc = -EIO; |
527 | get_online_cpus(); | ||
528 | for_each_online_cpu(cpu) | 528 | for_each_online_cpu(cpu) |
529 | smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1); | 529 | smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1); |
530 | if (cpus_empty(iucv_buffer_cpumask)) | 530 | if (cpus_empty(iucv_buffer_cpumask)) |
531 | /* No cpu could declare an iucv buffer. */ | 531 | /* No cpu could declare an iucv buffer. */ |
532 | goto out_path; | 532 | goto out; |
533 | put_online_cpus(); | 533 | put_online_cpus(); |
534 | return 0; | 534 | return 0; |
535 | |||
536 | out_path: | ||
537 | put_online_cpus(); | ||
538 | kfree(iucv_path_table); | ||
539 | out: | 535 | out: |
536 | kfree(iucv_path_table); | ||
537 | iucv_path_table = NULL; | ||
538 | put_online_cpus(); | ||
540 | return rc; | 539 | return rc; |
541 | } | 540 | } |
542 | 541 | ||
@@ -551,8 +550,9 @@ static void iucv_disable(void) | |||
551 | { | 550 | { |
552 | get_online_cpus(); | 551 | get_online_cpus(); |
553 | on_each_cpu(iucv_retrieve_cpu, NULL, 1); | 552 | on_each_cpu(iucv_retrieve_cpu, NULL, 1); |
554 | put_online_cpus(); | ||
555 | kfree(iucv_path_table); | 553 | kfree(iucv_path_table); |
554 | iucv_path_table = NULL; | ||
555 | put_online_cpus(); | ||
556 | } | 556 | } |
557 | 557 | ||
558 | static int __cpuinit iucv_cpu_notify(struct notifier_block *self, | 558 | static int __cpuinit iucv_cpu_notify(struct notifier_block *self, |
@@ -589,10 +589,14 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self, | |||
589 | case CPU_ONLINE_FROZEN: | 589 | case CPU_ONLINE_FROZEN: |
590 | case CPU_DOWN_FAILED: | 590 | case CPU_DOWN_FAILED: |
591 | case CPU_DOWN_FAILED_FROZEN: | 591 | case CPU_DOWN_FAILED_FROZEN: |
592 | if (!iucv_path_table) | ||
593 | break; | ||
592 | smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1); | 594 | smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1); |
593 | break; | 595 | break; |
594 | case CPU_DOWN_PREPARE: | 596 | case CPU_DOWN_PREPARE: |
595 | case CPU_DOWN_PREPARE_FROZEN: | 597 | case CPU_DOWN_PREPARE_FROZEN: |
598 | if (!iucv_path_table) | ||
599 | break; | ||
596 | cpumask = iucv_buffer_cpumask; | 600 | cpumask = iucv_buffer_cpumask; |
597 | cpu_clear(cpu, cpumask); | 601 | cpu_clear(cpu, cpumask); |
598 | if (cpus_empty(cpumask)) | 602 | if (cpus_empty(cpumask)) |
diff --git a/net/rfkill/rfkill.c b/net/rfkill/rfkill.c index 3c94f76d5525..3eaa39403c13 100644 --- a/net/rfkill/rfkill.c +++ b/net/rfkill/rfkill.c | |||
@@ -54,10 +54,10 @@ static unsigned long rfkill_states_lockdflt[BITS_TO_LONGS(RFKILL_TYPE_MAX)]; | |||
54 | static bool rfkill_epo_lock_active; | 54 | static bool rfkill_epo_lock_active; |
55 | 55 | ||
56 | 56 | ||
57 | #ifdef CONFIG_RFKILL_LEDS | ||
57 | static void rfkill_led_trigger(struct rfkill *rfkill, | 58 | static void rfkill_led_trigger(struct rfkill *rfkill, |
58 | enum rfkill_state state) | 59 | enum rfkill_state state) |
59 | { | 60 | { |
60 | #ifdef CONFIG_RFKILL_LEDS | ||
61 | struct led_trigger *led = &rfkill->led_trigger; | 61 | struct led_trigger *led = &rfkill->led_trigger; |
62 | 62 | ||
63 | if (!led->name) | 63 | if (!led->name) |
@@ -66,10 +66,8 @@ static void rfkill_led_trigger(struct rfkill *rfkill, | |||
66 | led_trigger_event(led, LED_OFF); | 66 | led_trigger_event(led, LED_OFF); |
67 | else | 67 | else |
68 | led_trigger_event(led, LED_FULL); | 68 | led_trigger_event(led, LED_FULL); |
69 | #endif /* CONFIG_RFKILL_LEDS */ | ||
70 | } | 69 | } |
71 | 70 | ||
72 | #ifdef CONFIG_RFKILL_LEDS | ||
73 | static void rfkill_led_trigger_activate(struct led_classdev *led) | 71 | static void rfkill_led_trigger_activate(struct led_classdev *led) |
74 | { | 72 | { |
75 | struct rfkill *rfkill = container_of(led->trigger, | 73 | struct rfkill *rfkill = container_of(led->trigger, |
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index 05d178008cbc..07372f60bee3 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c | |||
@@ -638,8 +638,9 @@ static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle, | |||
638 | break; | 638 | break; |
639 | 639 | ||
640 | n->next = *ins; | 640 | n->next = *ins; |
641 | wmb(); | 641 | tcf_tree_lock(tp); |
642 | *ins = n; | 642 | *ins = n; |
643 | tcf_tree_unlock(tp); | ||
643 | 644 | ||
644 | *arg = (unsigned long)n; | 645 | *arg = (unsigned long)n; |
645 | return 0; | 646 | return 0; |